1diff --git a/Makefile b/Makefile
2index 64db4e99e..937095621 100644
3--- a/Makefile
4+++ b/Makefile
5@@ -493,14 +493,16 @@ LINUXINCLUDE    := \
6 		-I$(objtree)/arch/$(SRCARCH)/include/generated \
7 		$(if $(building_out_of_srctree),-I$(srctree)/include) \
8 		-I$(objtree)/include \
9-		$(USERINCLUDE)
10+		$(USERINCLUDE) \
11+		-I$(srctree)/vendor/include \
12+		-I$(srctree)/vendor/include/linux
13 
14 KBUILD_AFLAGS   := -D__ASSEMBLY__ -fno-PIE
15 KBUILD_CFLAGS   := -Wall -Wundef -Werror=strict-prototypes -Wno-trigraphs \
16 		   -fno-strict-aliasing -fno-common -fshort-wchar -fno-PIE \
17 		   -Werror=implicit-function-declaration -Werror=implicit-int \
18 		   -Werror=return-type -Wno-format-security \
19-		   -std=gnu89
20+		   -std=gnu99
21 KBUILD_CPPFLAGS := -D__KERNEL__
22 KBUILD_AFLAGS_KERNEL :=
23 KBUILD_CFLAGS_KERNEL :=
24@@ -956,9 +958,6 @@ endif
25 # arch Makefile may override CC so keep this after arch Makefile is included
26 NOSTDINC_FLAGS += -nostdinc -isystem $(shell $(CC) -print-file-name=include)
27 
28-# warn about C99 declaration after statement
29-KBUILD_CFLAGS += -Wdeclaration-after-statement
30-
31 # Variable Length Arrays (VLAs) should not be used anywhere in the kernel
32 KBUILD_CFLAGS += -Wvla
33 
34@@ -1396,7 +1395,7 @@ kselftest-merge:
35 # Devicetree files
36 
37 ifneq ($(wildcard $(srctree)/arch/$(SRCARCH)/boot/dts/),)
38-dtstree := arch/$(SRCARCH)/boot/dts
39+dtstree := vendor/arch/$(SRCARCH)/boot/dts
40 endif
41 
42 ifneq ($(dtstree),)
43diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
44index 485b7dbd4..5e039976c 100644
45--- a/arch/arm64/Makefile
46+++ b/arch/arm64/Makefile
47@@ -44,8 +44,7 @@ ifeq ($(CONFIG_BROKEN_GAS_INST),y)
48 $(warning Detected assembler with broken .inst; disassembly will be unreliable)
49 endif
50 
51-KBUILD_CFLAGS	+= -mgeneral-regs-only	\
52-		   $(compat_vdso) $(cc_has_k_constraint)
53+KBUILD_CFLAGS	+= $(compat_vdso) $(cc_has_k_constraint)
54 KBUILD_CFLAGS	+= $(call cc-disable-warning, psabi)
55 KBUILD_AFLAGS	+= $(compat_vdso)
56 
57@@ -198,3 +197,16 @@ define archhelp
58   echo  '                  (distribution) /sbin/installkernel or'
59   echo  '                  install to $$(INSTALL_PATH) and run lilo'
60 endef
61+
62+MAKE_MODULES ?= y
63+
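+# Build modules together with the image only when CONFIG_MODULES=y,
+# MAKE_MODULES=y and this is an in-tree build (srctree == objtree).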
64+%.img:
65+ifeq ("$(CONFIG_MODULES)$(MAKE_MODULES)$(srctree)","yy$(objtree)")
66+	$(Q)$(MAKE) rockchip/$*.dtb Image.lz4 modules
67+else
68+	$(Q)$(MAKE) rockchip/$*.dtb Image.lz4
69+endif
70+	$(Q)$(srctree)/vendor/scripts/mkimg --dtb $*.dtb
71+
72+CLEAN_DIRS += out
73+CLEAN_FILES += boot.img kernel.img resource.img zboot.img
74diff --git a/arch/arm64/boot/Makefile b/arch/arm64/boot/Makefile
75index cd3414898..7469148c3 100644
76--- a/arch/arm64/boot/Makefile
77+++ b/arch/arm64/boot/Makefile
78@@ -28,7 +28,7 @@ $(obj)/Image.gz: $(obj)/Image FORCE
79 	$(call if_changed,gzip)
80 
81 $(obj)/Image.lz4: $(obj)/Image FORCE
82-	$(call if_changed,lz4)
83+	$(call if_changed,lz4c)
84 
85 $(obj)/Image.lzma: $(obj)/Image FORCE
86 	$(call if_changed,lzma)
87diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
88index 4c0e72781..2094dce73 100644
89--- a/arch/arm64/kernel/cpuinfo.c
90+++ b/arch/arm64/kernel/cpuinfo.c
91@@ -25,6 +25,13 @@
92 #include <linux/smp.h>
93 #include <linux/delay.h>
94 
95+#ifdef CONFIG_ARCH_ROCKCHIP
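+/* Board serial number shown as "Serial" in /proc/cpuinfo; exported so vendor code can set it. */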
96+unsigned int system_serial_low;
97+EXPORT_SYMBOL(system_serial_low);
98+
99+unsigned int system_serial_high;
100+EXPORT_SYMBOL(system_serial_high);
101+#endif
102 /*
103  * In case the boot CPU is hotpluggable, we record its initial state and
104  * current state separately. Certain system registers may contain different
105@@ -200,6 +207,10 @@ static int c_show(struct seq_file *m, void *v)
106 		seq_printf(m, "CPU revision\t: %d\n\n", MIDR_REVISION(midr));
107 	}
108 
109+#ifdef CONFIG_ARCH_ROCKCHIP
110+	seq_printf(m, "Serial\t\t: %08x%08x\n",
111+		   system_serial_high, system_serial_low);
112+#endif
113 	return 0;
114 }
115 
116diff --git a/drivers/clk/rockchip/Kconfig b/drivers/clk/rockchip/Kconfig
117index 47cd6c5de..99e66263a 100644
118--- a/drivers/clk/rockchip/Kconfig
119+++ b/drivers/clk/rockchip/Kconfig
120@@ -2,7 +2,7 @@
121 # common clock support for ROCKCHIP SoC family.
122 
123 config COMMON_CLK_ROCKCHIP
124-	bool "Rockchip clock controller common support"
125+	tristate "Rockchip clock controller common support"
126 	depends on ARCH_ROCKCHIP
127 	default ARCH_ROCKCHIP
128 	help
129@@ -10,69 +10,79 @@ config COMMON_CLK_ROCKCHIP
130 
131 if COMMON_CLK_ROCKCHIP
132 config CLK_PX30
133-	bool "Rockchip PX30 clock controller support"
134-	default y
135+	tristate "Rockchip PX30 clock controller support"
136+	depends on ARM64 || COMPILE_TEST
137+	default n
138 	help
139 	  Build the driver for PX30 Clock Driver.
140 
141 config CLK_RV110X
142-	bool "Rockchip RV110x clock controller support"
143-	default y
144+	tristate "Rockchip RV110x clock controller support"
145+	depends on ARM || COMPILE_TEST
146+	default n
147 	help
148 	  Build the driver for RV110x Clock Driver.
149 
150 config CLK_RK3036
151-	bool "Rockchip RK3036 clock controller support"
152-	default y
153+	tristate "Rockchip RK3036 clock controller support"
154+	depends on ARM || COMPILE_TEST
155+	default n
156 	help
157 	  Build the driver for RK3036 Clock Driver.
158 
159 config CLK_RK312X
160-	bool "Rockchip RK312x clock controller support"
161-	default y
162+	tristate "Rockchip RK312x clock controller support"
163+	depends on ARM || COMPILE_TEST
164+	default n
165 	help
166 	  Build the driver for RK312x Clock Driver.
167 
168 config CLK_RK3188
169-	bool "Rockchip RK3188 clock controller support"
170-	default y
171+	tristate "Rockchip RK3188 clock controller support"
172+	depends on ARM || COMPILE_TEST
173+	default n
174 	help
175 	  Build the driver for RK3188 Clock Driver.
176 
177 config CLK_RK322X
178-	bool "Rockchip RK322x clock controller support"
179-	default y
180+	tristate "Rockchip RK322x clock controller support"
181+	depends on ARM || COMPILE_TEST
182+	default n
183 	help
184 	  Build the driver for RK322x Clock Driver.
185 
186 config CLK_RK3288
187-	bool "Rockchip RK3288 clock controller support"
188-	depends on ARM
189-	default y
190+	tristate "Rockchip RK3288 clock controller support"
191+	depends on ARM || COMPILE_TEST
192+	default n
193 	help
194 	  Build the driver for RK3288 Clock Driver.
195 
196 config CLK_RK3308
197-	bool "Rockchip RK3308 clock controller support"
198-	default y
199+	tristate "Rockchip RK3308 clock controller support"
200+	depends on ARM64 || COMPILE_TEST
201+	default n
202 	help
203 	  Build the driver for RK3308 Clock Driver.
204 
205 config CLK_RK3328
206-	bool "Rockchip RK3328 clock controller support"
207-	default y
208+	tristate "Rockchip RK3328 clock controller support"
209+	depends on ARM64 || COMPILE_TEST
210+	default n
211 	help
212 	  Build the driver for RK3328 Clock Driver.
213 
214 config CLK_RK3368
215-	bool "Rockchip RK3368 clock controller support"
216-	default y
217+	tristate "Rockchip RK3368 clock controller support"
218+	depends on ARM64 || COMPILE_TEST
219+	default n
220 	help
221 	  Build the driver for RK3368 Clock Driver.
222 
223 config CLK_RK3399
224 	tristate "Rockchip RK3399 clock controller support"
225-	default y
226+	depends on ARM64 || COMPILE_TEST
227+	default n
228 	help
229 	  Build the driver for RK3399 Clock Driver.
230 endif
231diff --git a/drivers/clk/rockchip/clk-cpu.c b/drivers/clk/rockchip/clk-cpu.c
232index 0dc478a19..3293174a6 100644
233--- a/drivers/clk/rockchip/clk-cpu.c
234+++ b/drivers/clk/rockchip/clk-cpu.c
235@@ -51,6 +51,7 @@
236  */
237 struct rockchip_cpuclk {
238 	struct clk_hw				hw;
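+	/* hw of the PLL supplying the cpu clock, used by the boost helpers */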
239+	struct clk_hw				*pll_hw;
240 
241 	struct clk_mux				cpu_mux;
242 	const struct clk_ops			*cpu_mux_ops;
243@@ -88,10 +89,10 @@ static unsigned long rockchip_cpuclk_recalc_rate(struct clk_hw *hw,
244 {
245 	struct rockchip_cpuclk *cpuclk = to_rockchip_cpuclk_hw(hw);
246 	const struct rockchip_cpuclk_reg_data *reg_data = cpuclk->reg_data;
247-	u32 clksel0 = readl_relaxed(cpuclk->reg_base + reg_data->core_reg);
248+	u32 clksel0 = readl_relaxed(cpuclk->reg_base + reg_data->core_reg[0]);
249 
250-	clksel0 >>= reg_data->div_core_shift;
251-	clksel0 &= reg_data->div_core_mask;
252+	clksel0 >>= reg_data->div_core_shift[0];
253+	clksel0 &= reg_data->div_core_mask[0];
254 	return parent_rate / (clksel0 + 1);
255 }
256 
257@@ -117,6 +118,42 @@ static void rockchip_cpuclk_set_dividers(struct rockchip_cpuclk *cpuclk,
258 	}
259 }
260 
261+static void rockchip_cpuclk_set_pre_muxs(struct rockchip_cpuclk *cpuclk,
262+					 const struct rockchip_cpuclk_rate_table *rate)
263+{
264+	int i;
265+
266+	/* alternate parent is active now. set the pre_muxs */
267+	for (i = 0; i < ARRAY_SIZE(rate->pre_muxs); i++) {
268+		const struct rockchip_cpuclk_clksel *clksel = &rate->pre_muxs[i];
269+
270+		if (!clksel->reg)
271+			break;
272+
273+		pr_debug("%s: setting reg 0x%x to 0x%x\n",
274+			 __func__, clksel->reg, clksel->val);
275+		writel(clksel->val, cpuclk->reg_base + clksel->reg);
276+	}
277+}
278+
279+static void rockchip_cpuclk_set_post_muxs(struct rockchip_cpuclk *cpuclk,
280+					  const struct rockchip_cpuclk_rate_table *rate)
281+{
282+	int i;
283+
284+	/* alternate parent is active now. set the muxs */
285+	for (i = 0; i < ARRAY_SIZE(rate->post_muxs); i++) {
286+		const struct rockchip_cpuclk_clksel *clksel = &rate->post_muxs[i];
287+
288+		if (!clksel->reg)
289+			break;
290+
291+		pr_debug("%s: setting reg 0x%x to 0x%x\n",
292+			 __func__, clksel->reg, clksel->val);
293+		writel(clksel->val, cpuclk->reg_base + clksel->reg);
294+	}
295+}
296+
297 static int rockchip_cpuclk_pre_rate_change(struct rockchip_cpuclk *cpuclk,
298 					   struct clk_notifier_data *ndata)
299 {
300@@ -124,6 +161,7 @@ static int rockchip_cpuclk_pre_rate_change(struct rockchip_cpuclk *cpuclk,
301 	const struct rockchip_cpuclk_rate_table *rate;
302 	unsigned long alt_prate, alt_div;
303 	unsigned long flags;
304+	int i = 0;
305 
306 	/* check validity of the new rate */
307 	rate = rockchip_get_cpuclk_settings(cpuclk, ndata->new_rate);
308@@ -133,6 +171,8 @@ static int rockchip_cpuclk_pre_rate_change(struct rockchip_cpuclk *cpuclk,
309 		return -EINVAL;
310 	}
311 
312+	rockchip_boost_enable_recovery_sw_low(cpuclk->pll_hw);
313+
314 	alt_prate = clk_get_rate(cpuclk->alt_parent);
315 
316 	spin_lock_irqsave(cpuclk->lock, flags);
317@@ -146,10 +186,10 @@ static int rockchip_cpuclk_pre_rate_change(struct rockchip_cpuclk *cpuclk,
318 	if (alt_prate > ndata->old_rate) {
319 		/* calculate dividers */
320 		alt_div =  DIV_ROUND_UP(alt_prate, ndata->old_rate) - 1;
321-		if (alt_div > reg_data->div_core_mask) {
322+		if (alt_div > reg_data->div_core_mask[0]) {
323 			pr_warn("%s: limiting alt-divider %lu to %d\n",
324-				__func__, alt_div, reg_data->div_core_mask);
325-			alt_div = reg_data->div_core_mask;
326+				__func__, alt_div, reg_data->div_core_mask[0]);
327+			alt_div = reg_data->div_core_mask[0];
328 		}
329 
330 		/*
331@@ -162,19 +202,28 @@ static int rockchip_cpuclk_pre_rate_change(struct rockchip_cpuclk *cpuclk,
332 		pr_debug("%s: setting div %lu as alt-rate %lu > old-rate %lu\n",
333 			 __func__, alt_div, alt_prate, ndata->old_rate);
334 
335-		writel(HIWORD_UPDATE(alt_div, reg_data->div_core_mask,
336-					      reg_data->div_core_shift) |
337-		       HIWORD_UPDATE(reg_data->mux_core_alt,
338+		for (i = 0; i < reg_data->num_cores; i++) {
339+			writel(HIWORD_UPDATE(alt_div, reg_data->div_core_mask[i],
340+					     reg_data->div_core_shift[i]),
341+			       cpuclk->reg_base + reg_data->core_reg[i]);
342+		}
343+	}
344+
345+	rockchip_boost_add_core_div(cpuclk->pll_hw, alt_prate);
346+
347+	rockchip_cpuclk_set_pre_muxs(cpuclk, rate);
348+
349+	/* select alternate parent */
350+	if (reg_data->mux_core_reg)
351+		writel(HIWORD_UPDATE(reg_data->mux_core_alt,
352 				     reg_data->mux_core_mask,
353 				     reg_data->mux_core_shift),
354-		       cpuclk->reg_base + reg_data->core_reg);
355-	} else {
356-		/* select alternate parent */
357+		       cpuclk->reg_base + reg_data->mux_core_reg);
358+	else
359 		writel(HIWORD_UPDATE(reg_data->mux_core_alt,
360 				     reg_data->mux_core_mask,
361 				     reg_data->mux_core_shift),
362-		       cpuclk->reg_base + reg_data->core_reg);
363-	}
364+		       cpuclk->reg_base + reg_data->core_reg[0]);
365 
366 	spin_unlock_irqrestore(cpuclk->lock, flags);
367 	return 0;
368@@ -186,6 +235,7 @@ static int rockchip_cpuclk_post_rate_change(struct rockchip_cpuclk *cpuclk,
369 	const struct rockchip_cpuclk_reg_data *reg_data = cpuclk->reg_data;
370 	const struct rockchip_cpuclk_rate_table *rate;
371 	unsigned long flags;
372+	int i = 0;
373 
374 	rate = rockchip_get_cpuclk_settings(cpuclk, ndata->new_rate);
375 	if (!rate) {
376@@ -206,16 +256,31 @@ static int rockchip_cpuclk_post_rate_change(struct rockchip_cpuclk *cpuclk,
377 	 * primary parent by the extra dividers that were needed for the alt.
378 	 */
379 
380-	writel(HIWORD_UPDATE(0, reg_data->div_core_mask,
381-				reg_data->div_core_shift) |
382-	       HIWORD_UPDATE(reg_data->mux_core_main,
383-				reg_data->mux_core_mask,
384-				reg_data->mux_core_shift),
385-	       cpuclk->reg_base + reg_data->core_reg);
386+	if (reg_data->mux_core_reg)
387+		writel(HIWORD_UPDATE(reg_data->mux_core_main,
388+				     reg_data->mux_core_mask,
389+				     reg_data->mux_core_shift),
390+		       cpuclk->reg_base + reg_data->mux_core_reg);
391+	else
392+		writel(HIWORD_UPDATE(reg_data->mux_core_main,
393+				     reg_data->mux_core_mask,
394+				     reg_data->mux_core_shift),
395+		       cpuclk->reg_base + reg_data->core_reg[0]);
396+
397+	rockchip_cpuclk_set_post_muxs(cpuclk, rate);
398+
399+	/* remove dividers */
400+	for (i = 0; i < reg_data->num_cores; i++) {
401+		writel(HIWORD_UPDATE(0, reg_data->div_core_mask[i],
402+				     reg_data->div_core_shift[i]),
403+		       cpuclk->reg_base + reg_data->core_reg[i]);
404+	}
405 
406 	if (ndata->old_rate > ndata->new_rate)
407 		rockchip_cpuclk_set_dividers(cpuclk, rate);
408 
409+	rockchip_boost_disable_recovery_sw(cpuclk->pll_hw);
410+
411 	spin_unlock_irqrestore(cpuclk->lock, flags);
412 	return 0;
413 }
414@@ -244,14 +309,16 @@ static int rockchip_cpuclk_notifier_cb(struct notifier_block *nb,
415 }
416 
417 struct clk *rockchip_clk_register_cpuclk(const char *name,
418-			const char *const *parent_names, u8 num_parents,
419+			u8 num_parents,
420+			struct clk *parent, struct clk *alt_parent,
421 			const struct rockchip_cpuclk_reg_data *reg_data,
422 			const struct rockchip_cpuclk_rate_table *rates,
423 			int nrates, void __iomem *reg_base, spinlock_t *lock)
424 {
425 	struct rockchip_cpuclk *cpuclk;
426 	struct clk_init_data init;
427-	struct clk *clk, *cclk;
428+	struct clk *clk, *cclk, *pll_clk;
429+	const char *parent_name;
430 	int ret;
431 
432 	if (num_parents < 2) {
433@@ -259,12 +326,18 @@ struct clk *rockchip_clk_register_cpuclk(const char *name,
434 		return ERR_PTR(-EINVAL);
435 	}
436 
437+	if (IS_ERR(parent) || IS_ERR(alt_parent)) {
438+		pr_err("%s: invalid parent clock(s)\n", __func__);
439+		return ERR_PTR(-EINVAL);
440+	}
441+
442 	cpuclk = kzalloc(sizeof(*cpuclk), GFP_KERNEL);
443 	if (!cpuclk)
444 		return ERR_PTR(-ENOMEM);
445 
446+	parent_name = clk_hw_get_name(__clk_get_hw(parent));
447 	init.name = name;
448-	init.parent_names = &parent_names[reg_data->mux_core_main];
449+	init.parent_names = &parent_name;
450 	init.num_parents = 1;
451 	init.ops = &rockchip_cpuclk_ops;
452 
453@@ -281,8 +354,19 @@ struct clk *rockchip_clk_register_cpuclk(const char *name,
454 	cpuclk->reg_data = reg_data;
455 	cpuclk->clk_nb.notifier_call = rockchip_cpuclk_notifier_cb;
456 	cpuclk->hw.init = &init;
457+	if (reg_data->pll_name) {
458+		pll_clk = clk_get_parent(parent);
459+		if (!pll_clk) {
460+			pr_err("%s: could not lookup pll clock: (%s)\n",
461+			       __func__, reg_data->pll_name);
462+			ret = -EINVAL;
463+			goto free_cpuclk;
464+		}
465+		cpuclk->pll_hw = __clk_get_hw(pll_clk);
466+		rockchip_boost_init(cpuclk->pll_hw);
467+	}
468 
469-	cpuclk->alt_parent = __clk_lookup(parent_names[reg_data->mux_core_alt]);
470+	cpuclk->alt_parent = alt_parent;
471 	if (!cpuclk->alt_parent) {
472 		pr_err("%s: could not lookup alternate parent: (%d)\n",
473 		       __func__, reg_data->mux_core_alt);
474@@ -297,11 +381,11 @@ struct clk *rockchip_clk_register_cpuclk(const char *name,
475 		goto free_cpuclk;
476 	}
477 
478-	clk = __clk_lookup(parent_names[reg_data->mux_core_main]);
479+	clk = parent;
480 	if (!clk) {
481 		pr_err("%s: could not lookup parent clock: (%d) %s\n",
482 		       __func__, reg_data->mux_core_main,
483-		       parent_names[reg_data->mux_core_main]);
484+		       parent_name);
485 		ret = -EINVAL;
486 		goto free_alt_parent;
487 	}
488diff --git a/drivers/clk/rockchip/clk-ddr.c b/drivers/clk/rockchip/clk-ddr.c
489index 86718c54e..3c8bcbee2 100644
490--- a/drivers/clk/rockchip/clk-ddr.c
491+++ b/drivers/clk/rockchip/clk-ddr.c
492@@ -8,10 +8,20 @@
493 #include <linux/clk.h>
494 #include <linux/clk-provider.h>
495 #include <linux/io.h>
496+#include <linux/of.h>
497+#include <linux/rockchip/rockchip_sip.h>
498 #include <linux/slab.h>
499 #include <soc/rockchip/rockchip_sip.h>
500+#include <soc/rockchip/scpi.h>
501+#include <uapi/drm/drm_mode.h>
502+#ifdef CONFIG_ARM
503+#include <asm/psci.h>
504+#endif
505+
506 #include "clk.h"
507 
508+#define MHZ		(1000000)
509+
510 struct rockchip_ddrclk {
511 	struct clk_hw	hw;
512 	void __iomem	*reg_base;
513@@ -21,25 +31,47 @@ struct rockchip_ddrclk {
514 	int		div_shift;
515 	int		div_width;
516 	int		ddr_flag;
517-	spinlock_t	*lock;
518 };
519 
520 #define to_rockchip_ddrclk_hw(hw) container_of(hw, struct rockchip_ddrclk, hw)
521 
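+/* Parameter area set via rockchip_set_ddrclk_params(): target rate in Hz and LCDC type. */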
522+struct share_params_ddrclk {
523+	u32 hz;
524+	u32 lcdc_type;
525+};
526+
527+struct rockchip_ddrclk_data {
528+	void __iomem *params;
529+	int (*dmcfreq_wait_complete)(void);
530+};
531+
532+static struct rockchip_ddrclk_data ddr_data = {NULL, NULL};
533+
534+void rockchip_set_ddrclk_params(void __iomem *params)
535+{
536+	ddr_data.params = params;
537+}
538+EXPORT_SYMBOL(rockchip_set_ddrclk_params);
539+
540+void rockchip_set_ddrclk_dmcfreq_wait_complete(int (*func)(void))
541+{
542+	ddr_data.dmcfreq_wait_complete = func;
543+}
544+EXPORT_SYMBOL(rockchip_set_ddrclk_dmcfreq_wait_complete);
545+
546 static int rockchip_ddrclk_sip_set_rate(struct clk_hw *hw, unsigned long drate,
547 					unsigned long prate)
548 {
549-	struct rockchip_ddrclk *ddrclk = to_rockchip_ddrclk_hw(hw);
550-	unsigned long flags;
551 	struct arm_smccc_res res;
552 
553-	spin_lock_irqsave(ddrclk->lock, flags);
554 	arm_smccc_smc(ROCKCHIP_SIP_DRAM_FREQ, drate, 0,
555 		      ROCKCHIP_SIP_CONFIG_DRAM_SET_RATE,
556 		      0, 0, 0, 0, &res);
557-	spin_unlock_irqrestore(ddrclk->lock, flags);
558 
559-	return res.a0;
560+	if (res.a0)
561+		return 0;
562+	else
563+		return -EPERM;
564 }
565 
566 static unsigned long
567@@ -87,18 +119,134 @@ static const struct clk_ops rockchip_ddrclk_sip_ops = {
568 	.get_parent = rockchip_ddrclk_get_parent,
569 };
570 
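+/* Last DDR rate (MHz) accepted over SCPI; 0 means query the firmware instead. */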
571+static u32 ddr_clk_cached;
572+
573+static int rockchip_ddrclk_scpi_set_rate(struct clk_hw *hw, unsigned long drate,
574+					 unsigned long prate)
575+{
576+	u32 ret;
577+	u32 lcdc_type = 0;
578+	struct share_params_ddrclk *p;
579+
580+	p = (struct share_params_ddrclk *)ddr_data.params;
581+	if (p)
582+		lcdc_type = p->lcdc_type;
583+
584+	ret = scpi_ddr_set_clk_rate(drate / MHZ, lcdc_type);
585+	if (ret) {
586+		ddr_clk_cached = ret;
587+		ret = 0;
588+	} else {
589+		ddr_clk_cached = 0;
590+		ret = -1;
591+	}
592+
593+	return ret;
594+}
595+
596+static unsigned long rockchip_ddrclk_scpi_recalc_rate(struct clk_hw *hw,
597+						      unsigned long parent_rate)
598+{
599+	if (ddr_clk_cached)
600+		return (MHZ * ddr_clk_cached);
601+	else
602+		return (MHZ * scpi_ddr_get_clk_rate());
603+}
604+
605+static long rockchip_ddrclk_scpi_round_rate(struct clk_hw *hw,
606+					    unsigned long rate,
607+					    unsigned long *prate)
608+{
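+	/* Round down to a whole multiple of 12 MHz before handing the rate to SCPI. */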
609+	rate = rate / MHZ;
610+	rate = (rate / 12) * 12;
611+
612+	return (rate * MHZ);
613+}
614+
615+static const struct clk_ops rockchip_ddrclk_scpi_ops = {
616+	.recalc_rate = rockchip_ddrclk_scpi_recalc_rate,
617+	.set_rate = rockchip_ddrclk_scpi_set_rate,
618+	.round_rate = rockchip_ddrclk_scpi_round_rate,
619+	.get_parent = rockchip_ddrclk_get_parent,
620+};
621+
622+static int rockchip_ddrclk_sip_set_rate_v2(struct clk_hw *hw,
623+					   unsigned long drate,
624+					   unsigned long prate)
625+{
626+	struct share_params_ddrclk *p;
627+	struct arm_smccc_res res;
628+
629+	p = (struct share_params_ddrclk *)ddr_data.params;
630+	if (p)
631+		p->hz = drate;
632+
633+	res = sip_smc_dram(SHARE_PAGE_TYPE_DDR, 0,
634+			   ROCKCHIP_SIP_CONFIG_DRAM_SET_RATE);
635+
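+	/* On a timeout result, let the registered dmcfreq callback wait for completion. */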
636+	if ((int)res.a1 == SIP_RET_SET_RATE_TIMEOUT) {
637+		if (ddr_data.dmcfreq_wait_complete)
638+			ddr_data.dmcfreq_wait_complete();
639+	}
640+
641+	return res.a0;
642+}
643+
644+static unsigned long rockchip_ddrclk_sip_recalc_rate_v2
645+			(struct clk_hw *hw, unsigned long parent_rate)
646+{
647+	struct arm_smccc_res res;
648+
649+	res = sip_smc_dram(SHARE_PAGE_TYPE_DDR, 0,
650+			   ROCKCHIP_SIP_CONFIG_DRAM_GET_RATE);
651+	if (!res.a0)
652+		return res.a1;
653+	else
654+		return 0;
655+}
656+
657+static long rockchip_ddrclk_sip_round_rate_v2(struct clk_hw *hw,
658+					      unsigned long rate,
659+					      unsigned long *prate)
660+{
661+	struct share_params_ddrclk *p;
662+	struct arm_smccc_res res;
663+
664+	p = (struct share_params_ddrclk *)ddr_data.params;
665+	if (p)
666+		p->hz = rate;
667+
668+	res = sip_smc_dram(SHARE_PAGE_TYPE_DDR, 0,
669+			   ROCKCHIP_SIP_CONFIG_DRAM_ROUND_RATE);
670+	if (!res.a0)
671+		return res.a1;
672+	else
673+		return 0;
674+}
675+
676+static const struct clk_ops rockchip_ddrclk_sip_ops_v2 = {
677+	.recalc_rate = rockchip_ddrclk_sip_recalc_rate_v2,
678+	.set_rate = rockchip_ddrclk_sip_set_rate_v2,
679+	.round_rate = rockchip_ddrclk_sip_round_rate_v2,
680+	.get_parent = rockchip_ddrclk_get_parent,
681+};
682+
683 struct clk *rockchip_clk_register_ddrclk(const char *name, int flags,
684 					 const char *const *parent_names,
685 					 u8 num_parents, int mux_offset,
686 					 int mux_shift, int mux_width,
687 					 int div_shift, int div_width,
688-					 int ddr_flag, void __iomem *reg_base,
689-					 spinlock_t *lock)
690+					 int ddr_flag, void __iomem *reg_base)
691 {
692 	struct rockchip_ddrclk *ddrclk;
693 	struct clk_init_data init;
694 	struct clk *clk;
695 
696+#ifdef CONFIG_ARM
697+	if (!psci_smp_available())
698+		return NULL;
699+#endif
700+
701 	ddrclk = kzalloc(sizeof(*ddrclk), GFP_KERNEL);
702 	if (!ddrclk)
703 		return ERR_PTR(-ENOMEM);
704@@ -114,6 +262,12 @@ struct clk *rockchip_clk_register_ddrclk(const char *name, int flags,
705 	case ROCKCHIP_DDRCLK_SIP:
706 		init.ops = &rockchip_ddrclk_sip_ops;
707 		break;
708+	case ROCKCHIP_DDRCLK_SCPI:
709+		init.ops = &rockchip_ddrclk_scpi_ops;
710+		break;
711+	case ROCKCHIP_DDRCLK_SIP_V2:
712+		init.ops = &rockchip_ddrclk_sip_ops_v2;
713+		break;
714 	default:
715 		pr_err("%s: unsupported ddrclk type %d\n", __func__, ddr_flag);
716 		kfree(ddrclk);
717@@ -121,7 +275,6 @@ struct clk *rockchip_clk_register_ddrclk(const char *name, int flags,
718 	}
719 
720 	ddrclk->reg_base = reg_base;
721-	ddrclk->lock = lock;
722 	ddrclk->hw.init = &init;
723 	ddrclk->mux_offset = mux_offset;
724 	ddrclk->mux_shift = mux_shift;
725diff --git a/drivers/clk/rockchip/clk-half-divider.c b/drivers/clk/rockchip/clk-half-divider.c
726index ccd5c270c..b978af08d 100644
727--- a/drivers/clk/rockchip/clk-half-divider.c
728+++ b/drivers/clk/rockchip/clk-half-divider.c
729@@ -14,9 +14,9 @@ static bool _is_best_half_div(unsigned long rate, unsigned long now,
730 			      unsigned long best, unsigned long flags)
731 {
732 	if (flags & CLK_DIVIDER_ROUND_CLOSEST)
733-		return abs(rate - now) < abs(rate - best);
734+		return abs(rate - now) <= abs(rate - best);
735 
736-	return now <= rate && now > best;
737+	return now <= rate && now >= best;
738 }
739 
740 static unsigned long clk_half_divider_recalc_rate(struct clk_hw *hw,
741@@ -38,7 +38,7 @@ static int clk_half_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
742 {
743 	unsigned int i, bestdiv = 0;
744 	unsigned long parent_rate, best = 0, now, maxdiv;
745-	unsigned long parent_rate_saved = *best_parent_rate;
746+	bool is_bestdiv = false;
747 
748 	if (!rate)
749 		rate = 1;
750@@ -51,7 +51,7 @@ static int clk_half_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
751 		if (bestdiv < 3)
752 			bestdiv = 0;
753 		else
754-			bestdiv = (bestdiv - 3) / 2;
755+			bestdiv = DIV_ROUND_UP(bestdiv - 3, 2);
756 		bestdiv = bestdiv > maxdiv ? maxdiv : bestdiv;
757 		return bestdiv;
758 	}
759@@ -63,28 +63,20 @@ static int clk_half_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
760 	maxdiv = min(ULONG_MAX / rate, maxdiv);
761 
762 	for (i = 0; i <= maxdiv; i++) {
763-		if (((u64)rate * (i * 2 + 3)) == ((u64)parent_rate_saved * 2)) {
764-			/*
765-			 * It's the most ideal case if the requested rate can be
766-			 * divided from parent clock without needing to change
767-			 * parent rate, so return the divider immediately.
768-			 */
769-			*best_parent_rate = parent_rate_saved;
770-			return i;
771-		}
772 		parent_rate = clk_hw_round_rate(clk_hw_get_parent(hw),
773 						((u64)rate * (i * 2 + 3)) / 2);
774 		now = DIV_ROUND_UP_ULL(((u64)parent_rate * 2),
775 				       (i * 2 + 3));
776 
777 		if (_is_best_half_div(rate, now, best, flags)) {
778+			is_bestdiv = true;
779 			bestdiv = i;
780 			best = now;
781 			*best_parent_rate = parent_rate;
782 		}
783 	}
784 
785-	if (!bestdiv) {
786+	if (!is_bestdiv) {
787 		bestdiv = div_mask(width);
788 		*best_parent_rate = clk_hw_round_rate(clk_hw_get_parent(hw), 1);
789 	}
790@@ -114,7 +106,7 @@ static int clk_half_divider_set_rate(struct clk_hw *hw, unsigned long rate,
791 	u32 val;
792 
793 	value = DIV_ROUND_UP_ULL(((u64)parent_rate * 2), rate);
794-	value = (value - 3) / 2;
795+	value = DIV_ROUND_UP(value - 3, 2);
796 	value =  min_t(unsigned int, value, div_mask(divider->width));
797 
798 	if (divider->lock)
799@@ -160,10 +152,10 @@ struct clk *rockchip_clk_register_halfdiv(const char *name,
800 					  u8 num_parents, void __iomem *base,
801 					  int muxdiv_offset, u8 mux_shift,
802 					  u8 mux_width, u8 mux_flags,
803-					  u8 div_shift, u8 div_width,
804-					  u8 div_flags, int gate_offset,
805-					  u8 gate_shift, u8 gate_flags,
806-					  unsigned long flags,
807+					  int div_offset, u8 div_shift,
808+					  u8 div_width, u8 div_flags,
809+					  int gate_offset, u8 gate_shift,
810+					  u8 gate_flags, unsigned long flags,
811 					  spinlock_t *lock)
812 {
813 	struct clk_hw *hw = ERR_PTR(-ENOMEM);
814@@ -205,7 +197,10 @@ struct clk *rockchip_clk_register_halfdiv(const char *name,
815 			goto err_div;
816 
817 		div->flags = div_flags;
818-		div->reg = base + muxdiv_offset;
819+		if (div_offset)
820+			div->reg = base + div_offset;
821+		else
822+			div->reg = base + muxdiv_offset;
823 		div->shift = div_shift;
824 		div->width = div_width;
825 		div->lock = lock;
826diff --git a/drivers/clk/rockchip/clk-pll.c b/drivers/clk/rockchip/clk-pll.c
827index d0bd513ff..5687b5d8f 100644
828--- a/drivers/clk/rockchip/clk-pll.c
829+++ b/drivers/clk/rockchip/clk-pll.c
830@@ -15,6 +15,9 @@
831 #include <linux/iopoll.h>
832 #include <linux/regmap.h>
833 #include <linux/clk.h>
834+#include <linux/gcd.h>
835+#include <linux/clk/rockchip.h>
836+#include <linux/mfd/syscon.h>
837 #include "clk.h"
838 
839 #define PLL_MODE_MASK		0x3
840@@ -38,15 +41,352 @@ struct rockchip_clk_pll {
841 	u8			flags;
842 	const struct rockchip_pll_rate_table *rate_table;
843 	unsigned int		rate_count;
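+	/*
+	 * Adaptive scaling: 'sel' is the lowest table index that may be used;
+	 * 'scaling' remembers the rate that was actually requested.
+	 */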
844+	int			sel;
845+	unsigned long		scaling;
846 	spinlock_t		*lock;
847 
848 	struct rockchip_clk_provider *ctx;
849+
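+	/* optional boost mode: low/high PLL settings driven through a syscon regmap */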
850+	bool			boost_enabled;
851+	u32			boost_backup_pll_usage;
852+	unsigned long		boost_backup_pll_rate;
853+	unsigned long		boost_low_rate;
854+	unsigned long		boost_high_rate;
855+	struct regmap		*boost;
856+#ifdef CONFIG_DEBUG_FS
857+	struct hlist_node	debug_node;
858+#endif
859 };
860 
861 #define to_rockchip_clk_pll(_hw) container_of(_hw, struct rockchip_clk_pll, hw)
862 #define to_rockchip_clk_pll_nb(nb) \
863 			container_of(nb, struct rockchip_clk_pll, clk_nb)
864 
865+static void rockchip_boost_disable_low(struct rockchip_clk_pll *pll);
866+
867+#define MHZ			(1000UL * 1000UL)
868+#define KHZ			(1000UL)
869+
870+/* CLK_PLL_TYPE_RK3066_AUTO type ops */
871+#define PLL_FREF_MIN		(269 * KHZ)
872+#define PLL_FREF_MAX		(2200 * MHZ)
873+
874+#define PLL_FVCO_MIN		(440 * MHZ)
875+#define PLL_FVCO_MAX		(2200 * MHZ)
876+
877+#define PLL_FOUT_MIN		(27500 * KHZ)
878+#define PLL_FOUT_MAX		(2200 * MHZ)
879+
880+#define PLL_NF_MAX		(4096)
881+#define PLL_NR_MAX		(64)
882+#define PLL_NO_MAX		(16)
883+
884+/* CLK_PLL_TYPE_RK3036/3366/3399_AUTO type ops */
885+#define MIN_FOUTVCO_FREQ	(800 * MHZ)
886+#define MAX_FOUTVCO_FREQ	(2000 * MHZ)
887+
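+/* Scratch entry filled by the *_set_by_auto() helpers for rates not in the PLL's table. */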
888+static struct rockchip_pll_rate_table auto_table;
889+#ifdef CONFIG_DEBUG_FS
890+static HLIST_HEAD(clk_boost_list);
891+static DEFINE_MUTEX(clk_boost_lock);
892+#endif
893+
894+int rockchip_pll_clk_adaptive_scaling(struct clk *clk, int sel)
895+{
896+	struct clk *parent = clk_get_parent(clk);
897+	struct rockchip_clk_pll *pll;
898+
899+	if (IS_ERR_OR_NULL(parent))
900+		return -EINVAL;
901+
902+	pll = to_rockchip_clk_pll(__clk_get_hw(parent));
903+	if (!pll)
904+		return -EINVAL;
905+
906+	pll->sel = sel;
907+
908+	return 0;
909+}
910+EXPORT_SYMBOL(rockchip_pll_clk_adaptive_scaling);
911+
912+int rockchip_pll_clk_rate_to_scale(struct clk *clk, unsigned long rate)
913+{
914+	const struct rockchip_pll_rate_table *rate_table;
915+	struct clk *parent = clk_get_parent(clk);
916+	struct rockchip_clk_pll *pll;
917+	unsigned int i;
918+
919+	if (IS_ERR_OR_NULL(parent))
920+		return -EINVAL;
921+
922+	pll = to_rockchip_clk_pll(__clk_get_hw(parent));
923+	if (!pll)
924+		return -EINVAL;
925+
926+	rate_table = pll->rate_table;
927+	for (i = 0; i < pll->rate_count; i++) {
928+		if (rate >= rate_table[i].rate)
929+			return i;
930+	}
931+
932+	return -EINVAL;
933+}
934+EXPORT_SYMBOL(rockchip_pll_clk_rate_to_scale);
935+
936+int rockchip_pll_clk_scale_to_rate(struct clk *clk, unsigned int scale)
937+{
938+	const struct rockchip_pll_rate_table *rate_table;
939+	struct clk *parent = clk_get_parent(clk);
940+	struct rockchip_clk_pll *pll;
941+	unsigned int i;
942+
943+	if (IS_ERR_OR_NULL(parent))
944+		return -EINVAL;
945+
946+	pll = to_rockchip_clk_pll(__clk_get_hw(parent));
947+	if (!pll)
948+		return -EINVAL;
949+
950+	rate_table = pll->rate_table;
951+	for (i = 0; i < pll->rate_count; i++) {
952+		if (i == scale)
953+			return rate_table[i].rate;
954+	}
955+
956+	return -EINVAL;
957+}
958+EXPORT_SYMBOL(rockchip_pll_clk_scale_to_rate);
959+
960+static struct rockchip_pll_rate_table *rk_pll_rate_table_get(void)
961+{
962+	return &auto_table;
963+}
964+
965+static int rockchip_pll_clk_set_postdiv(unsigned long fout_hz,
966+					u32 *postdiv1,
967+					u32 *postdiv2,
968+					u32 *foutvco)
969+{
970+	unsigned long freq;
971+
972+	if (fout_hz < MIN_FOUTVCO_FREQ) {
973+		for (*postdiv1 = 1; *postdiv1 <= 7; (*postdiv1)++) {
974+			for (*postdiv2 = 1; *postdiv2 <= 7; (*postdiv2)++) {
975+				freq = fout_hz * (*postdiv1) * (*postdiv2);
976+				if (freq >= MIN_FOUTVCO_FREQ &&
977+				    freq <= MAX_FOUTVCO_FREQ) {
978+					*foutvco = freq;
979+					return 0;
980+				}
981+			}
982+		}
983+		pr_err("CANNOT FIND postdiv1/2 to make fout in range from 800M to 2000M, fout = %lu\n",
984+		       fout_hz);
985+	} else {
986+		*postdiv1 = 1;
987+		*postdiv2 = 1;
988+	}
989+	return 0;
990+}
991+
992+static struct rockchip_pll_rate_table *
993+rockchip_pll_clk_set_by_auto(struct rockchip_clk_pll *pll,
994+			     unsigned long fin_hz,
995+			     unsigned long fout_hz)
996+{
997+	struct rockchip_pll_rate_table *rate_table = rk_pll_rate_table_get();
998+	/* FIXME set postdiv1/2 always 1 */
999+	u32 foutvco = fout_hz;
1000+	u64 fin_64, frac_64;
1001+	u32 f_frac, postdiv1, postdiv2;
1002+	unsigned long clk_gcd = 0;
1003+
1004+	if (fin_hz == 0 || fout_hz == 0 || fout_hz == fin_hz)
1005+		return NULL;
1006+
1007+	rockchip_pll_clk_set_postdiv(fout_hz, &postdiv1, &postdiv2, &foutvco);
1008+	rate_table->postdiv1 = postdiv1;
1009+	rate_table->postdiv2 = postdiv2;
1010+	rate_table->dsmpd = 1;
1011+
1012+	if (fin_hz / MHZ * MHZ == fin_hz && fout_hz / MHZ * MHZ == fout_hz) {
1013+		fin_hz /= MHZ;
1014+		foutvco /= MHZ;
1015+		clk_gcd = gcd(fin_hz, foutvco);
1016+		rate_table->refdiv = fin_hz / clk_gcd;
1017+		rate_table->fbdiv = foutvco / clk_gcd;
1018+
1019+		rate_table->frac = 0;
1020+
1021+		pr_debug("fin = %lu, fout = %lu, clk_gcd = %lu, refdiv = %u, fbdiv = %u, postdiv1 = %u, postdiv2 = %u, frac = %u\n",
1022+			 fin_hz, fout_hz, clk_gcd, rate_table->refdiv,
1023+			 rate_table->fbdiv, rate_table->postdiv1,
1024+			 rate_table->postdiv2, rate_table->frac);
1025+	} else {
1026+		pr_debug("frac div running, fin_hz = %lu, fout_hz = %lu, fin_INT_mhz = %lu, fout_INT_mhz = %lu\n",
1027+			 fin_hz, fout_hz,
1028+			 fin_hz / MHZ * MHZ,
1029+			 fout_hz / MHZ * MHZ);
1030+		pr_debug("frac get postdiv1 = %u,  postdiv2 = %u, foutvco = %u\n",
1031+			 rate_table->postdiv1, rate_table->postdiv2, foutvco);
1032+		clk_gcd = gcd(fin_hz / MHZ, foutvco / MHZ);
1033+		rate_table->refdiv = fin_hz / MHZ / clk_gcd;
1034+		rate_table->fbdiv = foutvco / MHZ / clk_gcd;
1035+		pr_debug("frac get refdiv = %u,  fbdiv = %u\n",
1036+			 rate_table->refdiv, rate_table->fbdiv);
1037+
1038+		rate_table->frac = 0;
1039+
1040+		f_frac = (foutvco % MHZ);
1041+		fin_64 = fin_hz;
1042+		do_div(fin_64, (u64)rate_table->refdiv);
1043+		frac_64 = (u64)f_frac << 24;
1044+		do_div(frac_64, fin_64);
1045+		rate_table->frac = (u32)frac_64;
1046+		if (rate_table->frac > 0)
1047+			rate_table->dsmpd = 0;
1048+		pr_debug("frac = %x\n", rate_table->frac);
1049+	}
1050+	return rate_table;
1051+}
1052+
1053+static struct rockchip_pll_rate_table *
1054+rockchip_rk3066_pll_clk_set_by_auto(struct rockchip_clk_pll *pll,
1055+				    unsigned long fin_hz,
1056+				    unsigned long fout_hz)
1057+{
1058+	struct rockchip_pll_rate_table *rate_table = rk_pll_rate_table_get();
1059+	u32 nr, nf, no, nonr;
1060+	u32 nr_out, nf_out, no_out;
1061+	u32 n;
1062+	u32 numerator, denominator;
1063+	u64 fref, fvco, fout;
1064+	unsigned long clk_gcd = 0;
1065+
1066+	nr_out = PLL_NR_MAX + 1;
1067+	no_out = 0;
1068+	nf_out = 0;
1069+
1070+	if (fin_hz == 0 || fout_hz == 0 || fout_hz == fin_hz)
1071+		return NULL;
1072+
1073+	clk_gcd = gcd(fin_hz, fout_hz);
1074+
1075+	numerator = fout_hz / clk_gcd;
1076+	denominator = fin_hz / clk_gcd;
1077+
1078+	for (n = 1;; n++) {
1079+		nf = numerator * n;
1080+		nonr = denominator * n;
1081+		if (nf > PLL_NF_MAX || nonr > (PLL_NO_MAX * PLL_NR_MAX))
1082+			break;
1083+
1084+		for (no = 1; no <= PLL_NO_MAX; no++) {
1085+			if (!(no == 1 || !(no % 2)))
1086+				continue;
1087+
1088+			if (nonr % no)
1089+				continue;
1090+			nr = nonr / no;
1091+
1092+			if (nr > PLL_NR_MAX)
1093+				continue;
1094+
1095+			fref = fin_hz / nr;
1096+			if (fref < PLL_FREF_MIN || fref > PLL_FREF_MAX)
1097+				continue;
1098+
1099+			fvco = fref * nf;
1100+			if (fvco < PLL_FVCO_MIN || fvco > PLL_FVCO_MAX)
1101+				continue;
1102+
1103+			fout = fvco / no;
1104+			if (fout < PLL_FOUT_MIN || fout > PLL_FOUT_MAX)
1105+				continue;
1106+
1107+			/* select the best from all available PLL settings */
1108+			if ((no > no_out) ||
1109+			    ((no == no_out) && (nr < nr_out))) {
1110+				nr_out = nr;
1111+				nf_out = nf;
1112+				no_out = no;
1113+			}
1114+		}
1115+	}
1116+
1117+	/* output the best PLL setting */
1118+	if ((nr_out <= PLL_NR_MAX) && (no_out > 0)) {
1119+		rate_table->nr = nr_out;
1120+		rate_table->nf = nf_out;
1121+		rate_table->no = no_out;
1122+	} else {
1123+		return NULL;
1124+	}
1125+
1126+	return rate_table;
1127+}
1128+
1129+static struct rockchip_pll_rate_table *
1130+rockchip_rk3588_pll_clk_set_by_auto(struct rockchip_clk_pll *pll,
1131+				    unsigned long fin_hz,
1132+				    unsigned long fout_hz)
1133+{
1134+	struct rockchip_pll_rate_table *rate_table = rk_pll_rate_table_get();
1135+	u64 fvco_min = 2250 * MHZ, fvco_max = 4500 * MHZ;
1136+	u64 fout_min = 37 * MHZ, fout_max = 4500 * MHZ;
1137+	u32 p, m, s;
1138+	u64 fvco, fref, fout, ffrac;
1139+
1140+	if (fin_hz == 0 || fout_hz == 0 || fout_hz == fin_hz)
1141+		return NULL;
1142+
1143+	if (fout_hz > fout_max || fout_hz < fout_min)
1144+		return NULL;
1145+
1146+	if (fin_hz / MHZ * MHZ == fin_hz && fout_hz / MHZ * MHZ == fout_hz) {
1147+		for (s = 0; s <= 6; s++) {
1148+			fvco = fout_hz << s;
1149+			if (fvco < fvco_min || fvco > fvco_max)
1150+				continue;
1151+			for (p = 2; p <= 4; p++) {
1152+				for (m = 64; m <= 1023; m++) {
1153+					if (fvco == m * fin_hz / p) {
1154+						rate_table->p = p;
1155+						rate_table->m = m;
1156+						rate_table->s = s;
1157+						rate_table->k = 0;
1158+						return rate_table;
1159+					}
1160+				}
1161+			}
1162+		}
1163+		pr_err("CANNOT FIND Fout by auto, fout = %lu\n", fout_hz);
1164+	} else {
1165+		fout = (fout_hz / MHZ) * MHZ;
1166+		ffrac = (fout_hz % MHZ);
1167+		for (s = 0; s <= 6; s++) {
1168+			fvco = fout << s;
1169+			if (fvco < fvco_min || fvco > fvco_max)
1170+				continue;
1171+			for (p = 1; p <= 4; p++) {
1172+				for (m = 64; m <= 1023; m++) {
1173+					if (fvco == m * fin_hz / p) {
1174+						rate_table->p = p;
1175+						rate_table->m = m;
1176+						rate_table->s = s;
1177+						fref = fin_hz / p;
1178+						fout = (ffrac << s) * 65535;
1179+						rate_table->k = fout / fref;
1180+						return rate_table;
1181+					}
1182+				}
1183+			}
1184+		}
1185+		pr_err("CANNOT FIND Fout by auto, fout = %lu\n", fout_hz);
1186+	}
1187+	return NULL;
1188+}
1189+
1190 static const struct rockchip_pll_rate_table *rockchip_get_pll_settings(
1191 			    struct rockchip_clk_pll *pll, unsigned long rate)
1192 {
1193@@ -54,28 +394,29 @@ static const struct rockchip_pll_rate_table *rockchip_get_pll_settings(
1194 	int i;
1195 
1196 	for (i = 0; i < pll->rate_count; i++) {
1197-		if (rate == rate_table[i].rate)
1198+		if (rate == rate_table[i].rate) {
1199+			if (i < pll->sel) {
1200+				pll->scaling = rate;
1201+				return &rate_table[pll->sel];
1202+			}
1203+			pll->scaling = 0;
1204 			return &rate_table[i];
1205+		}
1206 	}
1207+	pll->scaling = 0;
1208 
1209-	return NULL;
1210+	if (pll->type == pll_rk3066)
1211+		return rockchip_rk3066_pll_clk_set_by_auto(pll, 24 * MHZ, rate);
1212+	else if (pll->type == pll_rk3588 || pll->type == pll_rk3588_core)
1213+		return rockchip_rk3588_pll_clk_set_by_auto(pll, 24 * MHZ, rate);
1214+	else
1215+		return rockchip_pll_clk_set_by_auto(pll, 24 * MHZ, rate);
1216 }
1217 
1218 static long rockchip_pll_round_rate(struct clk_hw *hw,
1219 			    unsigned long drate, unsigned long *prate)
1220 {
1221-	struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);
1222-	const struct rockchip_pll_rate_table *rate_table = pll->rate_table;
1223-	int i;
1224-
1225-	/* Assumming rate_table is in descending order */
1226-	for (i = 0; i < pll->rate_count; i++) {
1227-		if (drate >= rate_table[i].rate)
1228-			return rate_table[i].rate;
1229-	}
1230-
1231-	/* return minimum supported value */
1232-	return rate_table[i - 1].rate;
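+	/* Any rate is accepted; settings for rates missing from the table are computed later in rockchip_get_pll_settings(). */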
1233+	return drate;
1234 }
1235 
1236 /*
1237@@ -136,6 +477,30 @@ static int rockchip_rk3036_pll_wait_lock(struct rockchip_clk_pll *pll)
1238 	return ret;
1239 }
1240 
1241+static unsigned long
1242+rockchip_rk3036_pll_con_to_rate(struct rockchip_clk_pll *pll,
1243+				u32 con0, u32 con1)
1244+{
1245+	unsigned int fbdiv, postdiv1, refdiv, postdiv2;
1246+	u64 rate64 = 24000000;
1247+
1248+	fbdiv = ((con0 >> RK3036_PLLCON0_FBDIV_SHIFT) &
1249+		  RK3036_PLLCON0_FBDIV_MASK);
1250+	postdiv1 = ((con0 >> RK3036_PLLCON0_POSTDIV1_SHIFT) &
1251+		     RK3036_PLLCON0_POSTDIV1_MASK);
1252+	refdiv = ((con1 >> RK3036_PLLCON1_REFDIV_SHIFT) &
1253+		   RK3036_PLLCON1_REFDIV_MASK);
1254+	postdiv2 = ((con1 >> RK3036_PLLCON1_POSTDIV2_SHIFT) &
1255+		     RK3036_PLLCON1_POSTDIV2_MASK);
1256+
1257+	rate64 *= fbdiv;
1258+	do_div(rate64, refdiv);
1259+	do_div(rate64, postdiv1);
1260+	do_div(rate64, postdiv2);
1261+
1262+	return (unsigned long)rate64;
1263+}
1264+
1265 static void rockchip_rk3036_pll_get_params(struct rockchip_clk_pll *pll,
1266 					struct rockchip_pll_rate_table *rate)
1267 {
1268@@ -165,7 +530,10 @@ static unsigned long rockchip_rk3036_pll_recalc_rate(struct clk_hw *hw,
1269 {
1270 	struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);
1271 	struct rockchip_pll_rate_table cur;
1272-	u64 rate64 = prate;
1273+	u64 rate64 = prate, frac_rate64 = prate;
1274+
1275+	if (pll->sel && pll->scaling)
1276+		return pll->scaling;
1277 
1278 	rockchip_rk3036_pll_get_params(pll, &cur);
1279 
1280@@ -174,7 +542,7 @@ static unsigned long rockchip_rk3036_pll_recalc_rate(struct clk_hw *hw,
1281 
1282 	if (cur.dsmpd == 0) {
1283 		/* fractional mode */
1284-		u64 frac_rate64 = prate * cur.frac;
1285+		frac_rate64 *= cur.frac;
1286 
1287 		do_div(frac_rate64, cur.refdiv);
1288 		rate64 += frac_rate64 >> 24;
1289@@ -231,6 +599,8 @@ static int rockchip_rk3036_pll_set_params(struct rockchip_clk_pll *pll,
1290 	pllcon |= rate->frac << RK3036_PLLCON2_FRAC_SHIFT;
1291 	writel_relaxed(pllcon, pll->reg_base + RK3036_PLLCON(2));
1292 
1293+	rockchip_boost_disable_low(pll);
1294+
1295 	/* wait for the pll to lock */
1296 	ret = rockchip_rk3036_pll_wait_lock(pll);
1297 	if (ret) {
1298@@ -412,6 +782,9 @@ static unsigned long rockchip_rk3066_pll_recalc_rate(struct clk_hw *hw,
1299 		return prate;
1300 	}
1301 
1302+	if (pll->sel && pll->scaling)
1303+		return pll->scaling;
1304+
1305 	rockchip_rk3066_pll_get_params(pll, &cur);
1306 
1307 	rate64 *= cur.nf;
1308@@ -485,9 +858,18 @@ static int rockchip_rk3066_pll_set_rate(struct clk_hw *hw, unsigned long drate,
1309 {
1310 	struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);
1311 	const struct rockchip_pll_rate_table *rate;
1312+	unsigned long old_rate = rockchip_rk3066_pll_recalc_rate(hw, prate);
1313+	struct regmap *grf = pll->ctx->grf;
1314+	int ret;
1315 
1316-	pr_debug("%s: changing %s to %lu with a parent rate of %lu\n",
1317-		 __func__, clk_hw_get_name(hw), drate, prate);
1318+	if (IS_ERR(grf)) {
1319+		pr_debug("%s: grf regmap not available, aborting rate change\n",
1320+			 __func__);
1321+		return PTR_ERR(grf);
1322+	}
1323+
1324+	pr_debug("%s: changing %s from %lu to %lu with a parent rate of %lu\n",
1325+		 __func__, clk_hw_get_name(hw), old_rate, drate, prate);
1326 
1327 	/* Get required rate settings from table */
1328 	rate = rockchip_get_pll_settings(pll, drate);
1329@@ -497,7 +879,11 @@ static int rockchip_rk3066_pll_set_rate(struct clk_hw *hw, unsigned long drate,
1330 		return -EINVAL;
1331 	}
1332 
1333-	return rockchip_rk3066_pll_set_params(pll, rate);
1334+	ret = rockchip_rk3066_pll_set_params(pll, rate);
1335+	if (ret)
1336+		pll->scaling = 0;
1337+
1338+	return ret;
1339 }
1340 
1341 static int rockchip_rk3066_pll_enable(struct clk_hw *hw)
1342@@ -649,6 +1035,9 @@ static unsigned long rockchip_rk3399_pll_recalc_rate(struct clk_hw *hw,
1343 	struct rockchip_pll_rate_table cur;
1344 	u64 rate64 = prate;
1345 
1346+	if (pll->sel && pll->scaling)
1347+		return pll->scaling;
1348+
1349 	rockchip_rk3399_pll_get_params(pll, &cur);
1350 
1351 	rate64 *= cur.fbdiv;
1352@@ -692,6 +1081,11 @@ static int rockchip_rk3399_pll_set_params(struct rockchip_clk_pll *pll,
1353 		rate_change_remuxed = 1;
1354 	}
1355 
1356+	/* set pll power down */
1357+	writel(HIWORD_UPDATE(RK3399_PLLCON3_PWRDOWN,
1358+			     RK3399_PLLCON3_PWRDOWN, 0),
1359+	       pll->reg_base + RK3399_PLLCON(3));
1360+
1361 	/* update pll values */
1362 	writel_relaxed(HIWORD_UPDATE(rate->fbdiv, RK3399_PLLCON0_FBDIV_MASK,
1363 						  RK3399_PLLCON0_FBDIV_SHIFT),
1364@@ -715,6 +1109,11 @@ static int rockchip_rk3399_pll_set_params(struct rockchip_clk_pll *pll,
1365 					    RK3399_PLLCON3_DSMPD_SHIFT),
1366 		       pll->reg_base + RK3399_PLLCON(3));
1367 
1368+	/* set pll power up */
1369+	writel(HIWORD_UPDATE(0,
1370+			     RK3399_PLLCON3_PWRDOWN, 0),
1371+	       pll->reg_base + RK3399_PLLCON(3));
1372+
1373 	/* wait for the pll to lock */
1374 	ret = rockchip_rk3399_pll_wait_lock(pll);
1375 	if (ret) {
1376@@ -734,9 +1133,11 @@ static int rockchip_rk3399_pll_set_rate(struct clk_hw *hw, unsigned long drate,
1377 {
1378 	struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);
1379 	const struct rockchip_pll_rate_table *rate;
1380+	unsigned long old_rate = rockchip_rk3399_pll_recalc_rate(hw, prate);
1381+	int ret;
1382 
1383-	pr_debug("%s: changing %s to %lu with a parent rate of %lu\n",
1384-		 __func__, __clk_get_name(hw->clk), drate, prate);
1385+	pr_debug("%s: changing %s from %lu to %lu with a parent rate of %lu\n",
1386+		 __func__, __clk_get_name(hw->clk), old_rate, drate, prate);
1387 
1388 	/* Get required rate settings from table */
1389 	rate = rockchip_get_pll_settings(pll, drate);
1390@@ -746,7 +1147,11 @@ static int rockchip_rk3399_pll_set_rate(struct clk_hw *hw, unsigned long drate,
1391 		return -EINVAL;
1392 	}
1393 
1394-	return rockchip_rk3399_pll_set_params(pll, rate);
1395+	ret = rockchip_rk3399_pll_set_params(pll, rate);
1396+	if (ret)
1397+		pll->scaling = 0;
1398+
1399+	return ret;
1400 }
1401 
1402 static int rockchip_rk3399_pll_enable(struct clk_hw *hw)
1403@@ -842,6 +1247,307 @@ static const struct clk_ops rockchip_rk3399_pll_clk_ops = {
1404 	.init = rockchip_rk3399_pll_init,
1405 };
1406 
1407+/*
1408+ * PLL used in RK3588
1409+ */
1410+
1411+#define RK3588_PLLCON(i)		(i * 0x4)
1412+#define RK3588_PLLCON0_M_MASK		0x3ff
1413+#define RK3588_PLLCON0_M_SHIFT		0
1414+#define RK3588_PLLCON1_P_MASK		0x3f
1415+#define RK3588_PLLCON1_P_SHIFT		0
1416+#define RK3588_PLLCON1_S_MASK		0x7
1417+#define RK3588_PLLCON1_S_SHIFT		6
1418+#define RK3588_PLLCON2_K_MASK		0xffff
1419+#define RK3588_PLLCON2_K_SHIFT		0
1420+#define RK3588_PLLCON1_PWRDOWN		BIT(13)
1421+#define RK3588_PLLCON6_LOCK_STATUS	BIT(15)
1422+
1423+static int rockchip_rk3588_pll_wait_lock(struct rockchip_clk_pll *pll)
1424+{
1425+	u32 pllcon;
1426+	int ret;
1427+
1428+	/*
1429+	 * Lock time typical 250, max 500 input clock cycles @24MHz
1430+	 * So define a very safe maximum of 1000us, meaning 24000 cycles.
1431+	 */
1432+	ret = readl_relaxed_poll_timeout(pll->reg_base + RK3588_PLLCON(6),
1433+					 pllcon,
1434+					 pllcon & RK3588_PLLCON6_LOCK_STATUS,
1435+					 0, 1000);
1436+	if (ret)
1437+		pr_err("%s: timeout waiting for pll to lock\n", __func__);
1438+
1439+	return ret;
1440+}
1441+
1442+static void rockchip_rk3588_pll_get_params(struct rockchip_clk_pll *pll,
1443+					struct rockchip_pll_rate_table *rate)
1444+{
1445+	u32 pllcon;
1446+
1447+	pllcon = readl_relaxed(pll->reg_base + RK3588_PLLCON(0));
1448+	rate->m = ((pllcon >> RK3588_PLLCON0_M_SHIFT)
1449+				& RK3588_PLLCON0_M_MASK);
1450+
1451+	pllcon = readl_relaxed(pll->reg_base + RK3588_PLLCON(1));
1452+	rate->p = ((pllcon >> RK3588_PLLCON1_P_SHIFT)
1453+				& RK3588_PLLCON1_P_MASK);
1454+	rate->s = ((pllcon >> RK3588_PLLCON1_S_SHIFT)
1455+				& RK3588_PLLCON1_S_MASK);
1456+
1457+	pllcon = readl_relaxed(pll->reg_base + RK3588_PLLCON(2));
1458+	rate->k = ((pllcon >> RK3588_PLLCON2_K_SHIFT)
1459+				& RK3588_PLLCON2_K_MASK);
1460+}
1461+
1462+static unsigned long rockchip_rk3588_pll_recalc_rate(struct clk_hw *hw,
1463+						     unsigned long prate)
1464+{
1465+	struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);
1466+	struct rockchip_pll_rate_table cur;
1467+	u64 rate64 = prate, postdiv;
1468+
1469+	if (pll->sel && pll->scaling)
1470+		return pll->scaling;
1471+
1472+	rockchip_rk3588_pll_get_params(pll, &cur);
1473+
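+	/* rate = prate * (m + k / 65535) / p, shifted right by s */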
1474+	rate64 *= cur.m;
1475+	do_div(rate64, cur.p);
1476+
1477+	if (cur.k) {
1478+		/* fractional mode */
1479+		u64 frac_rate64 = prate * cur.k;
1480+
1481+		postdiv = cur.p * 65535;
1482+		do_div(frac_rate64, postdiv);
1483+		rate64 += frac_rate64;
1484+	}
1485+	rate64 = rate64 >> cur.s;
1486+
1487+	return (unsigned long)rate64;
1488+}
1489+
1490+static int rockchip_rk3588_pll_set_params(struct rockchip_clk_pll *pll,
1491+				const struct rockchip_pll_rate_table *rate)
1492+{
1493+	const struct clk_ops *pll_mux_ops = pll->pll_mux_ops;
1494+	struct clk_mux *pll_mux = &pll->pll_mux;
1495+	struct rockchip_pll_rate_table cur;
1496+	int rate_change_remuxed = 0;
1497+	int cur_parent;
1498+	int ret;
1499+
1500+	pr_debug("%s: rate settings for %lu p: %d, m: %d, s: %d, k: %d\n",
1501+		__func__, rate->rate, rate->p, rate->m, rate->s, rate->k);
1502+
1503+	rockchip_rk3588_pll_get_params(pll, &cur);
1504+	cur.rate = 0;
1505+
1506+	if (pll->type == pll_rk3588) {
1507+		cur_parent = pll_mux_ops->get_parent(&pll_mux->hw);
1508+		if (cur_parent == PLL_MODE_NORM) {
1509+			pll_mux_ops->set_parent(&pll_mux->hw, PLL_MODE_SLOW);
1510+			rate_change_remuxed = 1;
1511+		}
1512+	}
1513+
1514+	/* set pll power down */
1515+	writel(HIWORD_UPDATE(RK3588_PLLCON1_PWRDOWN,
1516+			     RK3588_PLLCON1_PWRDOWN, 0),
1517+	       pll->reg_base + RK3588_PLLCON(1));
1518+
1519+	/* update pll values */
1520+	writel_relaxed(HIWORD_UPDATE(rate->m, RK3588_PLLCON0_M_MASK,
1521+						  RK3588_PLLCON0_M_SHIFT),
1522+		       pll->reg_base + RK3588_PLLCON(0));
1523+
1524+	writel_relaxed(HIWORD_UPDATE(rate->p, RK3588_PLLCON1_P_MASK,
1525+						   RK3588_PLLCON1_P_SHIFT) |
1526+		       HIWORD_UPDATE(rate->s, RK3588_PLLCON1_S_MASK,
1527+						     RK3588_PLLCON1_S_SHIFT),
1528+		       pll->reg_base + RK3588_PLLCON(1));
1529+
1530+	writel_relaxed(HIWORD_UPDATE(rate->k, RK3588_PLLCON2_K_MASK,
1531+				     RK3588_PLLCON2_K_SHIFT),
1532+		       pll->reg_base + RK3588_PLLCON(2));
1533+
1534+	/* set pll power up */
1535+	writel(HIWORD_UPDATE(0,
1536+			     RK3588_PLLCON1_PWRDOWN, 0),
1537+	       pll->reg_base + RK3588_PLLCON(1));
1538+
1539+	/* wait for the pll to lock */
1540+	ret = rockchip_rk3588_pll_wait_lock(pll);
1541+	if (ret) {
1542+		pr_warn("%s: pll update unsuccessful, trying to restore old params\n",
1543+			__func__);
1544+		rockchip_rk3588_pll_set_params(pll, &cur);
1545+	}
1546+
1547+	if ((pll->type == pll_rk3588) && rate_change_remuxed)
1548+		pll_mux_ops->set_parent(&pll_mux->hw, PLL_MODE_NORM);
1549+
1550+	return ret;
1551+}
1552+
1553+static int rockchip_rk3588_pll_set_rate(struct clk_hw *hw, unsigned long drate,
1554+					unsigned long prate)
1555+{
1556+	struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);
1557+	const struct rockchip_pll_rate_table *rate;
1558+	unsigned long old_rate = rockchip_rk3588_pll_recalc_rate(hw, prate);
1559+	int ret;
1560+
1561+	pr_debug("%s: changing %s from %lu to %lu with a parent rate of %lu\n",
1562+		 __func__, __clk_get_name(hw->clk), old_rate, drate, prate);
1563+
1564+	/* Get required rate settings from table */
1565+	rate = rockchip_get_pll_settings(pll, drate);
1566+	if (!rate) {
1567+		pr_err("%s: Invalid rate : %lu for pll clk %s\n", __func__,
1568+			drate, __clk_get_name(hw->clk));
1569+		return -EINVAL;
1570+	}
1571+
1572+	ret = rockchip_rk3588_pll_set_params(pll, rate);
1573+	if (ret)
1574+		pll->scaling = 0;
1575+
1576+	return ret;
1577+}
1578+
1579+static int rockchip_rk3588_pll_enable(struct clk_hw *hw)
1580+{
1581+	struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);
1582+
1583+	writel(HIWORD_UPDATE(0, RK3588_PLLCON1_PWRDOWN, 0),
1584+	       pll->reg_base + RK3588_PLLCON(1));
1585+	rockchip_rk3588_pll_wait_lock(pll);
1586+
1587+	return 0;
1588+}
1589+
1590+static void rockchip_rk3588_pll_disable(struct clk_hw *hw)
1591+{
1592+	struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);
1593+
1594+	writel(HIWORD_UPDATE(RK3588_PLLCON1_PWRDOWN,
1595+			     RK3588_PLLCON1_PWRDOWN, 0),
1596+	       pll->reg_base + RK3588_PLLCON(1));
1597+}
1598+
1599+static int rockchip_rk3588_pll_is_enabled(struct clk_hw *hw)
1600+{
1601+	struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);
1602+	u32 pllcon = readl(pll->reg_base + RK3588_PLLCON(1));
1603+
1604+	return !(pllcon & RK3588_PLLCON1_PWRDOWN);
1605+}
1606+
1607+static int rockchip_rk3588_pll_init(struct clk_hw *hw)
1608+{
1609+	struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);
1610+
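+	/* Nothing to synchronise for RK3588 yet; both branches return 0. */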
1611+	if (!(pll->flags & ROCKCHIP_PLL_SYNC_RATE))
1612+		return 0;
1613+
1614+	return 0;
1615+}
1616+
1617+static const struct clk_ops rockchip_rk3588_pll_clk_norate_ops = {
1618+	.recalc_rate = rockchip_rk3588_pll_recalc_rate,
1619+	.enable = rockchip_rk3588_pll_enable,
1620+	.disable = rockchip_rk3588_pll_disable,
1621+	.is_enabled = rockchip_rk3588_pll_is_enabled,
1622+};
1623+
1624+static const struct clk_ops rockchip_rk3588_pll_clk_ops = {
1625+	.recalc_rate = rockchip_rk3588_pll_recalc_rate,
1626+	.round_rate = rockchip_pll_round_rate,
1627+	.set_rate = rockchip_rk3588_pll_set_rate,
1628+	.enable = rockchip_rk3588_pll_enable,
1629+	.disable = rockchip_rk3588_pll_disable,
1630+	.is_enabled = rockchip_rk3588_pll_is_enabled,
1631+	.init = rockchip_rk3588_pll_init,
1632+};
1633+
1634+#ifdef CONFIG_ROCKCHIP_CLK_COMPENSATION
1635+int rockchip_pll_clk_compensation(struct clk *clk, int ppm)
1636+{
1637+	struct clk *parent = clk_get_parent(clk);
1638+	struct rockchip_clk_pll *pll;
1639+	static u32 frac, fbdiv;
1640+	bool negative;
1641+	u32 pllcon, pllcon0, pllcon2, fbdiv_mask, frac_mask, frac_shift;
1642+	u64 fracdiv, m, n;
1643+
1644+	if ((ppm > 1000) || (ppm < -1000))
1645+		return -EINVAL;
1646+
1647+	if (IS_ERR_OR_NULL(parent))
1648+		return -EINVAL;
1649+
1650+	pll = to_rockchip_clk_pll(__clk_get_hw(parent));
1651+	if (!pll)
1652+		return -EINVAL;
1653+
1654+	switch (pll->type) {
1655+	case pll_rk3036:
1656+	case pll_rk3328:
1657+		pllcon0 = RK3036_PLLCON(0);
1658+		pllcon2 = RK3036_PLLCON(2);
1659+		fbdiv_mask = RK3036_PLLCON0_FBDIV_MASK;
1660+		frac_mask = RK3036_PLLCON2_FRAC_MASK;
1661+		frac_shift = RK3036_PLLCON2_FRAC_SHIFT;
1662+		break;
1663+	case pll_rk3066:
1664+		return -EINVAL;
1665+	case pll_rk3399:
1666+		pllcon0 = RK3399_PLLCON(0);
1667+		pllcon2 = RK3399_PLLCON(2);
1668+		fbdiv_mask = RK3399_PLLCON0_FBDIV_MASK;
1669+		frac_mask = RK3399_PLLCON2_FRAC_MASK;
1670+		frac_shift = RK3399_PLLCON2_FRAC_SHIFT;
1671+		break;
1672+	default:
1673+		return -EINVAL;
1674+	}
1675+
1676+	negative = !!(ppm & BIT(31));
1677+	ppm = negative ? ~ppm + 1 : ppm;
1678+
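+	/* Capture the unmodified frac/fbdiv once; later calls compensate relative to them. */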
1679+	if (!frac) {
1680+		frac = readl_relaxed(pll->reg_base + pllcon2) & frac_mask;
1681+		fbdiv = readl_relaxed(pll->reg_base + pllcon0) & fbdiv_mask;
1682+	}
1683+
1684+	/*
1685+	 *   delta frac                 frac          ppm
1686+	 * -------------- = (fbdiv + ----------) * ---------
1687+	 *    1 << 24                 1 << 24       1000000
1688+	 *
1689+	 */
1690+	m = div64_u64((uint64_t)frac * ppm, 1000000);
1691+	n = div64_u64((uint64_t)ppm << 24, 1000000) * fbdiv;
1692+
1693+	fracdiv = negative ? frac - (m + n) : frac + (m + n);
1694+
1695+	if (!frac || fracdiv > frac_mask)
1696+		return -EINVAL;
1697+
1698+	pllcon = readl_relaxed(pll->reg_base + pllcon2);
1699+	pllcon &= ~(frac_mask << frac_shift);
1700+	pllcon |= fracdiv << frac_shift;
1701+	writel_relaxed(pllcon, pll->reg_base + pllcon2);
1702+
1703+	return  0;
1704+}
1705+EXPORT_SYMBOL(rockchip_pll_clk_compensation);
1706+#endif
1707+
1708 /*
1709  * Common registering of pll clocks
1710  */
1711@@ -890,7 +1596,8 @@ struct clk *rockchip_clk_register_pll(struct rockchip_clk_provider *ctx,
1712 	if (pll_type == pll_rk3036 ||
1713 	    pll_type == pll_rk3066 ||
1714 	    pll_type == pll_rk3328 ||
1715-	    pll_type == pll_rk3399)
1716+	    pll_type == pll_rk3399 ||
1717+	    pll_type == pll_rk3588)
1718 		pll_mux->flags |= CLK_MUX_HIWORD_MASK;
1719 
1720 	/* the actual muxing is xin24m, pll-output, xin32k */
1721@@ -914,8 +1621,12 @@ struct clk *rockchip_clk_register_pll(struct rockchip_clk_provider *ctx,
1722 	/* now create the actual pll */
1723 	init.name = pll_name;
1724 
1725+#ifndef CONFIG_ROCKCHIP_LOW_PERFORMANCE
1726 	/* keep all plls untouched for now */
1727 	init.flags = flags | CLK_IGNORE_UNUSED;
1728+#else
1729+	init.flags = flags;
1730+#endif
1731 
1732 	init.parent_names = &parent_names[0];
1733 	init.num_parents = 1;
1734@@ -940,7 +1651,7 @@ struct clk *rockchip_clk_register_pll(struct rockchip_clk_provider *ctx,
1735 	switch (pll_type) {
1736 	case pll_rk3036:
1737 	case pll_rk3328:
1738-		if (!pll->rate_table)
1739+		if (!pll->rate_table || IS_ERR(ctx->grf))
1740 			init.ops = &rockchip_rk3036_pll_clk_norate_ops;
1741 		else
1742 			init.ops = &rockchip_rk3036_pll_clk_ops;
1743@@ -957,6 +1668,14 @@ struct clk *rockchip_clk_register_pll(struct rockchip_clk_provider *ctx,
1744 		else
1745 			init.ops = &rockchip_rk3399_pll_clk_ops;
1746 		break;
1747+	case pll_rk3588:
1748+	case pll_rk3588_core:
1749+		if (!pll->rate_table)
1750+			init.ops = &rockchip_rk3588_pll_clk_norate_ops;
1751+		else
1752+			init.ops = &rockchip_rk3588_pll_clk_ops;
1753+		init.flags = flags;
1754+		break;
1755 	default:
1756 		pr_warn("%s: Unknown pll type for pll clk %s\n",
1757 			__func__, name);
1758@@ -988,3 +1707,316 @@ struct clk *rockchip_clk_register_pll(struct rockchip_clk_provider *ctx,
1759 	kfree(pll);
1760 	return mux_clk;
1761 }
1762+
1763+static unsigned long rockchip_pll_con_to_rate(struct rockchip_clk_pll *pll,
1764+					      u32 con0, u32 con1)
1765+{
1766+	switch (pll->type) {
1767+	case pll_rk3036:
1768+	case pll_rk3328:
1769+		return rockchip_rk3036_pll_con_to_rate(pll, con0, con1);
1770+	case pll_rk3066:
1771+		break;
1772+	case pll_rk3399:
1773+		break;
1774+	default:
1775+		pr_warn("%s: Unknown pll type\n", __func__);
1776+	}
1777+
1778+	return 0;
1779+}
1780+
1781+void rockchip_boost_init(struct clk_hw *hw)
1782+{
1783+	struct rockchip_clk_pll *pll;
1784+	struct device_node *np;
1785+	u32 value, con0, con1;
1786+
1787+	if (!hw)
1788+		return;
1789+	pll = to_rockchip_clk_pll(hw);
1790+	np = of_parse_phandle(pll->ctx->cru_node, "rockchip,boost", 0);
1791+	if (!np) {
1792+		pr_debug("%s: failed to get boost np\n", __func__);
1793+		return;
1794+	}
1795+	pll->boost = syscon_node_to_regmap(np);
1796+	if (IS_ERR(pll->boost)) {
1797+		pr_debug("%s: failed to get boost regmap\n", __func__);
1798+		return;
1799+	}
1800+
1801+	if (!of_property_read_u32(np, "rockchip,boost-low-con0", &con0) &&
1802+	    !of_property_read_u32(np, "rockchip,boost-low-con1", &con1)) {
1803+		pr_debug("boost-low-con=0x%x 0x%x\n", con0, con1);
1804+		regmap_write(pll->boost, BOOST_PLL_L_CON(0),
1805+			     HIWORD_UPDATE(con0, BOOST_PLL_CON_MASK, 0));
1806+		regmap_write(pll->boost, BOOST_PLL_L_CON(1),
1807+			     HIWORD_UPDATE(con1, BOOST_PLL_CON_MASK, 0));
1808+		pll->boost_low_rate = rockchip_pll_con_to_rate(pll, con0,
1809+							       con1);
1810+		pr_debug("boost-low-rate=%lu\n", pll->boost_low_rate);
1811+	}
1812+	if (!of_property_read_u32(np, "rockchip,boost-high-con0", &con0) &&
1813+	    !of_property_read_u32(np, "rockchip,boost-high-con1", &con1)) {
1814+		pr_debug("boost-high-con=0x%x 0x%x\n", con0, con1);
1815+		regmap_write(pll->boost, BOOST_PLL_H_CON(0),
1816+			     HIWORD_UPDATE(con0, BOOST_PLL_CON_MASK, 0));
1817+		regmap_write(pll->boost, BOOST_PLL_H_CON(1),
1818+			     HIWORD_UPDATE(con1, BOOST_PLL_CON_MASK, 0));
1819+		pll->boost_high_rate = rockchip_pll_con_to_rate(pll, con0,
1820+								con1);
1821+		pr_debug("boost-high-rate=%lu\n", pll->boost_high_rate);
1822+	}
1823+	if (!of_property_read_u32(np, "rockchip,boost-backup-pll", &value)) {
1824+		pr_debug("boost-backup-pll=0x%x\n", value);
1825+		regmap_write(pll->boost, BOOST_CLK_CON,
1826+			     HIWORD_UPDATE(value, BOOST_BACKUP_PLL_MASK,
1827+					   BOOST_BACKUP_PLL_SHIFT));
1828+	}
1829+	if (!of_property_read_u32(np, "rockchip,boost-backup-pll-usage",
1830+				  &pll->boost_backup_pll_usage)) {
1831+		pr_debug("boost-backup-pll-usage=0x%x\n",
1832+			 pll->boost_backup_pll_usage);
1833+		regmap_write(pll->boost, BOOST_CLK_CON,
1834+			     HIWORD_UPDATE(pll->boost_backup_pll_usage,
1835+					   BOOST_BACKUP_PLL_USAGE_MASK,
1836+					   BOOST_BACKUP_PLL_USAGE_SHIFT));
1837+	}
1838+	if (!of_property_read_u32(np, "rockchip,boost-switch-threshold",
1839+				  &value)) {
1840+		pr_debug("boost-switch-threshold=0x%x\n", value);
1841+		regmap_write(pll->boost, BOOST_SWITCH_THRESHOLD, value);
1842+	}
1843+	if (!of_property_read_u32(np, "rockchip,boost-statis-threshold",
1844+				  &value)) {
1845+		pr_debug("boost-statis-threshold=0x%x\n", value);
1846+		regmap_write(pll->boost, BOOST_STATIS_THRESHOLD, value);
1847+	}
1848+	if (!of_property_read_u32(np, "rockchip,boost-statis-enable",
1849+				  &value)) {
1850+		pr_debug("boost-statis-enable=0x%x\n", value);
1851+		regmap_write(pll->boost, BOOST_BOOST_CON,
1852+			     HIWORD_UPDATE(value, BOOST_STATIS_ENABLE_MASK,
1853+					   BOOST_STATIS_ENABLE_SHIFT));
1854+	}
1855+	if (!of_property_read_u32(np, "rockchip,boost-enable", &value)) {
1856+		pr_debug("boost-enable=0x%x\n", value);
1857+		regmap_write(pll->boost, BOOST_BOOST_CON,
1858+			     HIWORD_UPDATE(value, BOOST_ENABLE_MASK,
1859+					   BOOST_ENABLE_SHIFT));
1860+		if (value)
1861+			pll->boost_enabled = true;
1862+	}
1863+#ifdef CONFIG_DEBUG_FS
1864+	if (pll->boost_enabled) {
1865+		mutex_lock(&clk_boost_lock);
1866+		hlist_add_head(&pll->debug_node, &clk_boost_list);
1867+		mutex_unlock(&clk_boost_lock);
1868+	}
1869+#endif
1870+}
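/*
 * Sketch of the HIWORD_UPDATE write pattern used throughout the boost
 * setup above.  Rockchip GRF/CRU registers carry a write-enable mask in
 * the upper 16 bits, so a field can be changed without a read-modify-write.
 * The macro here is a simplified local copy for illustration only.
 */
#include <stdint.h>
#include <stdio.h>

#define HIWORD_UPDATE(val, mask, shift) \
	(((val) << (shift)) | ((mask) << ((shift) + 16)))

int main(void)
{
	/* enable the boost low-freq path: a 1-bit field at bit 3 */
	uint32_t v = HIWORD_UPDATE(1, 0x1, 3);

	/* low half carries the value, high half the write-enable mask */
	printf("write 0x%08x (value bits 0x%04x, mask bits 0x%04x)\n",
	       v, v & 0xffff, v >> 16);
	return 0;
}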
1871+
1872+void rockchip_boost_enable_recovery_sw_low(struct clk_hw *hw)
1873+{
1874+	struct rockchip_clk_pll *pll;
1875+	unsigned int val;
1876+
1877+	if (!hw)
1878+		return;
1879+	pll = to_rockchip_clk_pll(hw);
1880+	if (!pll->boost_enabled)
1881+		return;
1882+
1883+	regmap_write(pll->boost, BOOST_BOOST_CON,
1884+		     HIWORD_UPDATE(1, BOOST_RECOVERY_MASK,
1885+				   BOOST_RECOVERY_SHIFT));
1886+	do {
1887+		regmap_read(pll->boost, BOOST_FSM_STATUS, &val);
1888+	} while (!(val & BOOST_BUSY_STATE));
1889+
1890+	regmap_write(pll->boost, BOOST_BOOST_CON,
1891+		     HIWORD_UPDATE(1, BOOST_SW_CTRL_MASK,
1892+				   BOOST_SW_CTRL_SHIFT) |
1893+		     HIWORD_UPDATE(1, BOOST_LOW_FREQ_EN_MASK,
1894+				   BOOST_LOW_FREQ_EN_SHIFT));
1895+}
1896+
1897+static void rockchip_boost_disable_low(struct rockchip_clk_pll *pll)
1898+{
1899+	if (!pll->boost_enabled)
1900+		return;
1901+
1902+	regmap_write(pll->boost, BOOST_BOOST_CON,
1903+		     HIWORD_UPDATE(0, BOOST_LOW_FREQ_EN_MASK,
1904+				   BOOST_LOW_FREQ_EN_SHIFT));
1905+}
1906+
1907+void rockchip_boost_disable_recovery_sw(struct clk_hw *hw)
1908+{
1909+	struct rockchip_clk_pll *pll;
1910+
1911+	if (!hw)
1912+		return;
1913+	pll = to_rockchip_clk_pll(hw);
1914+	if (!pll->boost_enabled)
1915+		return;
1916+
1917+	regmap_write(pll->boost, BOOST_BOOST_CON,
1918+		     HIWORD_UPDATE(0, BOOST_RECOVERY_MASK,
1919+				   BOOST_RECOVERY_SHIFT));
1920+	regmap_write(pll->boost, BOOST_BOOST_CON,
1921+		     HIWORD_UPDATE(0, BOOST_SW_CTRL_MASK,
1922+				   BOOST_SW_CTRL_SHIFT));
1923+}
1924+
1925+void rockchip_boost_add_core_div(struct clk_hw *hw, unsigned long prate)
1926+{
1927+	struct rockchip_clk_pll *pll;
1928+	unsigned int div;
1929+
1930+	if (!hw)
1931+		return;
1932+	pll = to_rockchip_clk_pll(hw);
1933+	if (!pll->boost_enabled || pll->boost_backup_pll_rate == prate)
1934+		return;
1935+
1936+	/* todo */
1937+	if (pll->boost_backup_pll_usage == BOOST_BACKUP_PLL_USAGE_TARGET)
1938+		return;
1939+	/*
1940+	 * cpu clock rate should be less than or equal to
1941+	 * low rate when change pll rate in boost module
1942+	 */
1943+	if (pll->boost_low_rate && prate > pll->boost_low_rate) {
1944+		div = DIV_ROUND_UP(prate, pll->boost_low_rate) - 1;
1945+		regmap_write(pll->boost, BOOST_CLK_CON,
1946+			     HIWORD_UPDATE(div, BOOST_CORE_DIV_MASK,
1947+					   BOOST_CORE_DIV_SHIFT));
1948+		pll->boost_backup_pll_rate = prate;
1949+	}
1950+}
1951+
1952+#ifdef CONFIG_DEBUG_FS
1953+#include <linux/debugfs.h>
1954+
1955+#ifndef MODULE
1956+static int boost_summary_show(struct seq_file *s, void *data)
1957+{
1958+	struct rockchip_clk_pll *pll = (struct rockchip_clk_pll *)s->private;
1959+	u32 boost_count = 0;
1960+	u32 freq_cnt0 = 0, freq_cnt1 = 0;
1961+	u64 freq_cnt = 0, high_freq_time = 0;
1962+	u32 short_count = 0, short_threshold = 0;
1963+	u32 interval_time = 0;
1964+
1965+	seq_puts(s, " device    boost_count   high_freq_count  high_freq_time  short_count  short_threshold  interval_count\n");
1966+	seq_puts(s, "------------------------------------------------------------------------------------------------------\n");
1967+	seq_printf(s, " %s\n", clk_hw_get_name(&pll->hw));
1968+
1969+	regmap_read(pll->boost, BOOST_SWITCH_CNT, &boost_count);
1970+
1971+	regmap_read(pll->boost, BOOST_HIGH_PERF_CNT0, &freq_cnt0);
1972+	regmap_read(pll->boost, BOOST_HIGH_PERF_CNT1, &freq_cnt1);
1973+	freq_cnt = ((u64)freq_cnt1 << 32) + (u64)freq_cnt0;
1974+	high_freq_time = freq_cnt;
1975+	do_div(high_freq_time, 24);
1976+
1977+	regmap_read(pll->boost, BOOST_SHORT_SWITCH_CNT, &short_count);
1978+	regmap_read(pll->boost, BOOST_STATIS_THRESHOLD, &short_threshold);
1979+	regmap_read(pll->boost, BOOST_SWITCH_THRESHOLD, &interval_time);
1980+
1981+	seq_printf(s, "%22u %17llu %15llu %12u %16u %15u\n",
1982+		   boost_count, freq_cnt, high_freq_time, short_count,
1983+		   short_threshold, interval_time);
1984+
1985+	return 0;
1986+}
1987+
1988+static int boost_summary_open(struct inode *inode, struct file *file)
1989+{
1990+	return single_open(file, boost_summary_show, inode->i_private);
1991+}
1992+
1993+static const struct file_operations boost_summary_fops = {
1994+	.open		= boost_summary_open,
1995+	.read		= seq_read,
1996+	.llseek		= seq_lseek,
1997+	.release	= single_release,
1998+};
1999+
2000+static int boost_config_show(struct seq_file *s, void *data)
2001+{
2002+	struct rockchip_clk_pll *pll = (struct rockchip_clk_pll *)s->private;
2003+
2004+	seq_printf(s, "boost_enabled:   %d\n", pll->boost_enabled);
2005+	seq_printf(s, "boost_low_rate:  %lu\n", pll->boost_low_rate);
2006+	seq_printf(s, "boost_high_rate: %lu\n", pll->boost_high_rate);
2007+
2008+	return 0;
2009+}
2010+
2011+static int boost_config_open(struct inode *inode, struct file *file)
2012+{
2013+	return single_open(file, boost_config_show, inode->i_private);
2014+}
2015+
2016+static const struct file_operations boost_config_fops = {
2017+	.open		= boost_config_open,
2018+	.read		= seq_read,
2019+	.llseek		= seq_lseek,
2020+	.release	= single_release,
2021+};
2022+
2023+static int boost_debug_create_one(struct rockchip_clk_pll *pll,
2024+				  struct dentry *rootdir)
2025+{
2026+	struct dentry *pdentry, *d;
2027+
2028+	pdentry = debugfs_lookup(clk_hw_get_name(&pll->hw), rootdir);
2029+	if (!pdentry) {
2030+		pr_err("%s: failed to lookup %s dentry\n", __func__,
2031+		       clk_hw_get_name(&pll->hw));
2032+		return -ENOMEM;
2033+	}
2034+
2035+	d = debugfs_create_file("boost_summary", 0444, pdentry,
2036+				pll, &boost_summary_fops);
2037+	if (!d) {
2038+		pr_err("%s: failed to create boost_summary file\n", __func__);
2039+		return -ENOMEM;
2040+	}
2041+
2042+	d = debugfs_create_file("boost_config", 0444, pdentry,
2043+				pll, &boost_config_fops);
2044+	if (!d) {
2045+		pr_err("%s: failed to create boost_config file\n", __func__);
2046+		return -ENOMEM;
2047+	}
2048+
2049+	return 0;
2050+}
2051+
2052+static int __init boost_debug_init(void)
2053+{
2054+	struct rockchip_clk_pll *pll;
2055+	struct dentry *rootdir;
2056+
2057+	rootdir = debugfs_lookup("clk", NULL);
2058+	if (!rootdir) {
2059+		pr_err("%s: failed to lookup clk dentry\n", __func__);
2060+		return -ENOMEM;
2061+	}
2062+
2063+	mutex_lock(&clk_boost_lock);
2064+
2065+	hlist_for_each_entry(pll, &clk_boost_list, debug_node)
2066+		boost_debug_create_one(pll, rootdir);
2067+
2068+	mutex_unlock(&clk_boost_lock);
2069+
2070+	return 0;
2071+}
2072+late_initcall(boost_debug_init);
2073+#endif /* MODULE */
2074+#endif /* CONFIG_DEBUG_FS */
2075diff --git a/drivers/clk/rockchip/clk.c b/drivers/clk/rockchip/clk.c
2076index b443169dd..6c8e47067 100644
2077--- a/drivers/clk/rockchip/clk.c
2078+++ b/drivers/clk/rockchip/clk.c
2079@@ -38,6 +38,7 @@ static struct clk *rockchip_clk_register_branch(const char *name,
2080 		const char *const *parent_names, u8 num_parents,
2081 		void __iomem *base,
2082 		int muxdiv_offset, u8 mux_shift, u8 mux_width, u8 mux_flags,
2083+		u32 *mux_table,
2084 		int div_offset, u8 div_shift, u8 div_width, u8 div_flags,
2085 		struct clk_div_table *div_table, int gate_offset,
2086 		u8 gate_shift, u8 gate_flags, unsigned long flags,
2087@@ -60,6 +61,7 @@ static struct clk *rockchip_clk_register_branch(const char *name,
2088 		mux->shift = mux_shift;
2089 		mux->mask = BIT(mux_width) - 1;
2090 		mux->flags = mux_flags;
2091+		mux->table = mux_table;
2092 		mux->lock = lock;
2093 		mux_ops = (mux_flags & CLK_MUX_READ_ONLY) ? &clk_mux_ro_ops
2094 							: &clk_mux_ops;
2095@@ -182,12 +184,43 @@ static void rockchip_fractional_approximation(struct clk_hw *hw,
2096 	unsigned long p_rate, p_parent_rate;
2097 	struct clk_hw *p_parent;
2098 	unsigned long scale;
2099+	u32 div;
2100 
2101 	p_rate = clk_hw_get_rate(clk_hw_get_parent(hw));
2102-	if ((rate * 20 > p_rate) && (p_rate % rate != 0)) {
2103+	if (((rate * 20 > p_rate) && (p_rate % rate != 0)) ||
2104+	    (fd->max_prate && fd->max_prate < p_rate)) {
2105 		p_parent = clk_hw_get_parent(clk_hw_get_parent(hw));
2106-		p_parent_rate = clk_hw_get_rate(p_parent);
2107-		*parent_rate = p_parent_rate;
2108+		if (!p_parent) {
2109+			*parent_rate = p_rate;
2110+		} else {
2111+			p_parent_rate = clk_hw_get_rate(p_parent);
2112+			*parent_rate = p_parent_rate;
2113+			if (fd->max_prate && p_parent_rate > fd->max_prate) {
2114+				div = DIV_ROUND_UP(p_parent_rate,
2115+						   fd->max_prate);
2116+				*parent_rate = p_parent_rate / div;
2117+			}
2118+		}
2119+
2120+		if (*parent_rate < rate * 20) {
2121+			/*
2122+			 * When the fractional divider is used as a
2123+			 * plain integer divider, the 20x parent-rate
2124+			 * limit is not needed.
2125+			 */
2126+			if (!(*parent_rate % rate)) {
2127+				*m = 1;
2128+				*n = *parent_rate / rate;
2129+				return;
2130+			} else if (!(fd->flags & CLK_FRAC_DIVIDER_NO_LIMIT)) {
2131+				pr_warn("%s: p_rate(%lu) is lower than rate(%lu)*20, use integer or half-div\n",
2132+					clk_hw_get_name(hw),
2133+					*parent_rate, rate);
2134+				*m = 0;
2135+				*n = 1;
2136+				return;
2137+			}
2138+		}
2139 	}
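/*
 * Sketch of the m/n selection added above, shown as plain userspace C and
 * kept separate from the patch hunk.  The fractional divider produces
 * rate = parent * m / n; when the parent divides the requested rate
 * exactly, the 20x headroom rule is skipped and an integer ratio (m = 1)
 * is used.  The rates below are assumptions for illustration.
 */
#include <stdio.h>

int main(void)
{
	unsigned long parent_rate = 594000000;	/* assumed parent clock */
	unsigned long rate = 74250000;		/* requested clock */
	unsigned long m, n;

	if (!(parent_rate % rate)) {
		/* exact integer ratio, no 20x limit needed */
		m = 1;
		n = parent_rate / rate;
	} else {
		/* otherwise fall back to the best-rational approximation path */
		m = 0;
		n = 1;
	}

	printf("m=%lu n=%lu -> %lu Hz\n", m, n,
	       n ? parent_rate * m / n : 0);
	return 0;
}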
2140 
2141 	/*
2142@@ -210,7 +243,7 @@ static struct clk *rockchip_clk_register_frac_branch(
2143 		void __iomem *base, int muxdiv_offset, u8 div_flags,
2144 		int gate_offset, u8 gate_shift, u8 gate_flags,
2145 		unsigned long flags, struct rockchip_clk_branch *child,
2146-		spinlock_t *lock)
2147+		unsigned long max_prate, spinlock_t *lock)
2148 {
2149 	struct clk_hw *hw;
2150 	struct rockchip_clk_frac *frac;
2151@@ -251,6 +284,7 @@ static struct clk *rockchip_clk_register_frac_branch(
2152 	div->nmask = GENMASK(div->nwidth - 1, 0) << div->nshift;
2153 	div->lock = lock;
2154 	div->approximation = rockchip_fractional_approximation;
2155+	div->max_prate = max_prate;
2156 	div_ops = &clk_fractional_divider_ops;
2157 
2158 	hw = clk_hw_register_composite(NULL, name, parent_names, num_parents,
2159@@ -278,6 +312,8 @@ static struct clk *rockchip_clk_register_frac_branch(
2160 		frac_mux->shift = child->mux_shift;
2161 		frac_mux->mask = BIT(child->mux_width) - 1;
2162 		frac_mux->flags = child->mux_flags;
2163+		if (child->mux_table)
2164+			frac_mux->table = child->mux_table;
2165 		frac_mux->lock = lock;
2166 		frac_mux->hw.init = &init;
2167 
2168@@ -360,6 +396,61 @@ static struct clk *rockchip_clk_register_factor_branch(const char *name,
2169 	return hw->clk;
2170 }
2171 
2172+static struct clk *rockchip_clk_register_composite_brother_branch(
2173+		struct rockchip_clk_provider *ctx, const char *name,
2174+		const char *const *parent_names, u8 num_parents,
2175+		void __iomem *base, int muxdiv_offset, u8 mux_shift,
2176+		u8 mux_width, u8 mux_flags, u32 *mux_table,
2177+		int div_offset, u8 div_shift, u8 div_width, u8 div_flags,
2178+		struct clk_div_table *div_table, int gate_offset,
2179+		u8 gate_shift, u8 gate_flags, unsigned long flags,
2180+		struct rockchip_clk_branch *brother, spinlock_t *lock)
2181+{
2182+	struct clk *clk, *brother_clk;
2183+	struct clk_composite *composite, *brother_composite;
2184+	struct clk_hw *hw, *brother_hw;
2185+
2186+	if (brother && brother->branch_type != branch_half_divider) {
2187+		pr_err("%s: composite brother for %s can only be a halfdiv\n",
2188+		       __func__, name);
2189+		return ERR_PTR(-EINVAL);
2190+	}
2191+
2192+	clk = rockchip_clk_register_branch(name, parent_names, num_parents,
2193+					   base, muxdiv_offset, mux_shift,
2194+					   mux_width, mux_flags, mux_table,
2195+					   div_offset, div_shift, div_width,
2196+					   div_flags, div_table,
2197+					   gate_offset, gate_shift, gate_flags,
2198+					   flags, lock);
2199+	if (IS_ERR(clk))
2200+		return clk;
2201+
2202+	brother_clk = rockchip_clk_register_halfdiv(brother->name,
2203+				brother->parent_names, brother->num_parents,
2204+				base, brother->muxdiv_offset,
2205+				brother->mux_shift, brother->mux_width,
2206+				brother->mux_flags, brother->div_offset,
2207+				brother->div_shift, brother->div_width,
2208+				brother->div_flags, brother->gate_offset,
2209+				brother->gate_shift, brother->gate_flags,
2210+				flags, lock);
2211+	if (IS_ERR(brother_clk))
2212+		return brother_clk;
2213+	rockchip_clk_add_lookup(ctx, brother_clk, brother->id);
2214+
2215+	hw = __clk_get_hw(clk);
2216+	brother_hw = __clk_get_hw(brother_clk);
2217+	if (hw && brother_hw) {
2218+		composite = to_clk_composite(hw);
2219+		brother_composite = to_clk_composite(brother_hw);
2220+		composite->brother_hw = brother_hw;
2221+		brother_composite->brother_hw = hw;
2222+	}
2223+
2224+	return clk;
2225+}
2226+
2227 struct rockchip_clk_provider *rockchip_clk_init(struct device_node *np,
2228 						void __iomem *base,
2229 						unsigned long nr_clks)
2230@@ -387,6 +478,8 @@ struct rockchip_clk_provider *rockchip_clk_init(struct device_node *np,
2231 
2232 	ctx->grf = syscon_regmap_lookup_by_phandle(ctx->cru_node,
2233 						   "rockchip,grf");
2234+	ctx->pmugrf = syscon_regmap_lookup_by_phandle(ctx->cru_node,
2235+						   "rockchip,pmugrf");
2236 
2237 	return ctx;
2238 
2239@@ -452,11 +545,22 @@ void rockchip_clk_register_branches(struct rockchip_clk_provider *ctx,
2240 		/* catch simple muxes */
2241 		switch (list->branch_type) {
2242 		case branch_mux:
2243-			clk = clk_register_mux(NULL, list->name,
2244-				list->parent_names, list->num_parents,
2245-				flags, ctx->reg_base + list->muxdiv_offset,
2246-				list->mux_shift, list->mux_width,
2247-				list->mux_flags, &ctx->lock);
2248+			if (list->mux_table)
2249+				clk = clk_register_mux_table(NULL, list->name,
2250+					list->parent_names, list->num_parents,
2251+					flags,
2252+					ctx->reg_base + list->muxdiv_offset,
2253+					list->mux_shift,
2254+					BIT(list->mux_width) - 1,
2255+					list->mux_flags, list->mux_table,
2256+					&ctx->lock);
2257+			else
2258+				clk = clk_register_mux(NULL, list->name,
2259+					list->parent_names, list->num_parents,
2260+					flags,
2261+					ctx->reg_base + list->muxdiv_offset,
2262+					list->mux_shift, list->mux_width,
2263+					list->mux_flags, &ctx->lock);
2264 			break;
2265 		case branch_muxgrf:
2266 			clk = rockchip_clk_register_muxgrf(list->name,
2267@@ -465,6 +569,13 @@ void rockchip_clk_register_branches(struct rockchip_clk_provider *ctx,
2268 				list->mux_shift, list->mux_width,
2269 				list->mux_flags);
2270 			break;
2271+		case branch_muxpmugrf:
2272+			clk = rockchip_clk_register_muxgrf(list->name,
2273+				list->parent_names, list->num_parents,
2274+				flags, ctx->pmugrf, list->muxdiv_offset,
2275+				list->mux_shift, list->mux_width,
2276+				list->mux_flags);
2277+			break;
2278 		case branch_divider:
2279 			if (list->div_table)
2280 				clk = clk_register_divider_table(NULL,
2281@@ -488,17 +599,18 @@ void rockchip_clk_register_branches(struct rockchip_clk_provider *ctx,
2282 				list->div_flags,
2283 				list->gate_offset, list->gate_shift,
2284 				list->gate_flags, flags, list->child,
2285-				&ctx->lock);
2286+				list->max_prate, &ctx->lock);
2287 			break;
2288 		case branch_half_divider:
2289 			clk = rockchip_clk_register_halfdiv(list->name,
2290 				list->parent_names, list->num_parents,
2291 				ctx->reg_base, list->muxdiv_offset,
2292 				list->mux_shift, list->mux_width,
2293-				list->mux_flags, list->div_shift,
2294-				list->div_width, list->div_flags,
2295-				list->gate_offset, list->gate_shift,
2296-				list->gate_flags, flags, &ctx->lock);
2297+				list->mux_flags, list->div_offset,
2298+				list->div_shift, list->div_width,
2299+				list->div_flags, list->gate_offset,
2300+				list->gate_shift, list->gate_flags,
2301+				flags, &ctx->lock);
2302 			break;
2303 		case branch_gate:
2304 			flags |= CLK_SET_RATE_PARENT;
2305@@ -514,11 +626,25 @@ void rockchip_clk_register_branches(struct rockchip_clk_provider *ctx,
2306 				ctx->reg_base, list->muxdiv_offset,
2307 				list->mux_shift,
2308 				list->mux_width, list->mux_flags,
2309-				list->div_offset, list->div_shift, list->div_width,
2310+				list->mux_table, list->div_offset,
2311+				list->div_shift, list->div_width,
2312 				list->div_flags, list->div_table,
2313 				list->gate_offset, list->gate_shift,
2314 				list->gate_flags, flags, &ctx->lock);
2315 			break;
2316+		case branch_composite_brother:
2317+			clk = rockchip_clk_register_composite_brother_branch(
2318+				ctx, list->name, list->parent_names,
2319+				list->num_parents, ctx->reg_base,
2320+				list->muxdiv_offset, list->mux_shift,
2321+				list->mux_width, list->mux_flags,
2322+				list->mux_table, list->div_offset,
2323+				list->div_shift, list->div_width,
2324+				list->div_flags, list->div_table,
2325+				list->gate_offset, list->gate_shift,
2326+				list->gate_flags, flags, list->child,
2327+				&ctx->lock);
2328+			break;
2329 		case branch_mmc:
2330 			clk = rockchip_clk_register_mmc(
2331 				list->name,
2332@@ -549,7 +675,17 @@ void rockchip_clk_register_branches(struct rockchip_clk_provider *ctx,
2333 				list->muxdiv_offset, list->mux_shift,
2334 				list->mux_width, list->div_shift,
2335 				list->div_width, list->div_flags,
2336-				ctx->reg_base, &ctx->lock);
2337+				ctx->reg_base);
2338+			break;
2339+		case branch_dclk_divider:
2340+			clk = rockchip_clk_register_dclk_branch(list->name,
2341+				list->parent_names, list->num_parents,
2342+				ctx->reg_base, list->muxdiv_offset, list->mux_shift,
2343+				list->mux_width, list->mux_flags,
2344+				list->div_offset, list->div_shift, list->div_width,
2345+				list->div_flags, list->div_table,
2346+				list->gate_offset, list->gate_shift,
2347+				list->gate_flags, flags, list->max_prate, &ctx->lock);
2348 			break;
2349 		}
2350 
2351@@ -573,15 +709,17 @@ EXPORT_SYMBOL_GPL(rockchip_clk_register_branches);
2352 
2353 void rockchip_clk_register_armclk(struct rockchip_clk_provider *ctx,
2354 				  unsigned int lookup_id,
2355-				  const char *name, const char *const *parent_names,
2356+				  const char *name,
2357 				  u8 num_parents,
2358+				  struct clk *parent, struct clk *alt_parent,
2359 				  const struct rockchip_cpuclk_reg_data *reg_data,
2360 				  const struct rockchip_cpuclk_rate_table *rates,
2361 				  int nrates)
2362 {
2363 	struct clk *clk;
2364 
2365-	clk = rockchip_clk_register_cpuclk(name, parent_names, num_parents,
2366+	clk = rockchip_clk_register_cpuclk(name, num_parents,
2367+		parent, alt_parent,
2368 					   reg_data, rates, nrates,
2369 					   ctx->reg_base, &ctx->lock);
2370 	if (IS_ERR(clk)) {
2371@@ -594,20 +732,20 @@ void rockchip_clk_register_armclk(struct rockchip_clk_provider *ctx,
2372 }
2373 EXPORT_SYMBOL_GPL(rockchip_clk_register_armclk);
2374 
2375-void rockchip_clk_protect_critical(const char *const clocks[],
2376-				   int nclocks)
2377-{
2378-	int i;
2379-
2380-	/* Protect the clocks that needs to stay on */
2381-	for (i = 0; i < nclocks; i++) {
2382-		struct clk *clk = __clk_lookup(clocks[i]);
2383+void (*rk_dump_cru)(void);
2384+EXPORT_SYMBOL(rk_dump_cru);
2385 
2386-		if (clk)
2387-			clk_prepare_enable(clk);
2388-	}
2389+static int rk_clk_panic(struct notifier_block *this,
2390+			unsigned long ev, void *ptr)
2391+{
2392+	if (rk_dump_cru)
2393+		rk_dump_cru();
2394+	return NOTIFY_DONE;
2395 }
2396-EXPORT_SYMBOL_GPL(rockchip_clk_protect_critical);
2397+
2398+static struct notifier_block rk_clk_panic_block = {
2399+	.notifier_call = rk_clk_panic,
2400+};
2401 
2402 static void __iomem *rst_base;
2403 static unsigned int reg_restart;
2404@@ -641,5 +779,7 @@ rockchip_register_restart_notifier(struct rockchip_clk_provider *ctx,
2405 	if (ret)
2406 		pr_err("%s: cannot register restart handler, %d\n",
2407 		       __func__, ret);
2408+	atomic_notifier_chain_register(&panic_notifier_list,
2409+				       &rk_clk_panic_block);
2410 }
2411 EXPORT_SYMBOL_GPL(rockchip_register_restart_notifier);
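/*
 * Sketch of how a SoC clock driver is expected to feed the rk_dump_cru
 * hook that the panic notifier above calls.  The dump body, register
 * offsets and init function are placeholders, not taken from any real
 * Rockchip driver.
 */
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include "clk.h"

static void __iomem *example_cru_base;

static void example_dump_cru(void)
{
	/* print a few CRU words so a panic log shows the clock state */
	pr_emerg("CRU: mode=0x%08x sel0=0x%08x\n",
		 readl_relaxed(example_cru_base + 0x0280),
		 readl_relaxed(example_cru_base + 0x0300));
}

static void __init example_clk_init(struct device_node *np, void __iomem *base)
{
	example_cru_base = base;
	rk_dump_cru = example_dump_cru;	/* picked up by rk_clk_panic() */
}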
2412diff --git a/drivers/clk/rockchip/clk.h b/drivers/clk/rockchip/clk.h
2413index 2271a8412..26cf2553c 100644
2414--- a/drivers/clk/rockchip/clk.h
2415+++ b/drivers/clk/rockchip/clk.h
2416@@ -37,12 +37,25 @@ struct clk;
2417 #define BOOST_SWITCH_THRESHOLD		0x0024
2418 #define BOOST_FSM_STATUS		0x0028
2419 #define BOOST_PLL_L_CON(x)		((x) * 0x4 + 0x2c)
2420+#define BOOST_PLL_CON_MASK		0xffff
2421+#define BOOST_CORE_DIV_MASK		0x1f
2422+#define BOOST_CORE_DIV_SHIFT		0
2423+#define BOOST_BACKUP_PLL_MASK		0x3
2424+#define BOOST_BACKUP_PLL_SHIFT		8
2425+#define BOOST_BACKUP_PLL_USAGE_MASK	0x1
2426+#define BOOST_BACKUP_PLL_USAGE_SHIFT	12
2427+#define BOOST_BACKUP_PLL_USAGE_BORROW	0
2428+#define BOOST_BACKUP_PLL_USAGE_TARGET	1
2429+#define BOOST_ENABLE_MASK		0x1
2430+#define BOOST_ENABLE_SHIFT		0
2431 #define BOOST_RECOVERY_MASK		0x1
2432 #define BOOST_RECOVERY_SHIFT		1
2433 #define BOOST_SW_CTRL_MASK		0x1
2434 #define BOOST_SW_CTRL_SHIFT		2
2435 #define BOOST_LOW_FREQ_EN_MASK		0x1
2436 #define BOOST_LOW_FREQ_EN_SHIFT		3
2437+#define BOOST_STATIS_ENABLE_MASK	0x1
2438+#define BOOST_STATIS_ENABLE_SHIFT	4
2439 #define BOOST_BUSY_STATE		BIT(8)
2440 
2441 #define PX30_PLL_CON(x)			((x) * 0x4)
2442@@ -79,6 +92,51 @@ struct clk;
2443 #define RV1108_EMMC_CON0		0x1e8
2444 #define RV1108_EMMC_CON1		0x1ec
2445 
2446+#define RV1126_PMU_MODE			0x0
2447+#define RV1126_PMU_PLL_CON(x)		((x) * 0x4 + 0x10)
2448+#define RV1126_PMU_CLKSEL_CON(x)	((x) * 0x4 + 0x100)
2449+#define RV1126_PMU_CLKGATE_CON(x)	((x) * 0x4 + 0x180)
2450+#define RV1126_PMU_SOFTRST_CON(x)	((x) * 0x4 + 0x200)
2451+#define RV1126_PLL_CON(x)		((x) * 0x4)
2452+#define RV1126_MODE_CON			0x90
2453+#define RV1126_CLKSEL_CON(x)		((x) * 0x4 + 0x100)
2454+#define RV1126_CLKGATE_CON(x)		((x) * 0x4 + 0x280)
2455+#define RV1126_SOFTRST_CON(x)		((x) * 0x4 + 0x300)
2456+#define RV1126_GLB_SRST_FST		0x408
2457+#define RV1126_GLB_SRST_SND		0x40c
2458+#define RV1126_SDMMC_CON0		0x440
2459+#define RV1126_SDMMC_CON1		0x444
2460+#define RV1126_SDIO_CON0		0x448
2461+#define RV1126_SDIO_CON1		0x44c
2462+#define RV1126_EMMC_CON0		0x450
2463+#define RV1126_EMMC_CON1		0x454
2464+
2465+/*
2466+ * register positions shared by RK1808, RK2928, RK3036,
2467+ * RK3066, RK3188 and RK3228
2468+ */
2469+
2470+#define RK1808_PLL_CON(x)		((x) * 0x4)
2471+#define RK1808_MODE_CON			0xa0
2472+#define RK1808_MISC_CON			0xa4
2473+#define RK1808_MISC1_CON		0xa8
2474+#define RK1808_GLB_SRST_FST		0xb8
2475+#define RK1808_GLB_SRST_SND		0xbc
2476+#define RK1808_CLKSEL_CON(x)		((x) * 0x4 + 0x100)
2477+#define RK1808_CLKGATE_CON(x)		((x) * 0x4 + 0x230)
2478+#define RK1808_SOFTRST_CON(x)		((x) * 0x4 + 0x300)
2479+#define RK1808_SDMMC_CON0		0x380
2480+#define RK1808_SDMMC_CON1		0x384
2481+#define RK1808_SDIO_CON0		0x388
2482+#define RK1808_SDIO_CON1		0x38c
2483+#define RK1808_EMMC_CON0		0x390
2484+#define RK1808_EMMC_CON1		0x394
2485+
2486+#define RK1808_PMU_PLL_CON(x)		((x) * 0x4 + 0x4000)
2487+#define RK1808_PMU_MODE_CON		0x4020
2488+#define RK1808_PMU_CLKSEL_CON(x)	((x) * 0x4 + 0x4040)
2489+#define RK1808_PMU_CLKGATE_CON(x)	((x) * 0x4 + 0x4080)
2490+
2491 #define RK2928_PLL_CON(x)		((x) * 0x4)
2492 #define RK2928_MODE_CON		0x40
2493 #define RK2928_CLKSEL_CON(x)	((x) * 0x4 + 0x44)
2494@@ -188,11 +246,83 @@ struct clk;
2495 #define RK3399_PMU_CLKGATE_CON(x)	((x) * 0x4 + 0x100)
2496 #define RK3399_PMU_SOFTRST_CON(x)	((x) * 0x4 + 0x110)
2497 
2498+#define RK3568_PLL_CON(x)		RK2928_PLL_CON(x)
2499+#define RK3568_MODE_CON0		0xc0
2500+#define RK3568_MISC_CON0		0xc4
2501+#define RK3568_MISC_CON1		0xc8
2502+#define RK3568_MISC_CON2		0xcc
2503+#define RK3568_GLB_CNT_TH		0xd0
2504+#define RK3568_GLB_SRST_FST		0xd4
2505+#define RK3568_GLB_SRST_SND		0xd8
2506+#define RK3568_GLB_RST_CON		0xdc
2507+#define RK3568_GLB_RST_ST		0xe0
2508+#define RK3568_CLKSEL_CON(x)		((x) * 0x4 + 0x100)
2509+#define RK3568_CLKGATE_CON(x)		((x) * 0x4 + 0x300)
2510+#define RK3568_SOFTRST_CON(x)		((x) * 0x4 + 0x400)
2511+#define RK3568_SDMMC0_CON0		0x580
2512+#define RK3568_SDMMC0_CON1		0x584
2513+#define RK3568_SDMMC1_CON0		0x588
2514+#define RK3568_SDMMC1_CON1		0x58c
2515+#define RK3568_SDMMC2_CON0		0x590
2516+#define RK3568_SDMMC2_CON1		0x594
2517+#define RK3568_EMMC_CON0		0x598
2518+#define RK3568_EMMC_CON1		0x59c
2519+
2520+#define RK3568_PMU_PLL_CON(x)		RK2928_PLL_CON(x)
2521+#define RK3568_PMU_MODE_CON0		0x80
2522+#define RK3568_PMU_CLKSEL_CON(x)	((x) * 0x4 + 0x100)
2523+#define RK3568_PMU_CLKGATE_CON(x)	((x) * 0x4 + 0x180)
2524+#define RK3568_PMU_SOFTRST_CON(x)	((x) * 0x4 + 0x200)
2525+
2526+#define RK3588_PHP_CRU_BASE		0x8000
2527+#define RK3588_PMU_CRU_BASE		0x30000
2528+#define RK3588_BIGCORE0_CRU_BASE	0x50000
2529+#define RK3588_BIGCORE1_CRU_BASE	0x52000
2530+#define RK3588_DSU_CRU_BASE		0x58000
2531+
2532+#define RK3588_PLL_CON(x)		RK2928_PLL_CON(x)
2533+#define RK3588_MODE_CON0		0x280
2534+#define RK3588_CLKSEL_CON(x)		((x) * 0x4 + 0x300)
2535+#define RK3588_CLKGATE_CON(x)		((x) * 0x4 + 0x800)
2536+#define RK3588_SOFTRST_CON(x)		((x) * 0x4 + 0xa00)
2537+#define RK3588_GLB_CNT_TH		0xc00
2538+#define RK3588_GLB_SRST_FST		0xc08
2539+#define RK3588_GLB_SRST_SND		0xc0c
2540+#define RK3588_GLB_RST_CON		0xc10
2541+#define RK3588_GLB_RST_ST		0xc04
2542+#define RK3588_SDIO_CON0		0xc24
2543+#define RK3588_SDIO_CON1		0xc28
2544+#define RK3588_SDMMC_CON0		0xc30
2545+#define RK3588_SDMMC_CON1		0xc34
2546+
2547+#define RK3588_PHP_CLKGATE_CON(x)	((x) * 0x4 + RK3588_PHP_CRU_BASE + 0x800)
2548+#define RK3588_PHP_SOFTRST_CON(x)	((x) * 0x4 + RK3588_PHP_CRU_BASE + 0xa00)
2549+
2550+#define RK3588_PMU_PLL_CON(x)		((x) * 0x4 + RK3588_PHP_CRU_BASE)
2551+#define RK3588_PMU_CLKSEL_CON(x)	((x) * 0x4 + RK3588_PMU_CRU_BASE + 0x300)
2552+#define RK3588_PMU_CLKGATE_CON(x)	((x) * 0x4 + RK3588_PMU_CRU_BASE + 0x800)
2553+#define RK3588_PMU_SOFTRST_CON(x)	((x) * 0x4 + RK3588_PMU_CRU_BASE + 0xa00)
2554+
2555+#define RK3588_B0_PLL_CON(x)		((x) * 0x4 + RK3588_BIGCORE0_CRU_BASE)
2556+#define RK3588_BIGCORE0_CLKSEL_CON(x)	((x) * 0x4 + RK3588_BIGCORE0_CRU_BASE + 0x300)
2557+#define RK3588_BIGCORE0_CLKGATE_CON(x)	((x) * 0x4 + RK3588_BIGCORE0_CRU_BASE + 0x800)
2558+#define RK3588_BIGCORE0_SOFTRST_CON(x)	((x) * 0x4 + RK3588_BIGCORE0_CRU_BASE + 0xa00)
2559+#define RK3588_B1_PLL_CON(x)		((x) * 0x4 + RK3588_BIGCORE1_CRU_BASE)
2560+#define RK3588_BIGCORE1_CLKSEL_CON(x)	((x) * 0x4 + RK3588_BIGCORE1_CRU_BASE + 0x300)
2561+#define RK3588_BIGCORE1_CLKGATE_CON(x)	((x) * 0x4 + RK3588_BIGCORE1_CRU_BASE + 0x800)
2562+#define RK3588_BIGCORE1_SOFTRST_CON(x)	((x) * 0x4 + RK3588_BIGCORE1_CRU_BASE + 0xa00)
2563+#define RK3588_LPLL_CON(x)		((x) * 0x4 + RK3588_DSU_CRU_BASE)
2564+#define RK3588_DSU_CLKSEL_CON(x)	((x) * 0x4 + RK3588_DSU_CRU_BASE + 0x300)
2565+#define RK3588_DSU_CLKGATE_CON(x)	((x) * 0x4 + RK3588_DSU_CRU_BASE + 0x800)
2566+#define RK3588_DSU_SOFTRST_CON(x)	((x) * 0x4 + RK3588_DSU_CRU_BASE + 0xa00)
2567+
2568 enum rockchip_pll_type {
2569 	pll_rk3036,
2570 	pll_rk3066,
2571 	pll_rk3328,
2572 	pll_rk3399,
2573+	pll_rk3588,
2574+	pll_rk3588_core,
2575 };
2576 
2577 #define RK3036_PLL_RATE(_rate, _refdiv, _fbdiv, _postdiv1,	\
2578@@ -225,6 +355,15 @@ enum rockchip_pll_type {
2579 	.nb = _nb,						\
2580 }
2581 
2582+#define RK3588_PLL_RATE(_rate, _p, _m, _s, _k)			\
2583+{								\
2584+	.rate	= _rate##U,					\
2585+	.p = _p,						\
2586+	.m = _m,						\
2587+	.s = _s,						\
2588+	.k = _k,						\
2589+}
2590+
2591 /**
2592  * struct rockchip_clk_provider - information about clock provider
2593  * @reg_base: virtual address for the register base.
2594@@ -238,22 +377,37 @@ struct rockchip_clk_provider {
2595 	struct clk_onecell_data clk_data;
2596 	struct device_node *cru_node;
2597 	struct regmap *grf;
2598+	struct regmap *pmugrf;
2599 	spinlock_t lock;
2600 };
2601 
2602 struct rockchip_pll_rate_table {
2603 	unsigned long rate;
2604-	unsigned int nr;
2605-	unsigned int nf;
2606-	unsigned int no;
2607-	unsigned int nb;
2608-	/* for RK3036/RK3399 */
2609-	unsigned int fbdiv;
2610-	unsigned int postdiv1;
2611-	unsigned int refdiv;
2612-	unsigned int postdiv2;
2613-	unsigned int dsmpd;
2614-	unsigned int frac;
2615+	union {
2616+		struct {
2617+			/* for RK3066 */
2618+			unsigned int nr;
2619+			unsigned int nf;
2620+			unsigned int no;
2621+			unsigned int nb;
2622+		};
2623+		struct {
2624+			/* for RK3036/RK3399 */
2625+			unsigned int fbdiv;
2626+			unsigned int postdiv1;
2627+			unsigned int refdiv;
2628+			unsigned int postdiv2;
2629+			unsigned int dsmpd;
2630+			unsigned int frac;
2631+		};
2632+		struct {
2633+			/* for RK3588 */
2634+			unsigned int m;
2635+			unsigned int p;
2636+			unsigned int s;
2637+			unsigned int k;
2638+		};
2639+	};
2640 };
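/*
 * Sketch of how the anonymous union above is consumed: each SoC
 * generation fills only its own member set (normally via its rate macro),
 * while the generic PLL code only reads .rate.  The struct below is a
 * trimmed local copy and the frequencies/divider values are made-up
 * examples, not taken from a real rate table.
 */
#include <stdio.h>

struct pll_rate {
	unsigned long rate;
	union {
		/* RK3036/RK3399 style */
		struct { unsigned int fbdiv, postdiv1, refdiv, postdiv2, dsmpd, frac; };
		/* RK3588 style */
		struct { unsigned int m, p, s, k; };
	};
};

static const struct pll_rate rk3399_style = {
	.rate = 1200000000, .fbdiv = 50, .postdiv1 = 1, .refdiv = 1,
	.postdiv2 = 1, .dsmpd = 1, .frac = 0,
};

static const struct pll_rate rk3588_style = {
	.rate = 1188000000, .m = 198, .p = 2, .s = 1, .k = 0,
};

int main(void)
{
	printf("%lu %lu\n", rk3399_style.rate, rk3588_style.rate);
	return 0;
}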
2641 
2642 /**
2643@@ -317,39 +471,56 @@ struct clk *rockchip_clk_register_pll(struct rockchip_clk_provider *ctx,
2644 		struct rockchip_pll_rate_table *rate_table,
2645 		unsigned long flags, u8 clk_pll_flags);
2646 
2647+void rockchip_boost_init(struct clk_hw *hw);
2648+
2649+void rockchip_boost_enable_recovery_sw_low(struct clk_hw *hw);
2650+
2651+void rockchip_boost_disable_recovery_sw(struct clk_hw *hw);
2652+
2653+void rockchip_boost_add_core_div(struct clk_hw *hw, unsigned long prate);
2654+
2655 struct rockchip_cpuclk_clksel {
2656 	int reg;
2657 	u32 val;
2658 };
2659 
2660-#define ROCKCHIP_CPUCLK_NUM_DIVIDERS	2
2661+#define ROCKCHIP_CPUCLK_NUM_DIVIDERS	6
2662+#define ROCKCHIP_CPUCLK_MAX_CORES	4
2663 struct rockchip_cpuclk_rate_table {
2664 	unsigned long prate;
2665 	struct rockchip_cpuclk_clksel divs[ROCKCHIP_CPUCLK_NUM_DIVIDERS];
2666+	struct rockchip_cpuclk_clksel pre_muxs[ROCKCHIP_CPUCLK_NUM_DIVIDERS];
2667+	struct rockchip_cpuclk_clksel post_muxs[ROCKCHIP_CPUCLK_NUM_DIVIDERS];
2668 };
2669 
2670 /**
2671  * struct rockchip_cpuclk_reg_data - register offsets and masks of the cpuclock
2672- * @core_reg:		register offset of the core settings register
2673- * @div_core_shift:	core divider offset used to divide the pll value
2674- * @div_core_mask:	core divider mask
2675- * @mux_core_alt:	mux value to select alternate parent
2676+ * @core_reg[]:	register offsets of the per-core setting registers
2677+ * @div_core_shift[]:	per-core divider shifts used to divide the pll value
2678+ * @div_core_mask[]:	per-core divider masks
2679+ * @num_cores:	number of cpu cores
2680+ * @mux_core_reg:	register offset of the core parent mux
2681+ * @mux_core_alt:	mux value to select alternate parent
2682  * @mux_core_main:	mux value to select main parent of core
2683  * @mux_core_shift:	offset of the core multiplexer
2684  * @mux_core_mask:	core multiplexer mask
2685  */
2686 struct rockchip_cpuclk_reg_data {
2687-	int		core_reg;
2688-	u8		div_core_shift;
2689-	u32		div_core_mask;
2690-	u8		mux_core_alt;
2691-	u8		mux_core_main;
2692-	u8		mux_core_shift;
2693-	u32		mux_core_mask;
2694+	int	core_reg[ROCKCHIP_CPUCLK_MAX_CORES];
2695+	u8	div_core_shift[ROCKCHIP_CPUCLK_MAX_CORES];
2696+	u32	div_core_mask[ROCKCHIP_CPUCLK_MAX_CORES];
2697+	int	num_cores;
2698+	int	mux_core_reg;
2699+	u8	mux_core_alt;
2700+	u8	mux_core_main;
2701+	u8	mux_core_shift;
2702+	u32	mux_core_mask;
2703+	const char	*pll_name;
2704 };
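/*
 * Sketch of a reg_data instance for the per-core array fields above.
 * Offsets, shifts and the pll name are placeholders, not taken from any
 * real SoC; they only show how a two-core cluster would populate the
 * arrays and num_cores.
 */
static const struct rockchip_cpuclk_reg_data example_cpuclk_data = {
	.core_reg = { 0x100, 0x104 },	/* one CLKSEL register per core */
	.div_core_shift = { 0, 0 },
	.div_core_mask = { 0x1f, 0x1f },
	.num_cores = 2,
	.mux_core_reg = 0x100,
	.mux_core_alt = 1,		/* alternate (safe) parent */
	.mux_core_main = 0,		/* armclk pll parent */
	.mux_core_shift = 6,
	.mux_core_mask = 0x3,
	.pll_name = "pll_apll",
};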
2705 
2706 struct clk *rockchip_clk_register_cpuclk(const char *name,
2707-			const char *const *parent_names, u8 num_parents,
2708+			u8 num_parents,
2709+			struct clk *parent, struct clk *alt_parent,
2710 			const struct rockchip_cpuclk_reg_data *reg_data,
2711 			const struct rockchip_cpuclk_rate_table *rates,
2712 			int nrates, void __iomem *reg_base, spinlock_t *lock);
2713@@ -361,16 +532,21 @@ struct clk *rockchip_clk_register_mmc(const char *name,
2714 /*
2715  * DDRCLK flags, including method of setting the rate
2716  * ROCKCHIP_DDRCLK_SIP: use SIP call to bl31 to change ddrclk rate.
2717+ * ROCKCHIP_DDRCLK_SCPI: use SCPI APIs to let mcu change ddrclk rate.
2718  */
2719 #define ROCKCHIP_DDRCLK_SIP		BIT(0)
2720+#define ROCKCHIP_DDRCLK_SCPI		0x02
2721+#define ROCKCHIP_DDRCLK_SIP_V2		0x03
2722+
2723+void rockchip_set_ddrclk_params(void __iomem *params);
2724+void rockchip_set_ddrclk_dmcfreq_wait_complete(int (*func)(void));
2725 
2726 struct clk *rockchip_clk_register_ddrclk(const char *name, int flags,
2727 					 const char *const *parent_names,
2728 					 u8 num_parents, int mux_offset,
2729 					 int mux_shift, int mux_width,
2730 					 int div_shift, int div_width,
2731-					 int ddr_flags, void __iomem *reg_base,
2732-					 spinlock_t *lock);
2733+					 int ddr_flags, void __iomem *reg_base);
2734 
2735 #define ROCKCHIP_INVERTER_HIWORD_MASK	BIT(0)
2736 
2737@@ -388,8 +564,10 @@ struct clk *rockchip_clk_register_muxgrf(const char *name,
2738 
2739 enum rockchip_clk_branch_type {
2740 	branch_composite,
2741+	branch_composite_brother,
2742 	branch_mux,
2743 	branch_muxgrf,
2744+	branch_muxpmugrf,
2745 	branch_divider,
2746 	branch_fraction_divider,
2747 	branch_gate,
2748@@ -398,6 +576,7 @@ enum rockchip_clk_branch_type {
2749 	branch_factor,
2750 	branch_ddrclk,
2751 	branch_half_divider,
2752+	branch_dclk_divider,
2753 };
2754 
2755 struct rockchip_clk_branch {
2756@@ -411,6 +590,7 @@ struct rockchip_clk_branch {
2757 	u8				mux_shift;
2758 	u8				mux_width;
2759 	u8				mux_flags;
2760+	u32				*mux_table;
2761 	int				div_offset;
2762 	u8				div_shift;
2763 	u8				div_width;
2764@@ -420,6 +600,7 @@ struct rockchip_clk_branch {
2765 	u8				gate_shift;
2766 	u8				gate_flags;
2767 	struct rockchip_clk_branch	*child;
2768+	unsigned long			max_prate;
2769 };
2770 
2771 #define COMPOSITE(_id, cname, pnames, f, mo, ms, mw, mf, ds, dw,\
2772@@ -443,6 +624,50 @@ struct rockchip_clk_branch {
2773 		.gate_flags	= gf,				\
2774 	}
2775 
2776+#define COMPOSITE_BROTHER(_id, cname, pnames, f, mo, ms, mw, mf,\
2777+			  ds, dw, df, go, gs, gf, bro)		\
2778+	{							\
2779+		.id		= _id,				\
2780+		.branch_type	= branch_composite_brother,	\
2781+		.name		= cname,			\
2782+		.parent_names	= pnames,			\
2783+		.num_parents	= ARRAY_SIZE(pnames),		\
2784+		.flags		= f,				\
2785+		.muxdiv_offset	= mo,				\
2786+		.mux_shift	= ms,				\
2787+		.mux_width	= mw,				\
2788+		.mux_flags	= mf,				\
2789+		.div_shift	= ds,				\
2790+		.div_width	= dw,				\
2791+		.div_flags	= df,				\
2792+		.gate_offset	= go,				\
2793+		.gate_shift	= gs,				\
2794+		.gate_flags	= gf,				\
2795+		.child		= bro,				\
2796+	}
2797+
2798+#define COMPOSITE_MUXTBL(_id, cname, pnames, f, mo, ms, mw, mf,	\
2799+		 mt, ds, dw, df, go, gs, gf)			\
2800+	{							\
2801+		.id		= _id,				\
2802+		.branch_type	= branch_composite,		\
2803+		.name		= cname,			\
2804+		.parent_names	= pnames,			\
2805+		.num_parents	= ARRAY_SIZE(pnames),		\
2806+		.flags		= f,				\
2807+		.muxdiv_offset	= mo,				\
2808+		.mux_shift	= ms,				\
2809+		.mux_width	= mw,				\
2810+		.mux_flags	= mf,				\
2811+		.mux_table	= mt,				\
2812+		.div_shift	= ds,				\
2813+		.div_width	= dw,				\
2814+		.div_flags	= df,				\
2815+		.gate_offset	= go,				\
2816+		.gate_shift	= gs,				\
2817+		.gate_flags	= gf,				\
2818+	}
2819+
2820 #define COMPOSITE_DIV_OFFSET(_id, cname, pnames, f, mo, ms, mw,	\
2821 			     mf, do, ds, dw, df, go, gs, gf)	\
2822 	{							\
2823@@ -539,6 +764,26 @@ struct rockchip_clk_branch {
2824 		.gate_offset	= -1,				\
2825 	}
2826 
2827+#define COMPOSITE_BROTHER_NOGATE(_id, cname, pnames, f, mo, ms, \
2828+				 mw, mf, ds, dw, df, bro)	\
2829+	{							\
2830+		.id		= _id,				\
2831+		.branch_type	= branch_composite_brother,	\
2832+		.name		= cname,			\
2833+		.parent_names	= pnames,			\
2834+		.num_parents	= ARRAY_SIZE(pnames),		\
2835+		.flags		= f,				\
2836+		.muxdiv_offset	= mo,				\
2837+		.mux_shift	= ms,				\
2838+		.mux_width	= mw,				\
2839+		.mux_flags	= mf,				\
2840+		.div_shift	= ds,				\
2841+		.div_width	= dw,				\
2842+		.div_flags	= df,				\
2843+		.gate_offset	= -1,				\
2844+		.child		= bro,				\
2845+	}
2846+
2847 #define COMPOSITE_NOGATE_DIVTBL(_id, cname, pnames, f, mo, ms,	\
2848 				mw, mf, ds, dw, df, dt)		\
2849 	{							\
2850@@ -559,7 +804,7 @@ struct rockchip_clk_branch {
2851 		.gate_offset	= -1,				\
2852 	}
2853 
2854-#define COMPOSITE_FRAC(_id, cname, pname, f, mo, df, go, gs, gf)\
2855+#define COMPOSITE_FRAC(_id, cname, pname, f, mo, df, go, gs, gf, prate)\
2856 	{							\
2857 		.id		= _id,				\
2858 		.branch_type	= branch_fraction_divider,	\
2859@@ -574,9 +819,10 @@ struct rockchip_clk_branch {
2860 		.gate_offset	= go,				\
2861 		.gate_shift	= gs,				\
2862 		.gate_flags	= gf,				\
2863+		.max_prate	= prate,			\
2864 	}
2865 
2866-#define COMPOSITE_FRACMUX(_id, cname, pname, f, mo, df, go, gs, gf, ch) \
2867+#define COMPOSITE_FRACMUX(_id, cname, pname, f, mo, df, go, gs, gf, ch, prate) \
2868 	{							\
2869 		.id		= _id,				\
2870 		.branch_type	= branch_fraction_divider,	\
2871@@ -592,9 +838,10 @@ struct rockchip_clk_branch {
2872 		.gate_shift	= gs,				\
2873 		.gate_flags	= gf,				\
2874 		.child		= ch,				\
2875+		.max_prate	= prate,			\
2876 	}
2877 
2878-#define COMPOSITE_FRACMUX_NOGATE(_id, cname, pname, f, mo, df, ch) \
2879+#define COMPOSITE_FRACMUX_NOGATE(_id, cname, pname, f, mo, df, ch, prate) \
2880 	{							\
2881 		.id		= _id,				\
2882 		.branch_type	= branch_fraction_divider,	\
2883@@ -608,6 +855,7 @@ struct rockchip_clk_branch {
2884 		.div_flags	= df,				\
2885 		.gate_offset	= -1,				\
2886 		.child		= ch,				\
2887+		.max_prate	= prate,			\
2888 	}
2889 
2890 #define COMPOSITE_DDRCLK(_id, cname, pnames, f, mo, ms, mw,	\
2891@@ -643,6 +891,22 @@ struct rockchip_clk_branch {
2892 		.gate_offset	= -1,				\
2893 	}
2894 
2895+#define MUXTBL(_id, cname, pnames, f, o, s, w, mf, mt)		\
2896+	{							\
2897+		.id		= _id,				\
2898+		.branch_type	= branch_mux,			\
2899+		.name		= cname,			\
2900+		.parent_names	= pnames,			\
2901+		.num_parents	= ARRAY_SIZE(pnames),		\
2902+		.flags		= f,				\
2903+		.muxdiv_offset	= o,				\
2904+		.mux_shift	= s,				\
2905+		.mux_width	= w,				\
2906+		.mux_flags	= mf,				\
2907+		.gate_offset	= -1,				\
2908+		.mux_table	= mt,				\
2909+	}
2910+
2911 #define MUXGRF(_id, cname, pnames, f, o, s, w, mf)		\
2912 	{							\
2913 		.id		= _id,				\
2914@@ -658,6 +922,21 @@ struct rockchip_clk_branch {
2915 		.gate_offset	= -1,				\
2916 	}
2917 
2918+#define MUXPMUGRF(_id, cname, pnames, f, o, s, w, mf)		\
2919+	{							\
2920+		.id		= _id,				\
2921+		.branch_type	= branch_muxpmugrf,		\
2922+		.name		= cname,			\
2923+		.parent_names	= pnames,			\
2924+		.num_parents	= ARRAY_SIZE(pnames),		\
2925+		.flags		= f,				\
2926+		.muxdiv_offset	= o,				\
2927+		.mux_shift	= s,				\
2928+		.mux_width	= w,				\
2929+		.mux_flags	= mf,				\
2930+		.gate_offset	= -1,				\
2931+	}
2932+
2933 #define DIV(_id, cname, pname, f, o, s, w, df)			\
2934 	{							\
2935 		.id		= _id,				\
2936@@ -772,6 +1051,28 @@ struct rockchip_clk_branch {
2937 		.gate_flags	= gf,				\
2938 	}
2939 
2940+#define COMPOSITE_HALFDIV_OFFSET(_id, cname, pnames, f, mo, ms, mw, mf, do,\
2941+				 ds, dw, df, go, gs, gf)		   \
2942+	{							\
2943+		.id		= _id,				\
2944+		.branch_type	= branch_half_divider,		\
2945+		.name		= cname,			\
2946+		.parent_names	= pnames,			\
2947+		.num_parents	= ARRAY_SIZE(pnames),		\
2948+		.flags		= f,				\
2949+		.muxdiv_offset	= mo,				\
2950+		.mux_shift	= ms,				\
2951+		.mux_width	= mw,				\
2952+		.mux_flags	= mf,				\
2953+		.div_offset	= do,				\
2954+		.div_shift	= ds,				\
2955+		.div_width	= dw,				\
2956+		.div_flags	= df,				\
2957+		.gate_offset	= go,				\
2958+		.gate_shift	= gs,				\
2959+		.gate_flags	= gf,				\
2960+	}
2961+
2962 #define COMPOSITE_NOGATE_HALFDIV(_id, cname, pnames, f, mo, ms, mw, mf,	\
2963 				 ds, dw, df)				\
2964 	{							\
2965@@ -824,6 +1125,28 @@ struct rockchip_clk_branch {
2966 		.gate_offset	= -1,				\
2967 	}
2968 
2969+#define COMPOSITE_DCLK(_id, cname, pnames, f, mo, ms, mw, mf, ds, dw,\
2970+		  df, go, gs, gf, prate)				\
2971+	{							\
2972+		.id		= _id,				\
2973+		.branch_type	= branch_dclk_divider,		\
2974+		.name		= cname,			\
2975+		.parent_names	= pnames,			\
2976+		.num_parents	= ARRAY_SIZE(pnames),		\
2977+		.flags		= f,				\
2978+		.muxdiv_offset	= mo,				\
2979+		.mux_shift	= ms,				\
2980+		.mux_width	= mw,				\
2981+		.mux_flags	= mf,				\
2982+		.div_shift	= ds,				\
2983+		.div_width	= dw,				\
2984+		.div_flags	= df,				\
2985+		.gate_offset	= go,				\
2986+		.gate_shift	= gs,				\
2987+		.gate_flags	= gf,				\
2988+		.max_prate	= prate,				\
2989+	}
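/*
 * Sketch of a branch declaration using the extended COMPOSITE_FRAC macro
 * above: the trailing argument is the new max_prate cap consumed by
 * rockchip_fractional_approximation().  The clock id, names, register
 * offsets, GFLAGS and the 600 MHz cap are placeholders, not from a real
 * clock tree.
 */
static struct rockchip_clk_branch example_branches[] __initdata = {
	COMPOSITE_FRAC(0, "clk_uart0_frac", "clk_uart0_src", CLK_SET_RATE_PARENT,
		       RK2928_CLKSEL_CON(17), 0,
		       RK2928_CLKGATE_CON(1), 9, GFLAGS,
		       600 * 1000 * 1000),
};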
2990+
2991 /* SGRF clocks are only accessible from secure mode, so not controllable */
2992 #define SGRF_GATE(_id, cname, pname)				\
2993 		FACTOR(_id, cname, pname, 0, 1, 1)
2994@@ -840,13 +1163,17 @@ void rockchip_clk_register_branches(struct rockchip_clk_provider *ctx,
2995 void rockchip_clk_register_plls(struct rockchip_clk_provider *ctx,
2996 				struct rockchip_pll_clock *pll_list,
2997 				unsigned int nr_pll, int grf_lock_offset);
2998-void rockchip_clk_register_armclk(struct rockchip_clk_provider *ctx,
2999-			unsigned int lookup_id, const char *name,
3000-			const char *const *parent_names, u8 num_parents,
3001-			const struct rockchip_cpuclk_reg_data *reg_data,
3002-			const struct rockchip_cpuclk_rate_table *rates,
3003-			int nrates);
3004-void rockchip_clk_protect_critical(const char *const clocks[], int nclocks);
3005+void __init rockchip_clk_register_armclk(struct rockchip_clk_provider *ctx,
3006+					 unsigned int lookup_id,
3007+					 const char *name,
3008+					 u8 num_parents,
3009+					 struct clk *parent, struct clk *alt_parent,
3010+					 const struct rockchip_cpuclk_reg_data *reg_data,
3011+					 const struct rockchip_cpuclk_rate_table *rates,
3012+					 int nrates);
3013+int rockchip_pll_clk_rate_to_scale(struct clk *clk, unsigned long rate);
3014+int rockchip_pll_clk_scale_to_rate(struct clk *clk, unsigned int scale);
3015+int rockchip_pll_clk_adaptive_scaling(struct clk *clk, int sel);
3016 void rockchip_register_restart_notifier(struct rockchip_clk_provider *ctx,
3017 					unsigned int reg, void (*cb)(void));
3018 
3019@@ -857,12 +1184,27 @@ struct clk *rockchip_clk_register_halfdiv(const char *name,
3020 					  u8 num_parents, void __iomem *base,
3021 					  int muxdiv_offset, u8 mux_shift,
3022 					  u8 mux_width, u8 mux_flags,
3023-					  u8 div_shift, u8 div_width,
3024-					  u8 div_flags, int gate_offset,
3025-					  u8 gate_shift, u8 gate_flags,
3026-					  unsigned long flags,
3027+					  int div_offset, u8 div_shift,
3028+					  u8 div_width, u8 div_flags,
3029+					  int gate_offset, u8 gate_shift,
3030+					  u8 gate_flags, unsigned long flags,
3031 					  spinlock_t *lock);
3032 
3033+struct clk *rockchip_clk_register_dclk_branch(const char *name,
3034+					      const char *const *parent_names,
3035+					      u8 num_parents,
3036+					      void __iomem *base,
3037+					      int muxdiv_offset, u8 mux_shift,
3038+					      u8 mux_width, u8 mux_flags,
3039+					      int div_offset, u8 div_shift,
3040+					      u8 div_width, u8 div_flags,
3041+					      struct clk_div_table *div_table,
3042+					      int gate_offset,
3043+					      u8 gate_shift, u8 gate_flags,
3044+					      unsigned long flags,
3045+					      unsigned long max_prate,
3046+					      spinlock_t *lock);
3047+
3048 #ifdef CONFIG_RESET_CONTROLLER
3049 void rockchip_register_softrst(struct device_node *np,
3050 			       unsigned int num_regs,
3051@@ -874,5 +1216,6 @@ static inline void rockchip_register_softrst(struct device_node *np,
3052 {
3053 }
3054 #endif
3055+extern void (*rk_dump_cru)(void);
3056 
3057 #endif
3058diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
3059index a0c6e88be..9d9cb5757 100644
3060--- a/drivers/clocksource/Kconfig
3061+++ b/drivers/clocksource/Kconfig
3062@@ -85,7 +85,9 @@ config IXP4XX_TIMER
3063 	  Enables support for the Intel XScale IXP4xx SoC timer.
3064 
3065 config ROCKCHIP_TIMER
3066-	bool "Rockchip timer driver" if COMPILE_TEST
3067+	tristate "Rockchip timer driver"
3068+	default ARCH_ROCKCHIP
3069+	depends on ARCH_ROCKCHIP || COMPILE_TEST
3070 	depends on ARM || ARM64
3071 	select TIMER_OF
3072 	select CLKSRC_MMIO
3073diff --git a/drivers/clocksource/timer-rockchip.c b/drivers/clocksource/timer-rockchip.c
3074index 1f95d0aca..2f4e970d7 100644
3075--- a/drivers/clocksource/timer-rockchip.c
3076+++ b/drivers/clocksource/timer-rockchip.c
3077@@ -8,11 +8,13 @@
3078 #include <linux/clockchips.h>
3079 #include <linux/init.h>
3080 #include <linux/interrupt.h>
3081+#include <linux/module.h>
3082 #include <linux/sched_clock.h>
3083 #include <linux/slab.h>
3084 #include <linux/of.h>
3085 #include <linux/of_address.h>
3086 #include <linux/of_irq.h>
3087+#include <linux/platform_device.h>
3088 
3089 #define TIMER_NAME "rk_timer"
3090 
3091@@ -45,7 +47,9 @@ struct rk_clkevt {
3092 };
3093 
3094 static struct rk_clkevt *rk_clkevt;
3095+#ifndef MODULE
3096 static struct rk_timer *rk_clksrc;
3097+#endif
3098 
3099 static inline struct rk_timer *rk_timer(struct clock_event_device *ce)
3100 {
3101@@ -119,10 +123,12 @@ static irqreturn_t rk_timer_interrupt(int irq, void *dev_id)
3102 	return IRQ_HANDLED;
3103 }
3104 
3105+#ifndef MODULE
3106 static u64 notrace rk_timer_sched_read(void)
3107 {
3108 	return ~readl_relaxed(rk_clksrc->base + TIMER_CURRENT_VALUE0);
3109 }
3110+#endif
3111 
3112 static int __init
3113 rk_timer_probe(struct rk_timer *timer, struct device_node *np)
3114@@ -250,6 +256,7 @@ static int __init rk_clkevt_init(struct device_node *np)
3115 	return ret;
3116 }
3117 
3118+#ifndef MODULE
3119 static int __init rk_clksrc_init(struct device_node *np)
3120 {
3121 	int ret = -EINVAL;
3122@@ -287,14 +294,17 @@ static int __init rk_clksrc_init(struct device_node *np)
3123 	rk_clksrc = ERR_PTR(ret);
3124 	return ret;
3125 }
3126+#endif
3127 
3128 static int __init rk_timer_init(struct device_node *np)
3129 {
3130 	if (!rk_clkevt)
3131 		return rk_clkevt_init(np);
3132 
3133+#ifndef MODULE
3134 	if (!rk_clksrc)
3135 		return rk_clksrc_init(np);
3136+#endif
3137 
3138 	pr_err("Too many timer definitions for '%s'\n", TIMER_NAME);
3139 	return -EINVAL;
3140@@ -302,3 +312,26 @@ static int __init rk_timer_init(struct device_node *np)
3141 
3142 TIMER_OF_DECLARE(rk3288_timer, "rockchip,rk3288-timer", rk_timer_init);
3143 TIMER_OF_DECLARE(rk3399_timer, "rockchip,rk3399-timer", rk_timer_init);
3144+
3145+#ifdef MODULE
3146+static int __init rk_timer_driver_probe(struct platform_device *pdev)
3147+{
3148+	return rk_timer_init(pdev->dev.of_node);
3149+}
3150+
3151+static const struct of_device_id rk_timer_match_table[] = {
3152+	{ .compatible = "rockchip,rk3288-timer" },
3153+	{ .compatible = "rockchip,rk3399-timer" },
3154+	{ /* sentinel */ },
3155+};
3156+
3157+static struct platform_driver rk_timer_driver = {
3158+	.driver = {
3159+		.name = TIMER_NAME,
3160+		.of_match_table = rk_timer_match_table,
3161+	},
3162+};
3163+module_platform_driver_probe(rk_timer_driver, rk_timer_driver_probe);
3164+
3165+MODULE_LICENSE("GPL");
3166+#endif
3167diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
3168index aea285651..6e428043a 100644
3169--- a/drivers/cpufreq/cpufreq-dt-platdev.c
3170+++ b/drivers/cpufreq/cpufreq-dt-platdev.c
3171@@ -66,21 +66,6 @@ static const struct of_device_id whitelist[] __initconst = {
3172 	{ .compatible = "renesas,r8a7794", },
3173 	{ .compatible = "renesas,sh73a0", },
3174 
3175-	{ .compatible = "rockchip,rk2928", },
3176-	{ .compatible = "rockchip,rk3036", },
3177-	{ .compatible = "rockchip,rk3066a", },
3178-	{ .compatible = "rockchip,rk3066b", },
3179-	{ .compatible = "rockchip,rk3188", },
3180-	{ .compatible = "rockchip,rk3228", },
3181-	{ .compatible = "rockchip,rk3288", },
3182-	{ .compatible = "rockchip,rk3328", },
3183-	{ .compatible = "rockchip,rk3366", },
3184-	{ .compatible = "rockchip,rk3368", },
3185-	{ .compatible = "rockchip,rk3399",
3186-	  .data = &(struct cpufreq_dt_platform_data)
3187-		{ .have_governor_per_policy = true, },
3188-	},
3189-
3190 	{ .compatible = "st-ericsson,u8500", },
3191 	{ .compatible = "st-ericsson,u8540", },
3192 	{ .compatible = "st-ericsson,u9500", },
3193@@ -139,6 +124,30 @@ static const struct of_device_id blacklist[] __initconst = {
3194 	{ .compatible = "qcom,sdm845", },
3195 	{ .compatible = "qcom,sm8150", },
3196 
3197+	{ .compatible = "rockchip,px30", },
3198+	{ .compatible = "rockchip,rk2928", },
3199+	{ .compatible = "rockchip,rk3036", },
3200+	{ .compatible = "rockchip,rk3066a", },
3201+	{ .compatible = "rockchip,rk3066b", },
3202+	{ .compatible = "rockchip,rk3126", },
3203+	{ .compatible = "rockchip,rk3128", },
3204+	{ .compatible = "rockchip,rk3188", },
3205+	{ .compatible = "rockchip,rk3228", },
3206+	{ .compatible = "rockchip,rk3229", },
3207+	{ .compatible = "rockchip,rk3288", },
3208+	{ .compatible = "rockchip,rk3288w", },
3209+	{ .compatible = "rockchip,rk3326", },
3210+	{ .compatible = "rockchip,rk3328", },
3211+	{ .compatible = "rockchip,rk3366", },
3212+	{ .compatible = "rockchip,rk3368", },
3213+	{ .compatible = "rockchip,rk3399", },
3214+	{ .compatible = "rockchip,rk3399pro", },
3215+	{ .compatible = "rockchip,rk3566", },
3216+	{ .compatible = "rockchip,rk3568", },
3217+	{ .compatible = "rockchip,rk3588", },
3218+	{ .compatible = "rockchip,rv1109", },
3219+	{ .compatible = "rockchip,rv1126", },
3220+
3221 	{ .compatible = "st,stih407", },
3222 	{ .compatible = "st,stih410", },
3223 	{ .compatible = "st,stih418", },
3224diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
3225index e363ae04a..bbf10ab42 100644
3226--- a/drivers/cpufreq/cpufreq-dt.c
3227+++ b/drivers/cpufreq/cpufreq-dt.c
3228@@ -23,6 +23,9 @@
3229 #include <linux/thermal.h>
3230 
3231 #include "cpufreq-dt.h"
3232+#ifdef CONFIG_ARCH_ROCKCHIP
3233+#include <linux/rockchip-cpufreq.h>
3234+#endif
3235 
3236 struct private_data {
3237 	struct list_head node;
3238@@ -30,7 +33,7 @@ struct private_data {
3239 	cpumask_var_t cpus;
3240 	struct device *cpu_dev;
3241 	struct opp_table *opp_table;
3242-	struct opp_table *reg_opp_table;
3243+	struct cpufreq_frequency_table *freq_table;
3244 	bool have_static_opps;
3245 };
3246 
3247@@ -59,7 +62,11 @@ static int set_target(struct cpufreq_policy *policy, unsigned int index)
3248 	struct private_data *priv = policy->driver_data;
3249 	unsigned long freq = policy->freq_table[index].frequency;
3250 
3251+#ifdef CONFIG_ARCH_ROCKCHIP
3252+	return rockchip_cpufreq_opp_set_rate(priv->cpu_dev, freq * 1000);
3253+#else
3254 	return dev_pm_opp_set_rate(priv->cpu_dev, freq * 1000);
3255+#endif
3256 }
3257 
3258 /*
3259@@ -102,7 +109,6 @@ static const char *find_supply_name(struct device *dev)
3260 
3261 static int cpufreq_init(struct cpufreq_policy *policy)
3262 {
3263-	struct cpufreq_frequency_table *freq_table;
3264 	struct private_data *priv;
3265 	struct device *cpu_dev;
3266 	struct clk *cpu_clk;
3267@@ -114,9 +120,7 @@ static int cpufreq_init(struct cpufreq_policy *policy)
3268 		pr_err("failed to find data for cpu%d\n", policy->cpu);
3269 		return -ENODEV;
3270 	}
3271-
3272 	cpu_dev = priv->cpu_dev;
3273-	cpumask_copy(policy->cpus, priv->cpus);
3274 
3275 	cpu_clk = clk_get(cpu_dev, NULL);
3276 	if (IS_ERR(cpu_clk)) {
3277@@ -125,67 +129,32 @@ static int cpufreq_init(struct cpufreq_policy *policy)
3278 		return ret;
3279 	}
3280 
3281-	/*
3282-	 * Initialize OPP tables for all policy->cpus. They will be shared by
3283-	 * all CPUs which have marked their CPUs shared with OPP bindings.
3284-	 *
3285-	 * For platforms not using operating-points-v2 bindings, we do this
3286-	 * before updating policy->cpus. Otherwise, we will end up creating
3287-	 * duplicate OPPs for policy->cpus.
3288-	 *
3289-	 * OPPs might be populated at runtime, don't check for error here
3290-	 */
3291-	if (!dev_pm_opp_of_cpumask_add_table(policy->cpus))
3292-		priv->have_static_opps = true;
3293-
3294-	/*
3295-	 * But we need OPP table to function so if it is not there let's
3296-	 * give platform code chance to provide it for us.
3297-	 */
3298-	ret = dev_pm_opp_get_opp_count(cpu_dev);
3299-	if (ret <= 0) {
3300-		dev_err(cpu_dev, "OPP table can't be empty\n");
3301-		ret = -ENODEV;
3302-		goto out_free_opp;
3303-	}
3304-
3305-	ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
3306-	if (ret) {
3307-		dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
3308-		goto out_free_opp;
3309-	}
3310+	transition_latency = dev_pm_opp_get_max_transition_latency(cpu_dev);
3311+	if (!transition_latency)
3312+		transition_latency = CPUFREQ_ETERNAL;
3313 
3314+	cpumask_copy(policy->cpus, priv->cpus);
3315 	policy->driver_data = priv;
3316 	policy->clk = cpu_clk;
3317-	policy->freq_table = freq_table;
3318-
3319+	policy->freq_table = priv->freq_table;
3320 	policy->suspend_freq = dev_pm_opp_get_suspend_opp_freq(cpu_dev) / 1000;
3321+	policy->cpuinfo.transition_latency = transition_latency;
3322+	policy->dvfs_possible_from_any_cpu = true;
3323 
3324 	/* Support turbo/boost mode */
3325 	if (policy_has_boost_freq(policy)) {
3326 		/* This gets disabled by core on driver unregister */
3327 		ret = cpufreq_enable_boost_support();
3328 		if (ret)
3329-			goto out_free_cpufreq_table;
3330+			goto out_clk_put;
3331 		cpufreq_dt_attr[1] = &cpufreq_freq_attr_scaling_boost_freqs;
3332 	}
3333 
3334-	transition_latency = dev_pm_opp_get_max_transition_latency(cpu_dev);
3335-	if (!transition_latency)
3336-		transition_latency = CPUFREQ_ETERNAL;
3337-
3338-	policy->cpuinfo.transition_latency = transition_latency;
3339-	policy->dvfs_possible_from_any_cpu = true;
3340-
3341 	dev_pm_opp_of_register_em(cpu_dev, policy->cpus);
3342 
3343 	return 0;
3344 
3345-out_free_cpufreq_table:
3346-	dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
3347-out_free_opp:
3348-	if (priv->have_static_opps)
3349-		dev_pm_opp_of_cpumask_remove_table(policy->cpus);
3350+out_clk_put:
3351 	clk_put(cpu_clk);
3352 
3353 	return ret;
3354@@ -208,11 +177,6 @@ static int cpufreq_offline(struct cpufreq_policy *policy)
3355 
3356 static int cpufreq_exit(struct cpufreq_policy *policy)
3357 {
3358-	struct private_data *priv = policy->driver_data;
3359-
3360-	dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
3361-	if (priv->have_static_opps)
3362-		dev_pm_opp_of_cpumask_remove_table(policy->related_cpus);
3363 	clk_put(policy->clk);
3364 	return 0;
3365 }
3366@@ -236,6 +200,7 @@ static int dt_cpufreq_early_init(struct device *dev, int cpu)
3367 {
3368 	struct private_data *priv;
3369 	struct device *cpu_dev;
3370+	bool fallback = false;
3371 	const char *reg_name;
3372 	int ret;
3373 
3374@@ -254,68 +219,91 @@ static int dt_cpufreq_early_init(struct device *dev, int cpu)
3375 	if (!alloc_cpumask_var(&priv->cpus, GFP_KERNEL))
3376 		return -ENOMEM;
3377 
3378+	cpumask_set_cpu(cpu, priv->cpus);
3379 	priv->cpu_dev = cpu_dev;
3380 
3381-	/* Try to get OPP table early to ensure resources are available */
3382-	priv->opp_table = dev_pm_opp_get_opp_table(cpu_dev);
3383-	if (IS_ERR(priv->opp_table)) {
3384-		ret = PTR_ERR(priv->opp_table);
3385-		if (ret != -EPROBE_DEFER)
3386-			dev_err(cpu_dev, "failed to get OPP table: %d\n", ret);
3387-		goto free_cpumask;
3388-	}
3389-
3390 	/*
3391 	 * OPP layer will be taking care of regulators now, but it needs to know
3392 	 * the name of the regulator first.
3393 	 */
3394 	reg_name = find_supply_name(cpu_dev);
3395 	if (reg_name) {
3396-		priv->reg_opp_table = dev_pm_opp_set_regulators(cpu_dev,
3397-								&reg_name, 1);
3398-		if (IS_ERR(priv->reg_opp_table)) {
3399-			ret = PTR_ERR(priv->reg_opp_table);
3400+		priv->opp_table = dev_pm_opp_set_regulators(cpu_dev, &reg_name,
3401+							    1);
3402+		if (IS_ERR(priv->opp_table)) {
3403+			ret = PTR_ERR(priv->opp_table);
3404 			if (ret != -EPROBE_DEFER)
3405 				dev_err(cpu_dev, "failed to set regulators: %d\n",
3406 					ret);
3407-			goto put_table;
3408+			goto free_cpumask;
3409 		}
3410 	}
3411 
3412-	/* Find OPP sharing information so we can fill pri->cpus here */
3413 	/* Get OPP-sharing information from "operating-points-v2" bindings */
3414 	ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, priv->cpus);
3415 	if (ret) {
3416 		if (ret != -ENOENT)
3417-			goto put_reg;
3418+			goto out;
3419 
3420 		/*
3421 		 * operating-points-v2 not supported, fallback to all CPUs share
3422 		 * OPP for backward compatibility if the platform hasn't set
3423 		 * sharing CPUs.
3424 		 */
3425-		if (dev_pm_opp_get_sharing_cpus(cpu_dev, priv->cpus)) {
3426-			cpumask_setall(priv->cpus);
3427-
3428-			/*
3429-			 * OPP tables are initialized only for cpu, do it for
3430-			 * others as well.
3431-			 */
3432-			ret = dev_pm_opp_set_sharing_cpus(cpu_dev, priv->cpus);
3433-			if (ret)
3434-				dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n",
3435-					__func__, ret);
3436-		}
3437+		if (dev_pm_opp_get_sharing_cpus(cpu_dev, priv->cpus))
3438+			fallback = true;
3439+	}
3440+
3441+	/*
3442+	 * Initialize OPP tables for all priv->cpus. They will be shared by
3443+	 * all CPUs which have marked their CPUs shared with OPP bindings.
3444+	 *
3445+	 * For platforms not using operating-points-v2 bindings, we do this
3446+	 * before updating priv->cpus. Otherwise, we will end up creating
3447+	 * duplicate OPPs for the CPUs.
3448+	 *
3449+	 * OPPs might be populated at runtime, don't check for error here.
3450+	 */
3451+	if (!dev_pm_opp_of_cpumask_add_table(priv->cpus))
3452+		priv->have_static_opps = true;
3453+
3454+	/*
3455+	 * The OPP table must be initialized, statically or dynamically, by this
3456+	 * point.
3457+	 */
3458+	ret = dev_pm_opp_get_opp_count(cpu_dev);
3459+	if (ret <= 0) {
3460+		dev_err(cpu_dev, "OPP table can't be empty\n");
3461+		ret = -ENODEV;
3462+		goto out;
3463+	}
3464+
3465+	if (fallback) {
3466+		cpumask_setall(priv->cpus);
3467+		ret = dev_pm_opp_set_sharing_cpus(cpu_dev, priv->cpus);
3468+		if (ret)
3469+			dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n",
3470+				__func__, ret);
3471+	}
3472+
3473+#ifdef CONFIG_ARCH_ROCKCHIP
3474+	rockchip_cpufreq_adjust_power_scale(cpu_dev);
3475+#endif
3476+
3477+	ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &priv->freq_table);
3478+	if (ret) {
3479+		dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
3480+		goto out;
3481 	}
3482 
3483 	list_add(&priv->node, &priv_list);
3484 	return 0;
3485 
3486-put_reg:
3487-	if (priv->reg_opp_table)
3488-		dev_pm_opp_put_regulators(priv->reg_opp_table);
3489-put_table:
3490-	dev_pm_opp_put_opp_table(priv->opp_table);
3491+out:
3492+	if (priv->have_static_opps)
3493+		dev_pm_opp_of_cpumask_remove_table(priv->cpus);
3494+	if (priv->opp_table)
3495+		dev_pm_opp_put_regulators(priv->opp_table);
3496 free_cpumask:
3497 	free_cpumask_var(priv->cpus);
3498 	return ret;
3499@@ -326,9 +314,11 @@ static void dt_cpufreq_release(void)
3500 	struct private_data *priv, *tmp;
3501 
3502 	list_for_each_entry_safe(priv, tmp, &priv_list, node) {
3503-		if (priv->reg_opp_table)
3504-			dev_pm_opp_put_regulators(priv->reg_opp_table);
3505-		dev_pm_opp_put_opp_table(priv->opp_table);
3506+		dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &priv->freq_table);
3507+		if (priv->have_static_opps)
3508+			dev_pm_opp_of_cpumask_remove_table(priv->cpus);
3509+		if (priv->opp_table)
3510+			dev_pm_opp_put_regulators(priv->opp_table);
3511 		free_cpumask_var(priv->cpus);
3512 		list_del(&priv->node);
3513 	}
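
With the rework above, the cpufreq table is built once per shared-OPP group in dt_cpufreq_early_init() and released in dt_cpufreq_release(), instead of on every cpufreq_init()/cpufreq_exit() cycle. As a rough illustration of the OPP helpers involved (function names here are placeholders, not part of the driver; the CPU's OPPs are assumed to be registered already):

#include <linux/cpufreq.h>
#include <linux/pm_opp.h>

static int example_build_freq_table(struct device *cpu_dev,
				    struct cpufreq_frequency_table **table)
{
	/* The OPP table must be populated before the cpufreq table is built. */
	if (dev_pm_opp_get_opp_count(cpu_dev) <= 0)
		return -ENODEV;

	return dev_pm_opp_init_cpufreq_table(cpu_dev, table);
}

static void example_release_freq_table(struct device *cpu_dev,
				       struct cpufreq_frequency_table **table)
{
	dev_pm_opp_free_cpufreq_table(cpu_dev, table);
}
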
3514diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
3515index 583423909..6926796dc 100644
3516--- a/drivers/cpufreq/cpufreq.c
3517+++ b/drivers/cpufreq/cpufreq.c
3518@@ -688,8 +688,12 @@ static ssize_t show_##file_name				\
3519 	return sprintf(buf, "%u\n", policy->object);	\
3520 }
3521 
3522+static ssize_t show_cpuinfo_max_freq(struct cpufreq_policy *policy, char *buf)
3523+{
3524+	unsigned int max_freq = policy->cpuinfo.max_freq;
3525+	return sprintf(buf, "%u\n", max_freq);
3526+}
3527 show_one(cpuinfo_min_freq, cpuinfo.min_freq);
3528-show_one(cpuinfo_max_freq, cpuinfo.max_freq);
3529 show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
3530 show_one(scaling_min_freq, min);
3531 show_one(scaling_max_freq, max);
3532@@ -1400,7 +1404,7 @@ static int cpufreq_online(unsigned int cpu)
3533 
3534 		ret = freq_qos_add_request(&policy->constraints,
3535 					   policy->min_freq_req, FREQ_QOS_MIN,
3536-					   FREQ_QOS_MIN_DEFAULT_VALUE);
3537+					   policy->min);
3538 		if (ret < 0) {
3539 			/*
3540 			 * So we don't call freq_qos_remove_request() for an
3541@@ -1420,7 +1424,7 @@ static int cpufreq_online(unsigned int cpu)
3542 
3543 		ret = freq_qos_add_request(&policy->constraints,
3544 					   policy->max_freq_req, FREQ_QOS_MAX,
3545-					   FREQ_QOS_MAX_DEFAULT_VALUE);
3546+					   policy->max);
3547 		if (ret < 0) {
3548 			policy->max_freq_req = NULL;
3549 			goto out_destroy_policy;
3550@@ -2535,6 +2539,7 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
3551 
3552 	return ret;
3553 }
3554+EXPORT_TRACEPOINT_SYMBOL_GPL(cpu_frequency_limits);
3555 
3556 /**
3557  * cpufreq_update_policy - Re-evaluate an existing cpufreq policy.
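
Seeding the min/max QoS requests with policy->min and policy->max keeps limits already applied to the policy intact across CPU offline/online. For illustration, this is roughly how another kernel user stacks its own constraint on top of those requests (the request variable and the 1200000 kHz floor are made-up examples):

#include <linux/cpufreq.h>
#include <linux/pm_qos.h>

static struct freq_qos_request example_min_req;

static int example_add_min_floor(struct cpufreq_policy *policy)
{
	/* Request a 1.2 GHz floor on top of the policy's own constraints. */
	return freq_qos_add_request(&policy->constraints, &example_min_req,
				    FREQ_QOS_MIN, 1200000);
}

static void example_drop_min_floor(void)
{
	freq_qos_remove_request(&example_min_req);
}
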
3558diff --git a/drivers/cpufreq/cpufreq_userspace.c b/drivers/cpufreq/cpufreq_userspace.c
3559index 50a4d7846..1f001d281 100644
3560--- a/drivers/cpufreq/cpufreq_userspace.c
3561+++ b/drivers/cpufreq/cpufreq_userspace.c
3562@@ -78,20 +78,18 @@ static int cpufreq_userspace_policy_start(struct cpufreq_policy *policy)
3563 
3564 	mutex_lock(&userspace_mutex);
3565 	per_cpu(cpu_is_managed, policy->cpu) = 1;
3566-	*setspeed = policy->cur;
3567+	if (!*setspeed)
3568+		*setspeed = policy->cur;
3569 	mutex_unlock(&userspace_mutex);
3570 	return 0;
3571 }
3572 
3573 static void cpufreq_userspace_policy_stop(struct cpufreq_policy *policy)
3574 {
3575-	unsigned int *setspeed = policy->governor_data;
3576-
3577 	pr_debug("managing cpu %u stopped\n", policy->cpu);
3578 
3579 	mutex_lock(&userspace_mutex);
3580 	per_cpu(cpu_is_managed, policy->cpu) = 0;
3581-	*setspeed = 0;
3582 	mutex_unlock(&userspace_mutex);
3583 }
3584 
3585diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
3586index 4070e573b..557f59ac4 100644
3587--- a/drivers/cpuidle/driver.c
3588+++ b/drivers/cpuidle/driver.c
3589@@ -381,3 +381,4 @@ void cpuidle_driver_state_disabled(struct cpuidle_driver *drv, int idx,
3590 
3591 	mutex_unlock(&cpuidle_lock);
3592 }
3593+EXPORT_SYMBOL_GPL(cpuidle_driver_state_disabled);
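
Exporting cpuidle_driver_state_disabled() lets modular code fence off an idle state at runtime, for example around a latency-critical window. A rough sketch (the state index 2 is a placeholder):

#include <linux/cpuidle.h>

static void example_gate_deep_idle(bool block)
{
	struct cpuidle_driver *drv = cpuidle_get_driver();

	/* Disable (or re-enable) the third idle state of the current driver. */
	if (drv)
		cpuidle_driver_state_disabled(drv, 2, block);
}
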
3594diff --git a/drivers/cpuidle/governor.c b/drivers/cpuidle/governor.c
3595index 29acaf48e..0e51ed256 100644
3596--- a/drivers/cpuidle/governor.c
3597+++ b/drivers/cpuidle/governor.c
3598@@ -102,6 +102,7 @@ int cpuidle_register_governor(struct cpuidle_governor *gov)
3599 
3600 	return ret;
3601 }
3602+EXPORT_SYMBOL_GPL(cpuidle_register_governor);
3603 
3604 /**
3605  * cpuidle_governor_latency_req - Compute a latency constraint for CPU
3606@@ -118,3 +119,4 @@ s64 cpuidle_governor_latency_req(unsigned int cpu)
3607 
3608 	return (s64)device_req * NSEC_PER_USEC;
3609 }
3610+EXPORT_SYMBOL_GPL(cpuidle_governor_latency_req);
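
Together with the export in drivers/cpuidle/driver.c above, exporting cpuidle_register_governor() and cpuidle_governor_latency_req() makes it possible for a cpuidle governor to live in a loadable module. A toy sketch of such a module (the name and rating are placeholders):

#include <linux/cpuidle.h>
#include <linux/module.h>

static int shallow_select(struct cpuidle_driver *drv,
			  struct cpuidle_device *dev, bool *stop_tick)
{
	*stop_tick = false;	/* keep the tick running for shallow idle */
	return 0;		/* always pick the shallowest state */
}

static struct cpuidle_governor shallow_governor = {
	.name	= "example_shallow",
	.rating	= 1,
	.select	= shallow_select,
};

static int __init shallow_governor_init(void)
{
	return cpuidle_register_governor(&shallow_governor);
}
module_init(shallow_governor_init);
MODULE_LICENSE("GPL");
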
3611diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
3612index c6f460550..db0fe99c8 100644
3613--- a/drivers/devfreq/devfreq.c
3614+++ b/drivers/devfreq/devfreq.c
3615@@ -1761,6 +1761,40 @@ static ssize_t timer_store(struct device *dev, struct device_attribute *attr,
3616 }
3617 static DEVICE_ATTR_RW(timer);
3618 
3619+static ssize_t load_show(struct device *dev, struct device_attribute *attr,
3620+			 char *buf)
3621+{
3622+	int err;
3623+	struct devfreq *devfreq = to_devfreq(dev);
3624+	struct devfreq_dev_status stat = devfreq->last_status;
3625+	unsigned long freq;
3626+	ssize_t len;
3627+
3628+	err = devfreq_update_stats(devfreq);
3629+	if (err)
3630+		return err;
3631+
3632+	if (stat.total_time < stat.busy_time) {
3633+		err = devfreq_update_stats(devfreq);
3634+		if (err)
3635+			return err;
3636+	}
3637+
3638+	if (!stat.total_time)
3639+		return 0;
3640+
3641+	len = sprintf(buf, "%lu", stat.busy_time * 100 / stat.total_time);
3642+
3643+	if (devfreq->profile->get_cur_freq &&
3644+	    !devfreq->profile->get_cur_freq(devfreq->dev.parent, &freq))
3645+		len += sprintf(buf + len, "@%luHz\n", freq);
3646+	else
3647+		len += sprintf(buf + len, "@%luHz\n", devfreq->previous_freq);
3648+
3649+	return len;
3650+}
3651+static DEVICE_ATTR_RO(load);
3652+
3653 static struct attribute *devfreq_attrs[] = {
3654 	&dev_attr_name.attr,
3655 	&dev_attr_governor.attr,
3656@@ -1773,6 +1807,7 @@ static struct attribute *devfreq_attrs[] = {
3657 	&dev_attr_max_freq.attr,
3658 	&dev_attr_trans_stat.attr,
3659 	&dev_attr_timer.attr,
3660+	&dev_attr_load.attr,
3661 	NULL,
3662 };
3663 ATTRIBUTE_GROUPS(devfreq);
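
The new load attribute reports busy_time * 100 / total_time followed by the current frequency, so a read typically returns something like 42@800000000Hz. It only shows meaningful data when the devfreq driver implements get_dev_status(); a rough sketch of such a hook (the counters and the 800 MHz value are placeholders):

#include <linux/devfreq.h>

/* Toy counters standing in for a real hardware activity monitor. */
static unsigned long example_busy, example_total;

static int example_get_dev_status(struct device *dev,
				  struct devfreq_dev_status *stat)
{
	stat->current_frequency = 800000000;	/* placeholder, in Hz */
	stat->busy_time = example_busy;
	stat->total_time = example_total;

	return 0;
}

static struct devfreq_dev_profile example_profile = {
	.polling_ms	= 100,
	.get_dev_status	= example_get_dev_status,
	/* .initial_freq and .target omitted for brevity */
};
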
3664diff --git a/drivers/devfreq/event/rockchip-dfi.c b/drivers/devfreq/event/rockchip-dfi.c
3665index 9a88faaf8..395790397 100644
3666--- a/drivers/devfreq/event/rockchip-dfi.c
3667+++ b/drivers/devfreq/event/rockchip-dfi.c
3668@@ -20,26 +20,81 @@
3669 
3670 #include <soc/rockchip/rk3399_grf.h>
3671 
3672-#define RK3399_DMC_NUM_CH	2
3673-
3674+#define PX30_PMUGRF_OS_REG2		0x208
3675+#define PX30_PMUGRF_OS_REG3		0x20c
3676+
3677+#define RK3588_PMUGRF_OS_REG(n)		(0x200 + (n) * 4)
3678+
3679+#define RK3128_GRF_SOC_CON0		0x140
3680+#define RK3128_GRF_OS_REG1		0x1cc
3681+#define RK3128_GRF_DFI_WRNUM		0x220
3682+#define RK3128_GRF_DFI_RDNUM		0x224
3683+#define RK3128_GRF_DFI_TIMERVAL		0x22c
3684+#define RK3128_DDR_MONITOR_EN		((1 << (16 + 6)) + (1 << 6))
3685+#define RK3128_DDR_MONITOR_DISB		((1 << (16 + 6)) + (0 << 6))
3686+
3687+#define RK3288_PMU_SYS_REG2		0x9c
3688+#define RK3288_GRF_SOC_CON4		0x254
3689+#define RK3288_GRF_SOC_STATUS(n)	(0x280 + (n) * 4)
3690+#define RK3288_DFI_EN			(0x30003 << 14)
3691+#define RK3288_DFI_DIS			(0x30000 << 14)
3692+#define RK3288_LPDDR_SEL		(0x10001 << 13)
3693+#define RK3288_DDR3_SEL			(0x10000 << 13)
3694+
3695+#define RK3328_GRF_OS_REG2		0x5d0
3696+
3697+#define RK3368_GRF_DDRC0_CON0		0x600
3698+#define RK3368_GRF_SOC_STATUS5		0x494
3699+#define RK3368_GRF_SOC_STATUS6		0x498
3700+#define RK3368_GRF_SOC_STATUS8		0x4a0
3701+#define RK3368_GRF_SOC_STATUS9		0x4a4
3702+#define RK3368_GRF_SOC_STATUS10		0x4a8
3703+#define RK3368_DFI_EN			(0x30003 << 5)
3704+#define RK3368_DFI_DIS			(0x30000 << 5)
3705+
3706+#define MAX_DMC_NUM_CH			4
3707+#define READ_DRAMTYPE_INFO(n)		(((n) >> 13) & 0x7)
3708+#define READ_CH_INFO(n)			(((n) >> 28) & 0x3)
3709+#define READ_DRAMTYPE_INFO_V3(n, m)	((((n) >> 13) & 0x7) | ((((m) >> 12) & 0x3) << 3))
3710+#define READ_SYSREG_VERSION(m)		(((m) >> 28) & 0xf)
3711+#define READ_LP5_BANK_MODE(m)		(((m) >> 1) & 0x3)
3712+#define READ_LP5_CKR(m)			(((m) >> 0) & 0x1)
3713 /* DDRMON_CTRL */
3714-#define DDRMON_CTRL	0x04
3715-#define CLR_DDRMON_CTRL	(0x1f0000 << 0)
3716-#define LPDDR4_EN	(0x10001 << 4)
3717-#define HARDWARE_EN	(0x10001 << 3)
3718-#define LPDDR3_EN	(0x10001 << 2)
3719-#define SOFTWARE_EN	(0x10001 << 1)
3720-#define SOFTWARE_DIS	(0x10000 << 1)
3721-#define TIME_CNT_EN	(0x10001 << 0)
3722+#define DDRMON_CTRL			0x04
3723+#define CLR_DDRMON_CTRL			(0xffff0000 << 0)
3724+#define LPDDR5_BANK_MODE(m)		((0x30000 | ((m) & 0x3)) << 7)
3725+#define LPDDR5_EN			(0x10001 << 6)
3726+#define DDR4_EN				(0x10001 << 5)
3727+#define LPDDR4_EN			(0x10001 << 4)
3728+#define HARDWARE_EN			(0x10001 << 3)
3729+#define LPDDR2_3_EN			(0x10001 << 2)
3730+#define SOFTWARE_EN			(0x10001 << 1)
3731+#define SOFTWARE_DIS			(0x10000 << 1)
3732+#define TIME_CNT_EN			(0x10001 << 0)
3733 
3734 #define DDRMON_CH0_COUNT_NUM		0x28
3735 #define DDRMON_CH0_DFI_ACCESS_NUM	0x2c
3736 #define DDRMON_CH1_COUNT_NUM		0x3c
3737 #define DDRMON_CH1_DFI_ACCESS_NUM	0x40
3738 
3739+/* pmu grf */
3740+#define PMUGRF_OS_REG2			0x308
3741+
3742+enum {
3743+	DDR4 = 0,
3744+	DDR3 = 3,
3745+	LPDDR2 = 5,
3746+	LPDDR3 = 6,
3747+	LPDDR4 = 7,
3748+	LPDDR4X = 8,
3749+	LPDDR5 = 9,
3750+	DDR5 = 10,
3751+	UNUSED = 0xFF
3752+};
3753+
3754 struct dmc_usage {
3755-	u32 access;
3756-	u32 total;
3757+	u64 access;
3758+	u64 total;
3759 };
3760 
3761 /*
3762@@ -50,44 +105,307 @@ struct dmc_usage {
3763 struct rockchip_dfi {
3764 	struct devfreq_event_dev *edev;
3765 	struct devfreq_event_desc *desc;
3766-	struct dmc_usage ch_usage[RK3399_DMC_NUM_CH];
3767+	struct dmc_usage ch_usage[MAX_DMC_NUM_CH];
3768 	struct device *dev;
3769 	void __iomem *regs;
3770 	struct regmap *regmap_pmu;
3771+	struct regmap *regmap_grf;
3772+	struct regmap *regmap_pmugrf;
3773 	struct clk *clk;
3774+	u32 dram_type;
3775+	u32 mon_idx;
3776+	u32 count_rate;
3777+	u32 dram_dynamic_info_reg;
3778+	/* LPDDR5 bank mode: 0: BG mode, 1: 16-bank mode, 2: 8-bank mode */
3779+	u32 lp5_bank_mode;
3780+	/* LPDDR5 clk:dqs ratio: 0 means 1:2, 1 means 1:4 */
3781+	u32 lp5_ckr;
3782+	/*
3783+	 * Channel availability mask: each bit represents a channel;
3784+	 * 1: available, 0: not available.
3785+	 */
3786+	u32 ch_msk;
3787+};
3788+
3789+static void rk3128_dfi_start_hardware_counter(struct devfreq_event_dev *edev)
3790+{
3791+	struct rockchip_dfi *info = devfreq_event_get_drvdata(edev);
3792+
3793+	regmap_write(info->regmap_grf,
3794+		     RK3128_GRF_SOC_CON0,
3795+		     RK3128_DDR_MONITOR_EN);
3796+}
3797+
3798+static void rk3128_dfi_stop_hardware_counter(struct devfreq_event_dev *edev)
3799+{
3800+	struct rockchip_dfi *info = devfreq_event_get_drvdata(edev);
3801+
3802+	regmap_write(info->regmap_grf,
3803+		     RK3128_GRF_SOC_CON0,
3804+		     RK3128_DDR_MONITOR_DISB);
3805+}
3806+
3807+static int rk3128_dfi_disable(struct devfreq_event_dev *edev)
3808+{
3809+	rk3128_dfi_stop_hardware_counter(edev);
3810+
3811+	return 0;
3812+}
3813+
3814+static int rk3128_dfi_enable(struct devfreq_event_dev *edev)
3815+{
3816+	rk3128_dfi_start_hardware_counter(edev);
3817+
3818+	return 0;
3819+}
3820+
3821+static int rk3128_dfi_set_event(struct devfreq_event_dev *edev)
3822+{
3823+	return 0;
3824+}
3825+
3826+static int rk3128_dfi_get_event(struct devfreq_event_dev *edev,
3827+				struct devfreq_event_data *edata)
3828+{
3829+	struct rockchip_dfi *info = devfreq_event_get_drvdata(edev);
3830+	unsigned long flags;
3831+	u32 dfi_wr, dfi_rd, dfi_timer;
3832+
3833+	local_irq_save(flags);
3834+
3835+	rk3128_dfi_stop_hardware_counter(edev);
3836+
3837+	regmap_read(info->regmap_grf, RK3128_GRF_DFI_WRNUM, &dfi_wr);
3838+	regmap_read(info->regmap_grf, RK3128_GRF_DFI_RDNUM, &dfi_rd);
3839+	regmap_read(info->regmap_grf, RK3128_GRF_DFI_TIMERVAL, &dfi_timer);
3840+
3841+	edata->load_count = (dfi_wr + dfi_rd) * 4;
3842+	edata->total_count = dfi_timer;
3843+
3844+	rk3128_dfi_start_hardware_counter(edev);
3845+
3846+	local_irq_restore(flags);
3847+
3848+	return 0;
3849+}
3850+
3851+static const struct devfreq_event_ops rk3128_dfi_ops = {
3852+	.disable = rk3128_dfi_disable,
3853+	.enable = rk3128_dfi_enable,
3854+	.get_event = rk3128_dfi_get_event,
3855+	.set_event = rk3128_dfi_set_event,
3856+};
3857+
3858+static void rk3288_dfi_start_hardware_counter(struct devfreq_event_dev *edev)
3859+{
3860+	struct rockchip_dfi *info = devfreq_event_get_drvdata(edev);
3861+
3862+	regmap_write(info->regmap_grf, RK3288_GRF_SOC_CON4, RK3288_DFI_EN);
3863+}
3864+
3865+static void rk3288_dfi_stop_hardware_counter(struct devfreq_event_dev *edev)
3866+{
3867+	struct rockchip_dfi *info = devfreq_event_get_drvdata(edev);
3868+
3869+	regmap_write(info->regmap_grf, RK3288_GRF_SOC_CON4, RK3288_DFI_DIS);
3870+}
3871+
3872+static int rk3288_dfi_disable(struct devfreq_event_dev *edev)
3873+{
3874+	rk3288_dfi_stop_hardware_counter(edev);
3875+
3876+	return 0;
3877+}
3878+
3879+static int rk3288_dfi_enable(struct devfreq_event_dev *edev)
3880+{
3881+	rk3288_dfi_start_hardware_counter(edev);
3882+
3883+	return 0;
3884+}
3885+
3886+static int rk3288_dfi_set_event(struct devfreq_event_dev *edev)
3887+{
3888+	return 0;
3889+}
3890+
3891+static int rk3288_dfi_get_busier_ch(struct devfreq_event_dev *edev)
3892+{
3893+	struct rockchip_dfi *info = devfreq_event_get_drvdata(edev);
3894+	u32 tmp, max = 0;
3895+	u32 i, busier_ch = 0;
3896+	u32 rd_count, wr_count, total_count;
3897+
3898+	rk3288_dfi_stop_hardware_counter(edev);
3899+
3900+	/* Find out which channel is busier */
3901+	for (i = 0; i < MAX_DMC_NUM_CH; i++) {
3902+		if (!(info->ch_msk & BIT(i)))
3903+			continue;
3904+		regmap_read(info->regmap_grf,
3905+			    RK3288_GRF_SOC_STATUS(11 + i * 4), &wr_count);
3906+		regmap_read(info->regmap_grf,
3907+			    RK3288_GRF_SOC_STATUS(12 + i * 4), &rd_count);
3908+		regmap_read(info->regmap_grf,
3909+			    RK3288_GRF_SOC_STATUS(14 + i * 4), &total_count);
3910+		info->ch_usage[i].access = (wr_count + rd_count) * 4;
3911+		info->ch_usage[i].total = total_count;
3912+		tmp = info->ch_usage[i].access;
3913+		if (tmp > max) {
3914+			busier_ch = i;
3915+			max = tmp;
3916+		}
3917+	}
3918+	rk3288_dfi_start_hardware_counter(edev);
3919+
3920+	return busier_ch;
3921+}
3922+
3923+static int rk3288_dfi_get_event(struct devfreq_event_dev *edev,
3924+				struct devfreq_event_data *edata)
3925+{
3926+	struct rockchip_dfi *info = devfreq_event_get_drvdata(edev);
3927+	int busier_ch;
3928+	unsigned long flags;
3929+
3930+	local_irq_save(flags);
3931+	busier_ch = rk3288_dfi_get_busier_ch(edev);
3932+	local_irq_restore(flags);
3933+
3934+	edata->load_count = info->ch_usage[busier_ch].access;
3935+	edata->total_count = info->ch_usage[busier_ch].total;
3936+
3937+	return 0;
3938+}
3939+
3940+static const struct devfreq_event_ops rk3288_dfi_ops = {
3941+	.disable = rk3288_dfi_disable,
3942+	.enable = rk3288_dfi_enable,
3943+	.get_event = rk3288_dfi_get_event,
3944+	.set_event = rk3288_dfi_set_event,
3945+};
3946+
3947+static void rk3368_dfi_start_hardware_counter(struct devfreq_event_dev *edev)
3948+{
3949+	struct rockchip_dfi *info = devfreq_event_get_drvdata(edev);
3950+
3951+	regmap_write(info->regmap_grf, RK3368_GRF_DDRC0_CON0, RK3368_DFI_EN);
3952+}
3953+
3954+static void rk3368_dfi_stop_hardware_counter(struct devfreq_event_dev *edev)
3955+{
3956+	struct rockchip_dfi *info = devfreq_event_get_drvdata(edev);
3957+
3958+	regmap_write(info->regmap_grf, RK3368_GRF_DDRC0_CON0, RK3368_DFI_DIS);
3959+}
3960+
3961+static int rk3368_dfi_disable(struct devfreq_event_dev *edev)
3962+{
3963+	rk3368_dfi_stop_hardware_counter(edev);
3964+
3965+	return 0;
3966+}
3967+
3968+static int rk3368_dfi_enable(struct devfreq_event_dev *edev)
3969+{
3970+	rk3368_dfi_start_hardware_counter(edev);
3971+
3972+	return 0;
3973+}
3974+
3975+static int rk3368_dfi_set_event(struct devfreq_event_dev *edev)
3976+{
3977+	return 0;
3978+}
3979+
3980+static int rk3368_dfi_get_event(struct devfreq_event_dev *edev,
3981+				struct devfreq_event_data *edata)
3982+{
3983+	struct rockchip_dfi *info = devfreq_event_get_drvdata(edev);
3984+	unsigned long flags;
3985+	u32 dfi0_wr, dfi0_rd, dfi1_wr, dfi1_rd, dfi_timer;
3986+
3987+	local_irq_save(flags);
3988+
3989+	rk3368_dfi_stop_hardware_counter(edev);
3990+
3991+	regmap_read(info->regmap_grf, RK3368_GRF_SOC_STATUS5, &dfi0_wr);
3992+	regmap_read(info->regmap_grf, RK3368_GRF_SOC_STATUS6, &dfi0_rd);
3993+	regmap_read(info->regmap_grf, RK3368_GRF_SOC_STATUS9, &dfi1_wr);
3994+	regmap_read(info->regmap_grf, RK3368_GRF_SOC_STATUS10, &dfi1_rd);
3995+	regmap_read(info->regmap_grf, RK3368_GRF_SOC_STATUS8, &dfi_timer);
3996+
3997+	edata->load_count = (dfi0_wr + dfi0_rd + dfi1_wr + dfi1_rd) * 2;
3998+	edata->total_count = dfi_timer;
3999+
4000+	rk3368_dfi_start_hardware_counter(edev);
4001+
4002+	local_irq_restore(flags);
4003+
4004+	return 0;
4005+}
4006+
4007+static const struct devfreq_event_ops rk3368_dfi_ops = {
4008+	.disable = rk3368_dfi_disable,
4009+	.enable = rk3368_dfi_enable,
4010+	.get_event = rk3368_dfi_get_event,
4011+	.set_event = rk3368_dfi_set_event,
4012 };
4013 
4014 static void rockchip_dfi_start_hardware_counter(struct devfreq_event_dev *edev)
4015 {
4016 	struct rockchip_dfi *info = devfreq_event_get_drvdata(edev);
4017 	void __iomem *dfi_regs = info->regs;
4018-	u32 val;
4019-	u32 ddr_type;
4020+	u32 mon_idx = 0, val_6 = 0;
4021+	u32 i;
4022 
4023-	/* get ddr type */
4024-	regmap_read(info->regmap_pmu, RK3399_PMUGRF_OS_REG2, &val);
4025-	ddr_type = (val >> RK3399_PMUGRF_DDRTYPE_SHIFT) &
4026-		    RK3399_PMUGRF_DDRTYPE_MASK;
4027+	if (info->mon_idx)
4028+		mon_idx = info->mon_idx;
4029 
4030-	/* clear DDRMON_CTRL setting */
4031-	writel_relaxed(CLR_DDRMON_CTRL, dfi_regs + DDRMON_CTRL);
4032+	if (info->dram_dynamic_info_reg)
4033+		regmap_read(info->regmap_pmugrf, info->dram_dynamic_info_reg, &val_6);
4034 
4035-	/* set ddr type to dfi */
4036-	if (ddr_type == RK3399_PMUGRF_DDRTYPE_LPDDR3)
4037-		writel_relaxed(LPDDR3_EN, dfi_regs + DDRMON_CTRL);
4038-	else if (ddr_type == RK3399_PMUGRF_DDRTYPE_LPDDR4)
4039-		writel_relaxed(LPDDR4_EN, dfi_regs + DDRMON_CTRL);
4040+	if (info->dram_type == LPDDR5) {
4041+		info->lp5_bank_mode = READ_LP5_BANK_MODE(val_6);
4042+		info->lp5_ckr = READ_LP5_CKR(val_6);
4043+	}
4044 
4045-	/* enable count, use software mode */
4046-	writel_relaxed(SOFTWARE_EN, dfi_regs + DDRMON_CTRL);
4047+	for (i = 0; i < MAX_DMC_NUM_CH; i++) {
4048+		if (!(info->ch_msk & BIT(i)))
4049+			continue;
4050+		/* clear DDRMON_CTRL setting */
4051+		writel_relaxed(CLR_DDRMON_CTRL, dfi_regs + i * mon_idx + DDRMON_CTRL);
4052+
4053+		/* set ddr type to dfi */
4054+		if (info->dram_type == LPDDR3 || info->dram_type == LPDDR2)
4055+			writel_relaxed(LPDDR2_3_EN, dfi_regs + i * mon_idx + DDRMON_CTRL);
4056+		else if (info->dram_type == LPDDR4 || info->dram_type == LPDDR4X)
4057+			writel_relaxed(LPDDR4_EN, dfi_regs + i * mon_idx + DDRMON_CTRL);
4058+		else if (info->dram_type == DDR4)
4059+			writel_relaxed(DDR4_EN, dfi_regs + i * mon_idx + DDRMON_CTRL);
4060+		else if (info->dram_type == LPDDR5)
4061+			writel_relaxed(LPDDR5_EN | LPDDR5_BANK_MODE(info->lp5_bank_mode),
4062+				       dfi_regs + i * mon_idx + DDRMON_CTRL);
4063+
4064+		/* enable count, use software mode */
4065+		writel_relaxed(SOFTWARE_EN, dfi_regs + i * mon_idx + DDRMON_CTRL);
4066+	}
4067 }
4068 
4069 static void rockchip_dfi_stop_hardware_counter(struct devfreq_event_dev *edev)
4070 {
4071 	struct rockchip_dfi *info = devfreq_event_get_drvdata(edev);
4072 	void __iomem *dfi_regs = info->regs;
4073+	u32 mon_idx = 0, i;
4074 
4075-	writel_relaxed(SOFTWARE_DIS, dfi_regs + DDRMON_CTRL);
4076+	if (info->mon_idx)
4077+		mon_idx = info->mon_idx;
4078+
4079+	for (i = 0; i < MAX_DMC_NUM_CH; i++) {
4080+		if (!(info->ch_msk & BIT(i)))
4081+			continue;
4082+		writel_relaxed(SOFTWARE_DIS, dfi_regs + i * mon_idx + DDRMON_CTRL);
4083+	}
4084 }
4085 
4086 static int rockchip_dfi_get_busier_ch(struct devfreq_event_dev *edev)
4087@@ -96,16 +414,35 @@ static int rockchip_dfi_get_busier_ch(struct devfreq_event_dev *edev)
4088 	u32 tmp, max = 0;
4089 	u32 i, busier_ch = 0;
4090 	void __iomem *dfi_regs = info->regs;
4091+	u32 mon_idx = 0x20, count_rate = 1;
4092 
4093 	rockchip_dfi_stop_hardware_counter(edev);
4094 
4095+	if (info->mon_idx)
4096+		mon_idx = info->mon_idx;
4097+	if (info->count_rate)
4098+		count_rate = info->count_rate;
4099+
4100 	/* Find out which channel is busier */
4101-	for (i = 0; i < RK3399_DMC_NUM_CH; i++) {
4102-		info->ch_usage[i].access = readl_relaxed(dfi_regs +
4103-				DDRMON_CH0_DFI_ACCESS_NUM + i * 20) * 4;
4104+	for (i = 0; i < MAX_DMC_NUM_CH; i++) {
4105+		if (!(info->ch_msk & BIT(i)))
4106+			continue;
4107+
4108+		/* On rk3588 the counter runs at the DFI clock rate, so scale by count_rate */
4109 		info->ch_usage[i].total = readl_relaxed(dfi_regs +
4110-				DDRMON_CH0_COUNT_NUM + i * 20);
4111-		tmp = info->ch_usage[i].access;
4112+				DDRMON_CH0_COUNT_NUM + i * mon_idx) * count_rate;
4113+
4114+		/* LPDDR5, LPDDR4 and LPDDR4X use BL = 16; other DDR types use BL = 8 */
4115+		tmp = readl_relaxed(dfi_regs +
4116+				DDRMON_CH0_DFI_ACCESS_NUM + i * mon_idx);
4117+		if (info->dram_type == LPDDR4 || info->dram_type == LPDDR4X)
4118+			tmp *= 8;
4119+		else if (info->dram_type == LPDDR5)
4120+			tmp *= 16 / (4 << info->lp5_ckr);
4121+		else
4122+			tmp *= 4;
4123+		info->ch_usage[i].access = tmp;
4124+
4125 		if (tmp > max) {
4126 			busier_ch = i;
4127 			max = tmp;
4128@@ -121,7 +458,8 @@ static int rockchip_dfi_disable(struct devfreq_event_dev *edev)
4129 	struct rockchip_dfi *info = devfreq_event_get_drvdata(edev);
4130 
4131 	rockchip_dfi_stop_hardware_counter(edev);
4132-	clk_disable_unprepare(info->clk);
4133+	if (info->clk)
4134+		clk_disable_unprepare(info->clk);
4135 
4136 	return 0;
4137 }
4138@@ -131,10 +469,13 @@ static int rockchip_dfi_enable(struct devfreq_event_dev *edev)
4139 	struct rockchip_dfi *info = devfreq_event_get_drvdata(edev);
4140 	int ret;
4141 
4142-	ret = clk_prepare_enable(info->clk);
4143-	if (ret) {
4144-		dev_err(&edev->dev, "failed to enable dfi clk: %d\n", ret);
4145-		return ret;
4146+	if (info->clk) {
4147+		ret = clk_prepare_enable(info->clk);
4148+		if (ret) {
4149+			dev_err(&edev->dev, "failed to enable dfi clk: %d\n",
4150+				ret);
4151+			return ret;
4152+		}
4153 	}
4154 
4155 	rockchip_dfi_start_hardware_counter(edev);
4156@@ -151,8 +492,11 @@ static int rockchip_dfi_get_event(struct devfreq_event_dev *edev,
4157 {
4158 	struct rockchip_dfi *info = devfreq_event_get_drvdata(edev);
4159 	int busier_ch;
4160+	unsigned long flags;
4161 
4162+	local_irq_save(flags);
4163 	busier_ch = rockchip_dfi_get_busier_ch(edev);
4164+	local_irq_restore(flags);
4165 
4166 	edata->load_count = info->ch_usage[busier_ch].access;
4167 	edata->total_count = info->ch_usage[busier_ch].total;
4168@@ -167,22 +511,156 @@ static const struct devfreq_event_ops rockchip_dfi_ops = {
4169 	.set_event = rockchip_dfi_set_event,
4170 };
4171 
4172-static const struct of_device_id rockchip_dfi_id_match[] = {
4173-	{ .compatible = "rockchip,rk3399-dfi" },
4174-	{ },
4175-};
4176-MODULE_DEVICE_TABLE(of, rockchip_dfi_id_match);
4177+static __init int rk3588_dfi_init(struct platform_device *pdev,
4178+				  struct rockchip_dfi *data,
4179+				  struct devfreq_event_desc *desc)
4180+{
4181+	struct device_node *np = pdev->dev.of_node;
4182+	struct resource *res;
4183+	u32 val_2, val_3, val_4;
4184 
4185-static int rockchip_dfi_probe(struct platform_device *pdev)
4186+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
4187+	data->regs = devm_ioremap_resource(&pdev->dev, res);
4188+	if (IS_ERR(data->regs))
4189+		return PTR_ERR(data->regs);
4190+
4191+	data->regmap_pmugrf = syscon_regmap_lookup_by_phandle(np, "rockchip,pmu_grf");
4192+	if (IS_ERR(data->regmap_pmugrf))
4193+		return PTR_ERR(data->regmap_pmugrf);
4194+
4195+	regmap_read(data->regmap_pmugrf, RK3588_PMUGRF_OS_REG(2), &val_2);
4196+	regmap_read(data->regmap_pmugrf, RK3588_PMUGRF_OS_REG(3), &val_3);
4197+	regmap_read(data->regmap_pmugrf, RK3588_PMUGRF_OS_REG(4), &val_4);
4198+	if (READ_SYSREG_VERSION(val_3) >= 0x3)
4199+		data->dram_type = READ_DRAMTYPE_INFO_V3(val_2, val_3);
4200+	else
4201+		data->dram_type = READ_DRAMTYPE_INFO(val_2);
4202+
4203+	data->mon_idx = 0x4000;
4204+	data->count_rate = 2;
4205+	data->dram_dynamic_info_reg = RK3588_PMUGRF_OS_REG(6);
4206+	data->ch_msk = READ_CH_INFO(val_2) | READ_CH_INFO(val_4) << 2;
4207+	data->clk = NULL;
4208+
4209+	desc->ops = &rockchip_dfi_ops;
4210+
4211+	return 0;
4212+}
4213+
4214+static __init int px30_dfi_init(struct platform_device *pdev,
4215+				  struct rockchip_dfi *data,
4216+				  struct devfreq_event_desc *desc)
4217 {
4218-	struct device *dev = &pdev->dev;
4219-	struct rockchip_dfi *data;
4220-	struct devfreq_event_desc *desc;
4221 	struct device_node *np = pdev->dev.of_node, *node;
4222+	struct resource *res;
4223+	u32 val_2, val_3;
4224 
4225-	data = devm_kzalloc(dev, sizeof(struct rockchip_dfi), GFP_KERNEL);
4226-	if (!data)
4227-		return -ENOMEM;
4228+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
4229+	data->regs = devm_ioremap_resource(&pdev->dev, res);
4230+	if (IS_ERR(data->regs))
4231+		return PTR_ERR(data->regs);
4232+
4233+	node = of_parse_phandle(np, "rockchip,pmugrf", 0);
4234+	if (node) {
4235+		data->regmap_pmugrf = syscon_node_to_regmap(node);
4236+		if (IS_ERR(data->regmap_pmugrf))
4237+			return PTR_ERR(data->regmap_pmugrf);
4238+	}
4239+
4240+	regmap_read(data->regmap_pmugrf, PX30_PMUGRF_OS_REG2, &val_2);
4241+	regmap_read(data->regmap_pmugrf, PX30_PMUGRF_OS_REG3, &val_3);
4242+	if (READ_SYSREG_VERSION(val_3) >= 0x3)
4243+		data->dram_type = READ_DRAMTYPE_INFO_V3(val_2, val_3);
4244+	else
4245+		data->dram_type = READ_DRAMTYPE_INFO(val_2);
4246+	data->ch_msk = 1;
4247+	data->clk = NULL;
4248+
4249+	desc->ops = &rockchip_dfi_ops;
4250+
4251+	return 0;
4252+}
4253+
4254+static __init int rk3128_dfi_init(struct platform_device *pdev,
4255+				  struct rockchip_dfi *data,
4256+				  struct devfreq_event_desc *desc)
4257+{
4258+	struct device_node *np = pdev->dev.of_node, *node;
4259+
4260+	node = of_parse_phandle(np, "rockchip,grf", 0);
4261+	if (node) {
4262+		data->regmap_grf = syscon_node_to_regmap(node);
4263+		if (IS_ERR(data->regmap_grf))
4264+			return PTR_ERR(data->regmap_grf);
4265+	}
4266+
4267+	desc->ops = &rk3128_dfi_ops;
4268+
4269+	return 0;
4270+}
4271+
4272+static __init int rk3288_dfi_init(struct platform_device *pdev,
4273+				  struct rockchip_dfi *data,
4274+				  struct devfreq_event_desc *desc)
4275+{
4276+	struct device_node *np = pdev->dev.of_node, *node;
4277+	u32 val;
4278+
4279+	node = of_parse_phandle(np, "rockchip,pmu", 0);
4280+	if (node) {
4281+		data->regmap_pmu = syscon_node_to_regmap(node);
4282+		if (IS_ERR(data->regmap_pmu))
4283+			return PTR_ERR(data->regmap_pmu);
4284+	}
4285+
4286+	node = of_parse_phandle(np, "rockchip,grf", 0);
4287+	if (node) {
4288+		data->regmap_grf = syscon_node_to_regmap(node);
4289+		if (IS_ERR(data->regmap_grf))
4290+			return PTR_ERR(data->regmap_grf);
4291+	}
4292+
4293+	regmap_read(data->regmap_pmu, RK3288_PMU_SYS_REG2, &val);
4294+	data->dram_type = READ_DRAMTYPE_INFO(val);
4295+	data->ch_msk = READ_CH_INFO(val);
4296+
4297+	if (data->dram_type == DDR3)
4298+		regmap_write(data->regmap_grf, RK3288_GRF_SOC_CON4,
4299+			     RK3288_DDR3_SEL);
4300+	else
4301+		regmap_write(data->regmap_grf, RK3288_GRF_SOC_CON4,
4302+			     RK3288_LPDDR_SEL);
4303+
4304+	desc->ops = &rk3288_dfi_ops;
4305+
4306+	return 0;
4307+}
4308+
4309+static __init int rk3368_dfi_init(struct platform_device *pdev,
4310+				  struct rockchip_dfi *data,
4311+				  struct devfreq_event_desc *desc)
4312+{
4313+	struct device *dev = &pdev->dev;
4314+
4315+	if (!dev->parent || !dev->parent->of_node)
4316+		return -EINVAL;
4317+
4318+	data->regmap_grf = syscon_node_to_regmap(dev->parent->of_node);
4319+	if (IS_ERR(data->regmap_grf))
4320+		return PTR_ERR(data->regmap_grf);
4321+
4322+	desc->ops = &rk3368_dfi_ops;
4323+
4324+	return 0;
4325+}
4326+
4327+static __init int rockchip_dfi_init(struct platform_device *pdev,
4328+				    struct rockchip_dfi *data,
4329+				    struct devfreq_event_desc *desc)
4330+{
4331+	struct device *dev = &pdev->dev;
4332+	struct device_node *np = pdev->dev.of_node, *node;
4333+	u32 val;
4334 
4335 	data->regs = devm_platform_ioremap_resource(pdev, 0);
4336 	if (IS_ERR(data->regs))
4337@@ -202,23 +680,101 @@ static int rockchip_dfi_probe(struct platform_device *pdev)
4338 		if (IS_ERR(data->regmap_pmu))
4339 			return PTR_ERR(data->regmap_pmu);
4340 	}
4341-	data->dev = dev;
4342+
4343+	regmap_read(data->regmap_pmu, PMUGRF_OS_REG2, &val);
4344+	data->dram_type = READ_DRAMTYPE_INFO(val);
4345+	data->ch_msk = READ_CH_INFO(val);
4346+
4347+	desc->ops = &rockchip_dfi_ops;
4348+
4349+	return 0;
4350+}
4351+
4352+static __init int rk3328_dfi_init(struct platform_device *pdev,
4353+				  struct rockchip_dfi *data,
4354+				  struct devfreq_event_desc *desc)
4355+{
4356+	struct device_node *np = pdev->dev.of_node, *node;
4357+	struct resource *res;
4358+	u32 val;
4359+
4360+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
4361+	data->regs = devm_ioremap_resource(&pdev->dev, res);
4362+	if (IS_ERR(data->regs))
4363+		return PTR_ERR(data->regs);
4364+
4365+	node = of_parse_phandle(np, "rockchip,grf", 0);
4366+	if (node) {
4367+		data->regmap_grf = syscon_node_to_regmap(node);
4368+		if (IS_ERR(data->regmap_grf))
4369+			return PTR_ERR(data->regmap_grf);
4370+	}
4371+
4372+	regmap_read(data->regmap_grf, RK3328_GRF_OS_REG2, &val);
4373+	data->dram_type = READ_DRAMTYPE_INFO(val);
4374+	data->ch_msk = 1;
4375+	data->clk = NULL;
4376+
4377+	desc->ops = &rockchip_dfi_ops;
4378+
4379+	return 0;
4380+}
4381+
4382+static const struct of_device_id rockchip_dfi_id_match[] = {
4383+	{ .compatible = "rockchip,px30-dfi", .data = px30_dfi_init },
4384+	{ .compatible = "rockchip,rk1808-dfi", .data = px30_dfi_init },
4385+	{ .compatible = "rockchip,rk3128-dfi", .data = rk3128_dfi_init },
4386+	{ .compatible = "rockchip,rk3288-dfi", .data = rk3288_dfi_init },
4387+	{ .compatible = "rockchip,rk3328-dfi", .data = rk3328_dfi_init },
4388+	{ .compatible = "rockchip,rk3368-dfi", .data = rk3368_dfi_init },
4389+	{ .compatible = "rockchip,rk3399-dfi", .data = rockchip_dfi_init },
4390+	{ .compatible = "rockchip,rk3568-dfi", .data = px30_dfi_init },
4391+	{ .compatible = "rockchip,rk3588-dfi", .data = rk3588_dfi_init },
4392+	{ .compatible = "rockchip,rv1126-dfi", .data = px30_dfi_init },
4393+	{ },
4394+};
4395+
4396+static int rockchip_dfi_probe(struct platform_device *pdev)
4397+{
4398+	struct device *dev = &pdev->dev;
4399+	struct rockchip_dfi *data;
4400+	struct devfreq_event_desc *desc;
4401+	struct device_node *np = pdev->dev.of_node;
4402+	const struct of_device_id *match;
4403+	int (*init)(struct platform_device *pdev, struct rockchip_dfi *data,
4404+		    struct devfreq_event_desc *desc);
4405+
4406+	data = devm_kzalloc(dev, sizeof(struct rockchip_dfi), GFP_KERNEL);
4407+	if (!data)
4408+		return -ENOMEM;
4409 
4410 	desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
4411 	if (!desc)
4412 		return -ENOMEM;
4413 
4414-	desc->ops = &rockchip_dfi_ops;
4415+	match = of_match_node(rockchip_dfi_id_match, pdev->dev.of_node);
4416+	if (match) {
4417+		init = match->data;
4418+		if (init) {
4419+			if (init(pdev, data, desc))
4420+				return -EINVAL;
4421+		} else {
4422+			return 0;
4423+		}
4424+	} else {
4425+		return 0;
4426+	}
4427+
4428 	desc->driver_data = data;
4429 	desc->name = np->name;
4430-	data->desc = desc;
4431 
4432-	data->edev = devm_devfreq_event_add_edev(&pdev->dev, desc);
4433+	data->edev = devm_devfreq_event_add_edev(dev, desc);
4434 	if (IS_ERR(data->edev)) {
4435-		dev_err(&pdev->dev,
4436-			"failed to add devfreq-event device\n");
4437+		dev_err(dev, "failed to add devfreq-event device\n");
4438 		return PTR_ERR(data->edev);
4439 	}
4440+	data->desc = desc;
4441+	data->dev = &pdev->dev;
4442 
4443 	platform_set_drvdata(pdev, data);
4444 
4445diff --git a/drivers/dma-buf/Kconfig b/drivers/dma-buf/Kconfig
4446index 594b77d89..2d55b177a 100644
4447--- a/drivers/dma-buf/Kconfig
4448+++ b/drivers/dma-buf/Kconfig
4449@@ -18,10 +18,9 @@ config SYNC_FILE
4450 	  Documentation/driver-api/sync_file.rst.
4451 
4452 config SW_SYNC
4453-	bool "Sync File Validation Framework"
4454+	tristate "Sync File Validation Framework"
4455 	default n
4456 	depends on SYNC_FILE
4457-	depends on DEBUG_FS
4458 	help
4459 	  A sync object driver that uses a 32bit counter to coordinate
4460 	  synchronization.  Useful when there is no hardware primitive backing
4461@@ -42,7 +41,6 @@ config UDMABUF
4462 config DMABUF_MOVE_NOTIFY
4463 	bool "Move notify between drivers (EXPERIMENTAL)"
4464 	default n
4465-	depends on DMA_SHARED_BUFFER
4466 	help
4467 	  Don't pin buffers if the dynamic DMA-buf interface is available on
4468 	  both the exporter as well as the importer. This fixes a security
4469@@ -56,19 +54,6 @@ config DMABUF_SELFTESTS
4470 	default n
4471 	depends on DMA_SHARED_BUFFER
4472 
4473-config DMABUF_PROCESS_INFO
4474-	bool "Show dmabuf usage of all processes"
4475-	default n
4476-	depends on DMA_SHARED_BUFFER
4477-	depends on PROC_FS || DEBUG_FS
4478-	help
4479-	  Choose this option to show dmabuf objects usage of all processes.
4480-	  Firstly, with this option, when a process creates a dmabuf object,
4481-	  its pid and task_comm will be recorded in the dmabuf.
4482-	  Secondly, this option creates dma_buf/process_bufinfo file in
4483-	  debugfs (if DEBUG_FS enabled) and process_dmabuf_info file in procfs
4484-	  (if PROC_FS enabled) to show dmabuf objects usage of all processes.
4485-
4486 menuconfig DMABUF_HEAPS
4487 	bool "DMA-BUF Userland Memory Heaps"
4488 	select DMA_SHARED_BUFFER
4489@@ -80,7 +65,7 @@ menuconfig DMABUF_HEAPS
4490 
4491 menuconfig DMABUF_SYSFS_STATS
4492 	bool "DMA-BUF sysfs statistics"
4493-	depends on DMA_SHARED_BUFFER
4494+	select DMA_SHARED_BUFFER
4495 	help
4496 	   Choose this option to enable DMA-BUF sysfs statistics
4497 	   in location /sys/kernel/dmabuf/buffers.
4498diff --git a/drivers/dma-buf/Makefile b/drivers/dma-buf/Makefile
4499index cfbc5e3da..32757328b 100644
4500--- a/drivers/dma-buf/Makefile
4501+++ b/drivers/dma-buf/Makefile
4502@@ -4,7 +4,8 @@ obj-y := dma-buf.o dma-fence.o dma-fence-array.o dma-fence-chain.o \
4503 obj-$(CONFIG_DMABUF_HEAPS)	+= dma-heap.o
4504 obj-$(CONFIG_DMABUF_HEAPS)	+= heaps/
4505 obj-$(CONFIG_SYNC_FILE)		+= sync_file.o
4506-obj-$(CONFIG_SW_SYNC)		+= sw_sync.o sync_debug.o
4507+obj-$(CONFIG_SW_SYNC)		+= sw_sync.o
4508+obj-$(CONFIG_SW_SYNC_DEBUG)	+= sync_debug.o
4509 obj-$(CONFIG_UDMABUF)		+= udmabuf.o
4510 obj-$(CONFIG_DMABUF_SYSFS_STATS) += dma-buf-sysfs-stats.o
4511 
4512@@ -14,5 +15,3 @@ dmabuf_selftests-y := \
4513 	st-dma-fence-chain.o
4514 
4515 obj-$(CONFIG_DMABUF_SELFTESTS)	+= dmabuf_selftests.o
4516-
4517-obj-$(CONFIG_DMABUF_PROCESS_INFO)	+= dma-buf-process-info.o
4518diff --git a/drivers/dma-buf/dma-buf-sysfs-stats.c b/drivers/dma-buf/dma-buf-sysfs-stats.c
4519index 053baadca..2389a363b 100644
4520--- a/drivers/dma-buf/dma-buf-sysfs-stats.c
4521+++ b/drivers/dma-buf/dma-buf-sysfs-stats.c
4522@@ -16,40 +16,6 @@
4523 
4524 #define to_dma_buf_entry_from_kobj(x) container_of(x, struct dma_buf_sysfs_entry, kobj)
4525 
4526-/**
4527- * DOC: overview
4528- *
4529- * ``/sys/kernel/debug/dma_buf/bufinfo`` provides an overview of every DMA-BUF
4530- * in the system. However, since debugfs is not safe to be mounted in
4531- * production, procfs and sysfs can be used to gather DMA-BUF statistics on
4532- * production systems.
4533- *
4534- * The ``/proc/<pid>/fdinfo/<fd>`` files in procfs can be used to gather
4535- * information about DMA-BUF fds. Detailed documentation about the interface
4536- * is present in Documentation/filesystems/proc.rst.
4537- *
4538- * Unfortunately, the existing procfs interfaces can only provide information
4539- * about the DMA-BUFs for which processes hold fds or have the buffers mmapped
4540- * into their address space. This necessitated the creation of the DMA-BUF sysfs
4541- * statistics interface to provide per-buffer information on production systems.
4542- *
4543- * The interface at ``/sys/kernel/dma-buf/buffers`` exposes information about
4544- * every DMA-BUF when ``CONFIG_DMABUF_SYSFS_STATS`` is enabled.
4545- *
4546- * The following stats are exposed by the interface:
4547- *
4548- * * ``/sys/kernel/dmabuf/buffers/<inode_number>/exporter_name``
4549- * * ``/sys/kernel/dmabuf/buffers/<inode_number>/size``
4550- *
4551- * The information in the interface can also be used to derive per-exporter
4552- * statistics. The data from the interface can be gathered on error conditions
4553- * or other important events to provide a snapshot of DMA-BUF usage.
4554- * It can also be collected periodically by telemetry to monitor various metrics.
4555- *
4556- * Detailed documentation about the interface is present in
4557- * Documentation/ABI/testing/sysfs-kernel-dmabuf-buffers.
4558- */
4559-
4560 struct dma_buf_stats_attribute {
4561 	struct attribute attr;
4562 	ssize_t (*show)(struct dma_buf *dmabuf,
4563@@ -130,8 +96,9 @@ void dma_buf_stats_teardown(struct dma_buf *dmabuf)
4564 	kobject_put(&sysfs_entry->kobj);
4565 }
4566 
4567-
4568-/* Statistics files do not need to send uevents. */
4569+/*
4570+ * Statistics files do not need to send uevents.
4571+ */
4572 static int dmabuf_sysfs_uevent_filter(struct kset *kset, struct kobject *kobj)
4573 {
4574 	return 0;
4575diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
4576index 89c10136b..068e0aeeb 100644
4577--- a/drivers/dma-buf/dma-buf.c
4578+++ b/drivers/dma-buf/dma-buf.c
4579@@ -30,9 +30,6 @@
4580 #include <uapi/linux/magic.h>
4581 
4582 #include "dma-buf-sysfs-stats.h"
4583-#include "dma-buf-process-info.h"
4584-
4585-static inline int is_dma_buf_file(struct file *);
4586 
4587 struct dma_buf_list {
4588 	struct list_head head;
4589@@ -41,6 +38,30 @@ struct dma_buf_list {
4590 
4591 static struct dma_buf_list db_list;
4592 
4593+/*
4594+ * Walk db_list and call the given callback for each dma_buf so the caller
4595+ * can extract whatever information it needs; a non-zero return from the
4596+ * callback stops the walk.
4597+ */
4598+int get_each_dmabuf(int (*callback)(const struct dma_buf *dmabuf,
4599+		    void *private), void *private)
4600+{
4601+	struct dma_buf *buf;
4602+	int ret = mutex_lock_interruptible(&db_list.lock);
4603+
4604+	if (ret)
4605+		return ret;
4606+
4607+	list_for_each_entry(buf, &db_list.head, list_node) {
4608+		ret = callback(buf, private);
4609+		if (ret)
4610+			break;
4611+	}
4612+	mutex_unlock(&db_list.lock);
4613+	return ret;
4614+}
4615+EXPORT_SYMBOL_GPL(get_each_dmabuf);
4616+
4617 static char *dmabuffs_dname(struct dentry *dentry, char *buffer, int buflen)
4618 {
4619 	struct dma_buf *dmabuf;
4620@@ -60,6 +81,9 @@ static char *dmabuffs_dname(struct dentry *dentry, char *buffer, int buflen)
4621 static void dma_buf_release(struct dentry *dentry)
4622 {
4623 	struct dma_buf *dmabuf;
4624+#ifdef CONFIG_NO_GKI
4625+	int dtor_ret = 0;
4626+#endif
4627 
4628 	dmabuf = dentry->d_fsdata;
4629 	if (unlikely(!dmabuf))
4630@@ -77,13 +101,19 @@ static void dma_buf_release(struct dentry *dentry)
4631 	 */
4632 	BUG_ON(dmabuf->cb_shared.active || dmabuf->cb_excl.active);
4633 
4634-	dmabuf->ops->release(dmabuf);
4635+	dma_buf_stats_teardown(dmabuf);
4636+#ifdef CONFIG_NO_GKI
4637+	if (dmabuf->dtor)
4638+		dtor_ret = dmabuf->dtor(dmabuf, dmabuf->dtor_data);
4639+
4640+	if (!dtor_ret)
4641+#endif
4642+		dmabuf->ops->release(dmabuf);
4643 
4644 	if (dmabuf->resv == (struct dma_resv *)&dmabuf[1])
4645 		dma_resv_fini(dmabuf->resv);
4646 
4647-	WARN_ON(!list_empty(&dmabuf->attachments));
4648-	dma_buf_stats_teardown(dmabuf);
4649+	WARN_ON(!list_empty(&dmabuf->attachments));
4650 	module_put(dmabuf->owner);
4651 	kfree(dmabuf->name);
4652 	kfree(dmabuf);
4653@@ -328,6 +358,25 @@ static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
4654 	return events;
4655 }
4656 
4657+static long _dma_buf_set_name(struct dma_buf *dmabuf, const char *name)
4658+{
4659+	long ret = 0;
4660+
4661+	dma_resv_lock(dmabuf->resv, NULL);
4662+	if (!list_empty(&dmabuf->attachments)) {
4663+		ret = -EBUSY;
4664+		goto out_unlock;
4665+	}
4666+	spin_lock(&dmabuf->name_lock);
4667+	kfree(dmabuf->name);
4668+	dmabuf->name = name;
4669+	spin_unlock(&dmabuf->name_lock);
4670+
4671+out_unlock:
4672+	dma_resv_unlock(dmabuf->resv);
4673+	return ret;
4674+}
4675+
4676 /**
4677  * dma_buf_set_name - Set a name to a specific dma_buf to track the usage.
4678  * The name of the dma-buf buffer can only be set when the dma-buf is not
4679@@ -343,7 +392,23 @@ static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
4680  * devices, return -EBUSY.
4681  *
4682  */
4683-static long dma_buf_set_name(struct dma_buf *dmabuf, const char __user *buf)
4684+long dma_buf_set_name(struct dma_buf *dmabuf, const char *name)
4685+{
4686+	long ret = 0;
4687+	char *buf = kstrndup(name, DMA_BUF_NAME_LEN, GFP_KERNEL);
4688+
4689+	if (!buf)
4690+		return -ENOMEM;
4691+
4692+	ret = _dma_buf_set_name(dmabuf, buf);
4693+	if (ret)
4694+		kfree(buf);
4695+
4696+	return ret;
4697+}
4698+EXPORT_SYMBOL_GPL(dma_buf_set_name);
4699+
4700+static long dma_buf_set_name_user(struct dma_buf *dmabuf, const char __user *buf)
4701 {
4702 	char *name = strndup_user(buf, DMA_BUF_NAME_LEN);
4703 	long ret = 0;
4704@@ -351,19 +416,10 @@ static long dma_buf_set_name(struct dma_buf *dmabuf, const char __user *buf)
4705 	if (IS_ERR(name))
4706 		return PTR_ERR(name);
4707 
4708-	dma_resv_lock(dmabuf->resv, NULL);
4709-	if (!list_empty(&dmabuf->attachments)) {
4710-		ret = -EBUSY;
4711+	ret = _dma_buf_set_name(dmabuf, name);
4712+	if (ret)
4713 		kfree(name);
4714-		goto out_unlock;
4715-	}
4716-	spin_lock(&dmabuf->name_lock);
4717-	kfree(dmabuf->name);
4718-	dmabuf->name = name;
4719-	spin_unlock(&dmabuf->name_lock);
4720 
4721-out_unlock:
4722-	dma_resv_unlock(dmabuf->resv);
4723 	return ret;
4724 }
4725 
4726@@ -372,6 +428,7 @@ static long dma_buf_ioctl(struct file *file,
4727 {
4728 	struct dma_buf *dmabuf;
4729 	struct dma_buf_sync sync;
4730+	struct dma_buf_sync_partial sync_p;
4731 	enum dma_data_direction direction;
4732 	int ret;
4733 
4734@@ -408,7 +465,45 @@ static long dma_buf_ioctl(struct file *file,
4735 
4736 	case DMA_BUF_SET_NAME_A:
4737 	case DMA_BUF_SET_NAME_B:
4738-		return dma_buf_set_name(dmabuf, (const char __user *)arg);
4739+		return dma_buf_set_name_user(dmabuf, (const char __user *)arg);
4740+
4741+	case DMA_BUF_IOCTL_SYNC_PARTIAL:
4742+		if (copy_from_user(&sync_p, (void __user *) arg, sizeof(sync_p)))
4743+			return -EFAULT;
4744+
4745+		if (sync_p.len == 0)
4746+			return 0;
4747+
4748+		if (sync_p.len > dmabuf->size || sync_p.offset > dmabuf->size - sync_p.len)
4749+			return -EINVAL;
4750+
4751+		if (sync_p.flags & ~DMA_BUF_SYNC_VALID_FLAGS_MASK)
4752+			return -EINVAL;
4753+
4754+		switch (sync_p.flags & DMA_BUF_SYNC_RW) {
4755+		case DMA_BUF_SYNC_READ:
4756+			direction = DMA_FROM_DEVICE;
4757+			break;
4758+		case DMA_BUF_SYNC_WRITE:
4759+			direction = DMA_TO_DEVICE;
4760+			break;
4761+		case DMA_BUF_SYNC_RW:
4762+			direction = DMA_BIDIRECTIONAL;
4763+			break;
4764+		default:
4765+			return -EINVAL;
4766+		}
4767+
4768+		if (sync_p.flags & DMA_BUF_SYNC_END)
4769+			ret = dma_buf_end_cpu_access_partial(dmabuf, direction,
4770+							     sync_p.offset,
4771+							     sync_p.len);
4772+		else
4773+			ret = dma_buf_begin_cpu_access_partial(dmabuf, direction,
4774+							       sync_p.offset,
4775+							       sync_p.len);
4776+
4777+		return ret;
4778 
4779 	default:
4780 		return -ENOTTY;
4781@@ -442,10 +537,11 @@ static const struct file_operations dma_buf_fops = {
4782 /*
4783  * is_dma_buf_file - Check if struct file* is associated with dma_buf
4784  */
4785-static inline int is_dma_buf_file(struct file *file)
4786+int is_dma_buf_file(struct file *file)
4787 {
4788 	return file->f_op == &dma_buf_fops;
4789 }
4790+EXPORT_SYMBOL_GPL(is_dma_buf_file);
4791 
4792 static struct file *dma_buf_getfile(struct dma_buf *dmabuf, int flags)
4793 {
4794@@ -595,7 +691,6 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
4795 	list_add(&dmabuf->list_node, &db_list.head);
4796 	mutex_unlock(&db_list.lock);
4797 
4798-	init_dma_buf_task_info(dmabuf);
4799 	return dmabuf;
4800 
4801 err_sysfs:
4802@@ -1132,6 +1227,30 @@ int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
4803 }
4804 EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access);
4805 
4806+int dma_buf_begin_cpu_access_partial(struct dma_buf *dmabuf,
4807+				     enum dma_data_direction direction,
4808+				     unsigned int offset, unsigned int len)
4809+{
4810+	int ret = 0;
4811+
4812+	if (WARN_ON(!dmabuf))
4813+		return -EINVAL;
4814+
4815+	if (dmabuf->ops->begin_cpu_access_partial)
4816+		ret = dmabuf->ops->begin_cpu_access_partial(dmabuf, direction,
4817+							    offset, len);
4818+
4819+	/* Ensure that all fences are waited upon - but we first allow
4820+	 * the native handler the chance to do so more efficiently if it
4821+	 * chooses. A double invocation here will be a reasonably cheap no-op.
4822+	 */
4823+	if (ret == 0)
4824+		ret = __dma_buf_begin_cpu_access(dmabuf, direction);
4825+
4826+	return ret;
4827+}
4828+EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access_partial);
4829+
4830 /**
4831  * dma_buf_end_cpu_access - Must be called after accessing a dma_buf from the
4832  * cpu in the kernel context. Calls end_cpu_access to allow exporter-specific
4833@@ -1158,6 +1277,21 @@ int dma_buf_end_cpu_access(struct dma_buf *dmabuf,
4834 }
4835 EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access);
4836 
4837+int dma_buf_end_cpu_access_partial(struct dma_buf *dmabuf,
4838+				   enum dma_data_direction direction,
4839+				   unsigned int offset, unsigned int len)
4840+{
4841+	int ret = 0;
4842+
4843+	WARN_ON(!dmabuf);
4844+
4845+	if (dmabuf->ops->end_cpu_access_partial)
4846+		ret = dmabuf->ops->end_cpu_access_partial(dmabuf, direction,
4847+							  offset, len);
4848+
4849+	return ret;
4850+}
4851+EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access_partial);
4852 
4853 /**
4854  * dma_buf_mmap - Setup up a userspace mmap with the given vma
4855@@ -1286,6 +1420,32 @@ void dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
4856 }
4857 EXPORT_SYMBOL_GPL(dma_buf_vunmap);
4858 
4859+int dma_buf_get_flags(struct dma_buf *dmabuf, unsigned long *flags)
4860+{
4861+	int ret = 0;
4862+
4863+	if (WARN_ON(!dmabuf) || !flags)
4864+		return -EINVAL;
4865+
4866+	if (dmabuf->ops->get_flags)
4867+		ret = dmabuf->ops->get_flags(dmabuf, flags);
4868+
4869+	return ret;
4870+}
4871+EXPORT_SYMBOL_GPL(dma_buf_get_flags);
4872+
4873+int dma_buf_get_uuid(struct dma_buf *dmabuf, uuid_t *uuid)
4874+{
4875+	if (WARN_ON(!dmabuf) || !uuid)
4876+		return -EINVAL;
4877+
4878+	if (!dmabuf->ops->get_uuid)
4879+		return -ENODEV;
4880+
4881+	return dmabuf->ops->get_uuid(dmabuf, uuid);
4882+}
4883+EXPORT_SYMBOL_GPL(dma_buf_get_uuid);
4884+
4885 #ifdef CONFIG_DEBUG_FS
4886 static int dma_buf_debug_show(struct seq_file *s, void *unused)
4887 {
4888@@ -1305,10 +1465,8 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
4889 		return ret;
4890 
4891 	seq_puts(s, "\nDma-buf Objects:\n");
4892-	seq_printf(s, "%-8s\t%-8s\t%-8s\t%-8s\texp_name\t%-8s\t"
4893-		   "%-16s\t%-16s\t%-16s\n",
4894-		   "size", "flags", "mode", "count", "ino",
4895-		   "buf_name", "exp_pid",  "exp_task_comm");
4896+	seq_printf(s, "%-8s\t%-8s\t%-8s\t%-8s\texp_name\t%-8s\n",
4897+		   "size", "flags", "mode", "count", "ino");
4898 
4899 	list_for_each_entry(buf_obj, &db_list.head, list_node) {
4900 
4901@@ -1316,16 +1474,15 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
4902 		if (ret)
4903 			goto error_unlock;
4904 
4905-		seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\t%08lu\t%s\t"
4906-			   "%-16d\t%-16s\n",
4907+		spin_lock(&buf_obj->name_lock);
4908+		seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\t%08lu\t%s\n",
4909 				buf_obj->size,
4910 				buf_obj->file->f_flags, buf_obj->file->f_mode,
4911 				file_count(buf_obj->file),
4912 				buf_obj->exp_name,
4913 				file_inode(buf_obj->file)->i_ino,
4914-				buf_obj->name ?: "NULL",
4915-				dma_buf_exp_pid(buf_obj),
4916-				dma_buf_exp_task_comm(buf_obj) ?: "NULL");
4917+				buf_obj->name ?: "");
4918+		spin_unlock(&buf_obj->name_lock);
4919 
4920 		robj = buf_obj->resv;
4921 		while (true) {
4922@@ -1406,7 +1563,6 @@ static int dma_buf_init_debugfs(void)
4923 		err = PTR_ERR(d);
4924 	}
4925 
4926-	dma_buf_process_info_init_debugfs(dma_buf_debugfs_dir);
4927 	return err;
4928 }
4929 
4930@@ -1424,19 +1580,6 @@ static inline void dma_buf_uninit_debugfs(void)
4931 }
4932 #endif
4933 
4934-#ifdef CONFIG_DMABUF_PROCESS_INFO
4935-struct dma_buf *get_dma_buf_from_file(struct file *f)
4936-{
4937-	if (IS_ERR_OR_NULL(f))
4938-		return NULL;
4939-
4940-	if (!is_dma_buf_file(f))
4941-		return NULL;
4942-
4943-	return f->private_data;
4944-}
4945-#endif /* CONFIG_DMABUF_PROCESS_INFO */
4946-
4947 static int __init dma_buf_init(void)
4948 {
4949 	int ret;
4950@@ -1452,7 +1595,6 @@ static int __init dma_buf_init(void)
4951 	mutex_init(&db_list.lock);
4952 	INIT_LIST_HEAD(&db_list.head);
4953 	dma_buf_init_debugfs();
4954-	dma_buf_process_info_init_procfs();
4955 	return 0;
4956 }
4957 subsys_initcall(dma_buf_init);
4958@@ -1462,6 +1604,5 @@ static void __exit dma_buf_deinit(void)
4959 	dma_buf_uninit_debugfs();
4960 	kern_unmount(dma_buf_mnt);
4961 	dma_buf_uninit_sysfs_statistics();
4962-	dma_buf_process_info_uninit_procfs();
4963 }
4964 __exitcall(dma_buf_deinit);
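
get_each_dmabuf() gives in-kernel users a locked walk over every live DMA-BUF; a non-zero return from the callback stops the walk. For illustration, a callback that totals the sizes of all exported buffers (the function names are placeholders):

#include <linux/dma-buf.h>

static int example_sum_size(const struct dma_buf *dmabuf, void *private)
{
	size_t *total = private;

	*total += dmabuf->size;
	return 0;	/* returning non-zero would stop the walk early */
}

static size_t example_total_dmabuf_bytes(void)
{
	size_t total = 0;

	if (get_each_dmabuf(example_sum_size, &total))
		return 0;	/* taking db_list.lock was interrupted */

	return total;
}
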
4965diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c
4966index 7475e09b0..d64fc0392 100644
4967--- a/drivers/dma-buf/dma-fence.c
4968+++ b/drivers/dma-buf/dma-fence.c
4969@@ -312,22 +312,25 @@ void __dma_fence_might_wait(void)
4970 
4971 
4972 /**
4973- * dma_fence_signal_locked - signal completion of a fence
4974+ * dma_fence_signal_timestamp_locked - signal completion of a fence
4975  * @fence: the fence to signal
4976+ * @timestamp: fence signal timestamp in kernel's CLOCK_MONOTONIC time domain
4977  *
4978  * Signal completion for software callbacks on a fence, this will unblock
4979  * dma_fence_wait() calls and run all the callbacks added with
4980  * dma_fence_add_callback(). Can be called multiple times, but since a fence
4981  * can only go from the unsignaled to the signaled state and not back, it will
4982- * only be effective the first time.
4983+ * only be effective the first time. Set the timestamp provided as the fence
4984+ * signal timestamp.
4985  *
4986- * Unlike dma_fence_signal(), this function must be called with &dma_fence.lock
4987- * held.
4988+ * Unlike dma_fence_signal_timestamp(), this function must be called with
4989+ * &dma_fence.lock held.
4990  *
4991  * Returns 0 on success and a negative error value when @fence has been
4992  * signalled already.
4993  */
4994-int dma_fence_signal_locked(struct dma_fence *fence)
4995+int dma_fence_signal_timestamp_locked(struct dma_fence *fence,
4996+				      ktime_t timestamp)
4997 {
4998 	struct dma_fence_cb *cur, *tmp;
4999 	struct list_head cb_list;
5000@@ -341,7 +344,7 @@ int dma_fence_signal_locked(struct dma_fence *fence)
5001 	/* Stash the cb_list before replacing it with the timestamp */
5002 	list_replace(&fence->cb_list, &cb_list);
5003 
5004-	fence->timestamp = ktime_get();
5005+	fence->timestamp = timestamp;
5006 	set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags);
5007 	trace_dma_fence_signaled(fence);
5008 
5009@@ -352,6 +355,59 @@ int dma_fence_signal_locked(struct dma_fence *fence)
5010 
5011 	return 0;
5012 }
5013+EXPORT_SYMBOL(dma_fence_signal_timestamp_locked);
5014+
5015+/**
5016+ * dma_fence_signal_timestamp - signal completion of a fence
5017+ * @fence: the fence to signal
5018+ * @timestamp: fence signal timestamp in kernel's CLOCK_MONOTONIC time domain
5019+ *
5020+ * Signal completion for software callbacks on a fence, this will unblock
5021+ * dma_fence_wait() calls and run all the callbacks added with
5022+ * dma_fence_add_callback(). Can be called multiple times, but since a fence
5023+ * can only go from the unsignaled to the signaled state and not back, it will
5024+ * only be effective the first time. Set the timestamp provided as the fence
5025+ * signal timestamp.
5026+ *
5027+ * Returns 0 on success and a negative error value when @fence has been
5028+ * signalled already.
5029+ */
5030+int dma_fence_signal_timestamp(struct dma_fence *fence, ktime_t timestamp)
5031+{
5032+	unsigned long flags;
5033+	int ret;
5034+
5035+	if (!fence)
5036+		return -EINVAL;
5037+
5038+	spin_lock_irqsave(fence->lock, flags);
5039+	ret = dma_fence_signal_timestamp_locked(fence, timestamp);
5040+	spin_unlock_irqrestore(fence->lock, flags);
5041+
5042+	return ret;
5043+}
5044+EXPORT_SYMBOL(dma_fence_signal_timestamp);
5045+
5046+/**
5047+ * dma_fence_signal_locked - signal completion of a fence
5048+ * @fence: the fence to signal
5049+ *
5050+ * Signal completion for software callbacks on a fence, this will unblock
5051+ * dma_fence_wait() calls and run all the callbacks added with
5052+ * dma_fence_add_callback(). Can be called multiple times, but since a fence
5053+ * can only go from the unsignaled to the signaled state and not back, it will
5054+ * only be effective the first time.
5055+ *
5056+ * Unlike dma_fence_signal(), this function must be called with &dma_fence.lock
5057+ * held.
5058+ *
5059+ * Returns 0 on success and a negative error value when @fence has been
5060+ * signalled already.
5061+ */
5062+int dma_fence_signal_locked(struct dma_fence *fence)
5063+{
5064+	return dma_fence_signal_timestamp_locked(fence, ktime_get());
5065+}
5066 EXPORT_SYMBOL(dma_fence_signal_locked);
5067 
5068 /**
5069@@ -379,7 +435,7 @@ int dma_fence_signal(struct dma_fence *fence)
5070 	tmp = dma_fence_begin_signalling();
5071 
5072 	spin_lock_irqsave(fence->lock, flags);
5073-	ret = dma_fence_signal_locked(fence);
5074+	ret = dma_fence_signal_timestamp_locked(fence, ktime_get());
5075 	spin_unlock_irqrestore(fence->lock, flags);
5076 
5077 	dma_fence_end_signalling(tmp);
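The new timestamped entry points let a driver stamp a fence with a completion time captured by hardware instead of the ktime_get() taken when the signal finally runs. A minimal sketch of a caller, assuming a hypothetical driver: my_job, done_fence and hw_ns are placeholders, only dma_fence_signal_timestamp() comes from this patch, and <linux/dma-fence.h> plus <linux/ktime.h> are assumed.

/* hw_ns is assumed to already be in the CLOCK_MONOTONIC time domain. */
static void my_job_complete(struct my_job *job, u64 hw_ns)
{
	ktime_t ts = ns_to_ktime(hw_ns);

	/* Returns a negative error if the fence was already signalled. */
	if (dma_fence_signal_timestamp(job->done_fence, ts))
		pr_debug("fence already signalled\n");
}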
5078diff --git a/drivers/dma-buf/dma-heap.c b/drivers/dma-buf/dma-heap.c
5079index bbbfa28b2..4fb22001b 100644
5080--- a/drivers/dma-buf/dma-heap.c
5081+++ b/drivers/dma-buf/dma-heap.c
5082@@ -14,7 +14,6 @@
5083 #include <linux/xarray.h>
5084 #include <linux/list.h>
5085 #include <linux/slab.h>
5086-#include <linux/nospec.h>
5087 #include <linux/uaccess.h>
5088 #include <linux/syscalls.h>
5089 #include <linux/dma-heap.h>
5090@@ -31,6 +30,7 @@
5091  * @heap_devt		heap device node
5092  * @list		list head connecting to list of heaps
5093  * @heap_cdev		heap char device
5094+ * @heap_dev		heap device struct
5095  *
5096  * Represents a heap of memory from which buffers can be made.
5097  */
5098@@ -41,6 +41,8 @@ struct dma_heap {
5099 	dev_t heap_devt;
5100 	struct list_head list;
5101 	struct cdev heap_cdev;
5102+	struct kref refcount;
5103+	struct device *heap_dev;
5104 };
5105 
5106 static LIST_HEAD(heap_list);
5107@@ -49,20 +51,72 @@ static dev_t dma_heap_devt;
5108 static struct class *dma_heap_class;
5109 static DEFINE_XARRAY_ALLOC(dma_heap_minors);
5110 
5111-static int dma_heap_buffer_alloc(struct dma_heap *heap, size_t len,
5112-				 unsigned int fd_flags,
5113-				 unsigned int heap_flags)
5114+struct dma_heap *dma_heap_find(const char *name)
5115 {
5116+	struct dma_heap *h;
5117+
5118+	mutex_lock(&heap_list_lock);
5119+	list_for_each_entry(h, &heap_list, list) {
5120+		if (!strcmp(h->name, name)) {
5121+			kref_get(&h->refcount);
5122+			mutex_unlock(&heap_list_lock);
5123+			return h;
5124+		}
5125+	}
5126+	mutex_unlock(&heap_list_lock);
5127+	return NULL;
5128+}
5129+EXPORT_SYMBOL_GPL(dma_heap_find);
5130+
5131+
5132+void dma_heap_buffer_free(struct dma_buf *dmabuf)
5133+{
5134+	dma_buf_put(dmabuf);
5135+}
5136+EXPORT_SYMBOL_GPL(dma_heap_buffer_free);
5137+
5138+struct dma_buf *dma_heap_buffer_alloc(struct dma_heap *heap, size_t len,
5139+				      unsigned int fd_flags,
5140+				      unsigned int heap_flags)
5141+{
5142+	if (fd_flags & ~DMA_HEAP_VALID_FD_FLAGS)
5143+		return ERR_PTR(-EINVAL);
5144+
5145+	if (heap_flags & ~DMA_HEAP_VALID_HEAP_FLAGS)
5146+		return ERR_PTR(-EINVAL);
5147 	/*
5148 	 * Allocations from all heaps have to begin
5149 	 * and end on page boundaries.
5150 	 */
5151 	len = PAGE_ALIGN(len);
5152 	if (!len)
5153-		return -EINVAL;
5154+		return ERR_PTR(-EINVAL);
5155 
5156 	return heap->ops->allocate(heap, len, fd_flags, heap_flags);
5157 }
5158+EXPORT_SYMBOL_GPL(dma_heap_buffer_alloc);
5159+
5160+int dma_heap_bufferfd_alloc(struct dma_heap *heap, size_t len,
5161+			    unsigned int fd_flags,
5162+			    unsigned int heap_flags)
5163+{
5164+	struct dma_buf *dmabuf;
5165+	int fd;
5166+
5167+	dmabuf = dma_heap_buffer_alloc(heap, len, fd_flags, heap_flags);
5168+
5169+	if (IS_ERR(dmabuf))
5170+		return PTR_ERR(dmabuf);
5171+
5172+	fd = dma_buf_fd(dmabuf, fd_flags);
5173+	if (fd < 0) {
5174+		dma_buf_put(dmabuf);
5175+		/* just return, as put will call release and that will free */
5176+	}
5177+	return fd;
5178+
5179+}
5180+EXPORT_SYMBOL_GPL(dma_heap_bufferfd_alloc);
5181 
5182 static int dma_heap_open(struct inode *inode, struct file *file)
5183 {
5184@@ -90,15 +144,9 @@ static long dma_heap_ioctl_allocate(struct file *file, void *data)
5185 	if (heap_allocation->fd)
5186 		return -EINVAL;
5187 
5188-	if (heap_allocation->fd_flags & ~DMA_HEAP_VALID_FD_FLAGS)
5189-		return -EINVAL;
5190-
5191-	if (heap_allocation->heap_flags & ~DMA_HEAP_VALID_HEAP_FLAGS)
5192-		return -EINVAL;
5193-
5194-	fd = dma_heap_buffer_alloc(heap, heap_allocation->len,
5195-				   heap_allocation->fd_flags,
5196-				   heap_allocation->heap_flags);
5197+	fd = dma_heap_bufferfd_alloc(heap, heap_allocation->len,
5198+				     heap_allocation->fd_flags,
5199+				     heap_allocation->heap_flags);
5200 	if (fd < 0)
5201 		return fd;
5202 
5203@@ -124,7 +172,6 @@ static long dma_heap_ioctl(struct file *file, unsigned int ucmd,
5204 	if (nr >= ARRAY_SIZE(dma_heap_ioctl_cmds))
5205 		return -EINVAL;
5206 
5207-	nr = array_index_nospec(nr, ARRAY_SIZE(dma_heap_ioctl_cmds));
5208 	/* Get the kernel ioctl cmd that matches */
5209 	kcmd = dma_heap_ioctl_cmds[nr];
5210 
5211@@ -191,6 +238,47 @@ void *dma_heap_get_drvdata(struct dma_heap *heap)
5212 {
5213 	return heap->priv;
5214 }
5215+EXPORT_SYMBOL_GPL(dma_heap_get_drvdata);
5216+
5217+static void dma_heap_release(struct kref *ref)
5218+{
5219+	struct dma_heap *heap = container_of(ref, struct dma_heap, refcount);
5220+	int minor = MINOR(heap->heap_devt);
5221+
5222+	/* Note: we are already holding the heap_list_lock here */
5223+	list_del(&heap->list);
5224+
5225+	device_destroy(dma_heap_class, heap->heap_devt);
5226+	cdev_del(&heap->heap_cdev);
5227+	xa_erase(&dma_heap_minors, minor);
5228+
5229+	kfree(heap);
5230+}
5231+
5232+void dma_heap_put(struct dma_heap *h)
5233+{
5234+	/*
5235+	 * Take the heap_list_lock now to avoid racing with code
5236+	 * scanning the list and then taking a kref.
5237+	 */
5238+	mutex_lock(&heap_list_lock);
5239+	kref_put(&h->refcount, dma_heap_release);
5240+	mutex_unlock(&heap_list_lock);
5241+}
5242+EXPORT_SYMBOL_GPL(dma_heap_put);
5243+
5244+/**
5245+ * dma_heap_get_dev() - get device struct for the heap
5246+ * @heap: DMA-Heap to retrieve device struct from
5247+ *
5248+ * Returns:
5249+ * The device struct for the heap.
5250+ */
5251+struct device *dma_heap_get_dev(struct dma_heap *heap)
5252+{
5253+	return heap->heap_dev;
5254+}
5255+EXPORT_SYMBOL_GPL(dma_heap_get_dev);
5256 
5257 /**
5258  * dma_heap_get_name() - get heap name
5259@@ -203,11 +291,11 @@ const char *dma_heap_get_name(struct dma_heap *heap)
5260 {
5261 	return heap->name;
5262 }
5263+EXPORT_SYMBOL_GPL(dma_heap_get_name);
5264 
5265 struct dma_heap *dma_heap_add(const struct dma_heap_export_info *exp_info)
5266 {
5267-	struct dma_heap *heap, *h, *err_ret;
5268-	struct device *dev_ret;
5269+	struct dma_heap *heap, *err_ret;
5270 	unsigned int minor;
5271 	int ret;
5272 
5273@@ -221,10 +309,20 @@ struct dma_heap *dma_heap_add(const struct dma_heap_export_info *exp_info)
5274 		return ERR_PTR(-EINVAL);
5275 	}
5276 
5277+	/* check the name is unique */
5278+	heap = dma_heap_find(exp_info->name);
5279+	if (heap) {
5280+		pr_err("dma_heap: Already registered heap named %s\n",
5281+		       exp_info->name);
5282+		dma_heap_put(heap);
5283+		return ERR_PTR(-EINVAL);
5284+	}
5285+
5286 	heap = kzalloc(sizeof(*heap), GFP_KERNEL);
5287 	if (!heap)
5288 		return ERR_PTR(-ENOMEM);
5289 
5290+	kref_init(&heap->refcount);
5291 	heap->name = exp_info->name;
5292 	heap->ops = exp_info->ops;
5293 	heap->priv = exp_info->priv;
5294@@ -249,37 +347,27 @@ struct dma_heap *dma_heap_add(const struct dma_heap_export_info *exp_info)
5295 		goto err1;
5296 	}
5297 
5298-	dev_ret = device_create(dma_heap_class,
5299-				NULL,
5300-				heap->heap_devt,
5301-				NULL,
5302-				heap->name);
5303-	if (IS_ERR(dev_ret)) {
5304+	heap->heap_dev = device_create(dma_heap_class,
5305+				       NULL,
5306+				       heap->heap_devt,
5307+				       NULL,
5308+				       heap->name);
5309+	if (IS_ERR(heap->heap_dev)) {
5310 		pr_err("dma_heap: Unable to create device\n");
5311-		err_ret = ERR_CAST(dev_ret);
5312+		err_ret = ERR_CAST(heap->heap_dev);
5313 		goto err2;
5314 	}
5315 
5316-	mutex_lock(&heap_list_lock);
5317-	/* check the name is unique */
5318-	list_for_each_entry(h, &heap_list, list) {
5319-		if (!strcmp(h->name, exp_info->name)) {
5320-			mutex_unlock(&heap_list_lock);
5321-			pr_err("dma_heap: Already registered heap named %s\n",
5322-			       exp_info->name);
5323-			err_ret = ERR_PTR(-EINVAL);
5324-			goto err3;
5325-		}
5326-	}
5327+	/* Make sure it doesn't disappear on us */
5328+	heap->heap_dev = get_device(heap->heap_dev);
5329 
5330 	/* Add heap to the list */
5331+	mutex_lock(&heap_list_lock);
5332 	list_add(&heap->list, &heap_list);
5333 	mutex_unlock(&heap_list_lock);
5334 
5335 	return heap;
5336 
5337-err3:
5338-	device_destroy(dma_heap_class, heap->heap_devt);
5339 err2:
5340 	cdev_del(&heap->heap_cdev);
5341 err1:
5342@@ -288,27 +376,88 @@ struct dma_heap *dma_heap_add(const struct dma_heap_export_info *exp_info)
5343 	kfree(heap);
5344 	return err_ret;
5345 }
5346+EXPORT_SYMBOL_GPL(dma_heap_add);
5347 
5348 static char *dma_heap_devnode(struct device *dev, umode_t *mode)
5349 {
5350 	return kasprintf(GFP_KERNEL, "dma_heap/%s", dev_name(dev));
5351 }
5352 
5353+static ssize_t total_pools_kb_show(struct kobject *kobj,
5354+				   struct kobj_attribute *attr, char *buf)
5355+{
5356+	struct dma_heap *heap;
5357+	u64 total_pool_size = 0;
5358+
5359+	mutex_lock(&heap_list_lock);
5360+	list_for_each_entry(heap, &heap_list, list) {
5361+		if (heap->ops->get_pool_size)
5362+			total_pool_size += heap->ops->get_pool_size(heap);
5363+	}
5364+	mutex_unlock(&heap_list_lock);
5365+
5366+	return sysfs_emit(buf, "%llu\n", total_pool_size / 1024);
5367+}
5368+
5369+static struct kobj_attribute total_pools_kb_attr =
5370+	__ATTR_RO(total_pools_kb);
5371+
5372+static struct attribute *dma_heap_sysfs_attrs[] = {
5373+	&total_pools_kb_attr.attr,
5374+	NULL,
5375+};
5376+
5377+ATTRIBUTE_GROUPS(dma_heap_sysfs);
5378+
5379+static struct kobject *dma_heap_kobject;
5380+
5381+static int dma_heap_sysfs_setup(void)
5382+{
5383+	int ret;
5384+
5385+	dma_heap_kobject = kobject_create_and_add("dma_heap", kernel_kobj);
5386+	if (!dma_heap_kobject)
5387+		return -ENOMEM;
5388+
5389+	ret = sysfs_create_groups(dma_heap_kobject, dma_heap_sysfs_groups);
5390+	if (ret) {
5391+		kobject_put(dma_heap_kobject);
5392+		return ret;
5393+	}
5394+
5395+	return 0;
5396+}
5397+
5398+static void dma_heap_sysfs_teardown(void)
5399+{
5400+	kobject_put(dma_heap_kobject);
5401+}
5402+
5403 static int dma_heap_init(void)
5404 {
5405 	int ret;
5406 
5407-	ret = alloc_chrdev_region(&dma_heap_devt, 0, NUM_HEAP_MINORS, DEVNAME);
5408+	ret = dma_heap_sysfs_setup();
5409 	if (ret)
5410 		return ret;
5411 
5412+	ret = alloc_chrdev_region(&dma_heap_devt, 0, NUM_HEAP_MINORS, DEVNAME);
5413+	if (ret)
5414+		goto err_chrdev;
5415+
5416 	dma_heap_class = class_create(THIS_MODULE, DEVNAME);
5417 	if (IS_ERR(dma_heap_class)) {
5418-		unregister_chrdev_region(dma_heap_devt, NUM_HEAP_MINORS);
5419-		return PTR_ERR(dma_heap_class);
5420+		ret = PTR_ERR(dma_heap_class);
5421+		goto err_class;
5422 	}
5423 	dma_heap_class->devnode = dma_heap_devnode;
5424 
5425 	return 0;
5426+
5427+err_class:
5428+	unregister_chrdev_region(dma_heap_devt, NUM_HEAP_MINORS);
5429+err_chrdev:
5430+	dma_heap_sysfs_teardown();
5431+	return ret;
5432 }
5433 subsys_initcall(dma_heap_init);
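The exports added above (dma_heap_find(), dma_heap_buffer_alloc(), dma_heap_bufferfd_alloc(), dma_heap_put(), dma_heap_get_dev()) give other kernel modules an allocation path that does not go through the ioctl. A rough sketch, assuming a heap named "system" is registered; the helper name is illustrative and <linux/dma-heap.h>, <linux/dma-buf.h> and <linux/fcntl.h> are assumed:

static struct dma_buf *my_alloc_from_heap(size_t len)
{
	struct dma_heap *heap;
	struct dma_buf *buf;

	/* Takes a reference on the heap if it exists. */
	heap = dma_heap_find("system");
	if (!heap)
		return ERR_PTR(-ENODEV);

	buf = dma_heap_buffer_alloc(heap, len, O_RDWR | O_CLOEXEC, 0);

	/* Drop the dma_heap_find() reference. */
	dma_heap_put(heap);

	/* ERR_PTR() on failure, otherwise a dma-buf the caller must dma_buf_put(). */
	return buf;
}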
5434diff --git a/drivers/dma-buf/heaps/Kconfig b/drivers/dma-buf/heaps/Kconfig
5435index a5eef06c4..341d6d50f 100644
5436--- a/drivers/dma-buf/heaps/Kconfig
5437+++ b/drivers/dma-buf/heaps/Kconfig
5438@@ -1,12 +1,12 @@
5439 config DMABUF_HEAPS_SYSTEM
5440-	bool "DMA-BUF System Heap"
5441-	depends on DMABUF_HEAPS
5442+	tristate "DMA-BUF System Heap"
5443+	depends on DMABUF_HEAPS && DMABUF_HEAPS_DEFERRED_FREE && DMABUF_HEAPS_PAGE_POOL
5444 	help
5445 	  Choose this option to enable the system dmabuf heap. The system heap
5446 	  is backed by pages from the buddy allocator. If in doubt, say Y.
5447 
5448 config DMABUF_HEAPS_CMA
5449-	bool "DMA-BUF CMA Heap"
5450+	tristate "DMA-BUF CMA Heap"
5451 	depends on DMABUF_HEAPS && DMA_CMA
5452 	help
5453 	  Choose this option to enable dma-buf CMA heap. This heap is backed
5454diff --git a/drivers/dma-buf/heaps/Makefile b/drivers/dma-buf/heaps/Makefile
5455index 6e54cdec3..d06078f26 100644
5456--- a/drivers/dma-buf/heaps/Makefile
5457+++ b/drivers/dma-buf/heaps/Makefile
5458@@ -1,4 +1,4 @@
5459 # SPDX-License-Identifier: GPL-2.0
5460-obj-y					+= heap-helpers.o
5461+
5462 obj-$(CONFIG_DMABUF_HEAPS_SYSTEM)	+= system_heap.o
5463 obj-$(CONFIG_DMABUF_HEAPS_CMA)		+= cma_heap.o
5464diff --git a/drivers/dma-buf/heaps/cma_heap.c b/drivers/dma-buf/heaps/cma_heap.c
5465index e55384dc1..fd564aa70 100644
5466--- a/drivers/dma-buf/heaps/cma_heap.c
5467+++ b/drivers/dma-buf/heaps/cma_heap.c
5468@@ -2,76 +2,306 @@
5469 /*
5470  * DMABUF CMA heap exporter
5471  *
5472- * Copyright (C) 2012, 2019 Linaro Ltd.
5473+ * Copyright (C) 2012, 2019, 2020 Linaro Ltd.
5474  * Author: <benjamin.gaignard@linaro.org> for ST-Ericsson.
5475+ *
5476+ * Also utilizing parts of Andrew Davis' SRAM heap:
5477+ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
5478+ *	Andrew F. Davis <afd@ti.com>
5479  */
5480-
5481 #include <linux/cma.h>
5482-#include <linux/device.h>
5483 #include <linux/dma-buf.h>
5484 #include <linux/dma-heap.h>
5485 #include <linux/dma-map-ops.h>
5486 #include <linux/err.h>
5487-#include <linux/errno.h>
5488 #include <linux/highmem.h>
5489+#include <linux/io.h>
5490+#include <linux/mm.h>
5491 #include <linux/module.h>
5492-#include <linux/slab.h>
5493 #include <linux/scatterlist.h>
5494-#include <linux/sched/signal.h>
5495+#include <linux/slab.h>
5496+#include <linux/vmalloc.h>
5497 
5498-#include "heap-helpers.h"
5499 
5500 struct cma_heap {
5501 	struct dma_heap *heap;
5502 	struct cma *cma;
5503 };
5504 
5505-static void cma_heap_free(struct heap_helper_buffer *buffer)
5506+struct cma_heap_buffer {
5507+	struct cma_heap *heap;
5508+	struct list_head attachments;
5509+	struct mutex lock;
5510+	unsigned long len;
5511+	struct page *cma_pages;
5512+	struct page **pages;
5513+	pgoff_t pagecount;
5514+	int vmap_cnt;
5515+	void *vaddr;
5516+};
5517+
5518+struct dma_heap_attachment {
5519+	struct device *dev;
5520+	struct sg_table table;
5521+	struct list_head list;
5522+	bool mapped;
5523+};
5524+
5525+static int cma_heap_attach(struct dma_buf *dmabuf,
5526+			   struct dma_buf_attachment *attachment)
5527+{
5528+	struct cma_heap_buffer *buffer = dmabuf->priv;
5529+	struct dma_heap_attachment *a;
5530+	int ret;
5531+
5532+	a = kzalloc(sizeof(*a), GFP_KERNEL);
5533+	if (!a)
5534+		return -ENOMEM;
5535+
5536+	ret = sg_alloc_table_from_pages(&a->table, buffer->pages,
5537+					buffer->pagecount, 0,
5538+					buffer->pagecount << PAGE_SHIFT,
5539+					GFP_KERNEL);
5540+	if (ret) {
5541+		kfree(a);
5542+		return ret;
5543+	}
5544+
5545+	a->dev = attachment->dev;
5546+	INIT_LIST_HEAD(&a->list);
5547+	a->mapped = false;
5548+
5549+	attachment->priv = a;
5550+
5551+	mutex_lock(&buffer->lock);
5552+	list_add(&a->list, &buffer->attachments);
5553+	mutex_unlock(&buffer->lock);
5554+
5555+	return 0;
5556+}
5557+
5558+static void cma_heap_detach(struct dma_buf *dmabuf,
5559+			    struct dma_buf_attachment *attachment)
5560+{
5561+	struct cma_heap_buffer *buffer = dmabuf->priv;
5562+	struct dma_heap_attachment *a = attachment->priv;
5563+
5564+	mutex_lock(&buffer->lock);
5565+	list_del(&a->list);
5566+	mutex_unlock(&buffer->lock);
5567+
5568+	sg_free_table(&a->table);
5569+	kfree(a);
5570+}
5571+
5572+static struct sg_table *cma_heap_map_dma_buf(struct dma_buf_attachment *attachment,
5573+					     enum dma_data_direction direction)
5574 {
5575-	struct cma_heap *cma_heap = dma_heap_get_drvdata(buffer->heap);
5576-	unsigned long nr_pages = buffer->pagecount;
5577-	struct page *cma_pages = buffer->priv_virt;
5578+	struct dma_heap_attachment *a = attachment->priv;
5579+	struct sg_table *table = &a->table;
5580+	int attrs = attachment->dma_map_attrs;
5581+	int ret;
5582+
5583+	ret = dma_map_sgtable(attachment->dev, table, direction, attrs);
5584+	if (ret)
5585+		return ERR_PTR(-ENOMEM);
5586+	a->mapped = true;
5587+	return table;
5588+}
5589+
5590+static void cma_heap_unmap_dma_buf(struct dma_buf_attachment *attachment,
5591+				   struct sg_table *table,
5592+				   enum dma_data_direction direction)
5593+{
5594+	struct dma_heap_attachment *a = attachment->priv;
5595+	int attrs = attachment->dma_map_attrs;
5596+
5597+	a->mapped = false;
5598+	dma_unmap_sgtable(attachment->dev, table, direction, attrs);
5599+}
5600+
5601+static int cma_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
5602+					     enum dma_data_direction direction)
5603+{
5604+	struct cma_heap_buffer *buffer = dmabuf->priv;
5605+	struct dma_heap_attachment *a;
5606+
5607+	if (buffer->vmap_cnt)
5608+		invalidate_kernel_vmap_range(buffer->vaddr, buffer->len);
5609+
5610+	mutex_lock(&buffer->lock);
5611+	list_for_each_entry(a, &buffer->attachments, list) {
5612+		if (!a->mapped)
5613+			continue;
5614+		dma_sync_sgtable_for_cpu(a->dev, &a->table, direction);
5615+	}
5616+	mutex_unlock(&buffer->lock);
5617+
5618+	return 0;
5619+}
5620+
5621+static int cma_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
5622+					   enum dma_data_direction direction)
5623+{
5624+	struct cma_heap_buffer *buffer = dmabuf->priv;
5625+	struct dma_heap_attachment *a;
5626+
5627+	if (buffer->vmap_cnt)
5628+		flush_kernel_vmap_range(buffer->vaddr, buffer->len);
5629+
5630+	mutex_lock(&buffer->lock);
5631+	list_for_each_entry(a, &buffer->attachments, list) {
5632+		if (!a->mapped)
5633+			continue;
5634+		dma_sync_sgtable_for_device(a->dev, &a->table, direction);
5635+	}
5636+	mutex_unlock(&buffer->lock);
5637+
5638+	return 0;
5639+}
5640+
5641+static vm_fault_t cma_heap_vm_fault(struct vm_fault *vmf)
5642+{
5643+	struct vm_area_struct *vma = vmf->vma;
5644+	struct cma_heap_buffer *buffer = vma->vm_private_data;
5645+
5646+	if (vmf->pgoff >= buffer->pagecount)
5647+		return VM_FAULT_SIGBUS;
5648+
5649+	vmf->page = buffer->pages[vmf->pgoff];
5650+	get_page(vmf->page);
5651+
5652+	return 0;
5653+}
5654+
5655+static const struct vm_operations_struct dma_heap_vm_ops = {
5656+	.fault = cma_heap_vm_fault,
5657+};
5658+
5659+static int cma_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
5660+{
5661+	struct cma_heap_buffer *buffer = dmabuf->priv;
5662+
5663+	if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0)
5664+		return -EINVAL;
5665+
5666+	vma->vm_ops = &dma_heap_vm_ops;
5667+	vma->vm_private_data = buffer;
5668+
5669+	return 0;
5670+}
5671+
5672+static void *cma_heap_do_vmap(struct cma_heap_buffer *buffer)
5673+{
5674+	void *vaddr;
5675+
5676+	vaddr = vmap(buffer->pages, buffer->pagecount, VM_MAP, PAGE_KERNEL);
5677+	if (!vaddr)
5678+		return ERR_PTR(-ENOMEM);
5679+
5680+	return vaddr;
5681+}
5682+
5683+static void *cma_heap_vmap(struct dma_buf *dmabuf)
5684+{
5685+	struct cma_heap_buffer *buffer = dmabuf->priv;
5686+	void *vaddr;
5687+
5688+	mutex_lock(&buffer->lock);
5689+	if (buffer->vmap_cnt) {
5690+		buffer->vmap_cnt++;
5691+		vaddr = buffer->vaddr;
5692+		goto out;
5693+	}
5694+
5695+	vaddr = cma_heap_do_vmap(buffer);
5696+	if (IS_ERR(vaddr))
5697+		goto out;
5698+
5699+	buffer->vaddr = vaddr;
5700+	buffer->vmap_cnt++;
5701+out:
5702+	mutex_unlock(&buffer->lock);
5703+
5704+	return vaddr;
5705+}
5706+
5707+static void cma_heap_vunmap(struct dma_buf *dmabuf, void *vaddr)
5708+{
5709+	struct cma_heap_buffer *buffer = dmabuf->priv;
5710+
5711+	mutex_lock(&buffer->lock);
5712+	if (!--buffer->vmap_cnt) {
5713+		vunmap(buffer->vaddr);
5714+		buffer->vaddr = NULL;
5715+	}
5716+	mutex_unlock(&buffer->lock);
5717+}
5718+
5719+static void cma_heap_dma_buf_release(struct dma_buf *dmabuf)
5720+{
5721+	struct cma_heap_buffer *buffer = dmabuf->priv;
5722+	struct cma_heap *cma_heap = buffer->heap;
5723+
5724+	if (buffer->vmap_cnt > 0) {
5725+		WARN(1, "%s: buffer still mapped in the kernel\n", __func__);
5726+		vunmap(buffer->vaddr);
5727+	}
5728 
5729 	/* free page list */
5730 	kfree(buffer->pages);
5731 	/* release memory */
5732-	cma_release(cma_heap->cma, cma_pages, nr_pages);
5733+	cma_release(cma_heap->cma, buffer->cma_pages, buffer->pagecount);
5734 	kfree(buffer);
5735 }
5736 
5737-/* dmabuf heap CMA operations functions */
5738-static int cma_heap_allocate(struct dma_heap *heap,
5739-			     unsigned long len,
5740-			     unsigned long fd_flags,
5741-			     unsigned long heap_flags)
5742+static const struct dma_buf_ops cma_heap_buf_ops = {
5743+	.attach = cma_heap_attach,
5744+	.detach = cma_heap_detach,
5745+	.map_dma_buf = cma_heap_map_dma_buf,
5746+	.unmap_dma_buf = cma_heap_unmap_dma_buf,
5747+	.begin_cpu_access = cma_heap_dma_buf_begin_cpu_access,
5748+	.end_cpu_access = cma_heap_dma_buf_end_cpu_access,
5749+	.mmap = cma_heap_mmap,
5750+	.vmap = cma_heap_vmap,
5751+	.vunmap = cma_heap_vunmap,
5752+	.release = cma_heap_dma_buf_release,
5753+};
5754+
5755+static struct dma_buf *cma_heap_allocate(struct dma_heap *heap,
5756+					 unsigned long len,
5757+					 unsigned long fd_flags,
5758+					 unsigned long heap_flags)
5759 {
5760 	struct cma_heap *cma_heap = dma_heap_get_drvdata(heap);
5761-	struct heap_helper_buffer *helper_buffer;
5762-	struct page *cma_pages;
5763+	struct cma_heap_buffer *buffer;
5764+	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
5765 	size_t size = PAGE_ALIGN(len);
5766-	unsigned long nr_pages = size >> PAGE_SHIFT;
5767+	pgoff_t pagecount = size >> PAGE_SHIFT;
5768 	unsigned long align = get_order(size);
5769+	struct page *cma_pages;
5770 	struct dma_buf *dmabuf;
5771 	int ret = -ENOMEM;
5772 	pgoff_t pg;
5773 
5774-	if (align > CONFIG_CMA_ALIGNMENT)
5775-		align = CONFIG_CMA_ALIGNMENT;
5776+	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
5777+	if (!buffer)
5778+		return ERR_PTR(-ENOMEM);
5779 
5780-	helper_buffer = kzalloc(sizeof(*helper_buffer), GFP_KERNEL);
5781-	if (!helper_buffer)
5782-		return -ENOMEM;
5783+	INIT_LIST_HEAD(&buffer->attachments);
5784+	mutex_init(&buffer->lock);
5785+	buffer->len = size;
5786 
5787-	init_heap_helper_buffer(helper_buffer, cma_heap_free);
5788-	helper_buffer->heap = heap;
5789-	helper_buffer->size = len;
5790+	if (align > CONFIG_CMA_ALIGNMENT)
5791+		align = CONFIG_CMA_ALIGNMENT;
5792 
5793-	cma_pages = cma_alloc(cma_heap->cma, nr_pages, align, false);
5794+	cma_pages = cma_alloc(cma_heap->cma, pagecount, align, GFP_KERNEL);
5795 	if (!cma_pages)
5796-		goto free_buf;
5797+		goto free_buffer;
5798 
5799+	/* Clear the cma pages */
5800 	if (PageHighMem(cma_pages)) {
5801-		unsigned long nr_clear_pages = nr_pages;
5802+		unsigned long nr_clear_pages = pagecount;
5803 		struct page *page = cma_pages;
5804 
5805 		while (nr_clear_pages > 0) {
5806@@ -85,7 +315,6 @@ static int cma_heap_allocate(struct dma_heap *heap,
5807 			 */
5808 			if (fatal_signal_pending(current))
5809 				goto free_cma;
5810-
5811 			page++;
5812 			nr_clear_pages--;
5813 		}
5814@@ -93,44 +322,41 @@ static int cma_heap_allocate(struct dma_heap *heap,
5815 		memset(page_address(cma_pages), 0, size);
5816 	}
5817 
5818-	helper_buffer->pagecount = nr_pages;
5819-	helper_buffer->pages = kmalloc_array(helper_buffer->pagecount,
5820-					     sizeof(*helper_buffer->pages),
5821-					     GFP_KERNEL);
5822-	if (!helper_buffer->pages) {
5823+	buffer->pages = kmalloc_array(pagecount, sizeof(*buffer->pages), GFP_KERNEL);
5824+	if (!buffer->pages) {
5825 		ret = -ENOMEM;
5826 		goto free_cma;
5827 	}
5828 
5829-	for (pg = 0; pg < helper_buffer->pagecount; pg++)
5830-		helper_buffer->pages[pg] = &cma_pages[pg];
5831+	for (pg = 0; pg < pagecount; pg++)
5832+		buffer->pages[pg] = &cma_pages[pg];
5833+
5834+	buffer->cma_pages = cma_pages;
5835+	buffer->heap = cma_heap;
5836+	buffer->pagecount = pagecount;
5837 
5838 	/* create the dmabuf */
5839-	dmabuf = heap_helper_export_dmabuf(helper_buffer, fd_flags);
5840+	exp_info.exp_name = dma_heap_get_name(heap);
5841+	exp_info.ops = &cma_heap_buf_ops;
5842+	exp_info.size = buffer->len;
5843+	exp_info.flags = fd_flags;
5844+	exp_info.priv = buffer;
5845+	dmabuf = dma_buf_export(&exp_info);
5846 	if (IS_ERR(dmabuf)) {
5847 		ret = PTR_ERR(dmabuf);
5848 		goto free_pages;
5849 	}
5850 
5851-	helper_buffer->dmabuf = dmabuf;
5852-	helper_buffer->priv_virt = cma_pages;
5853-
5854-	ret = dma_buf_fd(dmabuf, fd_flags);
5855-	if (ret < 0) {
5856-		dma_buf_put(dmabuf);
5857-		/* just return, as put will call release and that will free */
5858-		return ret;
5859-	}
5860-
5861-	return ret;
5862+	return dmabuf;
5863 
5864 free_pages:
5865-	kfree(helper_buffer->pages);
5866+	kfree(buffer->pages);
5867 free_cma:
5868-	cma_release(cma_heap->cma, cma_pages, nr_pages);
5869-free_buf:
5870-	kfree(helper_buffer);
5871-	return ret;
5872+	cma_release(cma_heap->cma, cma_pages, pagecount);
5873+free_buffer:
5874+	kfree(buffer);
5875+
5876+	return ERR_PTR(ret);
5877 }
5878 
5879 static const struct dma_heap_ops cma_heap_ops = {
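With the heap-helpers layer gone, the CMA heap implements its dma_buf_ops directly, so the standard importer flow is what exercises cma_heap_attach()/cma_heap_map_dma_buf() above. For orientation, a minimal importer-side sketch using only long-standing dma-buf core calls; the function name and device pointer are placeholders:

static int my_map_heap_buffer(struct device *dev, struct dma_buf *buf)
{
	struct dma_buf_attachment *att;
	struct sg_table *sgt;

	att = dma_buf_attach(buf, dev);
	if (IS_ERR(att))
		return PTR_ERR(att);

	sgt = dma_buf_map_attachment(att, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		dma_buf_detach(buf, att);
		return PTR_ERR(sgt);
	}

	/* ... program the device with the sgt entries ... */

	dma_buf_unmap_attachment(att, sgt, DMA_BIDIRECTIONAL);
	dma_buf_detach(buf, att);
	return 0;
}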
5880diff --git a/drivers/dma-buf/heaps/system_heap.c b/drivers/dma-buf/heaps/system_heap.c
5881index 0bf688e3c..18f55f954 100644
5882--- a/drivers/dma-buf/heaps/system_heap.c
5883+++ b/drivers/dma-buf/heaps/system_heap.c
5884@@ -3,7 +3,11 @@
5885  * DMABUF System heap exporter
5886  *
5887  * Copyright (C) 2011 Google, Inc.
5888- * Copyright (C) 2019 Linaro Ltd.
5889+ * Copyright (C) 2019, 2020 Linaro Ltd.
5890+ *
5891+ * Portions based off of Andrew Davis' SRAM heap:
5892+ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
5893+ *	Andrew F. Davis <afd@ti.com>
5894  */
5895 
5896 #include <linux/dma-buf.h>
5897@@ -15,99 +19,804 @@
5898 #include <linux/module.h>
5899 #include <linux/scatterlist.h>
5900 #include <linux/slab.h>
5901-#include <linux/sched/signal.h>
5902-#include <asm/page.h>
5903+#include <linux/swiotlb.h>
5904+#include <linux/vmalloc.h>
5905+#include <linux/rockchip/rockchip_sip.h>
5906+
5907+#include <linux/page_pool.h>
5908+#include <linux/deferred-free-helper.h>
5909+
5910+#define CONFIG_SYSTEM_HEAP_FORCE_DMA_SYNC	1
5911+
5912+static struct dma_heap *sys_heap;
5913+static struct dma_heap *sys_dma32_heap;
5914+static struct dma_heap *sys_uncached_heap;
5915+static struct dma_heap *sys_uncached_dma32_heap;
5916+
5917+/* Default setting */
5918+static u32 bank_bit_first = 12;
5919+static u32 bank_bit_mask = 0x7;
5920+
5921+struct system_heap_buffer {
5922+	struct dma_heap *heap;
5923+	struct list_head attachments;
5924+	struct mutex lock;
5925+	unsigned long len;
5926+	struct sg_table sg_table;
5927+	int vmap_cnt;
5928+	void *vaddr;
5929+	struct deferred_freelist_item deferred_free;
5930+
5931+	bool uncached;
5932+};
5933 
5934-#include "heap-helpers.h"
5935+struct dma_heap_attachment {
5936+	struct device *dev;
5937+	struct sg_table *table;
5938+	struct list_head list;
5939+	bool mapped;
5940 
5941-struct dma_heap *sys_heap;
5942+	bool uncached;
5943+};
5944 
5945-static void system_heap_free(struct heap_helper_buffer *buffer)
5946+#define LOW_ORDER_GFP (GFP_HIGHUSER | __GFP_ZERO | __GFP_COMP)
5947+#define MID_ORDER_GFP (LOW_ORDER_GFP | __GFP_NOWARN)
5948+#define HIGH_ORDER_GFP  (((GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN \
5949+				| __GFP_NORETRY) & ~__GFP_RECLAIM) \
5950+				| __GFP_COMP)
5951+static gfp_t order_flags[] = {HIGH_ORDER_GFP, MID_ORDER_GFP, LOW_ORDER_GFP};
5952+/*
5953+ * The selection of the orders used for allocation (1MB, 64K, 4K) is designed
5954+ * to match with the sizes often found in IOMMUs. Using order 4 pages instead
5955+ * of order 0 pages can significantly improve the performance of many IOMMUs
5956+ * by reducing TLB pressure and time spent updating page tables.
5957+ */
5958+static unsigned int orders[] = {8, 4, 0};
5959+#define NUM_ORDERS ARRAY_SIZE(orders)
5960+struct dmabuf_page_pool *pools[NUM_ORDERS];
5961+struct dmabuf_page_pool *dma32_pools[NUM_ORDERS];
5962+
5963+static struct sg_table *dup_sg_table(struct sg_table *table)
5964 {
5965-	pgoff_t pg;
5966+	struct sg_table *new_table;
5967+	int ret, i;
5968+	struct scatterlist *sg, *new_sg;
5969 
5970-	for (pg = 0; pg < buffer->pagecount; pg++)
5971-		__free_page(buffer->pages[pg]);
5972-	kfree(buffer->pages);
5973-	kfree(buffer);
5974+	new_table = kzalloc(sizeof(*new_table), GFP_KERNEL);
5975+	if (!new_table)
5976+		return ERR_PTR(-ENOMEM);
5977+
5978+	ret = sg_alloc_table(new_table, table->orig_nents, GFP_KERNEL);
5979+	if (ret) {
5980+		kfree(new_table);
5981+		return ERR_PTR(-ENOMEM);
5982+	}
5983+
5984+	new_sg = new_table->sgl;
5985+	for_each_sgtable_sg(table, sg, i) {
5986+		sg_set_page(new_sg, sg_page(sg), sg->length, sg->offset);
5987+		new_sg = sg_next(new_sg);
5988+	}
5989+
5990+	return new_table;
5991 }
5992 
5993-static int system_heap_allocate(struct dma_heap *heap,
5994-				unsigned long len,
5995-				unsigned long fd_flags,
5996-				unsigned long heap_flags)
5997+static int system_heap_attach(struct dma_buf *dmabuf,
5998+			      struct dma_buf_attachment *attachment)
5999 {
6000-	struct heap_helper_buffer *helper_buffer;
6001-	struct dma_buf *dmabuf;
6002-	int ret = -ENOMEM;
6003-	pgoff_t pg;
6004+	struct system_heap_buffer *buffer = dmabuf->priv;
6005+	struct dma_heap_attachment *a;
6006+	struct sg_table *table;
6007+
6008+	a = kzalloc(sizeof(*a), GFP_KERNEL);
6009+	if (!a)
6010+		return -ENOMEM;
6011 
6012-	helper_buffer = kzalloc(sizeof(*helper_buffer), GFP_KERNEL);
6013-	if (!helper_buffer)
6014+	table = dup_sg_table(&buffer->sg_table);
6015+	if (IS_ERR(table)) {
6016+		kfree(a);
6017 		return -ENOMEM;
6018+	}
6019+
6020+	a->table = table;
6021+	a->dev = attachment->dev;
6022+	INIT_LIST_HEAD(&a->list);
6023+	a->mapped = false;
6024+	a->uncached = buffer->uncached;
6025+	attachment->priv = a;
6026+
6027+	mutex_lock(&buffer->lock);
6028+	list_add(&a->list, &buffer->attachments);
6029+	mutex_unlock(&buffer->lock);
6030+
6031+	return 0;
6032+}
6033+
6034+static void system_heap_detach(struct dma_buf *dmabuf,
6035+			       struct dma_buf_attachment *attachment)
6036+{
6037+	struct system_heap_buffer *buffer = dmabuf->priv;
6038+	struct dma_heap_attachment *a = attachment->priv;
6039+
6040+	mutex_lock(&buffer->lock);
6041+	list_del(&a->list);
6042+	mutex_unlock(&buffer->lock);
6043+
6044+	sg_free_table(a->table);
6045+	kfree(a->table);
6046+	kfree(a);
6047+}
6048+
6049+static struct sg_table *system_heap_map_dma_buf(struct dma_buf_attachment *attachment,
6050+						enum dma_data_direction direction)
6051+{
6052+	struct dma_heap_attachment *a = attachment->priv;
6053+	struct sg_table *table = a->table;
6054+	int attr = attachment->dma_map_attrs;
6055+	int ret;
6056+
6057+	if (a->uncached)
6058+		attr |= DMA_ATTR_SKIP_CPU_SYNC;
6059+
6060+	ret = dma_map_sgtable(attachment->dev, table, direction, attr);
6061+	if (ret)
6062+		return ERR_PTR(ret);
6063+
6064+	a->mapped = true;
6065+	return table;
6066+}
6067+
6068+static void system_heap_unmap_dma_buf(struct dma_buf_attachment *attachment,
6069+				      struct sg_table *table,
6070+				      enum dma_data_direction direction)
6071+{
6072+	struct dma_heap_attachment *a = attachment->priv;
6073+	int attr = attachment->dma_map_attrs;
6074+
6075+	if (a->uncached)
6076+		attr |= DMA_ATTR_SKIP_CPU_SYNC;
6077+	a->mapped = false;
6078+	dma_unmap_sgtable(attachment->dev, table, direction, attr);
6079+}
6080+
6081+static int system_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
6082+						enum dma_data_direction direction)
6083+{
6084+	struct system_heap_buffer *buffer = dmabuf->priv;
6085+	struct dma_heap_attachment *a;
6086+
6087+	mutex_lock(&buffer->lock);
6088+
6089+	if (buffer->vmap_cnt)
6090+		invalidate_kernel_vmap_range(buffer->vaddr, buffer->len);
6091+
6092+	if (!buffer->uncached) {
6093+		list_for_each_entry(a, &buffer->attachments, list) {
6094+			if (!a->mapped)
6095+				continue;
6096+			dma_sync_sgtable_for_cpu(a->dev, a->table, direction);
6097+		}
6098+	}
6099+	mutex_unlock(&buffer->lock);
6100+
6101+	return 0;
6102+}
6103+
6104+static int system_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
6105+					      enum dma_data_direction direction)
6106+{
6107+	struct system_heap_buffer *buffer = dmabuf->priv;
6108+	struct dma_heap_attachment *a;
6109+
6110+	mutex_lock(&buffer->lock);
6111+
6112+	if (buffer->vmap_cnt)
6113+		flush_kernel_vmap_range(buffer->vaddr, buffer->len);
6114+
6115+	if (!buffer->uncached) {
6116+		list_for_each_entry(a, &buffer->attachments, list) {
6117+			if (!a->mapped)
6118+				continue;
6119+			dma_sync_sgtable_for_device(a->dev, a->table, direction);
6120+		}
6121+	}
6122+	mutex_unlock(&buffer->lock);
6123+
6124+	return 0;
6125+}
6126+
6127+static int system_heap_sgl_sync_range(struct device *dev,
6128+				      struct scatterlist *sgl,
6129+				      unsigned int nents,
6130+				      unsigned int offset,
6131+				      unsigned int length,
6132+				      enum dma_data_direction dir,
6133+				      bool for_cpu)
6134+{
6135+	struct scatterlist *sg;
6136+	unsigned int len = 0;
6137+	dma_addr_t sg_dma_addr;
6138+	int i;
6139+
6140+	for_each_sg(sgl, sg, nents, i) {
6141+		unsigned int sg_offset, sg_left, size = 0;
6142+
6143+		sg_dma_addr = sg_dma_address(sg);
6144+
6145+		len += sg->length;
6146+		if (len <= offset)
6147+			continue;
6148+
6149+		sg_left = len - offset;
6150+		sg_offset = sg->length - sg_left;
6151+
6152+		size = (length < sg_left) ? length : sg_left;
6153+		if (for_cpu)
6154+			dma_sync_single_range_for_cpu(dev, sg_dma_addr,
6155+						      sg_offset, size, dir);
6156+		else
6157+			dma_sync_single_range_for_device(dev, sg_dma_addr,
6158+							 sg_offset, size, dir);
6159+
6160+		offset += size;
6161+		length -= size;
6162+
6163+		if (length == 0)
6164+			break;
6165+	}
6166+
6167+	return 0;
6168+}
6169+
6170+static int
6171+system_heap_dma_buf_begin_cpu_access_partial(struct dma_buf *dmabuf,
6172+					     enum dma_data_direction direction,
6173+					     unsigned int offset,
6174+					     unsigned int len)
6175+{
6176+	struct system_heap_buffer *buffer = dmabuf->priv;
6177+	struct dma_heap_attachment *a;
6178+	int ret = 0;
6179+
6180+	if (direction == DMA_TO_DEVICE)
6181+		return 0;
6182+
6183+	mutex_lock(&buffer->lock);
6184+	if (IS_ENABLED(CONFIG_SYSTEM_HEAP_FORCE_DMA_SYNC)) {
6185+		struct dma_heap *heap = buffer->heap;
6186+		struct sg_table *table = &buffer->sg_table;
6187+
6188+		ret = system_heap_sgl_sync_range(dma_heap_get_dev(heap),
6189+						 table->sgl,
6190+						 table->nents,
6191+						 offset, len,
6192+						 direction, true);
6193+		goto unlock;
6194+	}
6195+
6196+	if (buffer->vmap_cnt)
6197+		invalidate_kernel_vmap_range(buffer->vaddr, buffer->len);
6198+
6199+	if (!buffer->uncached)
6200+		goto unlock;
6201+
6202+	list_for_each_entry(a, &buffer->attachments, list) {
6203+		if (!a->mapped)
6204+			continue;
6205+
6206+		ret = system_heap_sgl_sync_range(a->dev, a->table->sgl,
6207+						 a->table->nents,
6208+						 offset, len,
6209+						 direction, true);
6210+	}
6211+
6212+unlock:
6213+	mutex_unlock(&buffer->lock);
6214+
6215+	return ret;
6216+}
6217+
6218+static int
6219+system_heap_dma_buf_end_cpu_access_partial(struct dma_buf *dmabuf,
6220+					   enum dma_data_direction direction,
6221+					   unsigned int offset,
6222+					   unsigned int len)
6223+{
6224+	struct system_heap_buffer *buffer = dmabuf->priv;
6225+	struct dma_heap_attachment *a;
6226+	int ret = 0;
6227+
6228+	mutex_lock(&buffer->lock);
6229+	if (IS_ENABLED(CONFIG_SYSTEM_HEAP_FORCE_DMA_SYNC)) {
6230+		struct dma_heap *heap = buffer->heap;
6231+		struct sg_table *table = &buffer->sg_table;
6232+
6233+		ret = system_heap_sgl_sync_range(dma_heap_get_dev(heap),
6234+						 table->sgl,
6235+						 table->nents,
6236+						 offset, len,
6237+						 direction, false);
6238+		goto unlock;
6239+	}
6240+
6241+	if (buffer->vmap_cnt)
6242+		flush_kernel_vmap_range(buffer->vaddr, buffer->len);
6243+
6244+	if (!buffer->uncached)
6245+		goto unlock;
6246+
6247+	list_for_each_entry(a, &buffer->attachments, list) {
6248+		if (!a->mapped)
6249+			continue;
6250+
6251+		ret = system_heap_sgl_sync_range(a->dev, a->table->sgl,
6252+						 a->table->nents,
6253+						 offset, len,
6254+						 direction, false);
6255+	}
6256+unlock:
6257+	mutex_unlock(&buffer->lock);
6258+
6259+	return ret;
6260+}
6261+
6262+static int system_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
6263+{
6264+	struct system_heap_buffer *buffer = dmabuf->priv;
6265+	struct sg_table *table = &buffer->sg_table;
6266+	unsigned long addr = vma->vm_start;
6267+	struct sg_page_iter piter;
6268+	int ret;
6269+
6270+	if (buffer->uncached)
6271+		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
6272+
6273+	for_each_sgtable_page(table, &piter, vma->vm_pgoff) {
6274+		struct page *page = sg_page_iter_page(&piter);
6275+
6276+		ret = remap_pfn_range(vma, addr, page_to_pfn(page), PAGE_SIZE,
6277+				      vma->vm_page_prot);
6278+		if (ret)
6279+			return ret;
6280+		addr += PAGE_SIZE;
6281+		if (addr >= vma->vm_end)
6282+			return 0;
6283+	}
6284+	return 0;
6285+}
6286+
6287+static void *system_heap_do_vmap(struct system_heap_buffer *buffer)
6288+{
6289+	struct sg_table *table = &buffer->sg_table;
6290+	int npages = PAGE_ALIGN(buffer->len) / PAGE_SIZE;
6291+	struct page **pages = vmalloc(sizeof(struct page *) * npages);
6292+	struct page **tmp = pages;
6293+	struct sg_page_iter piter;
6294+	pgprot_t pgprot = PAGE_KERNEL;
6295+	void *vaddr;
6296+
6297+	if (!pages)
6298+		return ERR_PTR(-ENOMEM);
6299+
6300+	if (buffer->uncached)
6301+		pgprot = pgprot_writecombine(PAGE_KERNEL);
6302+
6303+	for_each_sgtable_page(table, &piter, 0) {
6304+		WARN_ON(tmp - pages >= npages);
6305+		*tmp++ = sg_page_iter_page(&piter);
6306+	}
6307 
6308-	init_heap_helper_buffer(helper_buffer, system_heap_free);
6309-	helper_buffer->heap = heap;
6310-	helper_buffer->size = len;
6311+	vaddr = vmap(pages, npages, VM_MAP, pgprot);
6312+	vfree(pages);
6313 
6314-	helper_buffer->pagecount = len / PAGE_SIZE;
6315-	helper_buffer->pages = kmalloc_array(helper_buffer->pagecount,
6316-					     sizeof(*helper_buffer->pages),
6317-					     GFP_KERNEL);
6318-	if (!helper_buffer->pages) {
6319-		ret = -ENOMEM;
6320-		goto err0;
6321+	if (!vaddr)
6322+		return ERR_PTR(-ENOMEM);
6323+
6324+	return vaddr;
6325+}
6326+
6327+static void *system_heap_vmap(struct dma_buf *dmabuf)
6328+{
6329+	struct system_heap_buffer *buffer = dmabuf->priv;
6330+	void *vaddr;
6331+
6332+	mutex_lock(&buffer->lock);
6333+	if (buffer->vmap_cnt) {
6334+		buffer->vmap_cnt++;
6335+		vaddr = buffer->vaddr;
6336+		goto out;
6337+	}
6338+
6339+	vaddr = system_heap_do_vmap(buffer);
6340+	if (IS_ERR(vaddr))
6341+		goto out;
6342+
6343+	buffer->vaddr = vaddr;
6344+	buffer->vmap_cnt++;
6345+out:
6346+	mutex_unlock(&buffer->lock);
6347+
6348+	return vaddr;
6349+}
6350+
6351+static void system_heap_vunmap(struct dma_buf *dmabuf, void *vaddr)
6352+{
6353+	struct system_heap_buffer *buffer = dmabuf->priv;
6354+
6355+	mutex_lock(&buffer->lock);
6356+	if (!--buffer->vmap_cnt) {
6357+		vunmap(buffer->vaddr);
6358+		buffer->vaddr = NULL;
6359+	}
6360+	mutex_unlock(&buffer->lock);
6361+}
6362+
6363+static int system_heap_zero_buffer(struct system_heap_buffer *buffer)
6364+{
6365+	struct sg_table *sgt = &buffer->sg_table;
6366+	struct sg_page_iter piter;
6367+	struct page *p;
6368+	void *vaddr;
6369+	int ret = 0;
6370+
6371+	for_each_sgtable_page(sgt, &piter, 0) {
6372+		p = sg_page_iter_page(&piter);
6373+		vaddr = kmap_atomic(p);
6374+		memset(vaddr, 0, PAGE_SIZE);
6375+		kunmap_atomic(vaddr);
6376+	}
6377+
6378+	return ret;
6379+}
6380+
6381+static void system_heap_buf_free(struct deferred_freelist_item *item,
6382+				 enum df_reason reason)
6383+{
6384+	struct system_heap_buffer *buffer;
6385+	struct sg_table *table;
6386+	struct scatterlist *sg;
6387+	int i, j;
6388+
6389+	buffer = container_of(item, struct system_heap_buffer, deferred_free);
6390+	/* Zero the buffer pages before adding back to the pool */
6391+	if (reason == DF_NORMAL)
6392+		if (system_heap_zero_buffer(buffer))
6393+			reason = DF_UNDER_PRESSURE; // On failure, just free
6394+
6395+	table = &buffer->sg_table;
6396+	for_each_sg(table->sgl, sg, table->nents, i) {
6397+		struct page *page = sg_page(sg);
6398+
6399+		if (reason == DF_UNDER_PRESSURE) {
6400+			__free_pages(page, compound_order(page));
6401+		} else {
6402+			for (j = 0; j < NUM_ORDERS; j++) {
6403+				if (compound_order(page) == orders[j])
6404+					break;
6405+			}
6406+			dmabuf_page_pool_free(pools[j], page);
6407+		}
6408 	}
6409+	sg_free_table(table);
6410+	kfree(buffer);
6411+}
6412+
6413+static void system_heap_dma_buf_release(struct dma_buf *dmabuf)
6414+{
6415+	struct system_heap_buffer *buffer = dmabuf->priv;
6416+	int npages = PAGE_ALIGN(buffer->len) / PAGE_SIZE;
6417+
6418+	deferred_free(&buffer->deferred_free, system_heap_buf_free, npages);
6419+}
6420+
6421+static const struct dma_buf_ops system_heap_buf_ops = {
6422+	.attach = system_heap_attach,
6423+	.detach = system_heap_detach,
6424+	.map_dma_buf = system_heap_map_dma_buf,
6425+	.unmap_dma_buf = system_heap_unmap_dma_buf,
6426+	.begin_cpu_access = system_heap_dma_buf_begin_cpu_access,
6427+	.end_cpu_access = system_heap_dma_buf_end_cpu_access,
6428+	.begin_cpu_access_partial = system_heap_dma_buf_begin_cpu_access_partial,
6429+	.end_cpu_access_partial = system_heap_dma_buf_end_cpu_access_partial,
6430+	.mmap = system_heap_mmap,
6431+	.vmap = system_heap_vmap,
6432+	.vunmap = system_heap_vunmap,
6433+	.release = system_heap_dma_buf_release,
6434+};
6435+
6436+static struct page *system_heap_alloc_largest_available(struct dma_heap *heap,
6437+							unsigned long size,
6438+							unsigned int max_order)
6439+{
6440+	struct page *page;
6441+	int i;
6442+	const char *name = dma_heap_get_name(heap);
6443+	struct dmabuf_page_pool **pool;
6444+
6445+	pool = strstr(name, "dma32") ? dma32_pools : pools;
6446+	for (i = 0; i < NUM_ORDERS; i++) {
6447+		if (size <  (PAGE_SIZE << orders[i]))
6448+			continue;
6449+		if (max_order < orders[i])
6450+			continue;
6451+		page = dmabuf_page_pool_alloc(pool[i]);
6452+		if (!page)
6453+			continue;
6454+		return page;
6455+	}
6456+	return NULL;
6457+}
6458+
6459+static struct dma_buf *system_heap_do_allocate(struct dma_heap *heap,
6460+					       unsigned long len,
6461+					       unsigned long fd_flags,
6462+					       unsigned long heap_flags,
6463+					       bool uncached)
6464+{
6465+	struct system_heap_buffer *buffer;
6466+	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
6467+	unsigned long size_remaining = len;
6468+	unsigned int max_order = orders[0];
6469+	struct dma_buf *dmabuf;
6470+	struct sg_table *table;
6471+	struct scatterlist *sg;
6472+	struct list_head pages;
6473+	struct page *page, *tmp_page;
6474+	int i, ret = -ENOMEM;
6475+	struct list_head lists[8];
6476+	unsigned int block_index[8] = {0};
6477+	unsigned int block_1M = 0;
6478+	unsigned int block_64K = 0;
6479+	unsigned int maximum;
6480+	int j;
6481+
6482+	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
6483+	if (!buffer)
6484+		return ERR_PTR(-ENOMEM);
6485+
6486+	INIT_LIST_HEAD(&buffer->attachments);
6487+	mutex_init(&buffer->lock);
6488+	buffer->heap = heap;
6489+	buffer->len = len;
6490+	buffer->uncached = uncached;
6491 
6492-	for (pg = 0; pg < helper_buffer->pagecount; pg++) {
6493+	INIT_LIST_HEAD(&pages);
6494+	for (i = 0; i < 8; i++)
6495+		INIT_LIST_HEAD(&lists[i]);
6496+	i = 0;
6497+	while (size_remaining > 0) {
6498 		/*
6499 		 * Avoid trying to allocate memory if the process
6500-		 * has been killed by by SIGKILL
6501+		 * has been killed by SIGKILL
6502 		 */
6503 		if (fatal_signal_pending(current))
6504-			goto err1;
6505+			goto free_buffer;
6506+
6507+		page = system_heap_alloc_largest_available(heap, size_remaining, max_order);
6508+		if (!page)
6509+			goto free_buffer;
6510+
6511+		size_remaining -= page_size(page);
6512+		max_order = compound_order(page);
6513+		if (max_order) {
6514+			if (max_order == 8)
6515+				block_1M++;
6516+			if (max_order == 4)
6517+				block_64K++;
6518+			list_add_tail(&page->lru, &pages);
6519+		} else {
6520+			dma_addr_t phys = page_to_phys(page);
6521+			unsigned int bit_index = ((phys >> bank_bit_first) & bank_bit_mask) & 0x7;
6522+
6523+			list_add_tail(&page->lru, &lists[bit_index]);
6524+			block_index[bit_index]++;
6525+		}
6526+		i++;
6527+	}
6528+
6529+	table = &buffer->sg_table;
6530+	if (sg_alloc_table(table, i, GFP_KERNEL))
6531+		goto free_buffer;
6532 
6533-		helper_buffer->pages[pg] = alloc_page(GFP_KERNEL | __GFP_ZERO);
6534-		if (!helper_buffer->pages[pg])
6535-			goto err1;
6536+	maximum = block_index[0];
6537+	for (i = 1; i < 8; i++)
6538+		maximum = max(maximum, block_index[i]);
6539+	sg = table->sgl;
6540+	list_for_each_entry_safe(page, tmp_page, &pages, lru) {
6541+		sg_set_page(sg, page, page_size(page), 0);
6542+		sg = sg_next(sg);
6543+		list_del(&page->lru);
6544+	}
6545+	for (i = 0; i < maximum; i++) {
6546+		for (j = 0; j < 8; j++) {
6547+			if (!list_empty(&lists[j])) {
6548+				page = list_first_entry(&lists[j], struct page, lru);
6549+				sg_set_page(sg, page, PAGE_SIZE, 0);
6550+				sg = sg_next(sg);
6551+				list_del(&page->lru);
6552+			}
6553+		}
6554 	}
6555 
6556 	/* create the dmabuf */
6557-	dmabuf = heap_helper_export_dmabuf(helper_buffer, fd_flags);
6558+	exp_info.exp_name = dma_heap_get_name(heap);
6559+	exp_info.ops = &system_heap_buf_ops;
6560+	exp_info.size = buffer->len;
6561+	exp_info.flags = fd_flags;
6562+	exp_info.priv = buffer;
6563+	dmabuf = dma_buf_export(&exp_info);
6564 	if (IS_ERR(dmabuf)) {
6565 		ret = PTR_ERR(dmabuf);
6566-		goto err1;
6567+		goto free_pages;
6568 	}
6569 
6570-	helper_buffer->dmabuf = dmabuf;
6571+	/*
6572+	 * For uncached buffers, we need to initially flush cpu cache, since
6573+	 * the __GFP_ZERO on the allocation means the zeroing was done by the
6574+	 * cpu and thus it is likely cached. Map (and implicitly flush) and
6575+	 * unmap it now so we don't get corruption later on.
6576+	 */
6577+	if (buffer->uncached) {
6578+		dma_map_sgtable(dma_heap_get_dev(heap), table, DMA_BIDIRECTIONAL, 0);
6579+		dma_unmap_sgtable(dma_heap_get_dev(heap), table, DMA_BIDIRECTIONAL, 0);
6580+	}
6581 
6582-	ret = dma_buf_fd(dmabuf, fd_flags);
6583-	if (ret < 0) {
6584-		dma_buf_put(dmabuf);
6585-		/* just return, as put will call release and that will free */
6586-		return ret;
6587+	return dmabuf;
6588+
6589+free_pages:
6590+	for_each_sgtable_sg(table, sg, i) {
6591+		struct page *p = sg_page(sg);
6592+
6593+		__free_pages(p, compound_order(p));
6594+	}
6595+	sg_free_table(table);
6596+free_buffer:
6597+	list_for_each_entry_safe(page, tmp_page, &pages, lru)
6598+		__free_pages(page, compound_order(page));
6599+	for (i = 0; i < 8; i++) {
6600+		list_for_each_entry_safe(page, tmp_page, &lists[i], lru)
6601+			__free_pages(page, compound_order(page));
6602 	}
6603+	kfree(buffer);
6604 
6605-	return ret;
6606+	return ERR_PTR(ret);
6607+}
6608 
6609-err1:
6610-	while (pg > 0)
6611-		__free_page(helper_buffer->pages[--pg]);
6612-	kfree(helper_buffer->pages);
6613-err0:
6614-	kfree(helper_buffer);
6615+static struct dma_buf *system_heap_allocate(struct dma_heap *heap,
6616+					    unsigned long len,
6617+					    unsigned long fd_flags,
6618+					    unsigned long heap_flags)
6619+{
6620+	return system_heap_do_allocate(heap, len, fd_flags, heap_flags, false);
6621+}
6622 
6623-	return ret;
6624+static long system_get_pool_size(struct dma_heap *heap)
6625+{
6626+	int i;
6627+	long num_pages = 0;
6628+	struct dmabuf_page_pool **pool;
6629+	const char *name = dma_heap_get_name(heap);
6630+
6631+	pool = pools;
6632+	if (!strcmp(name, "system-dma32") || !strcmp(name, "system-uncached-dma32"))
6633+		pool = dma32_pools;
6634+	for (i = 0; i < NUM_ORDERS; i++, pool++) {
6635+		num_pages += ((*pool)->count[POOL_LOWPAGE] +
6636+			      (*pool)->count[POOL_HIGHPAGE]) << (*pool)->order;
6637+	}
6638+
6639+	return num_pages << PAGE_SHIFT;
6640 }
6641 
6642 static const struct dma_heap_ops system_heap_ops = {
6643 	.allocate = system_heap_allocate,
6644+	.get_pool_size = system_get_pool_size,
6645+};
6646+
6647+static struct dma_buf *system_uncached_heap_allocate(struct dma_heap *heap,
6648+						     unsigned long len,
6649+						     unsigned long fd_flags,
6650+						     unsigned long heap_flags)
6651+{
6652+	return system_heap_do_allocate(heap, len, fd_flags, heap_flags, true);
6653+}
6654+
6655+/* Dummy function to be used until we can call coerce_mask_and_coherent */
6656+static struct dma_buf *system_uncached_heap_not_initialized(struct dma_heap *heap,
6657+							    unsigned long len,
6658+							    unsigned long fd_flags,
6659+							    unsigned long heap_flags)
6660+{
6661+	return ERR_PTR(-EBUSY);
6662+}
6663+
6664+static struct dma_heap_ops system_uncached_heap_ops = {
6665+	/* After system_heap_create is complete, we will swap this */
6666+	.allocate = system_uncached_heap_not_initialized,
6667 };
6668 
6669+static int set_heap_dev_dma(struct device *heap_dev)
6670+{
6671+	int err = 0;
6672+
6673+	if (!heap_dev)
6674+		return -EINVAL;
6675+
6676+	dma_coerce_mask_and_coherent(heap_dev, DMA_BIT_MASK(64));
6677+
6678+	if (!heap_dev->dma_parms) {
6679+		heap_dev->dma_parms = devm_kzalloc(heap_dev,
6680+						   sizeof(*heap_dev->dma_parms),
6681+						   GFP_KERNEL);
6682+		if (!heap_dev->dma_parms)
6683+			return -ENOMEM;
6684+
6685+		err = dma_set_max_seg_size(heap_dev, (unsigned int)DMA_BIT_MASK(64));
6686+		if (err) {
6687+			devm_kfree(heap_dev, heap_dev->dma_parms);
6688+			dev_err(heap_dev, "Failed to set DMA segment size, err:%d\n", err);
6689+			return err;
6690+		}
6691+	}
6692+
6693+	return 0;
6694+}
6695+
6696 static int system_heap_create(void)
6697 {
6698 	struct dma_heap_export_info exp_info;
6699-	int ret = 0;
6700+	int i, err = 0;
6701+	struct dram_addrmap_info *ddr_map_info;
6702+
6703+	/*
6704+	 * swiotlb limits the size of a single mapping, so calculate the
6705+	 * maximum allocation size locally.
6706+	 *
6707+	 * A non-zero return from swiotlb_max_segment() means total RAM exceeds
6708+	 * 4 GiB and swiotlb is not in force mode; in that case the system heap
6709+	 * must limit its largest allocation.
6710+	 *
6711+	 * As a workaround, clamp the orders[] array accordingly.
6712+	 */
6713+	if (swiotlb_max_segment()) {
6714+		unsigned int max_size = (1 << IO_TLB_SHIFT) * IO_TLB_SEGSIZE;
6715+		int max_order = MAX_ORDER;
6716+		int i;
6717+
6718+		max_size = max_t(unsigned int, max_size, PAGE_SIZE) >> PAGE_SHIFT;
6719+		max_order = min(max_order, ilog2(max_size));
6720+		for (i = 0; i < NUM_ORDERS; i++) {
6721+			if (max_order < orders[i])
6722+				orders[i] = max_order;
6723+			pr_info("system_heap: orders[%d] = %u\n", i, orders[i]);
6724+		}
6725+	}
6726+
6727+	for (i = 0; i < NUM_ORDERS; i++) {
6728+		pools[i] = dmabuf_page_pool_create(order_flags[i], orders[i]);
6729+
6730+		if (!pools[i]) {
6731+			int j;
6732+
6733+			pr_err("%s: page pool creation failed!\n", __func__);
6734+			for (j = 0; j < i; j++)
6735+				dmabuf_page_pool_destroy(pools[j]);
6736+			return -ENOMEM;
6737+		}
6738+	}
6739+
6740+	for (i = 0; i < NUM_ORDERS; i++) {
6741+		dma32_pools[i] = dmabuf_page_pool_create(order_flags[i] | GFP_DMA32, orders[i]);
6742+
6743+		if (!dma32_pools[i]) {
6744+			int j;
6745+
6746+			pr_err("%s: page dma32 pool creation failed!\n", __func__);
6747+			for (j = 0; j < i; j++)
6748+				dmabuf_page_pool_destroy(dma32_pools[j]);
6749+			goto err_dma32_pool;
6750+		}
6751+	}
6752 
6753 	exp_info.name = "system";
6754 	exp_info.ops = &system_heap_ops;
6755@@ -115,9 +824,56 @@ static int system_heap_create(void)
6756 
6757 	sys_heap = dma_heap_add(&exp_info);
6758 	if (IS_ERR(sys_heap))
6759-		ret = PTR_ERR(sys_heap);
6760+		return PTR_ERR(sys_heap);
6761 
6762-	return ret;
6763+	exp_info.name = "system-dma32";
6764+	exp_info.ops = &system_heap_ops;
6765+	exp_info.priv = NULL;
6766+
6767+	sys_dma32_heap = dma_heap_add(&exp_info);
6768+	if (IS_ERR(sys_dma32_heap))
6769+		return PTR_ERR(sys_dma32_heap);
6770+
6771+	exp_info.name = "system-uncached";
6772+	exp_info.ops = &system_uncached_heap_ops;
6773+	exp_info.priv = NULL;
6774+
6775+	sys_uncached_heap = dma_heap_add(&exp_info);
6776+	if (IS_ERR(sys_uncached_heap))
6777+		return PTR_ERR(sys_uncached_heap);
6778+
6779+	err = set_heap_dev_dma(dma_heap_get_dev(sys_uncached_heap));
6780+	if (err)
6781+		return err;
6782+
6783+	exp_info.name = "system-uncached-dma32";
6784+	exp_info.ops = &system_uncached_heap_ops;
6785+	exp_info.priv = NULL;
6786+
6787+	sys_uncached_dma32_heap = dma_heap_add(&exp_info);
6788+	if (IS_ERR(sys_uncached_dma32_heap))
6789+		return PTR_ERR(sys_uncached_dma32_heap);
6790+
6791+	err = set_heap_dev_dma(dma_heap_get_dev(sys_uncached_dma32_heap));
6792+	if (err)
6793+		return err;
6794+	dma_coerce_mask_and_coherent(dma_heap_get_dev(sys_uncached_dma32_heap), DMA_BIT_MASK(32));
6795+
6796+	mb(); /* make sure we only set allocate after dma_mask is set */
6797+	system_uncached_heap_ops.allocate = system_uncached_heap_allocate;
6798+
6799+	ddr_map_info = sip_smc_get_dram_map();
6800+	if (ddr_map_info) {
6801+		bank_bit_first = ddr_map_info->bank_bit_first;
6802+		bank_bit_mask = ddr_map_info->bank_bit_mask;
6803+	}
6804+
6805+	return 0;
6806+err_dma32_pool:
6807+	for (i = 0; i < NUM_ORDERS; i++)
6808+		dmabuf_page_pool_destroy(pools[i]);
6809+
6810+	return -ENOMEM;
6811 }
6812 module_init(system_heap_create);
6813 MODULE_LICENSE("GPL v2");
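From userspace, the heaps registered here appear as /dev/dma_heap/system, system-dma32, system-uncached and system-uncached-dma32, and the new pool statistic is readable from /sys/kernel/dma_heap/total_pools_kb. A small allocation sketch using only the stock DMA-BUF heap UAPI; the helper name and error handling are illustrative:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/dma-heap.h>

/* Returns a dma-buf fd on success, -1 on failure. */
static int heap_alloc(const char *heap_name, size_t len)
{
	struct dma_heap_allocation_data data = {
		.len = len,
		.fd_flags = O_RDWR | O_CLOEXEC,
	};
	char path[64];
	int heap_fd, ret;

	snprintf(path, sizeof(path), "/dev/dma_heap/%s", heap_name);
	heap_fd = open(path, O_RDONLY | O_CLOEXEC);
	if (heap_fd < 0)
		return -1;

	ret = ioctl(heap_fd, DMA_HEAP_IOCTL_ALLOC, &data);
	close(heap_fd);

	return ret < 0 ? -1 : (int)data.fd;
}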
6814diff --git a/drivers/dma-buf/sw_sync.c b/drivers/dma-buf/sw_sync.c
6815index 348b3a917..3daa6c76b 100644
6816--- a/drivers/dma-buf/sw_sync.c
6817+++ b/drivers/dma-buf/sw_sync.c
6818@@ -7,6 +7,8 @@
6819 
6820 #include <linux/file.h>
6821 #include <linux/fs.h>
6822+#include <linux/miscdevice.h>
6823+#include <linux/module.h>
6824 #include <linux/uaccess.h>
6825 #include <linux/slab.h>
6826 #include <linux/sync_file.h>
6827@@ -410,3 +412,13 @@ const struct file_operations sw_sync_debugfs_fops = {
6828 	.unlocked_ioctl = sw_sync_ioctl,
6829 	.compat_ioctl	= compat_ptr_ioctl,
6830 };
6831+
6832+static struct miscdevice sw_sync_dev = {
6833+	.minor	= MISC_DYNAMIC_MINOR,
6834+	.name	= "sw_sync",
6835+	.fops	= &sw_sync_debugfs_fops,
6836+};
6837+
6838+module_misc_device(sw_sync_dev);
6839+
6840+MODULE_LICENSE("GPL v2");
6841diff --git a/drivers/dma-buf/sync_debug.h b/drivers/dma-buf/sync_debug.h
6842index 6176e52ba..fb676da19 100644
6843--- a/drivers/dma-buf/sync_debug.h
6844+++ b/drivers/dma-buf/sync_debug.h
6845@@ -64,9 +64,16 @@ struct sync_pt {
6846 
6847 extern const struct file_operations sw_sync_debugfs_fops;
6848 
6849+#ifdef CONFIG_SW_SYNC_DEBUG
6850 void sync_timeline_debug_add(struct sync_timeline *obj);
6851 void sync_timeline_debug_remove(struct sync_timeline *obj);
6852 void sync_file_debug_add(struct sync_file *fence);
6853 void sync_file_debug_remove(struct sync_file *fence);
6854+#else
6855+static inline void sync_timeline_debug_add(struct sync_timeline *obj) {}
6856+static inline void sync_timeline_debug_remove(struct sync_timeline *obj) {}
6857+static inline void sync_file_debug_add(struct sync_file *fence) {}
6858+static inline void sync_file_debug_remove(struct sync_file *fence) {}
6859+#endif
6860 
6861 #endif /* _LINUX_SYNC_H */
6862diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
6863index 2e63274a4..ab666917b 100644
6864--- a/drivers/gpio/gpiolib-of.c
6865+++ b/drivers/gpio/gpiolib-of.c
6866@@ -1046,3 +1046,14 @@ void of_gpiochip_remove(struct gpio_chip *chip)
6867 {
6868 	of_node_put(chip->of_node);
6869 }
6870+
6871+void of_gpio_dev_init(struct gpio_chip *gc, struct gpio_device *gdev)
6872+{
6873+	/* If the gpiochip has an assigned OF node this takes precedence */
6874+	if (gc->of_node)
6875+		gdev->dev.of_node = gc->of_node;
6876+	else
6877+		gc->of_node = gdev->dev.of_node;
6878+	if (gdev->dev.of_node)
6879+		gdev->dev.fwnode = of_fwnode_handle(gdev->dev.of_node);
6880+}
6881diff --git a/drivers/gpio/gpiolib-of.h b/drivers/gpio/gpiolib-of.h
6882index ed26664f1..8af2bc899 100644
6883--- a/drivers/gpio/gpiolib-of.h
6884+++ b/drivers/gpio/gpiolib-of.h
6885@@ -15,6 +15,7 @@ int of_gpiochip_add(struct gpio_chip *gc);
6886 void of_gpiochip_remove(struct gpio_chip *gc);
6887 int of_gpio_get_count(struct device *dev, const char *con_id);
6888 bool of_gpio_need_valid_mask(const struct gpio_chip *gc);
6889+void of_gpio_dev_init(struct gpio_chip *gc, struct gpio_device *gdev);
6890 #else
6891 static inline struct gpio_desc *of_find_gpio(struct device *dev,
6892 					     const char *con_id,
6893@@ -33,6 +34,10 @@ static inline bool of_gpio_need_valid_mask(const struct gpio_chip *gc)
6894 {
6895 	return false;
6896 }
6897+static inline void of_gpio_dev_init(struct gpio_chip *gc,
6898+				    struct gpio_device *gdev)
6899+{
6900+}
6901 #endif /* CONFIG_OF_GPIO */
6902 
6903 extern struct notifier_block gpio_of_notifier;
6904diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
6905index 4e9b3a95f..95994151a 100644
6906--- a/drivers/gpu/drm/Kconfig
6907+++ b/drivers/gpu/drm/Kconfig
6908@@ -32,6 +32,10 @@ config DRM_MIPI_DBI
6909 	depends on DRM
6910 	select DRM_KMS_HELPER
6911 
6912+config DRM_IGNORE_IOTCL_PERMIT
6913+	bool "Ignore drm ioctl permission"
6914+	depends on DRM && ANDROID && NO_GKI
6915+
6916 config DRM_MIPI_DSI
6917 	bool
6918 	depends on DRM
6919diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
6920index e8baa0745..0bc97715e 100644
6921--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
6922+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
6923@@ -13,6 +13,7 @@
6924 #include <linux/interrupt.h>
6925 #include <linux/io.h>
6926 #include <linux/iopoll.h>
6927+#include <linux/irq.h>
6928 #include <linux/module.h>
6929 #include <linux/of.h>
6930 #include <linux/phy/phy.h>
6931@@ -40,6 +41,20 @@ struct bridge_init {
6932 	struct device_node *node;
6933 };
6934 
6935+static bool analogix_dp_bandwidth_ok(struct analogix_dp_device *dp,
6936+				     const struct drm_display_mode *mode,
6937+				     unsigned int rate, unsigned int lanes)
6938+{
6939+	u32 max_bw, req_bw, bpp = 24;
6940+
6941+	req_bw = mode->clock * bpp / 8;
6942+	max_bw = lanes * rate;
6943+	if (req_bw > max_bw)
6944+		return false;
6945+
6946+	return true;
6947+}
6948+
6949 static int analogix_dp_init_dp(struct analogix_dp_device *dp)
6950 {
6951 	int ret;
6952@@ -64,6 +79,46 @@ static int analogix_dp_init_dp(struct analogix_dp_device *dp)
6953 	return 0;
6954 }
6955 
6956+static int analogix_dp_panel_prepare(struct analogix_dp_device *dp)
6957+{
6958+	int ret = 0;
6959+
6960+	mutex_lock(&dp->panel_lock);
6961+
6962+	if (dp->panel_is_prepared)
6963+		goto out;
6964+
6965+	ret = drm_panel_prepare(dp->plat_data->panel);
6966+	if (ret)
6967+		goto out;
6968+
6969+	dp->panel_is_prepared = true;
6970+
6971+out:
6972+	mutex_unlock(&dp->panel_lock);
6973+	return ret;
6974+}
6975+
6976+static int analogix_dp_panel_unprepare(struct analogix_dp_device *dp)
6977+{
6978+	int ret = 0;
6979+
6980+	mutex_lock(&dp->panel_lock);
6981+
6982+	if (!dp->panel_is_prepared)
6983+		goto out;
6984+
6985+	ret = drm_panel_unprepare(dp->plat_data->panel);
6986+	if (ret)
6987+		goto out;
6988+
6989+	dp->panel_is_prepared = false;
6990+
6991+out:
6992+	mutex_unlock(&dp->panel_lock);
6993+	return ret;
6994+}
6995+
6996 static int analogix_dp_detect_hpd(struct analogix_dp_device *dp)
6997 {
6998 	int timeout_loop = 0;
6999@@ -108,6 +163,9 @@ static bool analogix_dp_detect_sink_psr(struct analogix_dp_device *dp)
7000 	unsigned char psr_version;
7001 	int ret;
7002 
7003+	if (!device_property_read_bool(dp->dev, "support-psr"))
7004+		return false;
7005+
7006 	ret = drm_dp_dpcd_readb(&dp->aux, DP_PSR_SUPPORT, &psr_version);
7007 	if (ret != 1) {
7008 		dev_err(dp->dev, "failed to get PSR version, disable it\n");
7009@@ -216,8 +274,24 @@ static int analogix_dp_set_enhanced_mode(struct analogix_dp_device *dp)
7010 	if (ret < 0)
7011 		return ret;
7012 
7013+	if (!data) {
7014+		/*
7015+		 * A setting of 1 indicates that this is an eDP device that
7016+		 * uses only Enhanced Framing, independently of the setting by
7017+		 * the source of ENHANCED_FRAME_EN
7018+		 */
7019+		ret = drm_dp_dpcd_readb(&dp->aux, DP_EDP_CONFIGURATION_CAP,
7020+					&data);
7021+		if (ret < 0)
7022+			return ret;
7023+
7024+		data = !!(data & DP_FRAMING_CHANGE_CAP);
7025+	}
7026+
7027 	analogix_dp_enable_enhanced_mode(dp, data);
7028 
7029+	dp->link_train.enhanced_framing = data;
7030+
7031 	return 0;
7032 }
7033 
7034@@ -233,32 +307,10 @@ static int analogix_dp_training_pattern_dis(struct analogix_dp_device *dp)
7035 	return ret < 0 ? ret : 0;
7036 }
7037 
7038-static void
7039-analogix_dp_set_lane_lane_pre_emphasis(struct analogix_dp_device *dp,
7040-				       int pre_emphasis, int lane)
7041-{
7042-	switch (lane) {
7043-	case 0:
7044-		analogix_dp_set_lane0_pre_emphasis(dp, pre_emphasis);
7045-		break;
7046-	case 1:
7047-		analogix_dp_set_lane1_pre_emphasis(dp, pre_emphasis);
7048-		break;
7049-
7050-	case 2:
7051-		analogix_dp_set_lane2_pre_emphasis(dp, pre_emphasis);
7052-		break;
7053-
7054-	case 3:
7055-		analogix_dp_set_lane3_pre_emphasis(dp, pre_emphasis);
7056-		break;
7057-	}
7058-}
7059-
7060 static int analogix_dp_link_start(struct analogix_dp_device *dp)
7061 {
7062 	u8 buf[4];
7063-	int lane, lane_count, pll_tries, retval;
7064+	int lane, lane_count, retval;
7065 
7066 	lane_count = dp->link_train.lane_count;
7067 
7068@@ -278,6 +330,14 @@ static int analogix_dp_link_start(struct analogix_dp_device *dp)
7069 	retval = drm_dp_dpcd_write(&dp->aux, DP_LINK_BW_SET, buf, 2);
7070 	if (retval < 0)
7071 		return retval;
7072+
7073+	/* Spread AMP if required, enable 8b/10b coding */
7074+	buf[0] = analogix_dp_ssc_supported(dp) ? DP_SPREAD_AMP_0_5 : 0;
7075+	buf[1] = DP_SET_ANSI_8B10B;
7076+	retval = drm_dp_dpcd_write(&dp->aux, DP_DOWNSPREAD_CTRL, buf, 2);
7077+	if (retval < 0)
7078+		return retval;
7079+
7080 	/* set enhanced mode if available */
7081 	retval = analogix_dp_set_enhanced_mode(dp);
7082 	if (retval < 0) {
7083@@ -285,22 +345,12 @@ static int analogix_dp_link_start(struct analogix_dp_device *dp)
7084 		return retval;
7085 	}
7086 
7087-	/* Set TX pre-emphasis to minimum */
7088+	/* Set TX voltage-swing and pre-emphasis to minimum */
7089 	for (lane = 0; lane < lane_count; lane++)
7090-		analogix_dp_set_lane_lane_pre_emphasis(dp,
7091-			PRE_EMPHASIS_LEVEL_0, lane);
7092-
7093-	/* Wait for PLL lock */
7094-	pll_tries = 0;
7095-	while (analogix_dp_get_pll_lock_status(dp) == PLL_UNLOCKED) {
7096-		if (pll_tries == DP_TIMEOUT_LOOP_COUNT) {
7097-			dev_err(dp->dev, "Wait for PLL lock timed out\n");
7098-			return -ETIMEDOUT;
7099-		}
7100-
7101-		pll_tries++;
7102-		usleep_range(90, 120);
7103-	}
7104+		dp->link_train.training_lane[lane] =
7105+					DP_TRAIN_VOLTAGE_SWING_LEVEL_0 |
7106+					DP_TRAIN_PRE_EMPH_LEVEL_0;
7107+	analogix_dp_set_lane_link_training(dp);
7108 
7109 	/* Set training pattern 1 */
7110 	analogix_dp_set_training_pattern(dp, TRAINING_PTN1);
7111@@ -383,54 +433,6 @@ static unsigned char analogix_dp_get_adjust_request_pre_emphasis(
7112 	return ((link_value >> shift) & 0xc) >> 2;
7113 }
7114 
7115-static void analogix_dp_set_lane_link_training(struct analogix_dp_device *dp,
7116-					       u8 training_lane_set, int lane)
7117-{
7118-	switch (lane) {
7119-	case 0:
7120-		analogix_dp_set_lane0_link_training(dp, training_lane_set);
7121-		break;
7122-	case 1:
7123-		analogix_dp_set_lane1_link_training(dp, training_lane_set);
7124-		break;
7125-
7126-	case 2:
7127-		analogix_dp_set_lane2_link_training(dp, training_lane_set);
7128-		break;
7129-
7130-	case 3:
7131-		analogix_dp_set_lane3_link_training(dp, training_lane_set);
7132-		break;
7133-	}
7134-}
7135-
7136-static unsigned int
7137-analogix_dp_get_lane_link_training(struct analogix_dp_device *dp,
7138-				   int lane)
7139-{
7140-	u32 reg;
7141-
7142-	switch (lane) {
7143-	case 0:
7144-		reg = analogix_dp_get_lane0_link_training(dp);
7145-		break;
7146-	case 1:
7147-		reg = analogix_dp_get_lane1_link_training(dp);
7148-		break;
7149-	case 2:
7150-		reg = analogix_dp_get_lane2_link_training(dp);
7151-		break;
7152-	case 3:
7153-		reg = analogix_dp_get_lane3_link_training(dp);
7154-		break;
7155-	default:
7156-		WARN_ON(1);
7157-		return 0;
7158-	}
7159-
7160-	return reg;
7161-}
7162-
7163 static void analogix_dp_reduce_link_rate(struct analogix_dp_device *dp)
7164 {
7165 	analogix_dp_training_pattern_dis(dp);
7166@@ -463,13 +465,27 @@ static void analogix_dp_get_adjust_training_lane(struct analogix_dp_device *dp,
7167 	}
7168 }
7169 
7170+static bool analogix_dp_tps3_supported(struct analogix_dp_device *dp)
7171+{
7172+	bool source_tps3_supported, sink_tps3_supported;
7173+	u8 dpcd = 0;
7174+
7175+	source_tps3_supported =
7176+		dp->video_info.max_link_rate == DP_LINK_BW_5_4;
7177+	drm_dp_dpcd_readb(&dp->aux, DP_MAX_LANE_COUNT, &dpcd);
7178+	sink_tps3_supported = dpcd & DP_TPS3_SUPPORTED;
7179+
7180+	return source_tps3_supported && sink_tps3_supported;
7181+}
7182+
7183 static int analogix_dp_process_clock_recovery(struct analogix_dp_device *dp)
7184 {
7185 	int lane, lane_count, retval;
7186 	u8 voltage_swing, pre_emphasis, training_lane;
7187 	u8 link_status[2], adjust_request[2];
7188+	u8 training_pattern = TRAINING_PTN2;
7189 
7190-	usleep_range(100, 101);
7191+	drm_dp_link_train_clock_recovery_delay(dp->dpcd);
7192 
7193 	lane_count = dp->link_train.lane_count;
7194 
7195@@ -483,12 +499,16 @@ static int analogix_dp_process_clock_recovery(struct analogix_dp_device *dp)
7196 		return retval;
7197 
7198 	if (analogix_dp_clock_recovery_ok(link_status, lane_count) == 0) {
7199-		/* set training pattern 2 for EQ */
7200-		analogix_dp_set_training_pattern(dp, TRAINING_PTN2);
7201+		if (analogix_dp_tps3_supported(dp))
7202+			training_pattern = TRAINING_PTN3;
7203+
7204+		/* set training pattern for EQ */
7205+		analogix_dp_set_training_pattern(dp, training_pattern);
7206 
7207 		retval = drm_dp_dpcd_writeb(&dp->aux, DP_TRAINING_PATTERN_SET,
7208 					    DP_LINK_SCRAMBLING_DISABLE |
7209-						DP_TRAINING_PATTERN_2);
7210+					    (training_pattern == TRAINING_PTN3 ?
7211+					     DP_TRAINING_PATTERN_3 : DP_TRAINING_PATTERN_2));
7212 		if (retval < 0)
7213 			return retval;
7214 
7215@@ -522,10 +542,7 @@ static int analogix_dp_process_clock_recovery(struct analogix_dp_device *dp)
7216 	}
7217 
7218 	analogix_dp_get_adjust_training_lane(dp, adjust_request);
7219-
7220-	for (lane = 0; lane < lane_count; lane++)
7221-		analogix_dp_set_lane_link_training(dp,
7222-			dp->link_train.training_lane[lane], lane);
7223+	analogix_dp_set_lane_link_training(dp);
7224 
7225 	retval = drm_dp_dpcd_write(&dp->aux, DP_TRAINING_LANE0_SET,
7226 				   dp->link_train.training_lane, lane_count);
7227@@ -537,11 +554,11 @@ static int analogix_dp_process_clock_recovery(struct analogix_dp_device *dp)
7228 
7229 static int analogix_dp_process_equalizer_training(struct analogix_dp_device *dp)
7230 {
7231-	int lane, lane_count, retval;
7232+	int lane_count, retval;
7233 	u32 reg;
7234 	u8 link_align, link_status[2], adjust_request[2];
7235 
7236-	usleep_range(400, 401);
7237+	drm_dp_link_train_channel_eq_delay(dp->dpcd);
7238 
7239 	lane_count = dp->link_train.lane_count;
7240 
7241@@ -597,9 +614,7 @@ static int analogix_dp_process_equalizer_training(struct analogix_dp_device *dp)
7242 		return -EIO;
7243 	}
7244 
7245-	for (lane = 0; lane < lane_count; lane++)
7246-		analogix_dp_set_lane_link_training(dp,
7247-			dp->link_train.training_lane[lane], lane);
7248+	analogix_dp_set_lane_link_training(dp);
7249 
7250 	retval = drm_dp_dpcd_write(&dp->aux, DP_TRAINING_LANE0_SET,
7251 				   dp->link_train.training_lane, lane_count);
7252@@ -609,10 +624,11 @@ static int analogix_dp_process_equalizer_training(struct analogix_dp_device *dp)
7253 	return 0;
7254 }
7255 
7256-static void analogix_dp_get_max_rx_bandwidth(struct analogix_dp_device *dp,
7257-					     u8 *bandwidth)
7258+static int analogix_dp_get_max_rx_bandwidth(struct analogix_dp_device *dp,
7259+					    u8 *bandwidth)
7260 {
7261 	u8 data;
7262+	int ret;
7263 
7264 	/*
7265 	 * For DP rev.1.1, Maximum link rate of Main Link lanes
7266@@ -620,28 +636,41 @@ static void analogix_dp_get_max_rx_bandwidth(struct analogix_dp_device *dp,
7267 	 * For DP rev.1.2, Maximum link rate of Main Link lanes
7268 	 * 0x06 = 1.62 Gbps, 0x0a = 2.7 Gbps, 0x14 = 5.4Gbps
7269 	 */
7270-	drm_dp_dpcd_readb(&dp->aux, DP_MAX_LINK_RATE, &data);
7271+	ret = drm_dp_dpcd_readb(&dp->aux, DP_MAX_LINK_RATE, &data);
7272+	if (ret < 0)
7273+		return ret;
7274+
7275 	*bandwidth = data;
7276+
7277+	return 0;
7278 }
7279 
7280-static void analogix_dp_get_max_rx_lane_count(struct analogix_dp_device *dp,
7281-					      u8 *lane_count)
7282+static int analogix_dp_get_max_rx_lane_count(struct analogix_dp_device *dp,
7283+					     u8 *lane_count)
7284 {
7285 	u8 data;
7286+	int ret;
7287 
7288 	/*
7289 	 * For DP rev.1.1, Maximum number of Main Link lanes
7290 	 * 0x01 = 1 lane, 0x02 = 2 lanes, 0x04 = 4 lanes
7291 	 */
7292-	drm_dp_dpcd_readb(&dp->aux, DP_MAX_LANE_COUNT, &data);
7293+	ret = drm_dp_dpcd_readb(&dp->aux, DP_MAX_LANE_COUNT, &data);
7294+	if (ret < 0)
7295+		return ret;
7296+
7297 	*lane_count = DPCD_MAX_LANE_COUNT(data);
7298+
7299+	return 0;
7300 }
7301 
7302 static int analogix_dp_full_link_train(struct analogix_dp_device *dp,
7303 				       u32 max_lanes, u32 max_rate)
7304 {
7305+	struct video_info *video = &dp->video_info;
7306 	int retval = 0;
7307 	bool training_finished = false;
7308+	u8 dpcd;
7309 
7310 	/*
7311 	 * MACRO_RST must be applied after the PLL_LOCK to avoid
7312@@ -667,6 +696,16 @@ static int analogix_dp_full_link_train(struct analogix_dp_device *dp,
7313 		dp->link_train.lane_count = (u8)LANE_COUNT1;
7314 	}
7315 
7316+	if (!analogix_dp_bandwidth_ok(dp, &video->mode,
7317+				      drm_dp_bw_code_to_link_rate(dp->link_train.link_rate),
7318+				      dp->link_train.lane_count)) {
7319+		dev_err(dp->dev, "bandwidth overflow\n");
7320+		return -EINVAL;
7321+	}
7322+
7323+	drm_dp_dpcd_readb(&dp->aux, DP_MAX_DOWNSPREAD, &dpcd);
7324+	dp->link_train.ssc = !!(dpcd & DP_MAX_DOWNSPREAD_0_5);
7325+
7326 	/* Setup TX lane count & rate */
7327 	if (dp->link_train.lane_count > max_lanes)
7328 		dp->link_train.lane_count = max_lanes;
7329@@ -711,27 +750,15 @@ static int analogix_dp_full_link_train(struct analogix_dp_device *dp,
7330 
7331 static int analogix_dp_fast_link_train(struct analogix_dp_device *dp)
7332 {
7333-	int i, ret;
7334+	int ret;
7335 	u8 link_align, link_status[2];
7336-	enum pll_status status;
7337 
7338 	analogix_dp_reset_macro(dp);
7339 
7340 	analogix_dp_set_link_bandwidth(dp, dp->link_train.link_rate);
7341 	analogix_dp_set_lane_count(dp, dp->link_train.lane_count);
7342-
7343-	for (i = 0; i < dp->link_train.lane_count; i++) {
7344-		analogix_dp_set_lane_link_training(dp,
7345-			dp->link_train.training_lane[i], i);
7346-	}
7347-
7348-	ret = readx_poll_timeout(analogix_dp_get_pll_lock_status, dp, status,
7349-				 status != PLL_UNLOCKED, 120,
7350-				 120 * DP_TIMEOUT_LOOP_COUNT);
7351-	if (ret) {
7352-		DRM_DEV_ERROR(dp->dev, "Wait for pll lock failed %d\n", ret);
7353-		return ret;
7354-	}
7355+	analogix_dp_set_lane_link_training(dp);
7356+	analogix_dp_enable_enhanced_mode(dp, dp->link_train.enhanced_framing);
7357 
7358 	/* source Set training pattern 1 */
7359 	analogix_dp_set_training_pattern(dp, TRAINING_PTN1);
7360@@ -742,7 +769,6 @@ static int analogix_dp_fast_link_train(struct analogix_dp_device *dp)
7361 	/* From DP spec, pattern must be on-screen for a minimum 500us */
7362 	usleep_range(500, 600);
7363 
7364-	/* TODO: enhanced_mode?*/
7365 	analogix_dp_set_training_pattern(dp, DP_NONE);
7366 
7367 	/*
7368@@ -884,25 +910,44 @@ static int analogix_dp_enable_scramble(struct analogix_dp_device *dp,
7369 	return ret < 0 ? ret : 0;
7370 }
7371 
7372+static irqreturn_t analogix_dp_hpd_irq_handler(int irq, void *arg)
7373+{
7374+	struct analogix_dp_device *dp = arg;
7375+
7376+	if (dp->drm_dev)
7377+		drm_helper_hpd_irq_event(dp->drm_dev);
7378+
7379+	return IRQ_HANDLED;
7380+}
7381+
7382 static irqreturn_t analogix_dp_hardirq(int irq, void *arg)
7383 {
7384 	struct analogix_dp_device *dp = arg;
7385-	irqreturn_t ret = IRQ_NONE;
7386 	enum dp_irq_type irq_type;
7387+	int ret;
7388+
7389+	ret = pm_runtime_get_sync(dp->dev);
7390+	if (ret < 0)
7391+		return IRQ_NONE;
7392 
7393 	irq_type = analogix_dp_get_irq_type(dp);
7394-	if (irq_type != DP_IRQ_TYPE_UNKNOWN) {
7395+	if (irq_type != DP_IRQ_TYPE_UNKNOWN)
7396 		analogix_dp_mute_hpd_interrupt(dp);
7397-		ret = IRQ_WAKE_THREAD;
7398-	}
7399 
7400-	return ret;
7401+	pm_runtime_put_sync(dp->dev);
7402+
7403+	return IRQ_WAKE_THREAD;
7404 }
7405 
7406 static irqreturn_t analogix_dp_irq_thread(int irq, void *arg)
7407 {
7408 	struct analogix_dp_device *dp = arg;
7409 	enum dp_irq_type irq_type;
7410+	int ret;
7411+
7412+	ret = pm_runtime_get_sync(dp->dev);
7413+	if (ret < 0)
7414+		return IRQ_NONE;
7415 
7416 	irq_type = analogix_dp_get_irq_type(dp);
7417 	if (irq_type & DP_IRQ_TYPE_HP_CABLE_IN ||
7418@@ -917,6 +962,8 @@ static irqreturn_t analogix_dp_irq_thread(int irq, void *arg)
7419 		analogix_dp_unmute_hpd_interrupt(dp);
7420 	}
7421 
7422+	pm_runtime_put_sync(dp->dev);
7423+
7424 	return IRQ_HANDLED;
7425 }
7426 
7427@@ -936,16 +983,73 @@ static int analogix_dp_fast_link_train_detection(struct analogix_dp_device *dp)
7428 	return 0;
7429 }
7430 
7431+static int analogix_dp_link_power_up(struct analogix_dp_device *dp)
7432+{
7433+	u8 value;
7434+	int ret;
7435+
7436+	if (dp->dpcd[DP_DPCD_REV] < 0x11)
7437+		return 0;
7438+
7439+	ret = drm_dp_dpcd_readb(&dp->aux, DP_SET_POWER, &value);
7440+	if (ret < 0)
7441+		return ret;
7442+
7443+	value &= ~DP_SET_POWER_MASK;
7444+	value |= DP_SET_POWER_D0;
7445+
7446+	ret = drm_dp_dpcd_writeb(&dp->aux, DP_SET_POWER, value);
7447+	if (ret < 0)
7448+		return ret;
7449+
7450+	usleep_range(1000, 2000);
7451+
7452+	return 0;
7453+}
7454+
7455+static int analogix_dp_link_power_down(struct analogix_dp_device *dp)
7456+{
7457+	u8 value;
7458+	int ret;
7459+
7460+	if (dp->dpcd[DP_DPCD_REV] < 0x11)
7461+		return 0;
7462+
7463+	ret = drm_dp_dpcd_readb(&dp->aux, DP_SET_POWER, &value);
7464+	if (ret < 0)
7465+		return ret;
7466+
7467+	value &= ~DP_SET_POWER_MASK;
7468+	value |= DP_SET_POWER_D3;
7469+
7470+	ret = drm_dp_dpcd_writeb(&dp->aux, DP_SET_POWER, value);
7471+	if (ret < 0)
7472+		return ret;
7473+
7474+	return 0;
7475+}
7476+
7477 static int analogix_dp_commit(struct analogix_dp_device *dp)
7478 {
7479+	struct video_info *video = &dp->video_info;
7480 	int ret;
7481 
7482-	/* Keep the panel disabled while we configure video */
7483-	if (dp->plat_data->panel) {
7484-		if (drm_panel_disable(dp->plat_data->panel))
7485-			DRM_ERROR("failed to disable the panel\n");
7486+	ret = drm_dp_read_dpcd_caps(&dp->aux, dp->dpcd);
7487+	if (ret < 0) {
7488+		dev_err(dp->dev, "failed to read dpcd caps: %d\n", ret);
7489+		return ret;
7490+	}
7491+
7492+	ret = analogix_dp_link_power_up(dp);
7493+	if (ret) {
7494+		dev_err(dp->dev, "failed to power up link: %d\n", ret);
7495+		return ret;
7496 	}
7497 
7498+	if (device_property_read_bool(dp->dev, "panel-self-test"))
7499+		return drm_dp_dpcd_writeb(&dp->aux, DP_EDP_CONFIGURATION_SET,
7500+					  DP_PANEL_SELF_TEST_ENABLE);
7501+
7502 	ret = analogix_dp_train_link(dp);
7503 	if (ret) {
7504 		dev_err(dp->dev, "unable to do link train, ret=%d\n", ret);
7505@@ -959,21 +1063,17 @@ static int analogix_dp_commit(struct analogix_dp_device *dp)
7506 	}
7507 
7508 	analogix_dp_init_video(dp);
7509+	analogix_dp_set_video_format(dp);
7510+
7511+	if (video->video_bist_enable)
7512+		analogix_dp_video_bist_enable(dp);
7513+
7514 	ret = analogix_dp_config_video(dp);
7515 	if (ret) {
7516 		dev_err(dp->dev, "unable to config video\n");
7517 		return ret;
7518 	}
7519 
7520-	/* Safe to enable the panel now */
7521-	if (dp->plat_data->panel) {
7522-		ret = drm_panel_enable(dp->plat_data->panel);
7523-		if (ret) {
7524-			DRM_ERROR("failed to enable the panel\n");
7525-			return ret;
7526-		}
7527-	}
7528-
7529 	/* Check whether panel supports fast training */
7530 	ret = analogix_dp_fast_link_train_detection(dp);
7531 	if (ret)
7532@@ -1058,66 +1158,18 @@ static int analogix_dp_disable_psr(struct analogix_dp_device *dp)
7533 	return analogix_dp_send_psr_spd(dp, &psr_vsc, true);
7534 }
7535 
7536-/*
7537- * This function is a bit of a catch-all for panel preparation, hopefully
7538- * simplifying the logic of functions that need to prepare/unprepare the panel
7539- * below.
7540- *
7541- * If @prepare is true, this function will prepare the panel. Conversely, if it
7542- * is false, the panel will be unprepared.
7543- *
7544- * If @is_modeset_prepare is true, the function will disregard the current state
7545- * of the panel and either prepare/unprepare the panel based on @prepare. Once
7546- * it finishes, it will update dp->panel_is_modeset to reflect the current state
7547- * of the panel.
7548- */
7549-static int analogix_dp_prepare_panel(struct analogix_dp_device *dp,
7550-				     bool prepare, bool is_modeset_prepare)
7551-{
7552-	int ret = 0;
7553-
7554-	if (!dp->plat_data->panel)
7555-		return 0;
7556-
7557-	mutex_lock(&dp->panel_lock);
7558-
7559-	/*
7560-	 * Exit early if this is a temporary prepare/unprepare and we're already
7561-	 * modeset (since we neither want to prepare twice or unprepare early).
7562-	 */
7563-	if (dp->panel_is_modeset && !is_modeset_prepare)
7564-		goto out;
7565-
7566-	if (prepare)
7567-		ret = drm_panel_prepare(dp->plat_data->panel);
7568-	else
7569-		ret = drm_panel_unprepare(dp->plat_data->panel);
7570-
7571-	if (ret)
7572-		goto out;
7573-
7574-	if (is_modeset_prepare)
7575-		dp->panel_is_modeset = prepare;
7576-
7577-out:
7578-	mutex_unlock(&dp->panel_lock);
7579-	return ret;
7580-}
7581-
7582 static int analogix_dp_get_modes(struct drm_connector *connector)
7583 {
7584 	struct analogix_dp_device *dp = to_dp(connector);
7585 	struct edid *edid;
7586-	int ret, num_modes = 0;
7587+	int num_modes = 0;
7588 
7589-	if (dp->plat_data->panel) {
7590+	if (dp->plat_data->panel)
7591 		num_modes += drm_panel_get_modes(dp->plat_data->panel, connector);
7592-	} else {
7593-		ret = analogix_dp_prepare_panel(dp, true, false);
7594-		if (ret) {
7595-			DRM_ERROR("Failed to prepare panel (%d)\n", ret);
7596-			return 0;
7597-		}
7598+
7599+	if (!num_modes) {
7600+		if (dp->plat_data->panel)
7601+			analogix_dp_panel_prepare(dp);
7602 
7603 		pm_runtime_get_sync(dp->dev);
7604 		edid = drm_get_edid(connector, &dp->aux.ddc);
7605@@ -1128,15 +1180,18 @@ static int analogix_dp_get_modes(struct drm_connector *connector)
7606 			num_modes += drm_add_edid_modes(&dp->connector, edid);
7607 			kfree(edid);
7608 		}
7609-
7610-		ret = analogix_dp_prepare_panel(dp, false, false);
7611-		if (ret)
7612-			DRM_ERROR("Failed to unprepare panel (%d)\n", ret);
7613 	}
7614 
7615 	if (dp->plat_data->get_modes)
7616 		num_modes += dp->plat_data->get_modes(dp->plat_data, connector);
7617 
7618+	if (num_modes > 0 && dp->plat_data->split_mode) {
7619+		struct drm_display_mode *mode;
7620+
7621+		list_for_each_entry(mode, &connector->probed_modes, head)
7622+			dp->plat_data->convert_to_split_mode(mode);
7623+	}
7624+
7625 	return num_modes;
7626 }
7627 
7628@@ -1182,34 +1237,52 @@ static const struct drm_connector_helper_funcs analogix_dp_connector_helper_func
7629 };
7630 
7631 static enum drm_connector_status
7632-analogix_dp_detect(struct drm_connector *connector, bool force)
7633+analogix_dp_detect(struct analogix_dp_device *dp)
7634 {
7635-	struct analogix_dp_device *dp = to_dp(connector);
7636 	enum drm_connector_status status = connector_status_disconnected;
7637 	int ret;
7638 
7639 	if (dp->plat_data->panel)
7640-		return connector_status_connected;
7641+		analogix_dp_panel_prepare(dp);
7642 
7643-	ret = analogix_dp_prepare_panel(dp, true, false);
7644-	if (ret) {
7645-		DRM_ERROR("Failed to prepare panel (%d)\n", ret);
7646-		return connector_status_disconnected;
7647-	}
7648+	pm_runtime_get_sync(dp->dev);
7649+
7650+	if (!analogix_dp_detect_hpd(dp)) {
7651+		ret = analogix_dp_get_max_rx_bandwidth(dp, &dp->link_train.link_rate);
7652+		if (ret) {
7653+			dev_err(dp->dev, "failed to read max link rate\n");
7654+			goto out;
7655+		}
7656+
7657+		ret = analogix_dp_get_max_rx_lane_count(dp, &dp->link_train.lane_count);
7658+		if (ret) {
7659+			dev_err(dp->dev, "failed to read max lane count\n");
7660+			goto out;
7661+		}
7662 
7663-	if (!analogix_dp_detect_hpd(dp))
7664 		status = connector_status_connected;
7665+	}
7666 
7667-	ret = analogix_dp_prepare_panel(dp, false, false);
7668-	if (ret)
7669-		DRM_ERROR("Failed to unprepare panel (%d)\n", ret);
7670+out:
7671+	pm_runtime_put(dp->dev);
7672 
7673 	return status;
7674 }
7675 
7676+static enum drm_connector_status
7677+analogix_dp_connector_detect(struct drm_connector *connector, bool force)
7678+{
7679+	struct analogix_dp_device *dp = to_dp(connector);
7680+
7681+	if (dp->plat_data->right && analogix_dp_detect(dp->plat_data->right) != connector_status_connected)
7682+		return connector_status_disconnected;
7683+
7684+	return analogix_dp_detect(dp);
7685+}
7686+
7687 static const struct drm_connector_funcs analogix_dp_connector_funcs = {
7688 	.fill_modes = drm_helper_probe_single_connector_modes,
7689-	.detect = analogix_dp_detect,
7690+	.detect = analogix_dp_connector_detect,
7691 	.destroy = drm_connector_cleanup,
7692 	.reset = drm_atomic_helper_connector_reset,
7693 	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
7694@@ -1224,10 +1297,8 @@ static int analogix_dp_bridge_attach(struct drm_bridge *bridge,
7695 	struct drm_connector *connector = NULL;
7696 	int ret = 0;
7697 
7698-	if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
7699-		DRM_ERROR("Fix bridge driver to make connector optional!");
7700-		return -EINVAL;
7701-	}
7702+	if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)
7703+		return 0;
7704 
7705 	if (!bridge->encoder) {
7706 		DRM_ERROR("Parent encoder object not found");
7707@@ -1268,23 +1339,12 @@ static int analogix_dp_bridge_attach(struct drm_bridge *bridge,
7708 	return 0;
7709 }
7710 
7711-static
7712-struct drm_crtc *analogix_dp_get_old_crtc(struct analogix_dp_device *dp,
7713-					  struct drm_atomic_state *state)
7714+static void analogix_dp_bridge_detach(struct drm_bridge *bridge)
7715 {
7716-	struct drm_encoder *encoder = dp->encoder;
7717-	struct drm_connector *connector;
7718-	struct drm_connector_state *conn_state;
7719-
7720-	connector = drm_atomic_get_old_connector_for_encoder(state, encoder);
7721-	if (!connector)
7722-		return NULL;
7723-
7724-	conn_state = drm_atomic_get_old_connector_state(state, connector);
7725-	if (!conn_state)
7726-		return NULL;
7727+	struct analogix_dp_device *dp = bridge->driver_private;
7728 
7729-	return conn_state->crtc;
7730+	if (dp->plat_data->detach)
7731+		dp->plat_data->detach(dp->plat_data, bridge);
7732 }
7733 
7734 static
7735@@ -1314,20 +1374,20 @@ analogix_dp_bridge_atomic_pre_enable(struct drm_bridge *bridge,
7736 	struct analogix_dp_device *dp = bridge->driver_private;
7737 	struct drm_crtc *crtc;
7738 	struct drm_crtc_state *old_crtc_state;
7739-	int ret;
7740 
7741-	crtc = analogix_dp_get_new_crtc(dp, old_state);
7742-	if (!crtc)
7743-		return;
7744+	if (dp->psr_supported) {
7745+		crtc = analogix_dp_get_new_crtc(dp, old_state);
7746+		if (!crtc)
7747+			return;
7748 
7749-	old_crtc_state = drm_atomic_get_old_crtc_state(old_state, crtc);
7750-	/* Don't touch the panel if we're coming back from PSR */
7751-	if (old_crtc_state && old_crtc_state->self_refresh_active)
7752-		return;
7753+		old_crtc_state = drm_atomic_get_old_crtc_state(old_state, crtc);
7754+		/* Don't touch the panel if we're coming back from PSR */
7755+		if (old_crtc_state && old_crtc_state->self_refresh_active)
7756+			return;
7757+	}
7758 
7759-	ret = analogix_dp_prepare_panel(dp, true, true);
7760-	if (ret)
7761-		DRM_ERROR("failed to setup the panel ret = %d\n", ret);
7762+	if (dp->plat_data->panel)
7763+		analogix_dp_panel_prepare(dp);
7764 }
7765 
7766 static int analogix_dp_set_bridge(struct analogix_dp_device *dp)
7767@@ -1336,16 +1396,10 @@ static int analogix_dp_set_bridge(struct analogix_dp_device *dp)
7768 
7769 	pm_runtime_get_sync(dp->dev);
7770 
7771-	ret = clk_prepare_enable(dp->clock);
7772-	if (ret < 0) {
7773-		DRM_ERROR("Failed to prepare_enable the clock clk [%d]\n", ret);
7774-		goto out_dp_clk_pre;
7775-	}
7776-
7777 	if (dp->plat_data->power_on_start)
7778 		dp->plat_data->power_on_start(dp->plat_data);
7779 
7780-	phy_power_on(dp->phy);
7781+	analogix_dp_phy_power_on(dp);
7782 
7783 	ret = analogix_dp_init_dp(dp);
7784 	if (ret)
7785@@ -1363,11 +1417,14 @@ static int analogix_dp_set_bridge(struct analogix_dp_device *dp)
7786 	}
7787 
7788 	ret = analogix_dp_commit(dp);
7789-	if (ret) {
7790+	if (ret < 0) {
7791 		DRM_ERROR("dp commit error, ret = %d\n", ret);
7792 		goto out_dp_init;
7793 	}
7794 
7795+	if (dp->plat_data->panel)
7796+		drm_panel_enable(dp->plat_data->panel);
7797+
7798 	if (dp->plat_data->power_on_end)
7799 		dp->plat_data->power_on_end(dp->plat_data);
7800 
7801@@ -1375,11 +1432,9 @@ static int analogix_dp_set_bridge(struct analogix_dp_device *dp)
7802 	return 0;
7803 
7804 out_dp_init:
7805-	phy_power_off(dp->phy);
7806+	analogix_dp_phy_power_off(dp);
7807 	if (dp->plat_data->power_off)
7808 		dp->plat_data->power_off(dp->plat_data);
7809-	clk_disable_unprepare(dp->clock);
7810-out_dp_clk_pre:
7811 	pm_runtime_put_sync(dp->dev);
7812 
7813 	return ret;
7814@@ -1396,17 +1451,19 @@ analogix_dp_bridge_atomic_enable(struct drm_bridge *bridge,
7815 	int timeout_loop = 0;
7816 	int ret;
7817 
7818-	crtc = analogix_dp_get_new_crtc(dp, old_state);
7819-	if (!crtc)
7820-		return;
7821+	if (dp->psr_supported) {
7822+		crtc = analogix_dp_get_new_crtc(dp, old_state);
7823+		if (!crtc)
7824+			return;
7825 
7826-	old_crtc_state = drm_atomic_get_old_crtc_state(old_state, crtc);
7827-	/* Not a full enable, just disable PSR and continue */
7828-	if (old_crtc_state && old_crtc_state->self_refresh_active) {
7829-		ret = analogix_dp_disable_psr(dp);
7830-		if (ret)
7831-			DRM_ERROR("Failed to disable psr %d\n", ret);
7832-		return;
7833+		old_crtc_state = drm_atomic_get_old_crtc_state(old_state, crtc);
7834+		/* Not a full enable, just disable PSR and continue */
7835+		if (old_crtc_state && old_crtc_state->self_refresh_active) {
7836+			ret = analogix_dp_disable_psr(dp);
7837+			if (ret)
7838+				DRM_ERROR("Failed to disable psr %d\n", ret);
7839+			return;
7840+		}
7841 	}
7842 
7843 	if (dp->dpms_mode == DRM_MODE_DPMS_ON)
7844@@ -1428,7 +1485,6 @@ analogix_dp_bridge_atomic_enable(struct drm_bridge *bridge,
7845 static void analogix_dp_bridge_disable(struct drm_bridge *bridge)
7846 {
7847 	struct analogix_dp_device *dp = bridge->driver_private;
7848-	int ret;
7849 
7850 	if (dp->dpms_mode != DRM_MODE_DPMS_ON)
7851 		return;
7852@@ -1440,21 +1496,22 @@ static void analogix_dp_bridge_disable(struct drm_bridge *bridge)
7853 		}
7854 	}
7855 
7856+	if (!analogix_dp_get_plug_in_status(dp))
7857+		analogix_dp_link_power_down(dp);
7858+
7859 	disable_irq(dp->irq);
7860 
7861 	if (dp->plat_data->power_off)
7862 		dp->plat_data->power_off(dp->plat_data);
7863 
7864+	analogix_dp_reset_aux(dp);
7865 	analogix_dp_set_analog_power_down(dp, POWER_ALL, 1);
7866-	phy_power_off(dp->phy);
7867-
7868-	clk_disable_unprepare(dp->clock);
7869+	analogix_dp_phy_power_off(dp);
7870 
7871 	pm_runtime_put_sync(dp->dev);
7872 
7873-	ret = analogix_dp_prepare_panel(dp, false, true);
7874-	if (ret)
7875-		DRM_ERROR("failed to setup the panel ret = %d\n", ret);
7876+	if (dp->plat_data->panel)
7877+		analogix_dp_panel_unprepare(dp);
7878 
7879 	dp->fast_train_enable = false;
7880 	dp->psr_supported = false;
7881@@ -1467,16 +1524,14 @@ analogix_dp_bridge_atomic_disable(struct drm_bridge *bridge,
7882 {
7883 	struct drm_atomic_state *old_state = old_bridge_state->base.state;
7884 	struct analogix_dp_device *dp = bridge->driver_private;
7885-	struct drm_crtc *old_crtc, *new_crtc;
7886-	struct drm_crtc_state *old_crtc_state = NULL;
7887+	struct drm_crtc *crtc;
7888 	struct drm_crtc_state *new_crtc_state = NULL;
7889-	int ret;
7890 
7891-	new_crtc = analogix_dp_get_new_crtc(dp, old_state);
7892-	if (!new_crtc)
7893+	crtc = analogix_dp_get_new_crtc(dp, old_state);
7894+	if (!crtc)
7895 		goto out;
7896 
7897-	new_crtc_state = drm_atomic_get_new_crtc_state(old_state, new_crtc);
7898+	new_crtc_state = drm_atomic_get_new_crtc_state(old_state, crtc);
7899 	if (!new_crtc_state)
7900 		goto out;
7901 
7902@@ -1485,19 +1540,6 @@ analogix_dp_bridge_atomic_disable(struct drm_bridge *bridge,
7903 		return;
7904 
7905 out:
7906-	old_crtc = analogix_dp_get_old_crtc(dp, old_state);
7907-	if (old_crtc) {
7908-		old_crtc_state = drm_atomic_get_old_crtc_state(old_state,
7909-							       old_crtc);
7910-
7911-		/* When moving from PSR to fully disabled, exit PSR first. */
7912-		if (old_crtc_state && old_crtc_state->self_refresh_active) {
7913-			ret = analogix_dp_disable_psr(dp);
7914-			if (ret)
7915-				DRM_ERROR("Failed to disable psr (%d)\n", ret);
7916-		}
7917-	}
7918-
7919 	analogix_dp_bridge_disable(bridge);
7920 }
7921 
7922@@ -1526,14 +1568,19 @@ analogix_dp_bridge_atomic_post_disable(struct drm_bridge *bridge,
7923 
7924 static void analogix_dp_bridge_mode_set(struct drm_bridge *bridge,
7925 				const struct drm_display_mode *orig_mode,
7926-				const struct drm_display_mode *mode)
7927+				const struct drm_display_mode *adj_mode)
7928 {
7929 	struct analogix_dp_device *dp = bridge->driver_private;
7930 	struct drm_display_info *display_info = &dp->connector.display_info;
7931 	struct video_info *video = &dp->video_info;
7932+	struct drm_display_mode *mode = &video->mode;
7933 	struct device_node *dp_node = dp->dev->of_node;
7934 	int vic;
7935 
7936+	drm_mode_copy(mode, adj_mode);
7937+	if (dp->plat_data->split_mode)
7938+		dp->plat_data->convert_to_origin_mode(mode);
7939+
7940 	/* Input video interlaces & hsync pol & vsync pol */
7941 	video->interlaced = !!(mode->flags & DRM_MODE_FLAG_INTERLACE);
7942 	video->v_sync_polarity = !!(mode->flags & DRM_MODE_FLAG_NVSYNC);
7943@@ -1601,6 +1648,27 @@ static void analogix_dp_bridge_mode_set(struct drm_bridge *bridge,
7944 		video->interlaced = true;
7945 }
7946 
7947+static enum drm_mode_status
7948+analogix_dp_bridge_mode_valid(struct drm_bridge *bridge,
7949+			      const struct drm_display_info *info,
7950+			      const struct drm_display_mode *mode)
7951+{
7952+	struct analogix_dp_device *dp = bridge->driver_private;
7953+	struct drm_display_mode m;
7954+
7955+	drm_mode_copy(&m, mode);
7956+
7957+	if (dp->plat_data->split_mode)
7958+		dp->plat_data->convert_to_origin_mode(&m);
7959+
7960+	if (!analogix_dp_bandwidth_ok(dp, &m,
7961+				      drm_dp_bw_code_to_link_rate(dp->link_train.link_rate),
7962+				      dp->link_train.lane_count))
7963+		return MODE_BAD;
7964+
7965+	return MODE_OK;
7966+}
7967+
7968 static const struct drm_bridge_funcs analogix_dp_bridge_funcs = {
7969 	.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
7970 	.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
7971@@ -1611,29 +1679,30 @@ static const struct drm_bridge_funcs analogix_dp_bridge_funcs = {
7972 	.atomic_post_disable = analogix_dp_bridge_atomic_post_disable,
7973 	.mode_set = analogix_dp_bridge_mode_set,
7974 	.attach = analogix_dp_bridge_attach,
7975+	.detach = analogix_dp_bridge_detach,
7976+	.mode_valid = analogix_dp_bridge_mode_valid,
7977 };
7978 
7979-static int analogix_dp_create_bridge(struct drm_device *drm_dev,
7980-				     struct analogix_dp_device *dp)
7981+static int analogix_dp_bridge_init(struct analogix_dp_device *dp)
7982 {
7983-	struct drm_bridge *bridge;
7984+	struct drm_bridge *bridge = &dp->bridge;
7985 	int ret;
7986 
7987-	bridge = devm_kzalloc(drm_dev->dev, sizeof(*bridge), GFP_KERNEL);
7988-	if (!bridge) {
7989-		DRM_ERROR("failed to allocate for drm bridge\n");
7990-		return -ENOMEM;
7991+	if (!dp->plat_data->left) {
7992+		ret = drm_bridge_attach(dp->encoder, bridge, NULL, 0);
7993+		if (ret) {
7994+			DRM_ERROR("failed to attach drm bridge\n");
7995+			return ret;
7996+		}
7997 	}
7998 
7999-	dp->bridge = bridge;
8000+	if (dp->plat_data->right) {
8001+		struct analogix_dp_device *secondary = dp->plat_data->right;
8002 
8003-	bridge->driver_private = dp;
8004-	bridge->funcs = &analogix_dp_bridge_funcs;
8005-
8006-	ret = drm_bridge_attach(dp->encoder, bridge, NULL, 0);
8007-	if (ret) {
8008-		DRM_ERROR("failed to attach drm bridge\n");
8009-		return -EINVAL;
8010+		ret = drm_bridge_attach(dp->encoder, &secondary->bridge, bridge,
8011+					DRM_BRIDGE_ATTACH_NO_CONNECTOR);
8012+		if (ret)
8013+			return ret;
8014 	}
8015 
8016 	return 0;
8017@@ -1646,7 +1715,7 @@ static int analogix_dp_dt_parse_pdata(struct analogix_dp_device *dp)
8018 
8019 	switch (dp->plat_data->dev_type) {
8020 	case RK3288_DP:
8021-	case RK3399_EDP:
8022+	case RK3568_EDP:
8023 		/*
8024 		 * As the RK3288 DisplayPort TRM puts it: "Main link
8025 		 * containing 4 physical lanes of 2.7/1.62 Gbps/lane".
8026@@ -1654,6 +1723,11 @@ static int analogix_dp_dt_parse_pdata(struct analogix_dp_device *dp)
8027 		video_info->max_link_rate = 0x0A;
8028 		video_info->max_lane_count = 0x04;
8029 		break;
8030+	case RK3399_EDP:
8031+	case RK3588_EDP:
8032+		video_info->max_link_rate = 0x14;
8033+		video_info->max_lane_count = 0x04;
8034+		break;
8035 	case EXYNOS_DP:
8036 		/*
8037 		 * NOTE: this property parsing code is used for
8038@@ -1666,6 +1740,9 @@ static int analogix_dp_dt_parse_pdata(struct analogix_dp_device *dp)
8039 		break;
8040 	}
8041 
8042+	video_info->video_bist_enable =
8043+		of_property_read_bool(dp_node, "analogix,video-bist-enable");
8044+
8045 	return 0;
8046 }
8047 
8048@@ -1673,20 +1750,69 @@ static ssize_t analogix_dpaux_transfer(struct drm_dp_aux *aux,
8049 				       struct drm_dp_aux_msg *msg)
8050 {
8051 	struct analogix_dp_device *dp = to_dp(aux);
8052+
8053+	return analogix_dp_transfer(dp, msg);
8054+}
8055+
8056+int analogix_dp_audio_hw_params(struct analogix_dp_device *dp,
8057+				struct hdmi_codec_daifmt *daifmt,
8058+				struct hdmi_codec_params *params)
8059+{
8060+	switch (daifmt->fmt) {
8061+	case HDMI_SPDIF:
8062+		analogix_dp_audio_config_spdif(dp);
8063+		break;
8064+	case HDMI_I2S:
8065+		analogix_dp_audio_config_i2s(dp);
8066+		break;
8067+	default:
8068+		DRM_DEV_ERROR(dp->dev, "invalid daifmt %d\n", daifmt->fmt);
8069+		return -EINVAL;
8070+	}
8071+
8072+	return 0;
8073+}
8074+EXPORT_SYMBOL_GPL(analogix_dp_audio_hw_params);
8075+
8076+void analogix_dp_audio_shutdown(struct analogix_dp_device *dp)
8077+{
8078+	analogix_dp_audio_disable(dp);
8079+}
8080+EXPORT_SYMBOL_GPL(analogix_dp_audio_shutdown);
8081+
8082+int analogix_dp_audio_startup(struct analogix_dp_device *dp)
8083+{
8084+	analogix_dp_audio_enable(dp);
8085+
8086+	return 0;
8087+}
8088+EXPORT_SYMBOL_GPL(analogix_dp_audio_startup);
8089+
8090+int analogix_dp_audio_get_eld(struct analogix_dp_device *dp, u8 *buf, size_t len)
8091+{
8092+	memcpy(buf, dp->connector.eld, min(sizeof(dp->connector.eld), len));
8093+
8094+	return 0;
8095+}
8096+EXPORT_SYMBOL_GPL(analogix_dp_audio_get_eld);
8097+
8098+int analogix_dp_loader_protect(struct analogix_dp_device *dp)
8099+{
8100 	int ret;
8101 
8102-	pm_runtime_get_sync(dp->dev);
8103+	ret = pm_runtime_resume_and_get(dp->dev);
8104+	if (ret) {
8105+		dev_err(dp->dev, "failed to get runtime PM: %d\n", ret);
8106+		return ret;
8107+	}
8108 
8109-	ret = analogix_dp_detect_hpd(dp);
8110-	if (ret)
8111-		goto out;
8112+	analogix_dp_phy_power_on(dp);
8113 
8114-	ret = analogix_dp_transfer(dp, msg);
8115-out:
8116-	pm_runtime_put(dp->dev);
8117+	dp->dpms_mode = DRM_MODE_DPMS_ON;
8118 
8119-	return ret;
8120+	return 0;
8121 }
8122+EXPORT_SYMBOL_GPL(analogix_dp_loader_protect);
8123 
8124 struct analogix_dp_device *
8125 analogix_dp_probe(struct device *dev, struct analogix_dp_plat_data *plat_data)
8126@@ -1694,7 +1820,6 @@ analogix_dp_probe(struct device *dev, struct analogix_dp_plat_data *plat_data)
8127 	struct platform_device *pdev = to_platform_device(dev);
8128 	struct analogix_dp_device *dp;
8129 	struct resource *res;
8130-	unsigned int irq_flags;
8131 	int ret;
8132 
8133 	if (!plat_data) {
8134@@ -1710,7 +1835,7 @@ analogix_dp_probe(struct device *dev, struct analogix_dp_plat_data *plat_data)
8135 	dp->dpms_mode = DRM_MODE_DPMS_OFF;
8136 
8137 	mutex_init(&dp->panel_lock);
8138-	dp->panel_is_modeset = false;
8139+	dp->panel_is_prepared = false;
8140 
8141 	/*
8142 	 * platform dp driver needs to container_of() the plat_data to get
8143@@ -1739,21 +1864,19 @@ analogix_dp_probe(struct device *dev, struct analogix_dp_plat_data *plat_data)
8144 		}
8145 	}
8146 
8147-	dp->clock = devm_clk_get(&pdev->dev, "dp");
8148-	if (IS_ERR(dp->clock)) {
8149-		dev_err(&pdev->dev, "failed to get clock\n");
8150-		return ERR_CAST(dp->clock);
8151+	ret = devm_clk_bulk_get_all(dev, &dp->clks);
8152+	if (ret < 0) {
8153+		dev_err(dev, "failed to get clocks %d\n", ret);
8154+		return ERR_PTR(ret);
8155 	}
8156 
8157-	clk_prepare_enable(dp->clock);
8158+	dp->nr_clks = ret;
8159 
8160 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
8161 
8162 	dp->reg_base = devm_ioremap_resource(&pdev->dev, res);
8163-	if (IS_ERR(dp->reg_base)) {
8164-		ret = PTR_ERR(dp->reg_base);
8165-		goto err_disable_clk;
8166-	}
8167+	if (IS_ERR(dp->reg_base))
8168+		return ERR_CAST(dp->reg_base);
8169 
8170 	dp->force_hpd = of_property_read_bool(dev->of_node, "force-hpd");
8171 
8172@@ -1765,46 +1888,44 @@ analogix_dp_probe(struct device *dev, struct analogix_dp_plat_data *plat_data)
8173 	if (IS_ERR(dp->hpd_gpiod)) {
8174 		dev_err(dev, "error getting HPD GPIO: %ld\n",
8175 			PTR_ERR(dp->hpd_gpiod));
8176-		ret = PTR_ERR(dp->hpd_gpiod);
8177-		goto err_disable_clk;
8178+		return ERR_CAST(dp->hpd_gpiod);
8179 	}
8180 
8181 	if (dp->hpd_gpiod) {
8182-		/*
8183-		 * Set up the hotplug GPIO from the device tree as an interrupt.
8184-		 * Simply specifying a different interrupt in the device tree
8185-		 * doesn't work since we handle hotplug rather differently when
8186-		 * using a GPIO.  We also need the actual GPIO specifier so
8187-		 * that we can get the current state of the GPIO.
8188-		 */
8189-		dp->irq = gpiod_to_irq(dp->hpd_gpiod);
8190-		irq_flags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING;
8191-	} else {
8192-		dp->irq = platform_get_irq(pdev, 0);
8193-		irq_flags = 0;
8194+		ret = devm_request_threaded_irq(dev,
8195+						gpiod_to_irq(dp->hpd_gpiod),
8196+						NULL,
8197+						analogix_dp_hpd_irq_handler,
8198+						IRQF_TRIGGER_RISING |
8199+						IRQF_TRIGGER_FALLING |
8200+						IRQF_ONESHOT,
8201+						"analogix-hpd", dp);
8202+		if (ret) {
8203+			dev_err(dev, "failed to request hpd IRQ: %d\n", ret);
8204+			return ERR_PTR(ret);
8205+		}
8206 	}
8207 
8208+	dp->irq = platform_get_irq(pdev, 0);
8209 	if (dp->irq == -ENXIO) {
8210 		dev_err(&pdev->dev, "failed to get irq\n");
8211-		ret = -ENODEV;
8212-		goto err_disable_clk;
8213+		return ERR_PTR(-ENODEV);
8214 	}
8215 
8216+	irq_set_status_flags(dp->irq, IRQ_NOAUTOEN);
8217 	ret = devm_request_threaded_irq(&pdev->dev, dp->irq,
8218 					analogix_dp_hardirq,
8219 					analogix_dp_irq_thread,
8220-					irq_flags, "analogix-dp", dp);
8221+					0, "analogix-dp", dp);
8222 	if (ret) {
8223 		dev_err(&pdev->dev, "failed to request irq\n");
8224-		goto err_disable_clk;
8225+		return ERR_PTR(ret);
8226 	}
8227-	disable_irq(dp->irq);
8228 
8229-	return dp;
8230+	dp->bridge.driver_private = dp;
8231+	dp->bridge.funcs = &analogix_dp_bridge_funcs;
8232 
8233-err_disable_clk:
8234-	clk_disable_unprepare(dp->clock);
8235-	return ERR_PTR(ret);
8236+	return dp;
8237 }
8238 EXPORT_SYMBOL_GPL(analogix_dp_probe);
8239 
8240@@ -1825,9 +1946,9 @@ int analogix_dp_bind(struct analogix_dp_device *dp, struct drm_device *drm_dev)
8241 
8242 	pm_runtime_enable(dp->dev);
8243 
8244-	ret = analogix_dp_create_bridge(drm_dev, dp);
8245+	ret = analogix_dp_bridge_init(dp);
8246 	if (ret) {
8247-		DRM_ERROR("failed to create bridge (%d)\n", ret);
8248+		DRM_ERROR("failed to init bridge (%d)\n", ret);
8249 		goto err_disable_pm_runtime;
8250 	}
8251 
8252@@ -1842,14 +1963,7 @@ EXPORT_SYMBOL_GPL(analogix_dp_bind);
8253 
8254 void analogix_dp_unbind(struct analogix_dp_device *dp)
8255 {
8256-	analogix_dp_bridge_disable(dp->bridge);
8257 	dp->connector.funcs->destroy(&dp->connector);
8258-
8259-	if (dp->plat_data->panel) {
8260-		if (drm_panel_unprepare(dp->plat_data->panel))
8261-			DRM_ERROR("failed to turnoff the panel\n");
8262-	}
8263-
8264 	drm_dp_aux_unregister(&dp->aux);
8265 	pm_runtime_disable(dp->dev);
8266 }
8267@@ -1857,32 +1971,22 @@ EXPORT_SYMBOL_GPL(analogix_dp_unbind);
8268 
8269 void analogix_dp_remove(struct analogix_dp_device *dp)
8270 {
8271-	clk_disable_unprepare(dp->clock);
8272 }
8273 EXPORT_SYMBOL_GPL(analogix_dp_remove);
8274 
8275-#ifdef CONFIG_PM
8276-int analogix_dp_suspend(struct analogix_dp_device *dp)
8277+int analogix_dp_runtime_suspend(struct analogix_dp_device *dp)
8278 {
8279-	clk_disable_unprepare(dp->clock);
8280+	clk_bulk_disable_unprepare(dp->nr_clks, dp->clks);
8281+
8282 	return 0;
8283 }
8284-EXPORT_SYMBOL_GPL(analogix_dp_suspend);
8285+EXPORT_SYMBOL_GPL(analogix_dp_runtime_suspend);
8286 
8287-int analogix_dp_resume(struct analogix_dp_device *dp)
8288+int analogix_dp_runtime_resume(struct analogix_dp_device *dp)
8289 {
8290-	int ret;
8291-
8292-	ret = clk_prepare_enable(dp->clock);
8293-	if (ret < 0) {
8294-		DRM_ERROR("Failed to prepare_enable the clock clk [%d]\n", ret);
8295-		return ret;
8296-	}
8297-
8298-	return 0;
8299+	return clk_bulk_prepare_enable(dp->nr_clks, dp->clks);
8300 }
8301-EXPORT_SYMBOL_GPL(analogix_dp_resume);
8302-#endif
8303+EXPORT_SYMBOL_GPL(analogix_dp_runtime_resume);
8304 
8305 int analogix_dp_start_crc(struct drm_connector *connector)
8306 {
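analogix_dp_bandwidth_ok() above (also used by the new .mode_valid hook) compares the stream's data rate against the link's payload capacity, both in kB/s: drm_display_mode.clock is the pixel clock in kHz, and the value returned by drm_dp_bw_code_to_link_rate() conveniently equals the per-lane payload bandwidth in kB/s once 8b/10b overhead is accounted for. A standalone sketch of the same arithmetic with a worked example (the helper fixes bpp at 24):

	#include <stdbool.h>
	#include <stdio.h>

	/*
	 * Standalone illustration of the check performed by
	 * analogix_dp_bandwidth_ok(); units are kB/s on both sides.
	 */
	static bool bandwidth_ok(unsigned int pixel_clock_khz, unsigned int bpp,
				 unsigned int link_rate, unsigned int lanes)
	{
		unsigned int req_bw = pixel_clock_khz * bpp / 8; /* video payload */
		unsigned int max_bw = lanes * link_rate;         /* link payload  */

		return req_bw <= max_bw;
	}

	int main(void)
	{
		/*
		 * 1920x1080@60 has a 148500 kHz pixel clock; at 24 bpp it needs
		 * 148500 * 24 / 8 = 445500 kB/s.  Two lanes at 2.7 Gbps provide
		 * 2 * 270000 = 540000 kB/s, so the mode fits.
		 */
		printf("2 lanes @ 2.7G : %s\n",
		       bandwidth_ok(148500, 24, 270000, 2) ? "ok" : "too slow");
		/* One lane at 1.62 Gbps (162000 kB/s) is not enough. */
		printf("1 lane  @ 1.62G: %s\n",
		       bandwidth_ok(148500, 24, 162000, 1) ? "ok" : "too slow");
		return 0;
	}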
8307diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h
8308index c051502d7..804a87d59 100644
8309--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h
8310+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h
8311@@ -10,6 +10,7 @@
8312 #define _ANALOGIX_DP_CORE_H
8313 
8314 #include <drm/drm_crtc.h>
8315+#include <drm/drm_bridge.h>
8316 #include <drm/drm_dp_helper.h>
8317 
8318 #define DP_TIMEOUT_LOOP_COUNT 100
8319@@ -69,6 +70,7 @@ enum pattern_set {
8320 	D10_2,
8321 	TRAINING_PTN1,
8322 	TRAINING_PTN2,
8323+	TRAINING_PTN3,
8324 	DP_NONE
8325 };
8326 
8327@@ -129,6 +131,7 @@ enum dp_irq_type {
8328 
8329 struct video_info {
8330 	char *name;
8331+	struct drm_display_mode mode;
8332 
8333 	bool h_sync_polarity;
8334 	bool v_sync_polarity;
8335@@ -141,6 +144,8 @@ struct video_info {
8336 
8337 	int max_link_rate;
8338 	enum link_lane_count_type max_lane_count;
8339+
8340+	bool video_bist_enable;
8341 };
8342 
8343 struct link_train {
8344@@ -150,6 +155,8 @@ struct link_train {
8345 	u8 link_rate;
8346 	u8 lane_count;
8347 	u8 training_lane[4];
8348+	bool ssc;
8349+	bool enhanced_framing;
8350 
8351 	enum link_training_state lt_state;
8352 };
8353@@ -159,15 +166,17 @@ struct analogix_dp_device {
8354 	struct device		*dev;
8355 	struct drm_device	*drm_dev;
8356 	struct drm_connector	connector;
8357-	struct drm_bridge	*bridge;
8358+	struct drm_bridge	bridge;
8359 	struct drm_dp_aux       aux;
8360-	struct clk		*clock;
8361+	struct clk_bulk_data	*clks;
8362+	int			nr_clks;
8363 	unsigned int		irq;
8364 	void __iomem		*reg_base;
8365 
8366 	struct video_info	video_info;
8367 	struct link_train	link_train;
8368 	struct phy		*phy;
8369+	bool			phy_enabled;
8370 	int			dpms_mode;
8371 	struct gpio_desc	*hpd_gpiod;
8372 	bool                    force_hpd;
8373@@ -175,8 +184,9 @@ struct analogix_dp_device {
8374 	bool			psr_supported;
8375 
8376 	struct mutex		panel_lock;
8377-	bool			panel_is_modeset;
8378+	bool			panel_is_prepared;
8379 
8380+	u8 dpcd[DP_RECEIVER_CAP_SIZE];
8381 	struct analogix_dp_plat_data *plat_data;
8382 };
8383 
8384@@ -213,26 +223,8 @@ void analogix_dp_enable_enhanced_mode(struct analogix_dp_device *dp,
8385 				      bool enable);
8386 void analogix_dp_set_training_pattern(struct analogix_dp_device *dp,
8387 				      enum pattern_set pattern);
8388-void analogix_dp_set_lane0_pre_emphasis(struct analogix_dp_device *dp,
8389-					u32 level);
8390-void analogix_dp_set_lane1_pre_emphasis(struct analogix_dp_device *dp,
8391-					u32 level);
8392-void analogix_dp_set_lane2_pre_emphasis(struct analogix_dp_device *dp,
8393-					u32 level);
8394-void analogix_dp_set_lane3_pre_emphasis(struct analogix_dp_device *dp,
8395-					u32 level);
8396-void analogix_dp_set_lane0_link_training(struct analogix_dp_device *dp,
8397-					 u32 training_lane);
8398-void analogix_dp_set_lane1_link_training(struct analogix_dp_device *dp,
8399-					 u32 training_lane);
8400-void analogix_dp_set_lane2_link_training(struct analogix_dp_device *dp,
8401-					 u32 training_lane);
8402-void analogix_dp_set_lane3_link_training(struct analogix_dp_device *dp,
8403-					 u32 training_lane);
8404-u32 analogix_dp_get_lane0_link_training(struct analogix_dp_device *dp);
8405-u32 analogix_dp_get_lane1_link_training(struct analogix_dp_device *dp);
8406-u32 analogix_dp_get_lane2_link_training(struct analogix_dp_device *dp);
8407-u32 analogix_dp_get_lane3_link_training(struct analogix_dp_device *dp);
8408+void analogix_dp_set_lane_link_training(struct analogix_dp_device *dp);
8409+u32 analogix_dp_get_lane_link_training(struct analogix_dp_device *dp, u8 lane);
8410 void analogix_dp_reset_macro(struct analogix_dp_device *dp);
8411 void analogix_dp_init_video(struct analogix_dp_device *dp);
8412 
8413@@ -255,5 +247,14 @@ int analogix_dp_send_psr_spd(struct analogix_dp_device *dp,
8414 			     struct dp_sdp *vsc, bool blocking);
8415 ssize_t analogix_dp_transfer(struct analogix_dp_device *dp,
8416 			     struct drm_dp_aux_msg *msg);
8417+void analogix_dp_set_video_format(struct analogix_dp_device *dp);
8418+void analogix_dp_video_bist_enable(struct analogix_dp_device *dp);
8419+bool analogix_dp_ssc_supported(struct analogix_dp_device *dp);
8420+void analogix_dp_phy_power_on(struct analogix_dp_device *dp);
8421+void analogix_dp_phy_power_off(struct analogix_dp_device *dp);
8422+void analogix_dp_audio_config_spdif(struct analogix_dp_device *dp);
8423+void analogix_dp_audio_config_i2s(struct analogix_dp_device *dp);
8424+void analogix_dp_audio_enable(struct analogix_dp_device *dp);
8425+void analogix_dp_audio_disable(struct analogix_dp_device *dp);
8426 
8427 #endif /* _ANALOGIX_DP_CORE_H */
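The audio helpers exported from the core (analogix_dp_audio_hw_params(), analogix_dp_audio_startup(), analogix_dp_audio_shutdown(), analogix_dp_audio_get_eld()) are meant to be wired up by the platform glue driver. One way that could look is through ASoC's hdmi-codec binding; the wrapper names and pdata values below are assumptions for illustration, not part of the patch:

	#include <linux/err.h>
	#include <linux/platform_device.h>
	#include <sound/hdmi-codec.h>
	#include <drm/bridge/analogix_dp.h>

	static int dp_audio_hw_params(struct device *dev, void *data,
				      struct hdmi_codec_daifmt *daifmt,
				      struct hdmi_codec_params *params)
	{
		return analogix_dp_audio_hw_params(data, daifmt, params);
	}

	static int dp_audio_startup(struct device *dev, void *data)
	{
		return analogix_dp_audio_startup(data);
	}

	static void dp_audio_shutdown(struct device *dev, void *data)
	{
		analogix_dp_audio_shutdown(data);
	}

	static int dp_audio_get_eld(struct device *dev, void *data,
				    u8 *buf, size_t len)
	{
		return analogix_dp_audio_get_eld(data, buf, len);
	}

	static const struct hdmi_codec_ops dp_audio_codec_ops = {
		.hw_params	= dp_audio_hw_params,
		.audio_startup	= dp_audio_startup,
		.audio_shutdown	= dp_audio_shutdown,
		.get_eld	= dp_audio_get_eld,
	};

	/* Register an "hdmi-audio-codec" child so ASoC can reach the DP audio path. */
	static int dp_register_audio_codec(struct device *dev,
					   struct analogix_dp_device *dp)
	{
		struct hdmi_codec_pdata codec_data = {
			.ops = &dp_audio_codec_ops,
			.i2s = 1,
			.spdif = 1,
			.max_i2s_channels = 8,	/* assumed; depends on the SoC glue */
			.data = dp,
		};

		return PTR_ERR_OR_ZERO(platform_device_register_data(dev,
					HDMI_CODEC_DRV_NAME, PLATFORM_DEVID_AUTO,
					&codec_data, sizeof(codec_data)));
	}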
8428diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
8429index cab3f5c4e..e76c66c7c 100644
8430--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
8431+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
8432@@ -11,6 +11,7 @@
8433 #include <linux/gpio/consumer.h>
8434 #include <linux/io.h>
8435 #include <linux/iopoll.h>
8436+#include <linux/phy/phy.h>
8437 
8438 #include <drm/bridge/analogix_dp.h>
8439 
8440@@ -21,20 +22,37 @@
8441 #define COMMON_INT_MASK_2	0
8442 #define COMMON_INT_MASK_3	0
8443 #define COMMON_INT_MASK_4	(HOTPLUG_CHG | HPD_LOST | PLUG)
8444-#define INT_STA_MASK		INT_HPD
8445+
8446+static void analogix_dp_write(struct analogix_dp_device *dp, u32 reg, u32 val)
8447+{
8448+	if (dp->plat_data && is_rockchip(dp->plat_data->dev_type)) {
8449+		readl(dp->reg_base);
8450+		writel(val, dp->reg_base + reg);
8451+	}
8452+
8453+	writel(val, dp->reg_base + reg);
8454+}
8455+
8456+static u32 analogix_dp_read(struct analogix_dp_device *dp, u32 reg)
8457+{
8458+	if (dp->plat_data && is_rockchip(dp->plat_data->dev_type))
8459+		readl(dp->reg_base + reg);
8460+
8461+	return readl(dp->reg_base + reg);
8462+}
8463 
8464 void analogix_dp_enable_video_mute(struct analogix_dp_device *dp, bool enable)
8465 {
8466 	u32 reg;
8467 
8468 	if (enable) {
8469-		reg = readl(dp->reg_base + ANALOGIX_DP_VIDEO_CTL_1);
8470+		reg = analogix_dp_read(dp, ANALOGIX_DP_VIDEO_CTL_1);
8471 		reg |= HDCP_VIDEO_MUTE;
8472-		writel(reg, dp->reg_base + ANALOGIX_DP_VIDEO_CTL_1);
8473+		analogix_dp_write(dp, ANALOGIX_DP_VIDEO_CTL_1, reg);
8474 	} else {
8475-		reg = readl(dp->reg_base + ANALOGIX_DP_VIDEO_CTL_1);
8476+		reg = analogix_dp_read(dp, ANALOGIX_DP_VIDEO_CTL_1);
8477 		reg &= ~HDCP_VIDEO_MUTE;
8478-		writel(reg, dp->reg_base + ANALOGIX_DP_VIDEO_CTL_1);
8479+		analogix_dp_write(dp, ANALOGIX_DP_VIDEO_CTL_1, reg);
8480 	}
8481 }
8482 
8483@@ -42,9 +60,9 @@ void analogix_dp_stop_video(struct analogix_dp_device *dp)
8484 {
8485 	u32 reg;
8486 
8487-	reg = readl(dp->reg_base + ANALOGIX_DP_VIDEO_CTL_1);
8488+	reg = analogix_dp_read(dp, ANALOGIX_DP_VIDEO_CTL_1);
8489 	reg &= ~VIDEO_EN;
8490-	writel(reg, dp->reg_base + ANALOGIX_DP_VIDEO_CTL_1);
8491+	analogix_dp_write(dp, ANALOGIX_DP_VIDEO_CTL_1, reg);
8492 }
8493 
8494 void analogix_dp_lane_swap(struct analogix_dp_device *dp, bool enable)
8495@@ -58,7 +76,7 @@ void analogix_dp_lane_swap(struct analogix_dp_device *dp, bool enable)
8496 		reg = LANE3_MAP_LOGIC_LANE_3 | LANE2_MAP_LOGIC_LANE_2 |
8497 		      LANE1_MAP_LOGIC_LANE_1 | LANE0_MAP_LOGIC_LANE_0;
8498 
8499-	writel(reg, dp->reg_base + ANALOGIX_DP_LANE_MAP);
8500+	analogix_dp_write(dp, ANALOGIX_DP_LANE_MAP, reg);
8501 }
8502 
8503 void analogix_dp_init_analog_param(struct analogix_dp_device *dp)
8504@@ -66,53 +84,54 @@ void analogix_dp_init_analog_param(struct analogix_dp_device *dp)
8505 	u32 reg;
8506 
8507 	reg = TX_TERMINAL_CTRL_50_OHM;
8508-	writel(reg, dp->reg_base + ANALOGIX_DP_ANALOG_CTL_1);
8509+	analogix_dp_write(dp, ANALOGIX_DP_ANALOG_CTL_1, reg);
8510 
8511 	reg = SEL_24M | TX_DVDD_BIT_1_0625V;
8512-	writel(reg, dp->reg_base + ANALOGIX_DP_ANALOG_CTL_2);
8513+	analogix_dp_write(dp, ANALOGIX_DP_ANALOG_CTL_2, reg);
8514 
8515 	if (dp->plat_data && is_rockchip(dp->plat_data->dev_type)) {
8516 		reg = REF_CLK_24M;
8517 		if (dp->plat_data->dev_type == RK3288_DP)
8518 			reg ^= REF_CLK_MASK;
8519 
8520-		writel(reg, dp->reg_base + ANALOGIX_DP_PLL_REG_1);
8521-		writel(0x95, dp->reg_base + ANALOGIX_DP_PLL_REG_2);
8522-		writel(0x40, dp->reg_base + ANALOGIX_DP_PLL_REG_3);
8523-		writel(0x58, dp->reg_base + ANALOGIX_DP_PLL_REG_4);
8524-		writel(0x22, dp->reg_base + ANALOGIX_DP_PLL_REG_5);
8525+		analogix_dp_write(dp, ANALOGIX_DP_PLL_REG_1, reg);
8526+		analogix_dp_write(dp, ANALOGIX_DP_PLL_REG_2, 0x99);
8527+		analogix_dp_write(dp, ANALOGIX_DP_PLL_REG_3, 0x40);
8528+		analogix_dp_write(dp, ANALOGIX_DP_PLL_REG_4, 0x58);
8529+		analogix_dp_write(dp, ANALOGIX_DP_PLL_REG_5, 0x22);
8530+		analogix_dp_write(dp, ANALOGIX_DP_BIAS, 0x44);
8531 	}
8532 
8533 	reg = DRIVE_DVDD_BIT_1_0625V | VCO_BIT_600_MICRO;
8534-	writel(reg, dp->reg_base + ANALOGIX_DP_ANALOG_CTL_3);
8535+	analogix_dp_write(dp, ANALOGIX_DP_ANALOG_CTL_3, reg);
8536 
8537 	reg = PD_RING_OSC | AUX_TERMINAL_CTRL_50_OHM |
8538 		TX_CUR1_2X | TX_CUR_16_MA;
8539-	writel(reg, dp->reg_base + ANALOGIX_DP_PLL_FILTER_CTL_1);
8540+	analogix_dp_write(dp, ANALOGIX_DP_PLL_FILTER_CTL_1, reg);
8541 
8542 	reg = CH3_AMP_400_MV | CH2_AMP_400_MV |
8543 		CH1_AMP_400_MV | CH0_AMP_400_MV;
8544-	writel(reg, dp->reg_base + ANALOGIX_DP_TX_AMP_TUNING_CTL);
8545+	analogix_dp_write(dp, ANALOGIX_DP_TX_AMP_TUNING_CTL, reg);
8546 }
8547 
8548 void analogix_dp_init_interrupt(struct analogix_dp_device *dp)
8549 {
8550 	/* Set interrupt pin assertion polarity as high */
8551-	writel(INT_POL1 | INT_POL0, dp->reg_base + ANALOGIX_DP_INT_CTL);
8552+	analogix_dp_write(dp, ANALOGIX_DP_INT_CTL, INT_POL1 | INT_POL0);
8553 
8554 	/* Clear pending registers */
8555-	writel(0xff, dp->reg_base + ANALOGIX_DP_COMMON_INT_STA_1);
8556-	writel(0x4f, dp->reg_base + ANALOGIX_DP_COMMON_INT_STA_2);
8557-	writel(0xe0, dp->reg_base + ANALOGIX_DP_COMMON_INT_STA_3);
8558-	writel(0xe7, dp->reg_base + ANALOGIX_DP_COMMON_INT_STA_4);
8559-	writel(0x63, dp->reg_base + ANALOGIX_DP_INT_STA);
8560+	analogix_dp_write(dp, ANALOGIX_DP_COMMON_INT_STA_1, 0xff);
8561+	analogix_dp_write(dp, ANALOGIX_DP_COMMON_INT_STA_2, 0x4f);
8562+	analogix_dp_write(dp, ANALOGIX_DP_COMMON_INT_STA_3, 0xe0);
8563+	analogix_dp_write(dp, ANALOGIX_DP_COMMON_INT_STA_4, 0xe7);
8564+	analogix_dp_write(dp, ANALOGIX_DP_INT_STA, 0x63);
8565 
8566 	/* 0: mask, 1: unmask */
8567-	writel(0x00, dp->reg_base + ANALOGIX_DP_COMMON_INT_MASK_1);
8568-	writel(0x00, dp->reg_base + ANALOGIX_DP_COMMON_INT_MASK_2);
8569-	writel(0x00, dp->reg_base + ANALOGIX_DP_COMMON_INT_MASK_3);
8570-	writel(0x00, dp->reg_base + ANALOGIX_DP_COMMON_INT_MASK_4);
8571-	writel(0x00, dp->reg_base + ANALOGIX_DP_INT_STA_MASK);
8572+	analogix_dp_write(dp, ANALOGIX_DP_COMMON_INT_MASK_1, 0x00);
8573+	analogix_dp_write(dp, ANALOGIX_DP_COMMON_INT_MASK_2, 0x00);
8574+	analogix_dp_write(dp, ANALOGIX_DP_COMMON_INT_MASK_3, 0x00);
8575+	analogix_dp_write(dp, ANALOGIX_DP_COMMON_INT_MASK_4, 0x00);
8576+	analogix_dp_write(dp, ANALOGIX_DP_INT_STA_MASK, 0x00);
8577 }
8578 
8579 void analogix_dp_reset(struct analogix_dp_device *dp)
8580@@ -130,44 +149,44 @@ void analogix_dp_reset(struct analogix_dp_device *dp)
8581 			AUD_FIFO_FUNC_EN_N | AUD_FUNC_EN_N |
8582 			HDCP_FUNC_EN_N | SW_FUNC_EN_N;
8583 
8584-	writel(reg, dp->reg_base + ANALOGIX_DP_FUNC_EN_1);
8585+	analogix_dp_write(dp, ANALOGIX_DP_FUNC_EN_1, reg);
8586 
8587 	reg = SSC_FUNC_EN_N | AUX_FUNC_EN_N |
8588 		SERDES_FIFO_FUNC_EN_N |
8589 		LS_CLK_DOMAIN_FUNC_EN_N;
8590-	writel(reg, dp->reg_base + ANALOGIX_DP_FUNC_EN_2);
8591+	analogix_dp_write(dp, ANALOGIX_DP_FUNC_EN_2, reg);
8592 
8593 	usleep_range(20, 30);
8594 
8595 	analogix_dp_lane_swap(dp, 0);
8596 
8597-	writel(0x0, dp->reg_base + ANALOGIX_DP_SYS_CTL_1);
8598-	writel(0x40, dp->reg_base + ANALOGIX_DP_SYS_CTL_2);
8599-	writel(0x0, dp->reg_base + ANALOGIX_DP_SYS_CTL_3);
8600-	writel(0x0, dp->reg_base + ANALOGIX_DP_SYS_CTL_4);
8601+	analogix_dp_write(dp, ANALOGIX_DP_SYS_CTL_1, 0x0);
8602+	analogix_dp_write(dp, ANALOGIX_DP_SYS_CTL_2, 0x40);
8603+	analogix_dp_write(dp, ANALOGIX_DP_SYS_CTL_3, 0x0);
8604+	analogix_dp_write(dp, ANALOGIX_DP_SYS_CTL_4, 0x0);
8605 
8606-	writel(0x0, dp->reg_base + ANALOGIX_DP_PKT_SEND_CTL);
8607-	writel(0x0, dp->reg_base + ANALOGIX_DP_HDCP_CTL);
8608+	analogix_dp_write(dp, ANALOGIX_DP_PKT_SEND_CTL, 0x0);
8609+	analogix_dp_write(dp, ANALOGIX_DP_HDCP_CTL, 0x0);
8610 
8611-	writel(0x5e, dp->reg_base + ANALOGIX_DP_HPD_DEGLITCH_L);
8612-	writel(0x1a, dp->reg_base + ANALOGIX_DP_HPD_DEGLITCH_H);
8613+	analogix_dp_write(dp, ANALOGIX_DP_HPD_DEGLITCH_L, 0x5e);
8614+	analogix_dp_write(dp, ANALOGIX_DP_HPD_DEGLITCH_H, 0x1a);
8615 
8616-	writel(0x10, dp->reg_base + ANALOGIX_DP_LINK_DEBUG_CTL);
8617+	analogix_dp_write(dp, ANALOGIX_DP_LINK_DEBUG_CTL, 0x10);
8618 
8619-	writel(0x0, dp->reg_base + ANALOGIX_DP_PHY_TEST);
8620+	analogix_dp_write(dp, ANALOGIX_DP_PHY_TEST, 0x0);
8621 
8622-	writel(0x0, dp->reg_base + ANALOGIX_DP_VIDEO_FIFO_THRD);
8623-	writel(0x20, dp->reg_base + ANALOGIX_DP_AUDIO_MARGIN);
8624+	analogix_dp_write(dp, ANALOGIX_DP_VIDEO_FIFO_THRD, 0x0);
8625+	analogix_dp_write(dp, ANALOGIX_DP_AUDIO_MARGIN, 0x20);
8626 
8627-	writel(0x4, dp->reg_base + ANALOGIX_DP_M_VID_GEN_FILTER_TH);
8628-	writel(0x2, dp->reg_base + ANALOGIX_DP_M_AUD_GEN_FILTER_TH);
8629+	analogix_dp_write(dp, ANALOGIX_DP_M_VID_GEN_FILTER_TH, 0x4);
8630+	analogix_dp_write(dp, ANALOGIX_DP_M_AUD_GEN_FILTER_TH, 0x2);
8631 
8632-	writel(0x00000101, dp->reg_base + ANALOGIX_DP_SOC_GENERAL_CTL);
8633+	analogix_dp_write(dp, ANALOGIX_DP_SOC_GENERAL_CTL, 0x00000101);
8634 }
8635 
8636 void analogix_dp_swreset(struct analogix_dp_device *dp)
8637 {
8638-	writel(RESET_DP_TX, dp->reg_base + ANALOGIX_DP_TX_SW_RESET);
8639+	analogix_dp_write(dp, ANALOGIX_DP_TX_SW_RESET, RESET_DP_TX);
8640 }
8641 
8642 void analogix_dp_config_interrupt(struct analogix_dp_device *dp)
8643@@ -176,19 +195,18 @@ void analogix_dp_config_interrupt(struct analogix_dp_device *dp)
8644 
8645 	/* 0: mask, 1: unmask */
8646 	reg = COMMON_INT_MASK_1;
8647-	writel(reg, dp->reg_base + ANALOGIX_DP_COMMON_INT_MASK_1);
8648+	analogix_dp_write(dp, ANALOGIX_DP_COMMON_INT_MASK_1, reg);
8649 
8650 	reg = COMMON_INT_MASK_2;
8651-	writel(reg, dp->reg_base + ANALOGIX_DP_COMMON_INT_MASK_2);
8652+	analogix_dp_write(dp, ANALOGIX_DP_COMMON_INT_MASK_2, reg);
8653 
8654 	reg = COMMON_INT_MASK_3;
8655-	writel(reg, dp->reg_base + ANALOGIX_DP_COMMON_INT_MASK_3);
8656+	analogix_dp_write(dp, ANALOGIX_DP_COMMON_INT_MASK_3, reg);
8657 
8658-	reg = COMMON_INT_MASK_4;
8659-	writel(reg, dp->reg_base + ANALOGIX_DP_COMMON_INT_MASK_4);
8660-
8661-	reg = INT_STA_MASK;
8662-	writel(reg, dp->reg_base + ANALOGIX_DP_INT_STA_MASK);
8663+	if (dp->force_hpd || dp->hpd_gpiod)
8664+		analogix_dp_mute_hpd_interrupt(dp);
8665+	else
8666+		analogix_dp_unmute_hpd_interrupt(dp);
8667 }
8668 
8669 void analogix_dp_mute_hpd_interrupt(struct analogix_dp_device *dp)
8670@@ -196,13 +214,13 @@ void analogix_dp_mute_hpd_interrupt(struct analogix_dp_device *dp)
8671 	u32 reg;
8672 
8673 	/* 0: mask, 1: unmask */
8674-	reg = readl(dp->reg_base + ANALOGIX_DP_COMMON_INT_MASK_4);
8675+	reg = analogix_dp_read(dp, ANALOGIX_DP_COMMON_INT_MASK_4);
8676 	reg &= ~COMMON_INT_MASK_4;
8677-	writel(reg, dp->reg_base + ANALOGIX_DP_COMMON_INT_MASK_4);
8678+	analogix_dp_write(dp, ANALOGIX_DP_COMMON_INT_MASK_4, reg);
8679 
8680-	reg = readl(dp->reg_base + ANALOGIX_DP_INT_STA_MASK);
8681-	reg &= ~INT_STA_MASK;
8682-	writel(reg, dp->reg_base + ANALOGIX_DP_INT_STA_MASK);
8683+	reg = analogix_dp_read(dp, ANALOGIX_DP_INT_STA_MASK);
8684+	reg &= ~INT_HPD;
8685+	analogix_dp_write(dp, ANALOGIX_DP_INT_STA_MASK, reg);
8686 }
8687 
8688 void analogix_dp_unmute_hpd_interrupt(struct analogix_dp_device *dp)
8689@@ -211,17 +229,18 @@ void analogix_dp_unmute_hpd_interrupt(struct analogix_dp_device *dp)
8690 
8691 	/* 0: mask, 1: unmask */
8692 	reg = COMMON_INT_MASK_4;
8693-	writel(reg, dp->reg_base + ANALOGIX_DP_COMMON_INT_MASK_4);
8694+	analogix_dp_write(dp, ANALOGIX_DP_COMMON_INT_MASK_4, reg);
8695 
8696-	reg = INT_STA_MASK;
8697-	writel(reg, dp->reg_base + ANALOGIX_DP_INT_STA_MASK);
8698+	reg = analogix_dp_read(dp, ANALOGIX_DP_INT_STA_MASK);
8699+	reg |= INT_HPD;
8700+	analogix_dp_write(dp, ANALOGIX_DP_INT_STA_MASK, reg);
8701 }
8702 
8703 enum pll_status analogix_dp_get_pll_lock_status(struct analogix_dp_device *dp)
8704 {
8705 	u32 reg;
8706 
8707-	reg = readl(dp->reg_base + ANALOGIX_DP_DEBUG_CTL);
8708+	reg = analogix_dp_read(dp, ANALOGIX_DP_DEBUG_CTL);
8709 	if (reg & PLL_LOCK)
8710 		return PLL_LOCKED;
8711 	else
8712@@ -239,12 +258,12 @@ void analogix_dp_set_pll_power_down(struct analogix_dp_device *dp, bool enable)
8713 		mask = RK_PLL_PD;
8714 	}
8715 
8716-	reg = readl(dp->reg_base + pd_addr);
8717+	reg = analogix_dp_read(dp, pd_addr);
8718 	if (enable)
8719 		reg |= mask;
8720 	else
8721 		reg &= ~mask;
8722-	writel(reg, dp->reg_base + pd_addr);
8723+	analogix_dp_write(dp, pd_addr, reg);
8724 }
8725 
8726 void analogix_dp_set_analog_power_down(struct analogix_dp_device *dp,
8727@@ -265,52 +284,54 @@ void analogix_dp_set_analog_power_down(struct analogix_dp_device *dp,
8728 		else
8729 			mask = AUX_PD;
8730 
8731-		reg = readl(dp->reg_base + phy_pd_addr);
8732-		if (enable)
8733+		reg = analogix_dp_read(dp, phy_pd_addr);
8734+		if (enable) {
8735+			reg &= ~(DP_INC_BG | DP_EXP_BG);
8736 			reg |= mask;
8737-		else
8738+		} else {
8739 			reg &= ~mask;
8740-		writel(reg, dp->reg_base + phy_pd_addr);
8741+		}
8742+		analogix_dp_write(dp, phy_pd_addr, reg);
8743 		break;
8744 	case CH0_BLOCK:
8745 		mask = CH0_PD;
8746-		reg = readl(dp->reg_base + phy_pd_addr);
8747+		reg = analogix_dp_read(dp, phy_pd_addr);
8748 
8749 		if (enable)
8750 			reg |= mask;
8751 		else
8752 			reg &= ~mask;
8753-		writel(reg, dp->reg_base + phy_pd_addr);
8754+		analogix_dp_write(dp, phy_pd_addr, reg);
8755 		break;
8756 	case CH1_BLOCK:
8757 		mask = CH1_PD;
8758-		reg = readl(dp->reg_base + phy_pd_addr);
8759+		reg = analogix_dp_read(dp, phy_pd_addr);
8760 
8761 		if (enable)
8762 			reg |= mask;
8763 		else
8764 			reg &= ~mask;
8765-		writel(reg, dp->reg_base + phy_pd_addr);
8766+		analogix_dp_write(dp, phy_pd_addr, reg);
8767 		break;
8768 	case CH2_BLOCK:
8769 		mask = CH2_PD;
8770-		reg = readl(dp->reg_base + phy_pd_addr);
8771+		reg = analogix_dp_read(dp, phy_pd_addr);
8772 
8773 		if (enable)
8774 			reg |= mask;
8775 		else
8776 			reg &= ~mask;
8777-		writel(reg, dp->reg_base + phy_pd_addr);
8778+		analogix_dp_write(dp, phy_pd_addr, reg);
8779 		break;
8780 	case CH3_BLOCK:
8781 		mask = CH3_PD;
8782-		reg = readl(dp->reg_base + phy_pd_addr);
8783+		reg = analogix_dp_read(dp, phy_pd_addr);
8784 
8785 		if (enable)
8786 			reg |= mask;
8787 		else
8788 			reg &= ~mask;
8789-		writel(reg, dp->reg_base + phy_pd_addr);
8790+		analogix_dp_write(dp, phy_pd_addr, reg);
8791 		break;
8792 	case ANALOG_TOTAL:
8793 		/*
8794@@ -323,29 +344,29 @@ void analogix_dp_set_analog_power_down(struct analogix_dp_device *dp,
8795 		else
8796 			mask = DP_PHY_PD;
8797 
8798-		reg = readl(dp->reg_base + phy_pd_addr);
8799+		reg = analogix_dp_read(dp, phy_pd_addr);
8800 		if (enable)
8801 			reg |= mask;
8802 		else
8803 			reg &= ~mask;
8804 
8805-		writel(reg, dp->reg_base + phy_pd_addr);
8806+		analogix_dp_write(dp, phy_pd_addr, reg);
8807 		if (dp->plat_data && is_rockchip(dp->plat_data->dev_type))
8808 			usleep_range(10, 15);
8809 		break;
8810 	case POWER_ALL:
8811 		if (enable) {
8812 			reg = DP_ALL_PD;
8813-			writel(reg, dp->reg_base + phy_pd_addr);
8814+			analogix_dp_write(dp, phy_pd_addr, reg);
8815 		} else {
8816 			reg = DP_ALL_PD;
8817-			writel(reg, dp->reg_base + phy_pd_addr);
8818+			analogix_dp_write(dp, phy_pd_addr, reg);
8819 			usleep_range(10, 15);
8820 			reg &= ~DP_INC_BG;
8821-			writel(reg, dp->reg_base + phy_pd_addr);
8822+			analogix_dp_write(dp, phy_pd_addr, reg);
8823 			usleep_range(10, 15);
8824 
8825-			writel(0x00, dp->reg_base + phy_pd_addr);
8826+			analogix_dp_write(dp, phy_pd_addr, 0x00);
8827 		}
8828 		break;
8829 	default:
8830@@ -356,36 +377,24 @@ void analogix_dp_set_analog_power_down(struct analogix_dp_device *dp,
8831 int analogix_dp_init_analog_func(struct analogix_dp_device *dp)
8832 {
8833 	u32 reg;
8834-	int timeout_loop = 0;
8835 
8836 	analogix_dp_set_analog_power_down(dp, POWER_ALL, 0);
8837 
8838 	reg = PLL_LOCK_CHG;
8839-	writel(reg, dp->reg_base + ANALOGIX_DP_COMMON_INT_STA_1);
8840+	analogix_dp_write(dp, ANALOGIX_DP_COMMON_INT_STA_1, reg);
8841 
8842-	reg = readl(dp->reg_base + ANALOGIX_DP_DEBUG_CTL);
8843+	reg = analogix_dp_read(dp, ANALOGIX_DP_DEBUG_CTL);
8844 	reg &= ~(F_PLL_LOCK | PLL_LOCK_CTRL);
8845-	writel(reg, dp->reg_base + ANALOGIX_DP_DEBUG_CTL);
8846+	analogix_dp_write(dp, ANALOGIX_DP_DEBUG_CTL, reg);
8847 
8848 	/* Power up PLL */
8849-	if (analogix_dp_get_pll_lock_status(dp) == PLL_UNLOCKED) {
8850-		analogix_dp_set_pll_power_down(dp, 0);
8851-
8852-		while (analogix_dp_get_pll_lock_status(dp) == PLL_UNLOCKED) {
8853-			timeout_loop++;
8854-			if (DP_TIMEOUT_LOOP_COUNT < timeout_loop) {
8855-				dev_err(dp->dev, "failed to get pll lock status\n");
8856-				return -ETIMEDOUT;
8857-			}
8858-			usleep_range(10, 20);
8859-		}
8860-	}
8861+	analogix_dp_set_pll_power_down(dp, 0);
8862 
8863 	/* Enable Serdes FIFO function and Link symbol clock domain module */
8864-	reg = readl(dp->reg_base + ANALOGIX_DP_FUNC_EN_2);
8865+	reg = analogix_dp_read(dp, ANALOGIX_DP_FUNC_EN_2);
8866 	reg &= ~(SERDES_FIFO_FUNC_EN_N | LS_CLK_DOMAIN_FUNC_EN_N
8867 		| AUX_FUNC_EN_N);
8868-	writel(reg, dp->reg_base + ANALOGIX_DP_FUNC_EN_2);
8869+	analogix_dp_write(dp, ANALOGIX_DP_FUNC_EN_2, reg);
8870 	return 0;
8871 }
8872 
8873@@ -397,10 +406,10 @@ void analogix_dp_clear_hotplug_interrupts(struct analogix_dp_device *dp)
8874 		return;
8875 
8876 	reg = HOTPLUG_CHG | HPD_LOST | PLUG;
8877-	writel(reg, dp->reg_base + ANALOGIX_DP_COMMON_INT_STA_4);
8878+	analogix_dp_write(dp, ANALOGIX_DP_COMMON_INT_STA_4, reg);
8879 
8880 	reg = INT_HPD;
8881-	writel(reg, dp->reg_base + ANALOGIX_DP_INT_STA);
8882+	analogix_dp_write(dp, ANALOGIX_DP_INT_STA, reg);
8883 }
8884 
8885 void analogix_dp_init_hpd(struct analogix_dp_device *dp)
8886@@ -412,45 +421,37 @@ void analogix_dp_init_hpd(struct analogix_dp_device *dp)
8887 
8888 	analogix_dp_clear_hotplug_interrupts(dp);
8889 
8890-	reg = readl(dp->reg_base + ANALOGIX_DP_SYS_CTL_3);
8891+	reg = analogix_dp_read(dp, ANALOGIX_DP_SYS_CTL_3);
8892 	reg &= ~(F_HPD | HPD_CTRL);
8893-	writel(reg, dp->reg_base + ANALOGIX_DP_SYS_CTL_3);
8894+	analogix_dp_write(dp, ANALOGIX_DP_SYS_CTL_3, reg);
8895 }
8896 
8897 void analogix_dp_force_hpd(struct analogix_dp_device *dp)
8898 {
8899 	u32 reg;
8900 
8901-	reg = readl(dp->reg_base + ANALOGIX_DP_SYS_CTL_3);
8902-	reg = (F_HPD | HPD_CTRL);
8903-	writel(reg, dp->reg_base + ANALOGIX_DP_SYS_CTL_3);
8904+	reg = analogix_dp_read(dp, ANALOGIX_DP_SYS_CTL_3);
8905+	reg |= (F_HPD | HPD_CTRL);
8906+	analogix_dp_write(dp, ANALOGIX_DP_SYS_CTL_3, reg);
8907 }
8908 
8909 enum dp_irq_type analogix_dp_get_irq_type(struct analogix_dp_device *dp)
8910 {
8911 	u32 reg;
8912 
8913-	if (dp->hpd_gpiod) {
8914-		reg = gpiod_get_value(dp->hpd_gpiod);
8915-		if (reg)
8916-			return DP_IRQ_TYPE_HP_CABLE_IN;
8917-		else
8918-			return DP_IRQ_TYPE_HP_CABLE_OUT;
8919-	} else {
8920-		/* Parse hotplug interrupt status register */
8921-		reg = readl(dp->reg_base + ANALOGIX_DP_COMMON_INT_STA_4);
8922+	/* Parse hotplug interrupt status register */
8923+	reg = analogix_dp_read(dp, ANALOGIX_DP_COMMON_INT_STA_4);
8924 
8925-		if (reg & PLUG)
8926-			return DP_IRQ_TYPE_HP_CABLE_IN;
8927+	if (reg & PLUG)
8928+		return DP_IRQ_TYPE_HP_CABLE_IN;
8929 
8930-		if (reg & HPD_LOST)
8931-			return DP_IRQ_TYPE_HP_CABLE_OUT;
8932+	if (reg & HPD_LOST)
8933+		return DP_IRQ_TYPE_HP_CABLE_OUT;
8934 
8935-		if (reg & HOTPLUG_CHG)
8936-			return DP_IRQ_TYPE_HP_CHANGE;
8937+	if (reg & HOTPLUG_CHG)
8938+		return DP_IRQ_TYPE_HP_CHANGE;
8939 
8940-		return DP_IRQ_TYPE_UNKNOWN;
8941-	}
8942+	return DP_IRQ_TYPE_UNKNOWN;
8943 }
8944 
8945 void analogix_dp_reset_aux(struct analogix_dp_device *dp)
8946@@ -458,9 +459,9 @@ void analogix_dp_reset_aux(struct analogix_dp_device *dp)
8947 	u32 reg;
8948 
8949 	/* Disable AUX channel module */
8950-	reg = readl(dp->reg_base + ANALOGIX_DP_FUNC_EN_2);
8951+	reg = analogix_dp_read(dp, ANALOGIX_DP_FUNC_EN_2);
8952 	reg |= AUX_FUNC_EN_N;
8953-	writel(reg, dp->reg_base + ANALOGIX_DP_FUNC_EN_2);
8954+	analogix_dp_write(dp, ANALOGIX_DP_FUNC_EN_2, reg);
8955 }
8956 
8957 void analogix_dp_init_aux(struct analogix_dp_device *dp)
8958@@ -469,7 +470,7 @@ void analogix_dp_init_aux(struct analogix_dp_device *dp)
8959 
8960 	/* Clear interrupts related to AUX channel */
8961 	reg = RPLY_RECEIV | AUX_ERR;
8962-	writel(reg, dp->reg_base + ANALOGIX_DP_INT_STA);
8963+	analogix_dp_write(dp, ANALOGIX_DP_INT_STA, reg);
8964 
8965 	analogix_dp_set_analog_power_down(dp, AUX_BLOCK, true);
8966 	usleep_range(10, 11);
8967@@ -487,16 +488,17 @@ void analogix_dp_init_aux(struct analogix_dp_device *dp)
8968 	reg |= AUX_HW_RETRY_COUNT_SEL(0) |
8969 	       AUX_HW_RETRY_INTERVAL_600_MICROSECONDS;
8970 
8971-	writel(reg, dp->reg_base + ANALOGIX_DP_AUX_HW_RETRY_CTL);
8972+	analogix_dp_write(dp, ANALOGIX_DP_AUX_HW_RETRY_CTL, reg);
8973 
8974 	/* Receive AUX Channel DEFER commands equal to DEFER_COUNT * 64 */
8975 	reg = DEFER_CTRL_EN | DEFER_COUNT(1);
8976-	writel(reg, dp->reg_base + ANALOGIX_DP_AUX_CH_DEFER_CTL);
8977+	analogix_dp_write(dp, ANALOGIX_DP_AUX_CH_DEFER_CTL, reg);
8978 
8979 	/* Enable AUX channel module */
8980-	reg = readl(dp->reg_base + ANALOGIX_DP_FUNC_EN_2);
8981+	analogix_dp_enable_sw_function(dp);
8982+	reg = analogix_dp_read(dp, ANALOGIX_DP_FUNC_EN_2);
8983 	reg &= ~AUX_FUNC_EN_N;
8984-	writel(reg, dp->reg_base + ANALOGIX_DP_FUNC_EN_2);
8985+	analogix_dp_write(dp, ANALOGIX_DP_FUNC_EN_2, reg);
8986 }
8987 
8988 int analogix_dp_get_plug_in_status(struct analogix_dp_device *dp)
8989@@ -507,7 +509,7 @@ int analogix_dp_get_plug_in_status(struct analogix_dp_device *dp)
8990 		if (gpiod_get_value(dp->hpd_gpiod))
8991 			return 0;
8992 	} else {
8993-		reg = readl(dp->reg_base + ANALOGIX_DP_SYS_CTL_3);
8994+		reg = analogix_dp_read(dp, ANALOGIX_DP_SYS_CTL_3);
8995 		if (reg & HPD_STATUS)
8996 			return 0;
8997 	}
8998@@ -519,145 +521,181 @@ void analogix_dp_enable_sw_function(struct analogix_dp_device *dp)
8999 {
9000 	u32 reg;
9001 
9002-	reg = readl(dp->reg_base + ANALOGIX_DP_FUNC_EN_1);
9003+	reg = analogix_dp_read(dp, ANALOGIX_DP_FUNC_EN_1);
9004 	reg &= ~SW_FUNC_EN_N;
9005-	writel(reg, dp->reg_base + ANALOGIX_DP_FUNC_EN_1);
9006+	analogix_dp_write(dp, ANALOGIX_DP_FUNC_EN_1, reg);
9007 }
9008 
9009-int analogix_dp_start_aux_transaction(struct analogix_dp_device *dp)
9010+static void analogix_dp_ssc_enable(struct analogix_dp_device *dp)
9011 {
9012-	int reg;
9013-	int retval = 0;
9014-	int timeout_loop = 0;
9015-
9016-	/* Enable AUX CH operation */
9017-	reg = readl(dp->reg_base + ANALOGIX_DP_AUX_CH_CTL_2);
9018-	reg |= AUX_EN;
9019-	writel(reg, dp->reg_base + ANALOGIX_DP_AUX_CH_CTL_2);
9020-
9021-	/* Is AUX CH command reply received? */
9022-	reg = readl(dp->reg_base + ANALOGIX_DP_INT_STA);
9023-	while (!(reg & RPLY_RECEIV)) {
9024-		timeout_loop++;
9025-		if (DP_TIMEOUT_LOOP_COUNT < timeout_loop) {
9026-			dev_err(dp->dev, "AUX CH command reply failed!\n");
9027-			return -ETIMEDOUT;
9028-		}
9029-		reg = readl(dp->reg_base + ANALOGIX_DP_INT_STA);
9030-		usleep_range(10, 11);
9031-	}
9032-
9033-	/* Clear interrupt source for AUX CH command reply */
9034-	writel(RPLY_RECEIV, dp->reg_base + ANALOGIX_DP_INT_STA);
9035-
9036-	/* Clear interrupt source for AUX CH access error */
9037-	reg = readl(dp->reg_base + ANALOGIX_DP_INT_STA);
9038-	if (reg & AUX_ERR) {
9039-		writel(AUX_ERR, dp->reg_base + ANALOGIX_DP_INT_STA);
9040-		return -EREMOTEIO;
9041-	}
9042-
9043-	/* Check AUX CH error access status */
9044-	reg = readl(dp->reg_base + ANALOGIX_DP_AUX_CH_STA);
9045-	if ((reg & AUX_STATUS_MASK) != 0) {
9046-		dev_err(dp->dev, "AUX CH error happens: %d\n\n",
9047-			reg & AUX_STATUS_MASK);
9048-		return -EREMOTEIO;
9049-	}
9050+	u32 reg;
9051 
9052-	return retval;
9053+	/* 4500 ppm */
9054+	analogix_dp_write(dp, ANALOIGX_DP_SSC_REG, 0x19);
9055+	/*
9056+	 * To apply the updated SSC parameters, the SSC function must be
9057+	 * disabled and then re-enabled.
9058+	 */
9059+	reg = analogix_dp_read(dp, ANALOGIX_DP_FUNC_EN_2);
9060+	reg |= SSC_FUNC_EN_N;
9061+	analogix_dp_write(dp, ANALOGIX_DP_FUNC_EN_2, reg);
9062+	reg &= ~SSC_FUNC_EN_N;
9063+	analogix_dp_write(dp, ANALOGIX_DP_FUNC_EN_2, reg);
9064 }
9065 
9066-int analogix_dp_write_byte_to_dpcd(struct analogix_dp_device *dp,
9067-				   unsigned int reg_addr,
9068-				   unsigned char data)
9069+static void analogix_dp_ssc_disable(struct analogix_dp_device *dp)
9070 {
9071 	u32 reg;
9072-	int i;
9073-	int retval;
9074-
9075-	for (i = 0; i < 3; i++) {
9076-		/* Clear AUX CH data buffer */
9077-		reg = BUF_CLR;
9078-		writel(reg, dp->reg_base + ANALOGIX_DP_BUFFER_DATA_CTL);
9079-
9080-		/* Select DPCD device address */
9081-		reg = AUX_ADDR_7_0(reg_addr);
9082-		writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_7_0);
9083-		reg = AUX_ADDR_15_8(reg_addr);
9084-		writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_15_8);
9085-		reg = AUX_ADDR_19_16(reg_addr);
9086-		writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_19_16);
9087-
9088-		/* Write data buffer */
9089-		reg = (unsigned int)data;
9090-		writel(reg, dp->reg_base + ANALOGIX_DP_BUF_DATA_0);
9091 
9092-		/*
9093-		 * Set DisplayPort transaction and write 1 byte
9094-		 * If bit 3 is 1, DisplayPort transaction.
9095-		 * If Bit 3 is 0, I2C transaction.
9096-		 */
9097-		reg = AUX_TX_COMM_DP_TRANSACTION | AUX_TX_COMM_WRITE;
9098-		writel(reg, dp->reg_base + ANALOGIX_DP_AUX_CH_CTL_1);
9099-
9100-		/* Start AUX transaction */
9101-		retval = analogix_dp_start_aux_transaction(dp);
9102-		if (retval == 0)
9103-			break;
9104-
9105-		dev_dbg(dp->dev, "%s: Aux Transaction fail!\n", __func__);
9106-	}
9107+	reg = analogix_dp_read(dp, ANALOGIX_DP_FUNC_EN_2);
9108+	reg |= SSC_FUNC_EN_N;
9109+	analogix_dp_write(dp, ANALOGIX_DP_FUNC_EN_2, reg);
9110+}
9111 
9112-	return retval;
9113+bool analogix_dp_ssc_supported(struct analogix_dp_device *dp)
9114+{
9115+	/* Check if SSC is supported by both sides */
9116+	return dp->plat_data->ssc && dp->link_train.ssc;
9117 }
9118 
9119 void analogix_dp_set_link_bandwidth(struct analogix_dp_device *dp, u32 bwtype)
9120 {
9121-	u32 reg;
9122+	u32 status;
9123+	int ret;
9124 
9125-	reg = bwtype;
9126-	if ((bwtype == DP_LINK_BW_2_7) || (bwtype == DP_LINK_BW_1_62))
9127-		writel(reg, dp->reg_base + ANALOGIX_DP_LINK_BW_SET);
9128+	analogix_dp_write(dp, ANALOGIX_DP_LINK_BW_SET, bwtype);
9129+
9130+	if (dp->phy) {
9131+		union phy_configure_opts phy_cfg = {0};
9132+
9133+		phy_cfg.dp.lanes = dp->link_train.lane_count;
9134+		phy_cfg.dp.link_rate =
9135+			drm_dp_bw_code_to_link_rate(dp->link_train.link_rate) / 100;
9136+		phy_cfg.dp.ssc = analogix_dp_ssc_supported(dp);
9137+		phy_cfg.dp.set_lanes = false;
9138+		phy_cfg.dp.set_rate = true;
9139+		phy_cfg.dp.set_voltages = false;
9140+		ret = phy_configure(dp->phy, &phy_cfg);
9141+		if (ret && ret != -EOPNOTSUPP) {
9142+			dev_err(dp->dev, "%s: phy_configure() failed: %d\n",
9143+				__func__, ret);
9144+			return;
9145+		}
9146+	} else {
9147+		if (analogix_dp_ssc_supported(dp))
9148+			analogix_dp_ssc_enable(dp);
9149+		else
9150+			analogix_dp_ssc_disable(dp);
9151+	}
9152+
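+	/* Wait for the PLL to lock at the newly programmed link rate */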
9153+	ret = readx_poll_timeout(analogix_dp_get_pll_lock_status, dp, status,
9154+				 status != PLL_UNLOCKED, 120,
9155+				 120 * DP_TIMEOUT_LOOP_COUNT);
9156+	if (ret) {
9157+		dev_err(dp->dev, "Wait for pll lock failed %d\n", ret);
9158+		return;
9159+	}
9160 }
9161 
9162 void analogix_dp_get_link_bandwidth(struct analogix_dp_device *dp, u32 *bwtype)
9163 {
9164 	u32 reg;
9165 
9166-	reg = readl(dp->reg_base + ANALOGIX_DP_LINK_BW_SET);
9167+	reg = analogix_dp_read(dp, ANALOGIX_DP_LINK_BW_SET);
9168 	*bwtype = reg;
9169 }
9170 
9171 void analogix_dp_set_lane_count(struct analogix_dp_device *dp, u32 count)
9172 {
9173 	u32 reg;
9174+	int ret;
9175 
9176 	reg = count;
9177-	writel(reg, dp->reg_base + ANALOGIX_DP_LANE_COUNT_SET);
9178+	analogix_dp_write(dp, ANALOGIX_DP_LANE_COUNT_SET, reg);
9179+
9180+	if (dp->phy) {
9181+		union phy_configure_opts phy_cfg = {0};
9182+
9183+		phy_cfg.dp.lanes = dp->link_train.lane_count;
9184+		phy_cfg.dp.set_lanes = true;
9185+		phy_cfg.dp.set_rate = false;
9186+		phy_cfg.dp.set_voltages = false;
9187+		ret = phy_configure(dp->phy, &phy_cfg);
9188+		if (ret && ret != -EOPNOTSUPP) {
9189+			dev_err(dp->dev, "%s: phy_configure() failed: %d\n",
9190+				__func__, ret);
9191+			return;
9192+		}
9193+	}
9194 }
9195 
9196 void analogix_dp_get_lane_count(struct analogix_dp_device *dp, u32 *count)
9197 {
9198 	u32 reg;
9199 
9200-	reg = readl(dp->reg_base + ANALOGIX_DP_LANE_COUNT_SET);
9201+	reg = analogix_dp_read(dp, ANALOGIX_DP_LANE_COUNT_SET);
9202 	*count = reg;
9203 }
9204 
9205+void analogix_dp_set_lane_link_training(struct analogix_dp_device *dp)
9206+{
9207+	u8 lane;
9208+	int ret;
9209+
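+	/* Write each lane's swing/pre-emphasis into its training control register */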
9210+	for (lane = 0; lane < dp->link_train.lane_count; lane++)
9211+		analogix_dp_write(dp,
9212+				  ANALOGIX_DP_LN0_LINK_TRAINING_CTL + 4 * lane,
9213+				  dp->link_train.training_lane[lane]);
9214+
9215+	if (dp->phy) {
9216+		union phy_configure_opts phy_cfg = {0};
9217+
9218+		for (lane = 0; lane < dp->link_train.lane_count; lane++) {
9219+			u8 training_lane = dp->link_train.training_lane[lane];
9220+			u8 vs, pe;
9221+
9222+			vs = (training_lane & DP_TRAIN_VOLTAGE_SWING_MASK) >>
9223+			     DP_TRAIN_VOLTAGE_SWING_SHIFT;
9224+			pe = (training_lane & DP_TRAIN_PRE_EMPHASIS_MASK) >>
9225+			     DP_TRAIN_PRE_EMPHASIS_SHIFT;
9226+			phy_cfg.dp.voltage[lane] = vs;
9227+			phy_cfg.dp.pre[lane] = pe;
9228+		}
9229+
9230+		phy_cfg.dp.lanes = dp->link_train.lane_count;
9231+		phy_cfg.dp.link_rate =
9232+			drm_dp_bw_code_to_link_rate(dp->link_train.link_rate) / 100;
9233+		phy_cfg.dp.set_lanes = false;
9234+		phy_cfg.dp.set_rate = false;
9235+		phy_cfg.dp.set_voltages = true;
9236+		ret = phy_configure(dp->phy, &phy_cfg);
9237+		if (ret && ret != -EOPNOTSUPP) {
9238+			dev_err(dp->dev, "%s: phy_configure() failed: %d\n",
9239+				__func__, ret);
9240+			return;
9241+		}
9242+	}
9243+}
9244+
9245+u32 analogix_dp_get_lane_link_training(struct analogix_dp_device *dp, u8 lane)
9246+{
9247+	return analogix_dp_read(dp,
9248+				ANALOGIX_DP_LN0_LINK_TRAINING_CTL + 4 * lane);
9249+}
9250+
9251 void analogix_dp_enable_enhanced_mode(struct analogix_dp_device *dp,
9252 				      bool enable)
9253 {
9254 	u32 reg;
9255 
9256 	if (enable) {
9257-		reg = readl(dp->reg_base + ANALOGIX_DP_SYS_CTL_4);
9258+		reg = analogix_dp_read(dp, ANALOGIX_DP_SYS_CTL_4);
9259 		reg |= ENHANCED;
9260-		writel(reg, dp->reg_base + ANALOGIX_DP_SYS_CTL_4);
9261+		analogix_dp_write(dp, ANALOGIX_DP_SYS_CTL_4, reg);
9262 	} else {
9263-		reg = readl(dp->reg_base + ANALOGIX_DP_SYS_CTL_4);
9264+		reg = analogix_dp_read(dp, ANALOGIX_DP_SYS_CTL_4);
9265 		reg &= ~ENHANCED;
9266-		writel(reg, dp->reg_base + ANALOGIX_DP_SYS_CTL_4);
9267+		analogix_dp_write(dp, ANALOGIX_DP_SYS_CTL_4, reg);
9268 	}
9269 }
9270 
9271@@ -669,144 +707,48 @@ void analogix_dp_set_training_pattern(struct analogix_dp_device *dp,
9272 	switch (pattern) {
9273 	case PRBS7:
9274 		reg = SCRAMBLING_ENABLE | LINK_QUAL_PATTERN_SET_PRBS7;
9275-		writel(reg, dp->reg_base + ANALOGIX_DP_TRAINING_PTN_SET);
9276+		analogix_dp_write(dp, ANALOGIX_DP_TRAINING_PTN_SET, reg);
9277 		break;
9278 	case D10_2:
9279 		reg = SCRAMBLING_ENABLE | LINK_QUAL_PATTERN_SET_D10_2;
9280-		writel(reg, dp->reg_base + ANALOGIX_DP_TRAINING_PTN_SET);
9281+		analogix_dp_write(dp, ANALOGIX_DP_TRAINING_PTN_SET, reg);
9282 		break;
9283 	case TRAINING_PTN1:
9284 		reg = SCRAMBLING_DISABLE | SW_TRAINING_PATTERN_SET_PTN1;
9285-		writel(reg, dp->reg_base + ANALOGIX_DP_TRAINING_PTN_SET);
9286+		analogix_dp_write(dp, ANALOGIX_DP_TRAINING_PTN_SET, reg);
9287 		break;
9288 	case TRAINING_PTN2:
9289 		reg = SCRAMBLING_DISABLE | SW_TRAINING_PATTERN_SET_PTN2;
9290-		writel(reg, dp->reg_base + ANALOGIX_DP_TRAINING_PTN_SET);
9291+		analogix_dp_write(dp, ANALOGIX_DP_TRAINING_PTN_SET, reg);
9292+		break;
9293+	case TRAINING_PTN3:
9294+		reg = SCRAMBLING_DISABLE | SW_TRAINING_PATTERN_SET_PTN3;
9295+		analogix_dp_write(dp, ANALOGIX_DP_TRAINING_PTN_SET, reg);
9296 		break;
9297 	case DP_NONE:
9298 		reg = SCRAMBLING_ENABLE |
9299 			LINK_QUAL_PATTERN_SET_DISABLE |
9300 			SW_TRAINING_PATTERN_SET_NORMAL;
9301-		writel(reg, dp->reg_base + ANALOGIX_DP_TRAINING_PTN_SET);
9302+		analogix_dp_write(dp, ANALOGIX_DP_TRAINING_PTN_SET, reg);
9303 		break;
9304 	default:
9305 		break;
9306 	}
9307 }
9308 
9309-void analogix_dp_set_lane0_pre_emphasis(struct analogix_dp_device *dp,
9310-					u32 level)
9311-{
9312-	u32 reg;
9313-
9314-	reg = readl(dp->reg_base + ANALOGIX_DP_LN0_LINK_TRAINING_CTL);
9315-	reg &= ~PRE_EMPHASIS_SET_MASK;
9316-	reg |= level << PRE_EMPHASIS_SET_SHIFT;
9317-	writel(reg, dp->reg_base + ANALOGIX_DP_LN0_LINK_TRAINING_CTL);
9318-}
9319-
9320-void analogix_dp_set_lane1_pre_emphasis(struct analogix_dp_device *dp,
9321-					u32 level)
9322-{
9323-	u32 reg;
9324-
9325-	reg = readl(dp->reg_base + ANALOGIX_DP_LN1_LINK_TRAINING_CTL);
9326-	reg &= ~PRE_EMPHASIS_SET_MASK;
9327-	reg |= level << PRE_EMPHASIS_SET_SHIFT;
9328-	writel(reg, dp->reg_base + ANALOGIX_DP_LN1_LINK_TRAINING_CTL);
9329-}
9330-
9331-void analogix_dp_set_lane2_pre_emphasis(struct analogix_dp_device *dp,
9332-					u32 level)
9333-{
9334-	u32 reg;
9335-
9336-	reg = readl(dp->reg_base + ANALOGIX_DP_LN2_LINK_TRAINING_CTL);
9337-	reg &= ~PRE_EMPHASIS_SET_MASK;
9338-	reg |= level << PRE_EMPHASIS_SET_SHIFT;
9339-	writel(reg, dp->reg_base + ANALOGIX_DP_LN2_LINK_TRAINING_CTL);
9340-}
9341-
9342-void analogix_dp_set_lane3_pre_emphasis(struct analogix_dp_device *dp,
9343-					u32 level)
9344-{
9345-	u32 reg;
9346-
9347-	reg = readl(dp->reg_base + ANALOGIX_DP_LN3_LINK_TRAINING_CTL);
9348-	reg &= ~PRE_EMPHASIS_SET_MASK;
9349-	reg |= level << PRE_EMPHASIS_SET_SHIFT;
9350-	writel(reg, dp->reg_base + ANALOGIX_DP_LN3_LINK_TRAINING_CTL);
9351-}
9352-
9353-void analogix_dp_set_lane0_link_training(struct analogix_dp_device *dp,
9354-					 u32 training_lane)
9355-{
9356-	u32 reg;
9357-
9358-	reg = training_lane;
9359-	writel(reg, dp->reg_base + ANALOGIX_DP_LN0_LINK_TRAINING_CTL);
9360-}
9361-
9362-void analogix_dp_set_lane1_link_training(struct analogix_dp_device *dp,
9363-					 u32 training_lane)
9364-{
9365-	u32 reg;
9366-
9367-	reg = training_lane;
9368-	writel(reg, dp->reg_base + ANALOGIX_DP_LN1_LINK_TRAINING_CTL);
9369-}
9370-
9371-void analogix_dp_set_lane2_link_training(struct analogix_dp_device *dp,
9372-					 u32 training_lane)
9373-{
9374-	u32 reg;
9375-
9376-	reg = training_lane;
9377-	writel(reg, dp->reg_base + ANALOGIX_DP_LN2_LINK_TRAINING_CTL);
9378-}
9379-
9380-void analogix_dp_set_lane3_link_training(struct analogix_dp_device *dp,
9381-					 u32 training_lane)
9382-{
9383-	u32 reg;
9384-
9385-	reg = training_lane;
9386-	writel(reg, dp->reg_base + ANALOGIX_DP_LN3_LINK_TRAINING_CTL);
9387-}
9388-
9389-u32 analogix_dp_get_lane0_link_training(struct analogix_dp_device *dp)
9390-{
9391-	return readl(dp->reg_base + ANALOGIX_DP_LN0_LINK_TRAINING_CTL);
9392-}
9393-
9394-u32 analogix_dp_get_lane1_link_training(struct analogix_dp_device *dp)
9395-{
9396-	return readl(dp->reg_base + ANALOGIX_DP_LN1_LINK_TRAINING_CTL);
9397-}
9398-
9399-u32 analogix_dp_get_lane2_link_training(struct analogix_dp_device *dp)
9400-{
9401-	return readl(dp->reg_base + ANALOGIX_DP_LN2_LINK_TRAINING_CTL);
9402-}
9403-
9404-u32 analogix_dp_get_lane3_link_training(struct analogix_dp_device *dp)
9405-{
9406-	return readl(dp->reg_base + ANALOGIX_DP_LN3_LINK_TRAINING_CTL);
9407-}
9408-
9409 void analogix_dp_reset_macro(struct analogix_dp_device *dp)
9410 {
9411 	u32 reg;
9412 
9413-	reg = readl(dp->reg_base + ANALOGIX_DP_PHY_TEST);
9414+	reg = analogix_dp_read(dp, ANALOGIX_DP_PHY_TEST);
9415 	reg |= MACRO_RST;
9416-	writel(reg, dp->reg_base + ANALOGIX_DP_PHY_TEST);
9417+	analogix_dp_write(dp, ANALOGIX_DP_PHY_TEST, reg);
9418 
9419 	/* 10 us is the minimum reset time. */
9420 	usleep_range(10, 20);
9421 
9422 	reg &= ~MACRO_RST;
9423-	writel(reg, dp->reg_base + ANALOGIX_DP_PHY_TEST);
9424+	analogix_dp_write(dp, ANALOGIX_DP_PHY_TEST, reg);
9425 }
9426 
9427 void analogix_dp_init_video(struct analogix_dp_device *dp)
9428@@ -814,19 +756,19 @@ void analogix_dp_init_video(struct analogix_dp_device *dp)
9429 	u32 reg;
9430 
9431 	reg = VSYNC_DET | VID_FORMAT_CHG | VID_CLK_CHG;
9432-	writel(reg, dp->reg_base + ANALOGIX_DP_COMMON_INT_STA_1);
9433+	analogix_dp_write(dp, ANALOGIX_DP_COMMON_INT_STA_1, reg);
9434 
9435 	reg = 0x0;
9436-	writel(reg, dp->reg_base + ANALOGIX_DP_SYS_CTL_1);
9437+	analogix_dp_write(dp, ANALOGIX_DP_SYS_CTL_1, reg);
9438 
9439 	reg = CHA_CRI(4) | CHA_CTRL;
9440-	writel(reg, dp->reg_base + ANALOGIX_DP_SYS_CTL_2);
9441+	analogix_dp_write(dp, ANALOGIX_DP_SYS_CTL_2, reg);
9442 
9443 	reg = 0x0;
9444-	writel(reg, dp->reg_base + ANALOGIX_DP_SYS_CTL_3);
9445+	analogix_dp_write(dp, ANALOGIX_DP_SYS_CTL_3, reg);
9446 
9447 	reg = VID_HRES_TH(2) | VID_VRES_TH(0);
9448-	writel(reg, dp->reg_base + ANALOGIX_DP_VIDEO_CTL_8);
9449+	analogix_dp_write(dp, ANALOGIX_DP_VIDEO_CTL_8, reg);
9450 }
9451 
9452 void analogix_dp_set_video_color_format(struct analogix_dp_device *dp)
9453@@ -837,36 +779,36 @@ void analogix_dp_set_video_color_format(struct analogix_dp_device *dp)
9454 	reg = (dp->video_info.dynamic_range << IN_D_RANGE_SHIFT) |
9455 		(dp->video_info.color_depth << IN_BPC_SHIFT) |
9456 		(dp->video_info.color_space << IN_COLOR_F_SHIFT);
9457-	writel(reg, dp->reg_base + ANALOGIX_DP_VIDEO_CTL_2);
9458+	analogix_dp_write(dp, ANALOGIX_DP_VIDEO_CTL_2, reg);
9459 
9460 	/* Set Input Color YCbCr Coefficients to ITU601 or ITU709 */
9461-	reg = readl(dp->reg_base + ANALOGIX_DP_VIDEO_CTL_3);
9462+	reg = analogix_dp_read(dp, ANALOGIX_DP_VIDEO_CTL_3);
9463 	reg &= ~IN_YC_COEFFI_MASK;
9464 	if (dp->video_info.ycbcr_coeff)
9465 		reg |= IN_YC_COEFFI_ITU709;
9466 	else
9467 		reg |= IN_YC_COEFFI_ITU601;
9468-	writel(reg, dp->reg_base + ANALOGIX_DP_VIDEO_CTL_3);
9469+	analogix_dp_write(dp, ANALOGIX_DP_VIDEO_CTL_3, reg);
9470 }
9471 
9472 int analogix_dp_is_slave_video_stream_clock_on(struct analogix_dp_device *dp)
9473 {
9474 	u32 reg;
9475 
9476-	reg = readl(dp->reg_base + ANALOGIX_DP_SYS_CTL_1);
9477-	writel(reg, dp->reg_base + ANALOGIX_DP_SYS_CTL_1);
9478+	reg = analogix_dp_read(dp, ANALOGIX_DP_SYS_CTL_1);
9479+	analogix_dp_write(dp, ANALOGIX_DP_SYS_CTL_1, reg);
9480 
9481-	reg = readl(dp->reg_base + ANALOGIX_DP_SYS_CTL_1);
9482+	reg = analogix_dp_read(dp, ANALOGIX_DP_SYS_CTL_1);
9483 
9484 	if (!(reg & DET_STA)) {
9485 		dev_dbg(dp->dev, "Input stream clock not detected.\n");
9486 		return -EINVAL;
9487 	}
9488 
9489-	reg = readl(dp->reg_base + ANALOGIX_DP_SYS_CTL_2);
9490-	writel(reg, dp->reg_base + ANALOGIX_DP_SYS_CTL_2);
9491+	reg = analogix_dp_read(dp, ANALOGIX_DP_SYS_CTL_2);
9492+	analogix_dp_write(dp, ANALOGIX_DP_SYS_CTL_2, reg);
9493 
9494-	reg = readl(dp->reg_base + ANALOGIX_DP_SYS_CTL_2);
9495+	reg = analogix_dp_read(dp, ANALOGIX_DP_SYS_CTL_2);
9496 	dev_dbg(dp->dev, "wait SYS_CTL_2.\n");
9497 
9498 	if (reg & CHA_STA) {
9499@@ -884,30 +826,30 @@ void analogix_dp_set_video_cr_mn(struct analogix_dp_device *dp,
9500 	u32 reg;
9501 
9502 	if (type == REGISTER_M) {
9503-		reg = readl(dp->reg_base + ANALOGIX_DP_SYS_CTL_4);
9504+		reg = analogix_dp_read(dp, ANALOGIX_DP_SYS_CTL_4);
9505 		reg |= FIX_M_VID;
9506-		writel(reg, dp->reg_base + ANALOGIX_DP_SYS_CTL_4);
9507+		analogix_dp_write(dp, ANALOGIX_DP_SYS_CTL_4, reg);
9508 		reg = m_value & 0xff;
9509-		writel(reg, dp->reg_base + ANALOGIX_DP_M_VID_0);
9510+		analogix_dp_write(dp, ANALOGIX_DP_M_VID_0, reg);
9511 		reg = (m_value >> 8) & 0xff;
9512-		writel(reg, dp->reg_base + ANALOGIX_DP_M_VID_1);
9513+		analogix_dp_write(dp, ANALOGIX_DP_M_VID_1, reg);
9514 		reg = (m_value >> 16) & 0xff;
9515-		writel(reg, dp->reg_base + ANALOGIX_DP_M_VID_2);
9516+		analogix_dp_write(dp, ANALOGIX_DP_M_VID_2, reg);
9517 
9518 		reg = n_value & 0xff;
9519-		writel(reg, dp->reg_base + ANALOGIX_DP_N_VID_0);
9520+		analogix_dp_write(dp, ANALOGIX_DP_N_VID_0, reg);
9521 		reg = (n_value >> 8) & 0xff;
9522-		writel(reg, dp->reg_base + ANALOGIX_DP_N_VID_1);
9523+		analogix_dp_write(dp, ANALOGIX_DP_N_VID_1, reg);
9524 		reg = (n_value >> 16) & 0xff;
9525-		writel(reg, dp->reg_base + ANALOGIX_DP_N_VID_2);
9526+		analogix_dp_write(dp, ANALOGIX_DP_N_VID_2, reg);
9527 	} else  {
9528-		reg = readl(dp->reg_base + ANALOGIX_DP_SYS_CTL_4);
9529+		reg = analogix_dp_read(dp, ANALOGIX_DP_SYS_CTL_4);
9530 		reg &= ~FIX_M_VID;
9531-		writel(reg, dp->reg_base + ANALOGIX_DP_SYS_CTL_4);
9532+		analogix_dp_write(dp, ANALOGIX_DP_SYS_CTL_4, reg);
9533 
9534-		writel(0x00, dp->reg_base + ANALOGIX_DP_N_VID_0);
9535-		writel(0x80, dp->reg_base + ANALOGIX_DP_N_VID_1);
9536-		writel(0x00, dp->reg_base + ANALOGIX_DP_N_VID_2);
9537+		analogix_dp_write(dp, ANALOGIX_DP_N_VID_0, 0x00);
9538+		analogix_dp_write(dp, ANALOGIX_DP_N_VID_1, 0x80);
9539+		analogix_dp_write(dp, ANALOGIX_DP_N_VID_2, 0x00);
9540 	}
9541 }
9542 
9543@@ -916,13 +858,13 @@ void analogix_dp_set_video_timing_mode(struct analogix_dp_device *dp, u32 type)
9544 	u32 reg;
9545 
9546 	if (type == VIDEO_TIMING_FROM_CAPTURE) {
9547-		reg = readl(dp->reg_base + ANALOGIX_DP_VIDEO_CTL_10);
9548+		reg = analogix_dp_read(dp, ANALOGIX_DP_VIDEO_CTL_10);
9549 		reg &= ~FORMAT_SEL;
9550-		writel(reg, dp->reg_base + ANALOGIX_DP_VIDEO_CTL_10);
9551+		analogix_dp_write(dp, ANALOGIX_DP_VIDEO_CTL_10, reg);
9552 	} else {
9553-		reg = readl(dp->reg_base + ANALOGIX_DP_VIDEO_CTL_10);
9554+		reg = analogix_dp_read(dp, ANALOGIX_DP_VIDEO_CTL_10);
9555 		reg |= FORMAT_SEL;
9556-		writel(reg, dp->reg_base + ANALOGIX_DP_VIDEO_CTL_10);
9557+		analogix_dp_write(dp, ANALOGIX_DP_VIDEO_CTL_10, reg);
9558 	}
9559 }
9560 
9561@@ -931,15 +873,15 @@ void analogix_dp_enable_video_master(struct analogix_dp_device *dp, bool enable)
9562 	u32 reg;
9563 
9564 	if (enable) {
9565-		reg = readl(dp->reg_base + ANALOGIX_DP_SOC_GENERAL_CTL);
9566+		reg = analogix_dp_read(dp, ANALOGIX_DP_SOC_GENERAL_CTL);
9567 		reg &= ~VIDEO_MODE_MASK;
9568 		reg |= VIDEO_MASTER_MODE_EN | VIDEO_MODE_MASTER_MODE;
9569-		writel(reg, dp->reg_base + ANALOGIX_DP_SOC_GENERAL_CTL);
9570+		analogix_dp_write(dp, ANALOGIX_DP_SOC_GENERAL_CTL, reg);
9571 	} else {
9572-		reg = readl(dp->reg_base + ANALOGIX_DP_SOC_GENERAL_CTL);
9573+		reg = analogix_dp_read(dp, ANALOGIX_DP_SOC_GENERAL_CTL);
9574 		reg &= ~VIDEO_MODE_MASK;
9575 		reg |= VIDEO_MODE_SLAVE_MODE;
9576-		writel(reg, dp->reg_base + ANALOGIX_DP_SOC_GENERAL_CTL);
9577+		analogix_dp_write(dp, ANALOGIX_DP_SOC_GENERAL_CTL, reg);
9578 	}
9579 }
9580 
9581@@ -947,19 +889,19 @@ void analogix_dp_start_video(struct analogix_dp_device *dp)
9582 {
9583 	u32 reg;
9584 
9585-	reg = readl(dp->reg_base + ANALOGIX_DP_VIDEO_CTL_1);
9586+	reg = analogix_dp_read(dp, ANALOGIX_DP_VIDEO_CTL_1);
9587 	reg |= VIDEO_EN;
9588-	writel(reg, dp->reg_base + ANALOGIX_DP_VIDEO_CTL_1);
9589+	analogix_dp_write(dp, ANALOGIX_DP_VIDEO_CTL_1, reg);
9590 }
9591 
9592 int analogix_dp_is_video_stream_on(struct analogix_dp_device *dp)
9593 {
9594 	u32 reg;
9595 
9596-	reg = readl(dp->reg_base + ANALOGIX_DP_SYS_CTL_3);
9597-	writel(reg, dp->reg_base + ANALOGIX_DP_SYS_CTL_3);
9598+	reg = analogix_dp_read(dp, ANALOGIX_DP_SYS_CTL_3);
9599+	analogix_dp_write(dp, ANALOGIX_DP_SYS_CTL_3, reg);
9600 
9601-	reg = readl(dp->reg_base + ANALOGIX_DP_SYS_CTL_3);
9602+	reg = analogix_dp_read(dp, ANALOGIX_DP_SYS_CTL_3);
9603 	if (!(reg & STRM_VALID)) {
9604 		dev_dbg(dp->dev, "Input video stream is not detected.\n");
9605 		return -EINVAL;
9606@@ -972,55 +914,55 @@ void analogix_dp_config_video_slave_mode(struct analogix_dp_device *dp)
9607 {
9608 	u32 reg;
9609 
9610-	reg = readl(dp->reg_base + ANALOGIX_DP_FUNC_EN_1);
9611+	reg = analogix_dp_read(dp, ANALOGIX_DP_FUNC_EN_1);
9612 	if (dp->plat_data && is_rockchip(dp->plat_data->dev_type)) {
9613 		reg &= ~(RK_VID_CAP_FUNC_EN_N | RK_VID_FIFO_FUNC_EN_N);
9614 	} else {
9615 		reg &= ~(MASTER_VID_FUNC_EN_N | SLAVE_VID_FUNC_EN_N);
9616 		reg |= MASTER_VID_FUNC_EN_N;
9617 	}
9618-	writel(reg, dp->reg_base + ANALOGIX_DP_FUNC_EN_1);
9619+	analogix_dp_write(dp, ANALOGIX_DP_FUNC_EN_1, reg);
9620 
9621-	reg = readl(dp->reg_base + ANALOGIX_DP_VIDEO_CTL_10);
9622+	reg = analogix_dp_read(dp, ANALOGIX_DP_VIDEO_CTL_10);
9623 	reg &= ~INTERACE_SCAN_CFG;
9624 	reg |= (dp->video_info.interlaced << 2);
9625-	writel(reg, dp->reg_base + ANALOGIX_DP_VIDEO_CTL_10);
9626+	analogix_dp_write(dp, ANALOGIX_DP_VIDEO_CTL_10, reg);
9627 
9628-	reg = readl(dp->reg_base + ANALOGIX_DP_VIDEO_CTL_10);
9629+	reg = analogix_dp_read(dp, ANALOGIX_DP_VIDEO_CTL_10);
9630 	reg &= ~VSYNC_POLARITY_CFG;
9631 	reg |= (dp->video_info.v_sync_polarity << 1);
9632-	writel(reg, dp->reg_base + ANALOGIX_DP_VIDEO_CTL_10);
9633+	analogix_dp_write(dp, ANALOGIX_DP_VIDEO_CTL_10, reg);
9634 
9635-	reg = readl(dp->reg_base + ANALOGIX_DP_VIDEO_CTL_10);
9636+	reg = analogix_dp_read(dp, ANALOGIX_DP_VIDEO_CTL_10);
9637 	reg &= ~HSYNC_POLARITY_CFG;
9638 	reg |= (dp->video_info.h_sync_polarity << 0);
9639-	writel(reg, dp->reg_base + ANALOGIX_DP_VIDEO_CTL_10);
9640+	analogix_dp_write(dp, ANALOGIX_DP_VIDEO_CTL_10, reg);
9641 
9642 	reg = AUDIO_MODE_SPDIF_MODE | VIDEO_MODE_SLAVE_MODE;
9643-	writel(reg, dp->reg_base + ANALOGIX_DP_SOC_GENERAL_CTL);
9644+	analogix_dp_write(dp, ANALOGIX_DP_SOC_GENERAL_CTL, reg);
9645 }
9646 
9647 void analogix_dp_enable_scrambling(struct analogix_dp_device *dp)
9648 {
9649 	u32 reg;
9650 
9651-	reg = readl(dp->reg_base + ANALOGIX_DP_TRAINING_PTN_SET);
9652+	reg = analogix_dp_read(dp, ANALOGIX_DP_TRAINING_PTN_SET);
9653 	reg &= ~SCRAMBLING_DISABLE;
9654-	writel(reg, dp->reg_base + ANALOGIX_DP_TRAINING_PTN_SET);
9655+	analogix_dp_write(dp, ANALOGIX_DP_TRAINING_PTN_SET, reg);
9656 }
9657 
9658 void analogix_dp_disable_scrambling(struct analogix_dp_device *dp)
9659 {
9660 	u32 reg;
9661 
9662-	reg = readl(dp->reg_base + ANALOGIX_DP_TRAINING_PTN_SET);
9663+	reg = analogix_dp_read(dp, ANALOGIX_DP_TRAINING_PTN_SET);
9664 	reg |= SCRAMBLING_DISABLE;
9665-	writel(reg, dp->reg_base + ANALOGIX_DP_TRAINING_PTN_SET);
9666+	analogix_dp_write(dp, ANALOGIX_DP_TRAINING_PTN_SET, reg);
9667 }
9668 
9669 void analogix_dp_enable_psr_crc(struct analogix_dp_device *dp)
9670 {
9671-	writel(PSR_VID_CRC_ENABLE, dp->reg_base + ANALOGIX_DP_CRC_CON);
9672+	analogix_dp_write(dp, ANALOGIX_DP_CRC_CON, PSR_VID_CRC_ENABLE);
9673 }
9674 
9675 static ssize_t analogix_dp_get_psr_status(struct analogix_dp_device *dp)
9676@@ -1044,63 +986,53 @@ int analogix_dp_send_psr_spd(struct analogix_dp_device *dp,
9677 	ssize_t psr_status;
9678 
9679 	/* don't send info frame */
9680-	val = readl(dp->reg_base + ANALOGIX_DP_PKT_SEND_CTL);
9681+	val = analogix_dp_read(dp, ANALOGIX_DP_PKT_SEND_CTL);
9682 	val &= ~IF_EN;
9683-	writel(val, dp->reg_base + ANALOGIX_DP_PKT_SEND_CTL);
9684+	analogix_dp_write(dp, ANALOGIX_DP_PKT_SEND_CTL, val);
9685 
9686 	/* configure single frame update mode */
9687-	writel(PSR_FRAME_UP_TYPE_BURST | PSR_CRC_SEL_HARDWARE,
9688-	       dp->reg_base + ANALOGIX_DP_PSR_FRAME_UPDATE_CTRL);
9689+	analogix_dp_write(dp, ANALOGIX_DP_PSR_FRAME_UPDATE_CTRL,
9690+			  PSR_FRAME_UP_TYPE_BURST | PSR_CRC_SEL_HARDWARE);
9691 
9692 	/* configure VSC HB0~HB3 */
9693-	writel(vsc->sdp_header.HB0, dp->reg_base + ANALOGIX_DP_SPD_HB0);
9694-	writel(vsc->sdp_header.HB1, dp->reg_base + ANALOGIX_DP_SPD_HB1);
9695-	writel(vsc->sdp_header.HB2, dp->reg_base + ANALOGIX_DP_SPD_HB2);
9696-	writel(vsc->sdp_header.HB3, dp->reg_base + ANALOGIX_DP_SPD_HB3);
9697+	analogix_dp_write(dp, ANALOGIX_DP_SPD_HB0, vsc->sdp_header.HB0);
9698+	analogix_dp_write(dp, ANALOGIX_DP_SPD_HB1, vsc->sdp_header.HB1);
9699+	analogix_dp_write(dp, ANALOGIX_DP_SPD_HB2, vsc->sdp_header.HB2);
9700+	analogix_dp_write(dp, ANALOGIX_DP_SPD_HB3, vsc->sdp_header.HB3);
9701 
9702 	/* configure reused VSC PB0~PB3, magic number from vendor */
9703-	writel(0x00, dp->reg_base + ANALOGIX_DP_SPD_PB0);
9704-	writel(0x16, dp->reg_base + ANALOGIX_DP_SPD_PB1);
9705-	writel(0xCE, dp->reg_base + ANALOGIX_DP_SPD_PB2);
9706-	writel(0x5D, dp->reg_base + ANALOGIX_DP_SPD_PB3);
9707+	analogix_dp_write(dp, ANALOGIX_DP_SPD_PB0, 0x00);
9708+	analogix_dp_write(dp, ANALOGIX_DP_SPD_PB1, 0x16);
9709+	analogix_dp_write(dp, ANALOGIX_DP_SPD_PB2, 0xCE);
9710+	analogix_dp_write(dp, ANALOGIX_DP_SPD_PB3, 0x5D);
9711 
9712 	/* configure DB0 / DB1 values */
9713-	writel(vsc->db[0], dp->reg_base + ANALOGIX_DP_VSC_SHADOW_DB0);
9714-	writel(vsc->db[1], dp->reg_base + ANALOGIX_DP_VSC_SHADOW_DB1);
9715+	analogix_dp_write(dp, ANALOGIX_DP_VSC_SHADOW_DB0, vsc->db[0]);
9716+	analogix_dp_write(dp, ANALOGIX_DP_VSC_SHADOW_DB1, vsc->db[1]);
9717 
9718 	/* set reuse SPD infoframe */
9719-	val = readl(dp->reg_base + ANALOGIX_DP_VIDEO_CTL_3);
9720+	val = analogix_dp_read(dp, ANALOGIX_DP_VIDEO_CTL_3);
9721 	val |= REUSE_SPD_EN;
9722-	writel(val, dp->reg_base + ANALOGIX_DP_VIDEO_CTL_3);
9723+	analogix_dp_write(dp, ANALOGIX_DP_VIDEO_CTL_3, val);
9724 
9725 	/* mark info frame update */
9726-	val = readl(dp->reg_base + ANALOGIX_DP_PKT_SEND_CTL);
9727+	val = analogix_dp_read(dp, ANALOGIX_DP_PKT_SEND_CTL);
9728 	val = (val | IF_UP) & ~IF_EN;
9729-	writel(val, dp->reg_base + ANALOGIX_DP_PKT_SEND_CTL);
9730+	analogix_dp_write(dp, ANALOGIX_DP_PKT_SEND_CTL, val);
9731 
9732 	/* send info frame */
9733-	val = readl(dp->reg_base + ANALOGIX_DP_PKT_SEND_CTL);
9734+	val = analogix_dp_read(dp, ANALOGIX_DP_PKT_SEND_CTL);
9735 	val |= IF_EN;
9736-	writel(val, dp->reg_base + ANALOGIX_DP_PKT_SEND_CTL);
9737+	analogix_dp_write(dp, ANALOGIX_DP_PKT_SEND_CTL, val);
9738 
9739 	if (!blocking)
9740 		return 0;
9741 
9742-	/*
9743-	 * db[1]!=0: entering PSR, wait for fully active remote frame buffer.
9744-	 * db[1]==0: exiting PSR, wait for either
9745-	 *  (a) ACTIVE_RESYNC - the sink "must display the
9746-	 *      incoming active frames from the Source device with no visible
9747-	 *      glitches and/or artifacts", even though timings may still be
9748-	 *      re-synchronizing; or
9749-	 *  (b) INACTIVE - the transition is fully complete.
9750-	 */
9751 	ret = readx_poll_timeout(analogix_dp_get_psr_status, dp, psr_status,
9752 		psr_status >= 0 &&
9753 		((vsc->db[1] && psr_status == DP_PSR_SINK_ACTIVE_RFB) ||
9754-		(!vsc->db[1] && (psr_status == DP_PSR_SINK_ACTIVE_RESYNC ||
9755-				 psr_status == DP_PSR_SINK_INACTIVE))),
9756-		1500, DP_TIMEOUT_PSR_LOOP_MS * 1000);
9757+		(!vsc->db[1] && psr_status == DP_PSR_SINK_INACTIVE)), 1500,
9758+		DP_TIMEOUT_PSR_LOOP_MS * 1000);
9759 	if (ret) {
9760 		dev_warn(dp->dev, "Failed to apply PSR %d\n", ret);
9761 		return ret;
9762@@ -1108,11 +1040,43 @@ int analogix_dp_send_psr_spd(struct analogix_dp_device *dp,
9763 	return 0;
9764 }
9765 
9766+void analogix_dp_phy_power_on(struct analogix_dp_device *dp)
9767+{
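+	/* dp->phy_enabled keeps phy_power_on()/phy_power_off() calls balanced */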
9768+	if (dp->phy_enabled)
9769+		return;
9770+
9771+	phy_set_mode(dp->phy, PHY_MODE_DP);
9772+	phy_power_on(dp->phy);
9773+
9774+	dp->phy_enabled = true;
9775+}
9776+
9777+void analogix_dp_phy_power_off(struct analogix_dp_device *dp)
9778+{
9779+	if (!dp->phy_enabled)
9780+		return;
9781+
9782+	phy_power_off(dp->phy);
9783+
9784+	dp->phy_enabled = false;
9785+}
9786+
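+/* Values of the AUX_STATUS field in ANALOGIX_DP_AUX_CH_STA */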
9787+enum {
9788+	AUX_STATUS_OK,
9789+	AUX_STATUS_NACK_ERROR,
9790+	AUX_STATUS_TIMEOUT_ERROR,
9791+	AUX_STATUS_UNKNOWN_ERROR,
9792+	AUX_STATUS_MUCH_DEFER_ERROR,
9793+	AUX_STATUS_TX_SHORT_ERROR,
9794+	AUX_STATUS_RX_SHORT_ERROR,
9795+	AUX_STATUS_NACK_WITHOUT_M_ERROR,
9796+	AUX_STATUS_I2C_NACK_ERROR
9797+};
9798+
9799 ssize_t analogix_dp_transfer(struct analogix_dp_device *dp,
9800 			     struct drm_dp_aux_msg *msg)
9801 {
9802 	u32 reg;
9803-	u32 status_reg;
9804 	u8 *buffer = msg->buffer;
9805 	unsigned int i;
9806 	int num_transferred = 0;
9807@@ -1122,9 +1086,15 @@ ssize_t analogix_dp_transfer(struct analogix_dp_device *dp,
9808 	if (WARN_ON(msg->size > 16))
9809 		return -E2BIG;
9810 
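+	/* If the AUX channel is still disabled, power up the PHY and re-init AUX */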
9811+	reg = analogix_dp_read(dp, ANALOGIX_DP_FUNC_EN_2);
9812+	if (reg & AUX_FUNC_EN_N) {
9813+		analogix_dp_phy_power_on(dp);
9814+		analogix_dp_init_aux(dp);
9815+	}
9816+
9817 	/* Clear AUX CH data buffer */
9818 	reg = BUF_CLR;
9819-	writel(reg, dp->reg_base + ANALOGIX_DP_BUFFER_DATA_CTL);
9820+	analogix_dp_write(dp, ANALOGIX_DP_BUFFER_DATA_CTL, reg);
9821 
9822 	switch (msg->request & ~DP_AUX_I2C_MOT) {
9823 	case DP_AUX_I2C_WRITE:
9824@@ -1152,21 +1122,21 @@ ssize_t analogix_dp_transfer(struct analogix_dp_device *dp,
9825 	}
9826 
9827 	reg |= AUX_LENGTH(msg->size);
9828-	writel(reg, dp->reg_base + ANALOGIX_DP_AUX_CH_CTL_1);
9829+	analogix_dp_write(dp, ANALOGIX_DP_AUX_CH_CTL_1, reg);
9830 
9831 	/* Select DPCD device address */
9832 	reg = AUX_ADDR_7_0(msg->address);
9833-	writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_7_0);
9834+	analogix_dp_write(dp, ANALOGIX_DP_AUX_ADDR_7_0, reg);
9835 	reg = AUX_ADDR_15_8(msg->address);
9836-	writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_15_8);
9837+	analogix_dp_write(dp, ANALOGIX_DP_AUX_ADDR_15_8, reg);
9838 	reg = AUX_ADDR_19_16(msg->address);
9839-	writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_19_16);
9840+	analogix_dp_write(dp, ANALOGIX_DP_AUX_ADDR_19_16, reg);
9841 
9842 	if (!(msg->request & DP_AUX_I2C_READ)) {
9843 		for (i = 0; i < msg->size; i++) {
9844 			reg = buffer[i];
9845-			writel(reg, dp->reg_base + ANALOGIX_DP_BUF_DATA_0 +
9846-			       4 * i);
9847+			analogix_dp_write(dp, ANALOGIX_DP_BUF_DATA_0 + 4 * i,
9848+					  reg);
9849 			num_transferred++;
9850 		}
9851 	}
9852@@ -1178,7 +1148,7 @@ ssize_t analogix_dp_transfer(struct analogix_dp_device *dp,
9853 	if (msg->size < 1)
9854 		reg |= ADDR_ONLY;
9855 
9856-	writel(reg, dp->reg_base + ANALOGIX_DP_AUX_CH_CTL_2);
9857+	analogix_dp_write(dp, ANALOGIX_DP_AUX_CH_CTL_2, reg);
9858 
9859 	ret = readx_poll_timeout(readl, dp->reg_base + ANALOGIX_DP_AUX_CH_CTL_2,
9860 				 reg, !(reg & AUX_EN), 25, 500 * 1000);
9861@@ -1197,30 +1167,31 @@ ssize_t analogix_dp_transfer(struct analogix_dp_device *dp,
9862 	}
9863 
9864 	/* Clear interrupt source for AUX CH command reply */
9865-	writel(RPLY_RECEIV, dp->reg_base + ANALOGIX_DP_INT_STA);
9866-
9867-	/* Clear interrupt source for AUX CH access error */
9868-	reg = readl(dp->reg_base + ANALOGIX_DP_INT_STA);
9869-	status_reg = readl(dp->reg_base + ANALOGIX_DP_AUX_CH_STA);
9870-	if ((reg & AUX_ERR) || (status_reg & AUX_STATUS_MASK)) {
9871-		writel(AUX_ERR, dp->reg_base + ANALOGIX_DP_INT_STA);
9872+	analogix_dp_write(dp, ANALOGIX_DP_INT_STA, RPLY_RECEIV);
9873 
9874-		dev_warn(dp->dev, "AUX CH error happened: %#x (%d)\n",
9875-			 status_reg & AUX_STATUS_MASK, !!(reg & AUX_ERR));
9876-		goto aux_error;
9877-	}
9878+	reg = analogix_dp_read(dp, ANALOGIX_DP_AUX_CH_STA);
9879+	if ((reg & AUX_STATUS_MASK) == AUX_STATUS_TIMEOUT_ERROR)
9880+		return -ETIMEDOUT;
9881 
9882 	if (msg->request & DP_AUX_I2C_READ) {
9883+		size_t buf_data_count;
9884+
9885+		reg = analogix_dp_read(dp, ANALOGIX_DP_BUFFER_DATA_CTL);
9886+		buf_data_count = BUF_DATA_COUNT(reg);
9887+
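+		/* The RX buffer must hold exactly the requested number of bytes */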
9888+		if (buf_data_count != msg->size)
9889+			return -EBUSY;
9890+
9891 		for (i = 0; i < msg->size; i++) {
9892-			reg = readl(dp->reg_base + ANALOGIX_DP_BUF_DATA_0 +
9893-				    4 * i);
9894+			reg = analogix_dp_read(dp, ANALOGIX_DP_BUF_DATA_0 +
9895+					       4 * i);
9896 			buffer[i] = (unsigned char)reg;
9897 			num_transferred++;
9898 		}
9899 	}
9900 
9901 	/* Check if Rx sends defer */
9902-	reg = readl(dp->reg_base + ANALOGIX_DP_AUX_RX_COMM);
9903+	reg = analogix_dp_read(dp, ANALOGIX_DP_AUX_RX_COMM);
9904 	if (reg == AUX_RX_COMM_AUX_DEFER)
9905 		msg->reply = DP_AUX_NATIVE_REPLY_DEFER;
9906 	else if (reg == AUX_RX_COMM_I2C_DEFER)
9907@@ -1232,7 +1203,7 @@ ssize_t analogix_dp_transfer(struct analogix_dp_device *dp,
9908 		 (msg->request & ~DP_AUX_I2C_MOT) == DP_AUX_NATIVE_READ)
9909 		msg->reply = DP_AUX_NATIVE_REPLY_ACK;
9910 
9911-	return num_transferred > 0 ? num_transferred : -EBUSY;
9912+	return (num_transferred == msg->size) ? num_transferred : -EBUSY;
9913 
9914 aux_error:
9915 	/* if aux err happen, reset aux */
9916@@ -1240,3 +1211,119 @@ ssize_t analogix_dp_transfer(struct analogix_dp_device *dp,
9917 
9918 	return -EREMOTEIO;
9919 }
9920+
9921+void analogix_dp_set_video_format(struct analogix_dp_device *dp)
9922+{
9923+	struct video_info *video = &dp->video_info;
9924+	const struct drm_display_mode *mode = &video->mode;
9925+	unsigned int hsw, hfp, hbp, vsw, vfp, vbp;
9926+
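+	/* Derive sync width and front/back porch lengths from the mode timings */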
9927+	hsw = mode->hsync_end - mode->hsync_start;
9928+	hfp = mode->hsync_start - mode->hdisplay;
9929+	hbp = mode->htotal - mode->hsync_end;
9930+	vsw = mode->vsync_end - mode->vsync_start;
9931+	vfp = mode->vsync_start - mode->vdisplay;
9932+	vbp = mode->vtotal - mode->vsync_end;
9933+
9934+	/* Set Video Format Parameters */
9935+	analogix_dp_write(dp, ANALOGIX_DP_TOTAL_LINE_CFG_L,
9936+			  TOTAL_LINE_CFG_L(mode->vtotal));
9937+	analogix_dp_write(dp, ANALOGIX_DP_TOTAL_LINE_CFG_H,
9938+			  TOTAL_LINE_CFG_H(mode->vtotal >> 8));
9939+	analogix_dp_write(dp, ANALOGIX_DP_ACTIVE_LINE_CFG_L,
9940+			  ACTIVE_LINE_CFG_L(mode->vdisplay));
9941+	analogix_dp_write(dp, ANALOGIX_DP_ACTIVE_LINE_CFG_H,
9942+			  ACTIVE_LINE_CFG_H(mode->vdisplay >> 8));
9943+	analogix_dp_write(dp, ANALOGIX_DP_V_F_PORCH_CFG,
9944+			  V_F_PORCH_CFG(vfp));
9945+	analogix_dp_write(dp, ANALOGIX_DP_V_SYNC_WIDTH_CFG,
9946+			  V_SYNC_WIDTH_CFG(vsw));
9947+	analogix_dp_write(dp, ANALOGIX_DP_V_B_PORCH_CFG,
9948+			  V_B_PORCH_CFG(vbp));
9949+	analogix_dp_write(dp, ANALOGIX_DP_TOTAL_PIXEL_CFG_L,
9950+			  TOTAL_PIXEL_CFG_L(mode->htotal));
9951+	analogix_dp_write(dp, ANALOGIX_DP_TOTAL_PIXEL_CFG_H,
9952+			  TOTAL_PIXEL_CFG_H(mode->htotal >> 8));
9953+	analogix_dp_write(dp, ANALOGIX_DP_ACTIVE_PIXEL_CFG_L,
9954+			  ACTIVE_PIXEL_CFG_L(mode->hdisplay));
9955+	analogix_dp_write(dp, ANALOGIX_DP_ACTIVE_PIXEL_CFG_H,
9956+			  ACTIVE_PIXEL_CFG_H(mode->hdisplay >> 8));
9957+	analogix_dp_write(dp, ANALOGIX_DP_H_F_PORCH_CFG_L,
9958+			  H_F_PORCH_CFG_L(hfp));
9959+	analogix_dp_write(dp, ANALOGIX_DP_H_F_PORCH_CFG_H,
9960+			  H_F_PORCH_CFG_H(hfp >> 8));
9961+	analogix_dp_write(dp, ANALOGIX_DP_H_SYNC_CFG_L,
9962+			  H_SYNC_CFG_L(hsw));
9963+	analogix_dp_write(dp, ANALOGIX_DP_H_SYNC_CFG_H,
9964+			  H_SYNC_CFG_H(hsw >> 8));
9965+	analogix_dp_write(dp, ANALOGIX_DP_H_B_PORCH_CFG_L,
9966+			  H_B_PORCH_CFG_L(hbp));
9967+	analogix_dp_write(dp, ANALOGIX_DP_H_B_PORCH_CFG_H,
9968+			  H_B_PORCH_CFG_H(hbp >> 8));
9969+}
9970+
9971+void analogix_dp_video_bist_enable(struct analogix_dp_device *dp)
9972+{
9973+	u32 reg;
9974+
9975+	/* Enable Video BIST */
9976+	analogix_dp_write(dp, ANALOGIX_DP_VIDEO_CTL_4, BIST_EN);
9977+
9978+	/*
9979+	 * Note that when BIST_EN is set to 1, FORMAT_SEL must be cleared to 0
9980+	 * even though the video format comes from user-programmed registers.
9981+	 */
9982+	reg = analogix_dp_read(dp, ANALOGIX_DP_VIDEO_CTL_10);
9983+	reg &= ~FORMAT_SEL;
9984+	analogix_dp_write(dp, ANALOGIX_DP_VIDEO_CTL_10, reg);
9985+}
9986+
9987+void analogix_dp_audio_config_i2s(struct analogix_dp_device *dp)
9988+{
9989+	u32 reg;
9990+
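+	/* Use the automatically generated M_AUD value (clear fixed-M mode) */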
9991+	reg = analogix_dp_read(dp, ANALOGIX_DP_SYS_CTL_4);
9992+	reg &= ~FIX_M_AUD;
9993+	analogix_dp_write(dp, ANALOGIX_DP_SYS_CTL_4, reg);
9994+
9995+	reg = analogix_dp_read(dp, ANALOGIX_DP_I2S_CTRL);
9996+	reg |= I2S_EN;
9997+	analogix_dp_write(dp, ANALOGIX_DP_I2S_CTRL, reg);
9998+}
9999+
10000+void analogix_dp_audio_config_spdif(struct analogix_dp_device *dp)
10001+{
10002+	u32 reg;
10003+
10004+	reg = analogix_dp_read(dp, ANALOGIX_DP_SYS_CTL_4);
10005+	reg &= ~FIX_M_AUD;
10006+	analogix_dp_write(dp, ANALOGIX_DP_SYS_CTL_4, reg);
10007+
10008+	reg = analogix_dp_read(dp, ANALOGIX_DP_SPDIF_AUDIO_CTL_0);
10009+	reg |= AUD_SPDIF_EN;
10010+	analogix_dp_write(dp, ANALOGIX_DP_SPDIF_AUDIO_CTL_0, reg);
10011+}
10012+
10013+void analogix_dp_audio_enable(struct analogix_dp_device *dp)
10014+{
10015+	u32 reg;
10016+
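+	/* The _FUNC_EN_N bits are active low: clear them to enable audio */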
10017+	reg = analogix_dp_read(dp, ANALOGIX_DP_FUNC_EN_1);
10018+	reg &= ~(AUD_FIFO_FUNC_EN_N | AUD_FUNC_EN_N);
10019+	analogix_dp_write(dp, ANALOGIX_DP_FUNC_EN_1, reg);
10020+
10021+	reg = analogix_dp_read(dp, ANALOGIX_DP_AUD_CTL);
10022+	reg |= MISC_CTRL_RESET | DP_AUDIO_EN;
10023+	analogix_dp_write(dp, ANALOGIX_DP_AUD_CTL, reg);
10024+}
10025+
10026+void analogix_dp_audio_disable(struct analogix_dp_device *dp)
10027+{
10028+	u32 reg;
10029+
10030+	analogix_dp_write(dp, ANALOGIX_DP_AUD_CTL, 0);
10031+
10032+	reg = analogix_dp_read(dp, ANALOGIX_DP_FUNC_EN_1);
10033+	reg |= AUD_FIFO_FUNC_EN_N | AUD_FUNC_EN_N;
10034+	analogix_dp_write(dp, ANALOGIX_DP_FUNC_EN_1, reg);
10035+}
10036diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.h b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.h
10037index e284ee8da..df88f1ad0 100644
10038--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.h
10039+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.h
10040@@ -15,9 +15,27 @@
10041 #define ANALOGIX_DP_VIDEO_CTL_1			0x20
10042 #define ANALOGIX_DP_VIDEO_CTL_2			0x24
10043 #define ANALOGIX_DP_VIDEO_CTL_3			0x28
10044+#define ANALOGIX_DP_VIDEO_CTL_4			0x2C
10045 
10046 #define ANALOGIX_DP_VIDEO_CTL_8			0x3C
10047 #define ANALOGIX_DP_VIDEO_CTL_10		0x44
10048+#define ANALOGIX_DP_TOTAL_LINE_CFG_L		0x48
10049+#define ANALOGIX_DP_TOTAL_LINE_CFG_H		0x4C
10050+#define ANALOGIX_DP_ACTIVE_LINE_CFG_L		0x50
10051+#define ANALOGIX_DP_ACTIVE_LINE_CFG_H		0x54
10052+#define ANALOGIX_DP_V_F_PORCH_CFG		0x58
10053+#define ANALOGIX_DP_V_SYNC_WIDTH_CFG		0x5C
10054+#define ANALOGIX_DP_V_B_PORCH_CFG		0x60
10055+#define ANALOGIX_DP_TOTAL_PIXEL_CFG_L		0x64
10056+#define ANALOGIX_DP_TOTAL_PIXEL_CFG_H		0x68
10057+#define ANALOGIX_DP_ACTIVE_PIXEL_CFG_L		0x6C
10058+#define ANALOGIX_DP_ACTIVE_PIXEL_CFG_H		0x70
10059+#define ANALOGIX_DP_H_F_PORCH_CFG_L		0x74
10060+#define ANALOGIX_DP_H_F_PORCH_CFG_H		0x78
10061+#define ANALOGIX_DP_H_SYNC_CFG_L		0x7C
10062+#define ANALOGIX_DP_H_SYNC_CFG_H		0x80
10063+#define ANALOGIX_DP_H_B_PORCH_CFG_L		0x84
10064+#define ANALOGIX_DP_H_B_PORCH_CFG_H		0x88
10065 
10066 #define ANALOGIX_DP_SPDIF_AUDIO_CTL_0		0xD8
10067 
10068@@ -27,6 +45,8 @@
10069 #define ANALOGIX_DP_PLL_REG_4			0x9ec
10070 #define ANALOGIX_DP_PLL_REG_5			0xa00
10071 
10072+#define ANALOIGX_DP_SSC_REG			0x104
10073+#define ANALOGIX_DP_BIAS			0x124
10074 #define ANALOGIX_DP_PD				0x12c
10075 
10076 #define ANALOGIX_DP_IF_TYPE			0x244
10077@@ -70,7 +90,7 @@
10078 #define ANALOGIX_DP_SYS_CTL_2			0x604
10079 #define ANALOGIX_DP_SYS_CTL_3			0x608
10080 #define ANALOGIX_DP_SYS_CTL_4			0x60C
10081-
10082+#define ANALOGIX_DP_AUD_CTL			0x618
10083 #define ANALOGIX_DP_PKT_SEND_CTL		0x640
10084 #define ANALOGIX_DP_HDCP_CTL			0x648
10085 
10086@@ -116,8 +136,9 @@
10087 #define ANALOGIX_DP_BUF_DATA_0			0x7C0
10088 
10089 #define ANALOGIX_DP_SOC_GENERAL_CTL		0x800
10090-
10091+#define ANALOGIX_DP_AUD_CHANNEL_CTL		0x834
10092 #define ANALOGIX_DP_CRC_CON			0x890
10093+#define ANALOGIX_DP_I2S_CTRL			0x9C8
10094 
10095 /* ANALOGIX_DP_TX_SW_RESET */
10096 #define RESET_DP_TX				(0x1 << 0)
10097@@ -171,6 +192,11 @@
10098 #define VID_CHK_UPDATE_TYPE_0			(0x0 << 4)
10099 #define REUSE_SPD_EN				(0x1 << 3)
10100 
10101+/* ANALOGIX_DP_VIDEO_CTL_4 */
10102+#define BIST_EN					(0x1 << 3)
10103+#define BIST_WIDTH(x)				(((x) & 0x1) << 2)
10104+#define BIST_TYPE(x)				(((x) & 0x3) << 0)
10105+
10106 /* ANALOGIX_DP_VIDEO_CTL_8 */
10107 #define VID_HRES_TH(x)				(((x) & 0xf) << 4)
10108 #define VID_VRES_TH(x)				(((x) & 0xf) << 0)
10109@@ -181,6 +207,60 @@
10110 #define VSYNC_POLARITY_CFG			(0x1 << 1)
10111 #define HSYNC_POLARITY_CFG			(0x1 << 0)
10112 
10113+/* ANALOGIX_DP_TOTAL_LINE_CFG_L */
10114+#define TOTAL_LINE_CFG_L(x)			(((x) & 0xff) << 0)
10115+
10116+/* ANALOGIX_DP_TOTAL_LINE_CFG_H */
10117+#define TOTAL_LINE_CFG_H(x)			(((x) & 0xf) << 0)
10118+
10119+/* ANALOGIX_DP_ACTIVE_LINE_CFG_L */
10120+#define ACTIVE_LINE_CFG_L(x)			(((x) & 0xff) << 0)
10121+
10122+/* ANALOGIX_DP_ACTIVE_LINE_CFG_H */
10123+#define ACTIVE_LINE_CFG_H(x)			(((x) & 0xf) << 0)
10124+
10125+/* ANALOGIX_DP_V_F_PORCH_CFG */
10126+#define V_F_PORCH_CFG(x)			(((x) & 0xff) << 0)
10127+
10128+/* ANALOGIX_DP_V_SYNC_WIDTH_CFG */
10129+#define V_SYNC_WIDTH_CFG(x)			(((x) & 0xff) << 0)
10130+
10131+/* ANALOGIX_DP_V_B_PORCH_CFG */
10132+#define V_B_PORCH_CFG(x)			(((x) & 0xff) << 0)
10133+
10134+/* ANALOGIX_DP_TOTAL_PIXEL_CFG_L */
10135+#define TOTAL_PIXEL_CFG_L(x)			(((x) & 0xff) << 0)
10136+
10137+/* ANALOGIX_DP_TOTAL_PIXEL_CFG_H */
10138+#define TOTAL_PIXEL_CFG_H(x)			(((x) & 0x3f) << 0)
10139+
10140+/* ANALOGIX_DP_ACTIVE_PIXEL_CFG_L */
10141+#define ACTIVE_PIXEL_CFG_L(x)			(((x) & 0xff) << 0)
10142+
10143+/* ANALOGIX_DP_ACTIVE_PIXEL_CFG_H */
10144+#define ACTIVE_PIXEL_CFG_H(x)			(((x) & 0x3f) << 0)
10145+
10146+/* ANALOGIX_DP_H_F_PORCH_CFG_L */
10147+#define H_F_PORCH_CFG_L(x)			(((x) & 0xff) << 0)
10148+
10149+/* ANALOGIX_DP_H_F_PORCH_CFG_H */
10150+#define H_F_PORCH_CFG_H(x)			(((x) & 0xf) << 0)
10151+
10152+/* ANALOGIX_DP_H_SYNC_CFG_L */
10153+#define H_SYNC_CFG_L(x)				(((x) & 0xff) << 0)
10154+
10155+/* ANALOGIX_DP_H_SYNC_CFG_H */
10156+#define H_SYNC_CFG_H(x)				(((x) & 0xf) << 0)
10157+
10158+/* ANALOGIX_DP_H_B_PORCH_CFG_L */
10159+#define H_B_PORCH_CFG_L(x)			(((x) & 0xff) << 0)
10160+
10161+/* ANALOGIX_DP_H_B_PORCH_CFG_H */
10162+#define H_B_PORCH_CFG_H(x)			(((x) & 0xf) << 0)
10163+
10164+/* ANALOGIX_DP_SPDIF_AUDIO_CTL_0 */
10165+#define AUD_SPDIF_EN				(0x1 << 7)
10166+
10167 /* ANALOGIX_DP_PLL_REG_1 */
10168 #define REF_CLK_24M				(0x1 << 0)
10169 #define REF_CLK_27M				(0x0 << 0)
10170@@ -309,6 +389,10 @@
10171 #define FIX_M_VID				(0x1 << 2)
10172 #define M_VID_UPDATE_CTRL			(0x3 << 0)
10173 
10174+/* ANALOGIX_DP_AUD_CTL */
10175+#define MISC_CTRL_RESET				(0x1 << 4)
10176+#define DP_AUDIO_EN				(0x1 << 0)
10177+
10178 /* ANALOGIX_DP_TRAINING_PTN_SET */
10179 #define SCRAMBLER_TYPE				(0x1 << 9)
10180 #define HW_LINK_TRAINING_PATTERN		(0x1 << 8)
10181@@ -319,6 +403,7 @@
10182 #define LINK_QUAL_PATTERN_SET_D10_2		(0x1 << 2)
10183 #define LINK_QUAL_PATTERN_SET_DISABLE		(0x0 << 2)
10184 #define SW_TRAINING_PATTERN_SET_MASK		(0x3 << 0)
10185+#define SW_TRAINING_PATTERN_SET_PTN3		(0x3 << 0)
10186 #define SW_TRAINING_PATTERN_SET_PTN2		(0x2 << 0)
10187 #define SW_TRAINING_PATTERN_SET_PTN1		(0x1 << 0)
10188 #define SW_TRAINING_PATTERN_SET_NORMAL		(0x0 << 0)
10189@@ -406,6 +491,11 @@
10190 #define VIDEO_MODE_SLAVE_MODE			(0x1 << 0)
10191 #define VIDEO_MODE_MASTER_MODE			(0x0 << 0)
10192 
10193+/* ANALOGIX_DP_AUD_CHANNEL_CTL */
10194+#define AUD_CHANNEL_COUNT_6			(0x5 << 0)
10195+#define AUD_CHANNEL_COUNT_4			(0x3 << 0)
10196+#define AUD_CHANNEL_COUNT_2			(0x1 << 0)
10197+
10198 /* ANALOGIX_DP_PKT_SEND_CTL */
10199 #define IF_UP					(0x1 << 4)
10200 #define IF_EN					(0x1 << 0)
10201@@ -414,4 +504,7 @@
10202 #define PSR_VID_CRC_FLUSH			(0x1 << 2)
10203 #define PSR_VID_CRC_ENABLE			(0x1 << 0)
10204 
10205+/* ANALOGIX_DP_I2S_CTRL */
10206+#define I2S_EN					(0x1 << 4)
10207+
10208 #endif /* _ANALOGIX_DP_REG_H */
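Editorial note: for readers unfamiliar with these shift-and-mask helpers, a small illustrative write using the new VIDEO_CTL_4 fields. The values are arbitrary and the function is hypothetical; it is not part of the patch.

/* Illustration only: compose an arbitrary BIST configuration. */
static void example_enable_video_bist(struct analogix_dp_device *dp)
{
	u32 reg = BIST_EN | BIST_WIDTH(1) | BIST_TYPE(2);

	analogix_dp_write(dp, ANALOGIX_DP_VIDEO_CTL_4, reg);
}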
10209diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-audio.h b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-audio.h
10210index f72d27208..20c818225 100644
10211--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-audio.h
10212+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-audio.h
10213@@ -10,10 +10,12 @@ struct dw_hdmi_audio_data {
10214 	int irq;
10215 	struct dw_hdmi *hdmi;
10216 	u8 *(*get_eld)(struct dw_hdmi *hdmi);
10217+	u8 *eld;
10218 };
10219 
10220 struct dw_hdmi_i2s_audio_data {
10221 	struct dw_hdmi *hdmi;
10222+	u8 *eld;
10223 
10224 	void (*write)(struct dw_hdmi *hdmi, u8 val, int offset);
10225 	u8 (*read)(struct dw_hdmi *hdmi, int offset);
10226diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.c
10227index 70ab4fbdc..48fc36d56 100644
10228--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.c
10229+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.c
10230@@ -12,6 +12,7 @@
10231 #include <linux/slab.h>
10232 
10233 #include <drm/drm_edid.h>
10234+#include <drm/bridge/dw_hdmi.h>
10235 
10236 #include <media/cec.h>
10237 #include <media/cec-notifier.h>
10238@@ -262,6 +263,8 @@ static int dw_hdmi_cec_probe(struct platform_device *pdev)
10239 	if (IS_ERR(cec->adap))
10240 		return PTR_ERR(cec->adap);
10241 
10242+	dw_hdmi_set_cec_adap(cec->hdmi, cec->adap);
10243+
10244 	/* override the module pointer */
10245 	cec->adap->owner = THIS_MODULE;
10246 
10247diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
10248index 2c3c743df..268ecdf3c 100644
10249--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
10250+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
10251@@ -9,6 +9,8 @@
10252 #include <linux/clk.h>
10253 #include <linux/delay.h>
10254 #include <linux/err.h>
10255+#include <linux/extcon.h>
10256+#include <linux/extcon-provider.h>
10257 #include <linux/hdmi.h>
10258 #include <linux/irq.h>
10259 #include <linux/module.h>
10260@@ -18,6 +20,7 @@
10261 #include <linux/regmap.h>
10262 #include <linux/dma-mapping.h>
10263 #include <linux/spinlock.h>
10264+#include <linux/pinctrl/consumer.h>
10265 
10266 #include <media/cec-notifier.h>
10267 
10268@@ -36,6 +39,7 @@
10269 
10270 #include "dw-hdmi-audio.h"
10271 #include "dw-hdmi-cec.h"
10272+#include "dw-hdmi-hdcp.h"
10273 #include "dw-hdmi.h"
10274 
10275 #define DDC_CI_ADDR		0x37
10276@@ -48,6 +52,11 @@
10277 
10278 #define HDMI14_MAX_TMDSCLK	340000000
10279 
10280+static const unsigned int dw_hdmi_cable[] = {
10281+	EXTCON_DISP_HDMI,
10282+	EXTCON_NONE,
10283+};
10284+
10285 enum hdmi_datamap {
10286 	RGB444_8B = 0x01,
10287 	RGB444_10B = 0x03,
10288@@ -62,6 +71,61 @@ enum hdmi_datamap {
10289 	YCbCr422_12B = 0x12,
10290 };
10291 
10292+/*
10293+ * Unless otherwise noted, entries in this table are 100% optimization.
10294+ * Values can be obtained from hdmi_compute_n() but that function is
10295+ * slow so we pre-compute values we expect to see.
10296+ *
10297+ * All 32k and 48k values are expected to be the same (due to the way
10298+ * the math works) for any rate that's an exact kHz.
10299+ */
10300+static const struct dw_hdmi_audio_tmds_n common_tmds_n_table[] = {
10301+	{ .tmds = 25175000, .n_32k = 4096, .n_44k1 = 12854, .n_48k = 6144, },
10302+	{ .tmds = 25200000, .n_32k = 4096, .n_44k1 = 5656, .n_48k = 6144, },
10303+	{ .tmds = 27000000, .n_32k = 4096, .n_44k1 = 5488, .n_48k = 6144, },
10304+	{ .tmds = 28320000, .n_32k = 4096, .n_44k1 = 5586, .n_48k = 6144, },
10305+	{ .tmds = 30240000, .n_32k = 4096, .n_44k1 = 5642, .n_48k = 6144, },
10306+	{ .tmds = 31500000, .n_32k = 4096, .n_44k1 = 5600, .n_48k = 6144, },
10307+	{ .tmds = 32000000, .n_32k = 4096, .n_44k1 = 5733, .n_48k = 6144, },
10308+	{ .tmds = 33750000, .n_32k = 4096, .n_44k1 = 6272, .n_48k = 6144, },
10309+	{ .tmds = 36000000, .n_32k = 4096, .n_44k1 = 5684, .n_48k = 6144, },
10310+	{ .tmds = 40000000, .n_32k = 4096, .n_44k1 = 5733, .n_48k = 6144, },
10311+	{ .tmds = 49500000, .n_32k = 4096, .n_44k1 = 5488, .n_48k = 6144, },
10312+	{ .tmds = 50000000, .n_32k = 4096, .n_44k1 = 5292, .n_48k = 6144, },
10313+	{ .tmds = 54000000, .n_32k = 4096, .n_44k1 = 5684, .n_48k = 6144, },
10314+	{ .tmds = 65000000, .n_32k = 4096, .n_44k1 = 7056, .n_48k = 6144, },
10315+	{ .tmds = 68250000, .n_32k = 4096, .n_44k1 = 5376, .n_48k = 6144, },
10316+	{ .tmds = 71000000, .n_32k = 4096, .n_44k1 = 7056, .n_48k = 6144, },
10317+	{ .tmds = 72000000, .n_32k = 4096, .n_44k1 = 5635, .n_48k = 6144, },
10318+	{ .tmds = 73250000, .n_32k = 4096, .n_44k1 = 14112, .n_48k = 6144, },
10319+	{ .tmds = 74250000, .n_32k = 4096, .n_44k1 = 6272, .n_48k = 6144, },
10320+	{ .tmds = 75000000, .n_32k = 4096, .n_44k1 = 5880, .n_48k = 6144, },
10321+	{ .tmds = 78750000, .n_32k = 4096, .n_44k1 = 5600, .n_48k = 6144, },
10322+	{ .tmds = 78800000, .n_32k = 4096, .n_44k1 = 5292, .n_48k = 6144, },
10323+	{ .tmds = 79500000, .n_32k = 4096, .n_44k1 = 4704, .n_48k = 6144, },
10324+	{ .tmds = 83500000, .n_32k = 4096, .n_44k1 = 7056, .n_48k = 6144, },
10325+	{ .tmds = 85500000, .n_32k = 4096, .n_44k1 = 5488, .n_48k = 6144, },
10326+	{ .tmds = 88750000, .n_32k = 4096, .n_44k1 = 14112, .n_48k = 6144, },
10327+	{ .tmds = 97750000, .n_32k = 4096, .n_44k1 = 14112, .n_48k = 6144, },
10328+	{ .tmds = 101000000, .n_32k = 4096, .n_44k1 = 7056, .n_48k = 6144, },
10329+	{ .tmds = 106500000, .n_32k = 4096, .n_44k1 = 4704, .n_48k = 6144, },
10330+	{ .tmds = 108000000, .n_32k = 4096, .n_44k1 = 5684, .n_48k = 6144, },
10331+	{ .tmds = 115500000, .n_32k = 4096, .n_44k1 = 5712, .n_48k = 6144, },
10332+	{ .tmds = 119000000, .n_32k = 4096, .n_44k1 = 5544, .n_48k = 6144, },
10333+	{ .tmds = 135000000, .n_32k = 4096, .n_44k1 = 5488, .n_48k = 6144, },
10334+	{ .tmds = 146250000, .n_32k = 4096, .n_44k1 = 6272, .n_48k = 6144, },
10335+	{ .tmds = 148500000, .n_32k = 4096, .n_44k1 = 5488, .n_48k = 6144, },
10336+	{ .tmds = 154000000, .n_32k = 4096, .n_44k1 = 5544, .n_48k = 6144, },
10337+	{ .tmds = 162000000, .n_32k = 4096, .n_44k1 = 5684, .n_48k = 6144, },
10338+
10339+	/* For 297 MHz+ the HDMI spec has a different rule for setting N */
10340+	{ .tmds = 297000000, .n_32k = 3073, .n_44k1 = 4704, .n_48k = 5120, },
10341+	{ .tmds = 594000000, .n_32k = 3073, .n_44k1 = 9408, .n_48k = 10240, },
10342+
10343+	/* End of table */
10344+	{ .tmds = 0,         .n_32k = 0,    .n_44k1 = 0,    .n_48k = 0, },
10345+};
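Editorial note: the table above exists to make the Audio Clock Regeneration division exact. A minimal sketch of the relationship it is built around, CTS = f_TMDS * N / (128 * f_s); the helper name is illustrative and not part of the patch.

#include <linux/math64.h>

/*
 * Illustration only: an entry is a "good" N when this division is exact,
 * e.g. 148500000 * 6144 / (128 * 48000) = 148500 with no remainder.
 */
static u64 example_compute_cts(u64 tmds, unsigned int n, unsigned int fs)
{
	return div_u64(tmds * n, 128 * fs);
}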
10346+
10347 static const u16 csc_coeff_default[3][4] = {
10348 	{ 0x2000, 0x0000, 0x0000, 0x0000 },
10349 	{ 0x0000, 0x2000, 0x0000, 0x0000 },
10350@@ -98,12 +162,47 @@ static const u16 csc_coeff_rgb_full_to_rgb_limited[3][4] = {
10351 	{ 0x0000, 0x0000, 0x1b7c, 0x0020 }
10352 };
10353 
10354+static const struct drm_display_mode dw_hdmi_default_modes[] = {
10355+	/* 4 - 1280x720@60Hz 16:9 */
10356+	{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1390,
10357+		   1430, 1650, 0, 720, 725, 730, 750, 0,
10358+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
10359+	  .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
10360+	/* 16 - 1920x1080@60Hz 16:9 */
10361+	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
10362+		   2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
10363+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
10364+	  .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
10365+	/* 31 - 1920x1080@50Hz 16:9 */
10366+	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
10367+		   2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
10368+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
10369+	  .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
10370+	/* 19 - 1280x720@50Hz 16:9 */
10371+	{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1720,
10372+		   1760, 1980, 0, 720, 725, 730, 750, 0,
10373+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
10374+	  .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
10375+	/* 17 - 720x576@50Hz 4:3 */
10376+	{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
10377+		   796, 864, 0, 576, 581, 586, 625, 0,
10378+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
10379+	  .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
10380+	/* 2 - 720x480@60Hz 4:3 */
10381+	{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736,
10382+		   798, 858, 0, 480, 489, 495, 525, 0,
10383+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
10384+	  .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
10385+};
10386+
10387 struct hdmi_vmode {
10388 	bool mdataenablepolarity;
10389 
10390+	unsigned int previous_pixelclock;
10391 	unsigned int mpixelclock;
10392 	unsigned int mpixelrepetitioninput;
10393 	unsigned int mpixelrepetitionoutput;
10394+	unsigned int previous_tmdsclock;
10395 	unsigned int mtmdsclock;
10396 };
10397 
10398@@ -112,8 +211,8 @@ struct hdmi_data_info {
10399 	unsigned int enc_out_bus_format;
10400 	unsigned int enc_in_encoding;
10401 	unsigned int enc_out_encoding;
10402+	unsigned int quant_range;
10403 	unsigned int pix_repet_factor;
10404-	unsigned int hdcp_enable;
10405 	struct hdmi_vmode video_mode;
10406 	bool rgb_limited_range;
10407 };
10408@@ -128,6 +227,9 @@ struct dw_hdmi_i2c {
10409 	u8			slave_reg;
10410 	bool			is_regaddr;
10411 	bool			is_segment;
10412+
10413+	unsigned int		scl_high_ns;
10414+	unsigned int		scl_low_ns;
10415 };
10416 
10417 struct dw_hdmi_phy_data {
10418@@ -143,6 +245,8 @@ struct dw_hdmi_phy_data {
10419 struct dw_hdmi {
10420 	struct drm_connector connector;
10421 	struct drm_bridge bridge;
10422+	struct drm_bridge *next_bridge;
10423+	struct platform_device *hdcp_dev;
10424 
10425 	unsigned int version;
10426 
10427@@ -156,8 +260,10 @@ struct dw_hdmi {
10428 
10429 	struct hdmi_data_info hdmi_data;
10430 	const struct dw_hdmi_plat_data *plat_data;
10431+	struct dw_hdcp *hdcp;
10432 
10433 	int vic;
10434+	int irq;
10435 
10436 	u8 edid[HDMI_EDID_LEN];
10437 
10438@@ -174,6 +280,13 @@ struct dw_hdmi {
10439 	void __iomem *regs;
10440 	bool sink_is_hdmi;
10441 	bool sink_has_audio;
10442+	bool hpd_state;
10443+	bool support_hdmi;
10444+	bool force_logo;
10445+	int force_output;
10446+
10447+	struct delayed_work work;
10448+	struct workqueue_struct *workqueue;
10449 
10450 	struct pinctrl *pinctrl;
10451 	struct pinctrl_state *default_state;
10452@@ -190,10 +303,14 @@ struct dw_hdmi {
10453 
10454 	spinlock_t audio_lock;
10455 	struct mutex audio_mutex;
10456+	struct dentry *debugfs_dir;
10457 	unsigned int sample_rate;
10458 	unsigned int audio_cts;
10459 	unsigned int audio_n;
10460 	bool audio_enable;
10461+	bool scramble_low_rates;
10462+
10463+	struct extcon_dev *extcon;
10464 
10465 	unsigned int reg_shift;
10466 	struct regmap *regm;
10467@@ -202,10 +319,12 @@ struct dw_hdmi {
10468 
10469 	struct mutex cec_notifier_mutex;
10470 	struct cec_notifier *cec_notifier;
10471+	struct cec_adapter *cec_adap;
10472 
10473 	hdmi_codec_plugged_cb plugged_cb;
10474 	struct device *codec_dev;
10475 	enum drm_connector_status last_connector_result;
10476+	bool initialized;		/* hdmi is enabled before bind */
10477 };
10478 
10479 #define HDMI_IH_PHY_STAT0_RX_SENSE \
10480@@ -263,6 +382,124 @@ static void hdmi_mask_writeb(struct dw_hdmi *hdmi, u8 data, unsigned int reg,
10481 	hdmi_modb(hdmi, data << shift, mask, reg);
10482 }
10483 
10484+static bool dw_hdmi_check_output_type_changed(struct dw_hdmi *hdmi)
10485+{
10486+	bool sink_hdmi;
10487+
10488+	sink_hdmi = hdmi->sink_is_hdmi;
10489+
10490+	if (hdmi->force_output == 1)
10491+		hdmi->sink_is_hdmi = true;
10492+	else if (hdmi->force_output == 2)
10493+		hdmi->sink_is_hdmi = false;
10494+	else
10495+		hdmi->sink_is_hdmi = hdmi->support_hdmi;
10496+
10497+	if (sink_hdmi != hdmi->sink_is_hdmi)
10498+		return true;
10499+
10500+	return false;
10501+}
10502+
10503+static void repo_hpd_event(struct work_struct *p_work)
10504+{
10505+	struct dw_hdmi *hdmi = container_of(p_work, struct dw_hdmi, work.work);
10506+	enum drm_connector_status status = hdmi->hpd_state ?
10507+		connector_status_connected : connector_status_disconnected;
10508+	u8 phy_stat = hdmi_readb(hdmi, HDMI_PHY_STAT0);
10509+
10510+	mutex_lock(&hdmi->mutex);
10511+	if (!(phy_stat & HDMI_PHY_RX_SENSE))
10512+		hdmi->rxsense = false;
10513+	if (phy_stat & HDMI_PHY_HPD)
10514+		hdmi->rxsense = true;
10515+	mutex_unlock(&hdmi->mutex);
10516+
10517+	if (hdmi->bridge.dev) {
10518+		bool change;
10519+
10520+		change = drm_helper_hpd_irq_event(hdmi->bridge.dev);
10521+		if (change && hdmi->cec_adap &&
10522+		    hdmi->cec_adap->devnode.registered)
10523+			cec_queue_pin_hpd_event(hdmi->cec_adap,
10524+						hdmi->hpd_state,
10525+						ktime_get());
10526+		drm_bridge_hpd_notify(&hdmi->bridge, status);
10527+	}
10528+}
10529+
10530+static bool check_hdmi_irq(struct dw_hdmi *hdmi, int intr_stat,
10531+			   int phy_int_pol)
10532+{
10533+	int msecs;
10534+
10535+	/* Determine whether the interrupt type is HPD */
10536+	if (!(intr_stat & HDMI_IH_PHY_STAT0_HPD))
10537+		return false;
10538+
10539+	if (phy_int_pol & HDMI_PHY_HPD) {
10540+		dev_dbg(hdmi->dev, "dw hdmi plug in\n");
10541+		msecs = 150;
10542+		hdmi->hpd_state = true;
10543+	} else {
10544+		dev_dbg(hdmi->dev, "dw hdmi plug out\n");
10545+		msecs = 20;
10546+		hdmi->hpd_state = false;
10547+	}
10548+	mod_delayed_work(hdmi->workqueue, &hdmi->work, msecs_to_jiffies(msecs));
10549+
10550+	return true;
10551+}
10552+
10553+static void init_hpd_work(struct dw_hdmi *hdmi)
10554+{
10555+	hdmi->workqueue = create_workqueue("hpd_queue");
10556+	INIT_DELAYED_WORK(&hdmi->work, repo_hpd_event);
10557+}
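Editorial note: a descriptive summary of the hot-plug path these helpers implement; check_hdmi_irq() is presumably invoked from the PHY interrupt handler elsewhere in the patch.

/*
 * Summary of the HPD path added here: the interrupt path calls
 * check_hdmi_irq(), which records hpd_state and schedules repo_hpd_event()
 * on the "hpd_queue" workqueue after a debounce delay (150 ms on plug-in,
 * 20 ms on plug-out).  The work item then refreshes rxsense from
 * HDMI_PHY_STAT0, triggers drm_helper_hpd_irq_event(), forwards the edge
 * to the registered CEC adapter when the connector state changed, and
 * notifies the bridge of the new connector status.
 */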
10558+
10559+static void dw_hdmi_i2c_set_divs(struct dw_hdmi *hdmi)
10560+{
10561+	unsigned long clk_rate_khz;
10562+	unsigned long low_ns, high_ns;
10563+	unsigned long div_low, div_high;
10564+
10565+	/* Standard-mode */
10566+	if (hdmi->i2c->scl_high_ns < 4000)
10567+		high_ns = 4708;
10568+	else
10569+		high_ns = hdmi->i2c->scl_high_ns;
10570+
10571+	if (hdmi->i2c->scl_low_ns < 4700)
10572+		low_ns = 4916;
10573+	else
10574+		low_ns = hdmi->i2c->scl_low_ns;
10575+
10576+	/* Adjust to avoid overflow */
10577+	clk_rate_khz = DIV_ROUND_UP(clk_get_rate(hdmi->isfr_clk), 1000);
10578+
10579+	div_low = (clk_rate_khz * low_ns) / 1000000;
10580+	if ((clk_rate_khz * low_ns) % 1000000)
10581+		div_low++;
10582+
10583+	div_high = (clk_rate_khz * high_ns) / 1000000;
10584+	if ((clk_rate_khz * high_ns) % 1000000)
10585+		div_high++;
10586+
10587+	/* Maximum divider supported by hw is 0xffff */
10588+	if (div_low > 0xffff)
10589+		div_low = 0xffff;
10590+
10591+	if (div_high > 0xffff)
10592+		div_high = 0xffff;
10593+
10594+	hdmi_writeb(hdmi, div_high & 0xff, HDMI_I2CM_SS_SCL_HCNT_0_ADDR);
10595+	hdmi_writeb(hdmi, (div_high >> 8) & 0xff,
10596+		    HDMI_I2CM_SS_SCL_HCNT_1_ADDR);
10597+	hdmi_writeb(hdmi, div_low & 0xff, HDMI_I2CM_SS_SCL_LCNT_0_ADDR);
10598+	hdmi_writeb(hdmi, (div_low >> 8) & 0xff,
10599+		    HDMI_I2CM_SS_SCL_LCNT_1_ADDR);
10600+}
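Editorial note: a worked example of the divider math above, assuming a 24 MHz iSFR clock; the actual clock rate is SoC/board specific.

/*
 * Example (24 MHz isfr_clk assumed):
 *   clk_rate_khz = 24000
 *   div_high = roundup(24000 * 4708 / 1000000) = 113 (0x0071)
 *   div_low  = roundup(24000 * 4916 / 1000000) = 118 (0x0076)
 * The hdmi_writeb() calls above split these across the SS_SCL_HCNT/LCNT
 * register pairs, giving a standard-mode SCL on the order of 100 kHz.
 */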
10601+
10602 static void dw_hdmi_i2c_init(struct dw_hdmi *hdmi)
10603 {
10604 	hdmi_writeb(hdmi, HDMI_PHY_I2CM_INT_ADDR_DONE_POL,
10605@@ -276,7 +513,8 @@ static void dw_hdmi_i2c_init(struct dw_hdmi *hdmi)
10606 	hdmi_writeb(hdmi, 0x00, HDMI_I2CM_SOFTRSTZ);
10607 
10608 	/* Set Standard Mode speed (determined to be 100KHz on iMX6) */
10609-	hdmi_writeb(hdmi, 0x00, HDMI_I2CM_DIV);
10610+	hdmi_modb(hdmi, HDMI_I2CM_DIV_STD_MODE,
10611+		  HDMI_I2CM_DIV_FAST_STD_MODE, HDMI_I2CM_DIV);
10612 
10613 	/* Set done, not acknowledged and arbitration interrupt polarities */
10614 	hdmi_writeb(hdmi, HDMI_I2CM_INT_DONE_POL, HDMI_I2CM_INT);
10615@@ -290,6 +528,11 @@ static void dw_hdmi_i2c_init(struct dw_hdmi *hdmi)
10616 	/* Mute DONE and ERROR interrupts */
10617 	hdmi_writeb(hdmi, HDMI_IH_I2CM_STAT0_ERROR | HDMI_IH_I2CM_STAT0_DONE,
10618 		    HDMI_IH_MUTE_I2CM_STAT0);
10619+
10620+	/* Set the SDA high-level hold time */
10621+	hdmi_writeb(hdmi, 0x48, HDMI_I2CM_SDA_HOLD);
10622+
10623+	dw_hdmi_i2c_set_divs(hdmi);
10624 }
10625 
10626 static bool dw_hdmi_i2c_unwedge(struct dw_hdmi *hdmi)
10627@@ -461,6 +704,8 @@ static int dw_hdmi_i2c_xfer(struct i2c_adapter *adap,
10628 	hdmi_writeb(hdmi, 0x00, HDMI_IH_MUTE_I2CM_STAT0);
10629 
10630 	/* Set slave device address taken from the first I2C message */
10631+	if (addr == DDC_SEGMENT_ADDR && msgs[0].len == 1)
10632+		addr = DDC_ADDR;
10633 	hdmi_writeb(hdmi, addr, HDMI_I2CM_SLAVE);
10634 
10635 	/* Set slave device register address on transfer */
10636@@ -570,60 +815,117 @@ static void hdmi_set_cts_n(struct dw_hdmi *hdmi, unsigned int cts,
10637 	hdmi_writeb(hdmi, n & 0xff, HDMI_AUD_N1);
10638 }
10639 
10640-static unsigned int hdmi_compute_n(unsigned int freq, unsigned long pixel_clk)
10641+static int hdmi_match_tmds_n_table(struct dw_hdmi *hdmi,
10642+				   unsigned long pixel_clk,
10643+				   unsigned long freq)
10644 {
10645-	unsigned int n = (128 * freq) / 1000;
10646-	unsigned int mult = 1;
10647+	const struct dw_hdmi_plat_data *plat_data = hdmi->plat_data;
10648+	const struct dw_hdmi_audio_tmds_n *tmds_n = NULL;
10649+	int i;
10650+
10651+	if (plat_data->tmds_n_table) {
10652+		for (i = 0; plat_data->tmds_n_table[i].tmds != 0; i++) {
10653+			if (pixel_clk == plat_data->tmds_n_table[i].tmds) {
10654+				tmds_n = &plat_data->tmds_n_table[i];
10655+				break;
10656+			}
10657+		}
10658+	}
10659 
10660-	while (freq > 48000) {
10661-		mult *= 2;
10662-		freq /= 2;
10663+	if (tmds_n == NULL) {
10664+		for (i = 0; common_tmds_n_table[i].tmds != 0; i++) {
10665+			if (pixel_clk == common_tmds_n_table[i].tmds) {
10666+				tmds_n = &common_tmds_n_table[i];
10667+				break;
10668+			}
10669+		}
10670 	}
10671 
10672+	if (tmds_n == NULL)
10673+		return -ENOENT;
10674+
10675 	switch (freq) {
10676 	case 32000:
10677-		if (pixel_clk == 25175000)
10678-			n = 4576;
10679-		else if (pixel_clk == 27027000)
10680-			n = 4096;
10681-		else if (pixel_clk == 74176000 || pixel_clk == 148352000)
10682-			n = 11648;
10683-		else
10684-			n = 4096;
10685-		n *= mult;
10686-		break;
10687-
10688+		return tmds_n->n_32k;
10689 	case 44100:
10690-		if (pixel_clk == 25175000)
10691-			n = 7007;
10692-		else if (pixel_clk == 74176000)
10693-			n = 17836;
10694-		else if (pixel_clk == 148352000)
10695-			n = 8918;
10696-		else
10697-			n = 6272;
10698-		n *= mult;
10699-		break;
10700-
10701+	case 88200:
10702+	case 176400:
10703+		return (freq / 44100) * tmds_n->n_44k1;
10704 	case 48000:
10705-		if (pixel_clk == 25175000)
10706-			n = 6864;
10707-		else if (pixel_clk == 27027000)
10708-			n = 6144;
10709-		else if (pixel_clk == 74176000)
10710-			n = 11648;
10711-		else if (pixel_clk == 148352000)
10712-			n = 5824;
10713-		else
10714-			n = 6144;
10715-		n *= mult;
10716-		break;
10717-
10718+	case 96000:
10719+	case 192000:
10720+		return (freq / 48000) * tmds_n->n_48k;
10721 	default:
10722-		break;
10723+		return -ENOENT;
10724+	}
10725+}
10726+
10727+static u64 hdmi_audio_math_diff(unsigned int freq, unsigned int n,
10728+				unsigned int pixel_clk)
10729+{
10730+	u64 final, diff;
10731+	u64 cts;
10732+
10733+	final = (u64)pixel_clk * n;
10734+
10735+	cts = final;
10736+	do_div(cts, 128 * freq);
10737+
10738+	diff = final - (u64)cts * (128 * freq);
10739+
10740+	return diff;
10741+}
10742+
10743+static unsigned int hdmi_compute_n(struct dw_hdmi *hdmi,
10744+				   unsigned long pixel_clk,
10745+				   unsigned long freq)
10746+{
10747+	unsigned int min_n = DIV_ROUND_UP((128 * freq), 1500);
10748+	unsigned int max_n = (128 * freq) / 300;
10749+	unsigned int ideal_n = (128 * freq) / 1000;
10750+	unsigned int best_n_distance = ideal_n;
10751+	unsigned int best_n = 0;
10752+	u64 best_diff = U64_MAX;
10753+	int n;
10754+
10755+	/* If the ideal N could satisfy the audio math, then just take it */
10756+	if (hdmi_audio_math_diff(freq, ideal_n, pixel_clk) == 0)
10757+		return ideal_n;
10758+
10759+	for (n = min_n; n <= max_n; n++) {
10760+		u64 diff = hdmi_audio_math_diff(freq, n, pixel_clk);
10761+
10762+		if (diff < best_diff || (diff == best_diff &&
10763+		    abs(n - ideal_n) < best_n_distance)) {
10764+			best_n = n;
10765+			best_diff = diff;
10766+			best_n_distance = abs(best_n - ideal_n);
10767+		}
10768+
10769+		/*
10770+		 * The best N already satisfies the audio math and is the
10771+		 * closest value to the ideal N, so stop the loop early.
10772+		 */
10773+		if ((best_diff == 0) && (abs(n - ideal_n) > best_n_distance))
10774+			break;
10775 	}
10776 
10777-	return n;
10778+	return best_n;
10779+}
10780+
10781+static unsigned int hdmi_find_n(struct dw_hdmi *hdmi, unsigned long pixel_clk,
10782+				unsigned long sample_rate)
10783+{
10784+	int n;
10785+
10786+	n = hdmi_match_tmds_n_table(hdmi, pixel_clk, sample_rate);
10787+	if (n > 0)
10788+		return n;
10789+
10790+	dev_warn(hdmi->dev, "Pixel clock %lu not in N table; computing N dynamically\n",
10791+		 pixel_clk);
10792+
10793+	return hdmi_compute_n(hdmi, pixel_clk, sample_rate);
10794 }
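Editorial note: to make the selection strategy concrete, the bounds used by hdmi_compute_n() for a 48 kHz stream work out as follows; this is a worked example, not additional code in the patch.

/*
 * Example for freq = 48000:
 *   min_n   = DIV_ROUND_UP(128 * 48000, 1500) = 4096
 *   ideal_n = (128 * 48000) / 1000            = 6144
 *   max_n   = (128 * 48000) / 300             = 20480
 * hdmi_find_n() first tries the static tables; only when the pixel/TMDS
 * clock is not listed does it scan [min_n, max_n] for the N closest to
 * ideal_n whose CTS division is exact (or has the smallest remainder).
 */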
10795 
10796 /*
10797@@ -654,7 +956,7 @@ static void hdmi_set_clk_regenerator(struct dw_hdmi *hdmi,
10798 	u8 config3;
10799 	u64 tmp;
10800 
10801-	n = hdmi_compute_n(sample_rate, pixel_clk);
10802+	n = hdmi_find_n(hdmi, pixel_clk, sample_rate);
10803 
10804 	config3 = hdmi_readb(hdmi, HDMI_CONFIG3_ID);
10805 
10806@@ -756,14 +1058,6 @@ static void hdmi_enable_audio_clk(struct dw_hdmi *hdmi, bool enable)
10807 	hdmi_writeb(hdmi, hdmi->mc_clkdis, HDMI_MC_CLKDIS);
10808 }
10809 
10810-static u8 *hdmi_audio_get_eld(struct dw_hdmi *hdmi)
10811-{
10812-	if (!hdmi->curr_conn)
10813-		return NULL;
10814-
10815-	return hdmi->curr_conn->eld;
10816-}
10817-
10818 static void dw_hdmi_ahb_audio_enable(struct dw_hdmi *hdmi)
10819 {
10820 	hdmi_set_cts_n(hdmi, hdmi->audio_cts, hdmi->audio_n);
10821@@ -1013,6 +1307,15 @@ static bool is_csc_needed(struct dw_hdmi *hdmi)
10822 	       is_color_space_interpolation(hdmi);
10823 }
10824 
10825+static bool is_rgb_full_to_limited_needed(struct dw_hdmi *hdmi)
10826+{
10827+	if (hdmi->hdmi_data.quant_range == HDMI_QUANTIZATION_RANGE_LIMITED ||
10828+	    (!hdmi->hdmi_data.quant_range && hdmi->hdmi_data.rgb_limited_range))
10829+		return true;
10830+
10831+	return false;
10832+}
10833+
10834 static void dw_hdmi_update_csc_coeffs(struct dw_hdmi *hdmi)
10835 {
10836 	const u16 (*csc_coeff)[3][4] = &csc_coeff_default;
10837@@ -1035,7 +1338,7 @@ static void dw_hdmi_update_csc_coeffs(struct dw_hdmi *hdmi)
10838 			csc_coeff = &csc_coeff_rgb_in_eitu709;
10839 		csc_scale = 0;
10840 	} else if (is_input_rgb && is_output_rgb &&
10841-		   hdmi->hdmi_data.rgb_limited_range) {
10842+		   is_rgb_full_to_limited_needed(hdmi)) {
10843 		csc_coeff = &csc_coeff_rgb_full_to_rgb_limited;
10844 	}
10845 
10846@@ -1067,7 +1370,7 @@ static void hdmi_video_csc(struct dw_hdmi *hdmi)
10847 	if (is_color_space_interpolation(hdmi))
10848 		interpolation = HDMI_CSC_CFG_INTMODE_CHROMA_INT_FORMULA1;
10849 	else if (is_color_space_decimation(hdmi))
10850-		decimation = HDMI_CSC_CFG_DECMODE_CHROMA_INT_FORMULA3;
10851+		decimation = HDMI_CSC_CFG_DECMODE_CHROMA_INT_FORMULA1;
10852 
10853 	switch (hdmi_bus_fmt_color_depth(hdmi->hdmi_data.enc_out_bus_format)) {
10854 	case 8:
10855@@ -1114,7 +1417,7 @@ static void hdmi_video_packetize(struct dw_hdmi *hdmi)
10856 		switch (hdmi_bus_fmt_color_depth(
10857 					hdmi->hdmi_data.enc_out_bus_format)) {
10858 		case 8:
10859-			color_depth = 4;
10860+			color_depth = 0;
10861 			output_select = HDMI_VP_CONF_OUTPUT_SELECTOR_BYPASS;
10862 			break;
10863 		case 10:
10864@@ -1152,18 +1455,15 @@ static void hdmi_video_packetize(struct dw_hdmi *hdmi)
10865 	}
10866 
10867 	/* set the packetizer registers */
10868-	val = ((color_depth << HDMI_VP_PR_CD_COLOR_DEPTH_OFFSET) &
10869-		HDMI_VP_PR_CD_COLOR_DEPTH_MASK) |
10870-		((hdmi_data->pix_repet_factor <<
10871-		HDMI_VP_PR_CD_DESIRED_PR_FACTOR_OFFSET) &
10872-		HDMI_VP_PR_CD_DESIRED_PR_FACTOR_MASK);
10873+	val = (color_depth << HDMI_VP_PR_CD_COLOR_DEPTH_OFFSET) &
10874+	      HDMI_VP_PR_CD_COLOR_DEPTH_MASK;
10875 	hdmi_writeb(hdmi, val, HDMI_VP_PR_CD);
10876 
10877 	hdmi_modb(hdmi, HDMI_VP_STUFF_PR_STUFFING_STUFFING_MODE,
10878 		  HDMI_VP_STUFF_PR_STUFFING_MASK, HDMI_VP_STUFF);
10879 
10880 	/* Data from pixel repeater block */
10881-	if (hdmi_data->pix_repet_factor > 1) {
10882+	if (hdmi_data->pix_repet_factor > 0) {
10883 		vp_conf = HDMI_VP_CONF_PR_EN_ENABLE |
10884 			  HDMI_VP_CONF_BYPASS_SELECT_PIX_REPEATER;
10885 	} else { /* data from packetizer block */
10886@@ -1175,8 +1475,13 @@ static void hdmi_video_packetize(struct dw_hdmi *hdmi)
10887 		  HDMI_VP_CONF_PR_EN_MASK |
10888 		  HDMI_VP_CONF_BYPASS_SELECT_MASK, HDMI_VP_CONF);
10889 
10890-	hdmi_modb(hdmi, 1 << HDMI_VP_STUFF_IDEFAULT_PHASE_OFFSET,
10891-		  HDMI_VP_STUFF_IDEFAULT_PHASE_MASK, HDMI_VP_STUFF);
10892+	if ((color_depth == 5 && hdmi->previous_mode.htotal % 4) ||
10893+	    (color_depth == 6 && hdmi->previous_mode.htotal % 2))
10894+		hdmi_modb(hdmi, 0, HDMI_VP_STUFF_IDEFAULT_PHASE_MASK,
10895+			  HDMI_VP_STUFF);
10896+	else
10897+		hdmi_modb(hdmi, 1 << HDMI_VP_STUFF_IDEFAULT_PHASE_OFFSET,
10898+			HDMI_VP_STUFF_IDEFAULT_PHASE_MASK, HDMI_VP_STUFF);
10899 
10900 	hdmi_writeb(hdmi, remap_size, HDMI_VP_REMAP);
10901 
10902@@ -1277,6 +1582,23 @@ static bool dw_hdmi_support_scdc(struct dw_hdmi *hdmi,
10903 	return true;
10904 }
10905 
10906+static int hdmi_phy_i2c_read(struct dw_hdmi *hdmi, unsigned char addr)
10907+{
10908+	int val;
10909+
10910+	hdmi_writeb(hdmi, 0xFF, HDMI_IH_I2CMPHY_STAT0);
10911+	hdmi_writeb(hdmi, addr, HDMI_PHY_I2CM_ADDRESS_ADDR);
10912+	hdmi_writeb(hdmi, 0, HDMI_PHY_I2CM_DATAI_1_ADDR);
10913+	hdmi_writeb(hdmi, 0, HDMI_PHY_I2CM_DATAI_0_ADDR);
10914+	hdmi_writeb(hdmi, HDMI_PHY_I2CM_OPERATION_ADDR_READ,
10915+		    HDMI_PHY_I2CM_OPERATION_ADDR);
10916+	hdmi_phy_wait_i2c_done(hdmi, 1000);
10917+	val = hdmi_readb(hdmi, HDMI_PHY_I2CM_DATAI_1_ADDR);
10918+	val = (val & 0xff) << 8;
10919+	val += hdmi_readb(hdmi, HDMI_PHY_I2CM_DATAI_0_ADDR) & 0xff;
10920+	return val;
10921+}
10922+
10923 /*
10924  * HDMI2.0 Specifies the following procedure for High TMDS Bit Rates:
10925  * - The Source shall suspend transmission of the TMDS clock and data
10926@@ -1454,6 +1776,13 @@ static int hdmi_phy_configure_dwc_hdmi_3d_tx(struct dw_hdmi *hdmi,
10927 	const struct dw_hdmi_mpll_config *mpll_config = pdata->mpll_cfg;
10928 	const struct dw_hdmi_curr_ctrl *curr_ctrl = pdata->cur_ctr;
10929 	const struct dw_hdmi_phy_config *phy_config = pdata->phy_config;
10930+	unsigned int tmdsclock = hdmi->hdmi_data.video_mode.mtmdsclock;
10931+	unsigned int depth =
10932+		hdmi_bus_fmt_color_depth(hdmi->hdmi_data.enc_out_bus_format);
10933+
10934+	if (hdmi_bus_fmt_is_yuv420(hdmi->hdmi_data.enc_out_bus_format) &&
10935+	    pdata->mpll_cfg_420)
10936+		mpll_config = pdata->mpll_cfg_420;
10937 
10938 	/* TOFIX Will need 420 specific PHY configuration tables */
10939 
10940@@ -1463,11 +1792,11 @@ static int hdmi_phy_configure_dwc_hdmi_3d_tx(struct dw_hdmi *hdmi,
10941 			break;
10942 
10943 	for (; curr_ctrl->mpixelclock != ~0UL; curr_ctrl++)
10944-		if (mpixelclock <= curr_ctrl->mpixelclock)
10945+		if (tmdsclock <= curr_ctrl->mpixelclock)
10946 			break;
10947 
10948 	for (; phy_config->mpixelclock != ~0UL; phy_config++)
10949-		if (mpixelclock <= phy_config->mpixelclock)
10950+		if (tmdsclock <= phy_config->mpixelclock)
10951 			break;
10952 
10953 	if (mpll_config->mpixelclock == ~0UL ||
10954@@ -1475,11 +1804,18 @@ static int hdmi_phy_configure_dwc_hdmi_3d_tx(struct dw_hdmi *hdmi,
10955 	    phy_config->mpixelclock == ~0UL)
10956 		return -EINVAL;
10957 
10958-	dw_hdmi_phy_i2c_write(hdmi, mpll_config->res[0].cpce,
10959+	if (!hdmi_bus_fmt_is_yuv422(hdmi->hdmi_data.enc_out_bus_format))
10960+		depth = fls(depth - 8);
10961+	else
10962+		depth = 0;
10963+	if (depth)
10964+		depth--;
10965+
10966+	dw_hdmi_phy_i2c_write(hdmi, mpll_config->res[depth].cpce,
10967 			      HDMI_3D_TX_PHY_CPCE_CTRL);
10968-	dw_hdmi_phy_i2c_write(hdmi, mpll_config->res[0].gmp,
10969+	dw_hdmi_phy_i2c_write(hdmi, mpll_config->res[depth].gmp,
10970 			      HDMI_3D_TX_PHY_GMPCTRL);
10971-	dw_hdmi_phy_i2c_write(hdmi, curr_ctrl->curr[0],
10972+	dw_hdmi_phy_i2c_write(hdmi, curr_ctrl->curr[depth],
10973 			      HDMI_3D_TX_PHY_CURRCTRL);
10974 
10975 	dw_hdmi_phy_i2c_write(hdmi, 0, HDMI_3D_TX_PHY_PLLPHBYCTRL);
10976@@ -1492,10 +1828,6 @@ static int hdmi_phy_configure_dwc_hdmi_3d_tx(struct dw_hdmi *hdmi,
10977 	dw_hdmi_phy_i2c_write(hdmi, phy_config->vlev_ctr,
10978 			      HDMI_3D_TX_PHY_VLEVCTRL);
10979 
10980-	/* Override and disable clock termination. */
10981-	dw_hdmi_phy_i2c_write(hdmi, HDMI_3D_TX_PHY_CKCALCTRL_OVERRIDE,
10982-			      HDMI_3D_TX_PHY_CKCALCTRL);
10983-
10984 	return 0;
10985 }
10986 
10987@@ -1597,14 +1929,16 @@ void dw_hdmi_phy_setup_hpd(struct dw_hdmi *hdmi, void *data)
10988 	hdmi_writeb(hdmi, HDMI_IH_PHY_STAT0_HPD | HDMI_IH_PHY_STAT0_RX_SENSE,
10989 		    HDMI_IH_PHY_STAT0);
10990 
10991-	/* Enable cable hot plug irq. */
10992-	hdmi_writeb(hdmi, hdmi->phy_mask, HDMI_PHY_MASK0);
10993+	if (!hdmi->next_bridge) {
10994+		/* Enable cable hot plug irq. */
10995+		hdmi_writeb(hdmi, hdmi->phy_mask, HDMI_PHY_MASK0);
10996 
10997-	/* Clear and unmute interrupts. */
10998-	hdmi_writeb(hdmi, HDMI_IH_PHY_STAT0_HPD | HDMI_IH_PHY_STAT0_RX_SENSE,
10999-		    HDMI_IH_PHY_STAT0);
11000-	hdmi_writeb(hdmi, ~(HDMI_IH_PHY_STAT0_HPD | HDMI_IH_PHY_STAT0_RX_SENSE),
11001-		    HDMI_IH_MUTE_PHY_STAT0);
11002+		/* Clear and unmute interrupts. */
11003+		hdmi_writeb(hdmi, HDMI_IH_PHY_STAT0_HPD | HDMI_IH_PHY_STAT0_RX_SENSE,
11004+			    HDMI_IH_PHY_STAT0);
11005+		hdmi_writeb(hdmi, ~(HDMI_IH_PHY_STAT0_HPD | HDMI_IH_PHY_STAT0_RX_SENSE),
11006+			    HDMI_IH_MUTE_PHY_STAT0);
11007+	}
11008 }
11009 EXPORT_SYMBOL_GPL(dw_hdmi_phy_setup_hpd);
11010 
11011@@ -1620,23 +1954,36 @@ static const struct dw_hdmi_phy_ops dw_hdmi_synopsys_phy_ops = {
11012  * HDMI TX Setup
11013  */
11014 
11015-static void hdmi_tx_hdcp_config(struct dw_hdmi *hdmi)
11016+static void hdmi_tx_hdcp_config(struct dw_hdmi *hdmi,
11017+				const struct drm_display_mode *mode)
11018 {
11019-	u8 de;
11020-
11021-	if (hdmi->hdmi_data.video_mode.mdataenablepolarity)
11022-		de = HDMI_A_VIDPOLCFG_DATAENPOL_ACTIVE_HIGH;
11023-	else
11024-		de = HDMI_A_VIDPOLCFG_DATAENPOL_ACTIVE_LOW;
11025-
11026-	/* disable rx detect */
11027-	hdmi_modb(hdmi, HDMI_A_HDCPCFG0_RXDETECT_DISABLE,
11028-		  HDMI_A_HDCPCFG0_RXDETECT_MASK, HDMI_A_HDCPCFG0);
11029-
11030-	hdmi_modb(hdmi, de, HDMI_A_VIDPOLCFG_DATAENPOL_MASK, HDMI_A_VIDPOLCFG);
11031-
11032-	hdmi_modb(hdmi, HDMI_A_HDCPCFG1_ENCRYPTIONDISABLE_DISABLE,
11033-		  HDMI_A_HDCPCFG1_ENCRYPTIONDISABLE_MASK, HDMI_A_HDCPCFG1);
11034+	struct hdmi_vmode *vmode = &hdmi->hdmi_data.video_mode;
11035+	u8 vsync_pol, hsync_pol, data_pol, hdmi_dvi;
11036+
11037+	/* Configure the video polarity */
11038+	vsync_pol = mode->flags & DRM_MODE_FLAG_PVSYNC ?
11039+		    HDMI_A_VIDPOLCFG_VSYNCPOL_ACTIVE_HIGH :
11040+		    HDMI_A_VIDPOLCFG_VSYNCPOL_ACTIVE_LOW;
11041+	hsync_pol = mode->flags & DRM_MODE_FLAG_PHSYNC ?
11042+		    HDMI_A_VIDPOLCFG_HSYNCPOL_ACTIVE_HIGH :
11043+		    HDMI_A_VIDPOLCFG_HSYNCPOL_ACTIVE_LOW;
11044+	data_pol = vmode->mdataenablepolarity ?
11045+		    HDMI_A_VIDPOLCFG_DATAENPOL_ACTIVE_HIGH :
11046+		    HDMI_A_VIDPOLCFG_DATAENPOL_ACTIVE_LOW;
11047+	hdmi_modb(hdmi, vsync_pol | hsync_pol | data_pol,
11048+		  HDMI_A_VIDPOLCFG_VSYNCPOL_MASK |
11049+		  HDMI_A_VIDPOLCFG_HSYNCPOL_MASK |
11050+		  HDMI_A_VIDPOLCFG_DATAENPOL_MASK,
11051+		  HDMI_A_VIDPOLCFG);
11052+
11053+	/* Config the display mode */
11054+	hdmi_dvi = hdmi->sink_is_hdmi ? HDMI_A_HDCPCFG0_HDMIDVI_HDMI :
11055+		   HDMI_A_HDCPCFG0_HDMIDVI_DVI;
11056+	hdmi_modb(hdmi, hdmi_dvi, HDMI_A_HDCPCFG0_HDMIDVI_MASK,
11057+		  HDMI_A_HDCPCFG0);
11058+
11059+	if (hdmi->hdcp && hdmi->hdcp->hdcp_start)
11060+		hdmi->hdcp->hdcp_start(hdmi->hdcp);
11061 }
11062 
11063 static void hdmi_config_AVI(struct dw_hdmi *hdmi,
11064@@ -1650,10 +1997,15 @@ static void hdmi_config_AVI(struct dw_hdmi *hdmi,
11065 	drm_hdmi_avi_infoframe_from_display_mode(&frame, connector, mode);
11066 
11067 	if (hdmi_bus_fmt_is_rgb(hdmi->hdmi_data.enc_out_bus_format)) {
11068-		drm_hdmi_avi_infoframe_quant_range(&frame, connector, mode,
11069-						   hdmi->hdmi_data.rgb_limited_range ?
11070-						   HDMI_QUANTIZATION_RANGE_LIMITED :
11071-						   HDMI_QUANTIZATION_RANGE_FULL);
11072+		/* default range */
11073+		if (!hdmi->hdmi_data.quant_range)
11074+			drm_hdmi_avi_infoframe_quant_range(&frame, connector, mode,
11075+							   hdmi->hdmi_data.rgb_limited_range ?
11076+							   HDMI_QUANTIZATION_RANGE_LIMITED :
11077+							   HDMI_QUANTIZATION_RANGE_FULL);
11078+		else
11079+			drm_hdmi_avi_infoframe_quant_range(&frame, connector, mode,
11080+							   hdmi->hdmi_data.quant_range);
11081 	} else {
11082 		frame.quantization_range = HDMI_QUANTIZATION_RANGE_DEFAULT;
11083 		frame.ycc_quantization_range =
11084@@ -1688,6 +2040,14 @@ static void hdmi_config_AVI(struct dw_hdmi *hdmi,
11085 			frame.extended_colorimetry =
11086 					HDMI_EXTENDED_COLORIMETRY_XV_YCC_709;
11087 			break;
11088+		case V4L2_YCBCR_ENC_BT2020:
11089+			if (hdmi->hdmi_data.enc_in_encoding == V4L2_YCBCR_ENC_BT2020)
11090+				frame.colorimetry = HDMI_COLORIMETRY_EXTENDED;
11091+			else
11092+				frame.colorimetry = HDMI_COLORIMETRY_ITU_709;
11093+			frame.extended_colorimetry =
11094+				HDMI_EXTENDED_COLORIMETRY_BT2020;
11095+		break;
11096 		default: /* Carries no data */
11097 			frame.colorimetry = HDMI_COLORIMETRY_ITU_601;
11098 			frame.extended_colorimetry =
11099@@ -1824,17 +2184,44 @@ static void hdmi_config_drm_infoframe(struct dw_hdmi *hdmi,
11100 				      const struct drm_connector *connector)
11101 {
11102 	const struct drm_connector_state *conn_state = connector->state;
11103+	struct hdr_output_metadata *hdr_metadata;
11104 	struct hdmi_drm_infoframe frame;
11105 	u8 buffer[30];
11106 	ssize_t err;
11107 	int i;
11108 
11109+	/* The Dynamic Range and Mastering InfoFrame was introduced in v2.11a. */
11110+	if (hdmi->version < 0x211a) {
11111+		DRM_ERROR("Not support DRM Infoframe\n");
11112+		return;
11113+	}
11114+
11115 	if (!hdmi->plat_data->use_drm_infoframe)
11116 		return;
11117 
11118 	hdmi_modb(hdmi, HDMI_FC_PACKET_TX_EN_DRM_DISABLE,
11119 		  HDMI_FC_PACKET_TX_EN_DRM_MASK, HDMI_FC_PACKET_TX_EN);
11120 
11121+	if (!hdmi->connector.hdr_sink_metadata.hdmi_type1.eotf) {
11122+		DRM_DEBUG("No need to set HDR metadata in infoframe\n");
11123+		return;
11124+	}
11125+
11126+	if (!conn_state->hdr_output_metadata) {
11127+		DRM_DEBUG("source metadata not set yet\n");
11128+		return;
11129+	}
11130+
11131+	hdr_metadata = (struct hdr_output_metadata *)
11132+		conn_state->hdr_output_metadata->data;
11133+
11134+	if (!(hdmi->connector.hdr_sink_metadata.hdmi_type1.eotf &
11135+	    BIT(hdr_metadata->hdmi_metadata_type1.eotf))) {
11136+		DRM_ERROR("Not support EOTF %d\n",
11137+			  hdr_metadata->hdmi_metadata_type1.eotf);
11138+		return;
11139+	}
11140+
11141 	err = drm_hdmi_infoframe_set_hdr_metadata(&frame, conn_state);
11142 	if (err < 0)
11143 		return;
11144@@ -1854,51 +2241,66 @@ static void hdmi_config_drm_infoframe(struct dw_hdmi *hdmi,
11145 	hdmi_writeb(hdmi, 1, HDMI_FC_DRM_UP);
11146 	hdmi_modb(hdmi, HDMI_FC_PACKET_TX_EN_DRM_ENABLE,
11147 		  HDMI_FC_PACKET_TX_EN_DRM_MASK, HDMI_FC_PACKET_TX_EN);
11148+
11149+	DRM_DEBUG("%s eotf %d end\n", __func__,
11150+		  hdr_metadata->hdmi_metadata_type1.eotf);
11151 }
11152 
11153-static void hdmi_av_composer(struct dw_hdmi *hdmi,
11154-			     const struct drm_display_info *display,
11155-			     const struct drm_display_mode *mode)
11156+static unsigned int
11157+hdmi_get_tmdsclock(struct dw_hdmi *hdmi, unsigned long mpixelclock)
11158 {
11159-	u8 inv_val, bytes;
11160-	const struct drm_hdmi_info *hdmi_info = &display->hdmi;
11161-	struct hdmi_vmode *vmode = &hdmi->hdmi_data.video_mode;
11162-	int hblank, vblank, h_de_hs, v_de_vs, hsync_len, vsync_len;
11163-	unsigned int vdisplay, hdisplay;
11164-
11165-	vmode->mpixelclock = mode->clock * 1000;
11166-
11167-	dev_dbg(hdmi->dev, "final pixclk = %d\n", vmode->mpixelclock);
11168-
11169-	vmode->mtmdsclock = vmode->mpixelclock;
11170+	unsigned int tmdsclock = mpixelclock;
11171+	unsigned int depth =
11172+		hdmi_bus_fmt_color_depth(hdmi->hdmi_data.enc_out_bus_format);
11173 
11174 	if (!hdmi_bus_fmt_is_yuv422(hdmi->hdmi_data.enc_out_bus_format)) {
11175-		switch (hdmi_bus_fmt_color_depth(
11176-				hdmi->hdmi_data.enc_out_bus_format)) {
11177+		switch (depth) {
11178 		case 16:
11179-			vmode->mtmdsclock = vmode->mpixelclock * 2;
11180+			tmdsclock = mpixelclock * 2;
11181 			break;
11182 		case 12:
11183-			vmode->mtmdsclock = vmode->mpixelclock * 3 / 2;
11184+			tmdsclock = mpixelclock * 3 / 2;
11185 			break;
11186 		case 10:
11187-			vmode->mtmdsclock = vmode->mpixelclock * 5 / 4;
11188+			tmdsclock = mpixelclock * 5 / 4;
11189+			break;
11190+		default:
11191 			break;
11192 		}
11193 	}
11194 
11195+	return tmdsclock;
11196+}
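Editorial note: a short worked example of the deep-color scaling performed above, with illustrative values.

/*
 * Example: a 148.5 MHz pixel clock at 10 bpc RGB gives
 *   tmdsclock = 148500000 * 5 / 4 = 185625000 Hz,
 * while 12 bpc gives 148500000 * 3 / 2 = 222750000 Hz; YCbCr 4:2:2
 * output is exempt and keeps tmdsclock equal to the pixel clock.
 */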
11197+
11198+static void hdmi_av_composer(struct dw_hdmi *hdmi,
11199+			     const struct drm_display_info *display,
11200+			     const struct drm_display_mode *mode)
11201+{
11202+	u8 inv_val, bytes;
11203+	const struct drm_hdmi_info *hdmi_info = &display->hdmi;
11204+	struct hdmi_vmode *vmode = &hdmi->hdmi_data.video_mode;
11205+	int hblank, vblank, h_de_hs, v_de_vs, hsync_len, vsync_len;
11206+	unsigned int vdisplay, hdisplay;
11207+
11208+	vmode->previous_pixelclock = vmode->mpixelclock;
11209+	vmode->mpixelclock = mode->crtc_clock * 1000;
11210+	if ((mode->flags & DRM_MODE_FLAG_3D_MASK) ==
11211+		DRM_MODE_FLAG_3D_FRAME_PACKING)
11212+		vmode->mpixelclock *= 2;
11213+	dev_dbg(hdmi->dev, "final pixclk = %d\n", vmode->mpixelclock);
11214+
11215+	vmode->previous_tmdsclock = vmode->mtmdsclock;
11216+	vmode->mtmdsclock = hdmi_get_tmdsclock(hdmi, vmode->mpixelclock);
11217 	if (hdmi_bus_fmt_is_yuv420(hdmi->hdmi_data.enc_out_bus_format))
11218 		vmode->mtmdsclock /= 2;
11219-
11220 	dev_dbg(hdmi->dev, "final tmdsclock = %d\n", vmode->mtmdsclock);
11221 
11222-	/* Set up HDMI_FC_INVIDCONF */
11223-	inv_val = (hdmi->hdmi_data.hdcp_enable ||
11224-		   (dw_hdmi_support_scdc(hdmi, display) &&
11225-		    (vmode->mtmdsclock > HDMI14_MAX_TMDSCLK ||
11226-		     hdmi_info->scdc.scrambling.low_rates)) ?
11227-		HDMI_FC_INVIDCONF_HDCP_KEEPOUT_ACTIVE :
11228-		HDMI_FC_INVIDCONF_HDCP_KEEPOUT_INACTIVE);
11229+	/* Set up HDMI_FC_INVIDCONF.
11230+	 * Some displays require an interval of at least 58 pixels between
11231+	 * the video data and data island periods, and setting
11232+	 * fc_invidconf.HDCP_keepout (1'b1) satisfies that requirement.
11233+	 */
11234+	inv_val = HDMI_FC_INVIDCONF_HDCP_KEEPOUT_ACTIVE;
11235 
11236 	inv_val |= mode->flags & DRM_MODE_FLAG_PVSYNC ?
11237 		HDMI_FC_INVIDCONF_VSYNC_IN_POLARITY_ACTIVE_HIGH :
11238@@ -1964,7 +2366,8 @@ static void hdmi_av_composer(struct dw_hdmi *hdmi,
11239 	/* Scrambling Control */
11240 	if (dw_hdmi_support_scdc(hdmi, display)) {
11241 		if (vmode->mtmdsclock > HDMI14_MAX_TMDSCLK ||
11242-		    hdmi_info->scdc.scrambling.low_rates) {
11243+		    (hdmi_info->scdc.scrambling.low_rates &&
11244+		     hdmi->scramble_low_rates)) {
11245 			/*
11246 			 * HDMI2.0 Specifies the following procedure:
11247 			 * After the Source Device has determined that
11248@@ -1998,6 +2401,8 @@ static void hdmi_av_composer(struct dw_hdmi *hdmi,
11249 				    HDMI_MC_SWRSTZ);
11250 			drm_scdc_set_scrambling(hdmi->ddc, 0);
11251 		}
11252+	} else {
11253+		hdmi_writeb(hdmi, 0, HDMI_FC_SCRAMBLER_CTRL);
11254 	}
11255 
11256 	/* Set up horizontal active pixel width */
11257@@ -2055,6 +2460,12 @@ static void dw_hdmi_enable_video_path(struct dw_hdmi *hdmi)
11258 	hdmi->mc_clkdis &= ~HDMI_MC_CLKDIS_TMDSCLK_DISABLE;
11259 	hdmi_writeb(hdmi, hdmi->mc_clkdis, HDMI_MC_CLKDIS);
11260 
11261+	/* Enable pixel repetition path */
11262+	if (hdmi->hdmi_data.video_mode.mpixelrepetitioninput) {
11263+		hdmi->mc_clkdis &= ~HDMI_MC_CLKDIS_PREPCLK_DISABLE;
11264+		hdmi_writeb(hdmi, hdmi->mc_clkdis, HDMI_MC_CLKDIS);
11265+	}
11266+
11267 	/* Enable csc path */
11268 	if (is_csc_needed(hdmi)) {
11269 		hdmi->mc_clkdis &= ~HDMI_MC_CLKDIS_CSCCLK_DISABLE;
11270@@ -2130,6 +2541,7 @@ static int dw_hdmi_setup(struct dw_hdmi *hdmi,
11271 			 const struct drm_display_mode *mode)
11272 {
11273 	int ret;
11274+	void *data = hdmi->plat_data->phy_data;
11275 
11276 	hdmi_disable_overflow_interrupts(hdmi);
11277 
11278@@ -2141,48 +2553,91 @@ static int dw_hdmi_setup(struct dw_hdmi *hdmi,
11279 		dev_dbg(hdmi->dev, "CEA mode used vic=%d\n", hdmi->vic);
11280 	}
11281 
11282-	if ((hdmi->vic == 6) || (hdmi->vic == 7) ||
11283-	    (hdmi->vic == 21) || (hdmi->vic == 22) ||
11284-	    (hdmi->vic == 2) || (hdmi->vic == 3) ||
11285-	    (hdmi->vic == 17) || (hdmi->vic == 18))
11286+	if (hdmi->plat_data->get_enc_out_encoding)
11287+		hdmi->hdmi_data.enc_out_encoding =
11288+			hdmi->plat_data->get_enc_out_encoding(data);
11289+	else if ((hdmi->vic == 6) || (hdmi->vic == 7) ||
11290+		 (hdmi->vic == 21) || (hdmi->vic == 22) ||
11291+		 (hdmi->vic == 2) || (hdmi->vic == 3) ||
11292+		 (hdmi->vic == 17) || (hdmi->vic == 18))
11293 		hdmi->hdmi_data.enc_out_encoding = V4L2_YCBCR_ENC_601;
11294 	else
11295 		hdmi->hdmi_data.enc_out_encoding = V4L2_YCBCR_ENC_709;
11296 
11297-	hdmi->hdmi_data.video_mode.mpixelrepetitionoutput = 0;
11298-	hdmi->hdmi_data.video_mode.mpixelrepetitioninput = 0;
11299+	if (mode->flags & DRM_MODE_FLAG_DBLCLK) {
11300+		hdmi->hdmi_data.video_mode.mpixelrepetitionoutput = 1;
11301+		hdmi->hdmi_data.video_mode.mpixelrepetitioninput = 1;
11302+	} else {
11303+		hdmi->hdmi_data.video_mode.mpixelrepetitionoutput = 0;
11304+		hdmi->hdmi_data.video_mode.mpixelrepetitioninput = 0;
11305+	}
11306+	/* TOFIX: Get input format from plat data or fallback to RGB888 */
11307+	if (hdmi->plat_data->get_input_bus_format)
11308+		hdmi->hdmi_data.enc_in_bus_format =
11309+			hdmi->plat_data->get_input_bus_format(data);
11310+	else if (hdmi->plat_data->input_bus_format)
11311+		hdmi->hdmi_data.enc_in_bus_format =
11312+			hdmi->plat_data->input_bus_format;
11313+	else
11314+		hdmi->hdmi_data.enc_in_bus_format =
11315+			MEDIA_BUS_FMT_RGB888_1X24;
11316 
11317-	if (hdmi->hdmi_data.enc_in_bus_format == MEDIA_BUS_FMT_FIXED)
11318-		hdmi->hdmi_data.enc_in_bus_format = MEDIA_BUS_FMT_RGB888_1X24;
11319+	/* TOFIX: Default to RGB888 output format */
11320+	if (hdmi->plat_data->get_output_bus_format)
11321+		hdmi->hdmi_data.enc_out_bus_format =
11322+			hdmi->plat_data->get_output_bus_format(data);
11323+	else
11324+		hdmi->hdmi_data.enc_out_bus_format =
11325+			MEDIA_BUS_FMT_RGB888_1X24;
11326 
11327 	/* TOFIX: Get input encoding from plat data or fallback to none */
11328-	if (hdmi->plat_data->input_bus_encoding)
11329+	if (hdmi->plat_data->get_enc_in_encoding)
11330+		hdmi->hdmi_data.enc_in_encoding =
11331+			hdmi->plat_data->get_enc_in_encoding(data);
11332+	else if (hdmi->plat_data->input_bus_encoding)
11333 		hdmi->hdmi_data.enc_in_encoding =
11334 			hdmi->plat_data->input_bus_encoding;
11335 	else
11336 		hdmi->hdmi_data.enc_in_encoding = V4L2_YCBCR_ENC_DEFAULT;
11337 
11338-	if (hdmi->hdmi_data.enc_out_bus_format == MEDIA_BUS_FMT_FIXED)
11339-		hdmi->hdmi_data.enc_out_bus_format = MEDIA_BUS_FMT_RGB888_1X24;
11340+
11341+	if (hdmi->plat_data->get_quant_range)
11342+		hdmi->hdmi_data.quant_range =
11343+			hdmi->plat_data->get_quant_range(data);
11344 
11345 	hdmi->hdmi_data.rgb_limited_range = hdmi->sink_is_hdmi &&
11346 		drm_default_rgb_quant_range(mode) ==
11347 		HDMI_QUANTIZATION_RANGE_LIMITED;
11348 
11349-	hdmi->hdmi_data.pix_repet_factor = 0;
11350-	hdmi->hdmi_data.hdcp_enable = 0;
11351+	if (!hdmi->sink_is_hdmi)
11352+		hdmi->hdmi_data.quant_range = HDMI_QUANTIZATION_RANGE_FULL;
11353+
11354+	/*
11355+	 * According to the dw-hdmi specification 6.4.2
11356+	 * vp_pr_cd[3:0]:
11357+	 * 0000b: No pixel repetition (pixel sent only once)
11358+	 * 0001b: Pixel sent two times (pixel repeated once)
11359+	 */
11360+	hdmi->hdmi_data.pix_repet_factor =
11361+		(mode->flags & DRM_MODE_FLAG_DBLCLK) ? 1 : 0;
11362 	hdmi->hdmi_data.video_mode.mdataenablepolarity = true;
11363 
11364 	/* HDMI Initialization Step B.1 */
11365 	hdmi_av_composer(hdmi, &connector->display_info, mode);
11366 
11367 	/* HDMI Initializateion Step B.2 */
11368-	ret = hdmi->phy.ops->init(hdmi, hdmi->phy.data,
11369-				  &connector->display_info,
11370-				  &hdmi->previous_mode);
11371-	if (ret)
11372-		return ret;
11373-	hdmi->phy.enabled = true;
11374+	if (!hdmi->phy.enabled ||
11375+	    hdmi->hdmi_data.video_mode.previous_pixelclock !=
11376+	    hdmi->hdmi_data.video_mode.mpixelclock ||
11377+	    hdmi->hdmi_data.video_mode.previous_tmdsclock !=
11378+	    hdmi->hdmi_data.video_mode.mtmdsclock) {
11379+		ret = hdmi->phy.ops->init(hdmi, hdmi->phy.data,
11380+					  &connector->display_info,
11381+					  &hdmi->previous_mode);
11382+		if (ret)
11383+			return ret;
11384+		hdmi->phy.enabled = true;
11385+	}
11386 
11387 	/* HDMI Initialization Step B.3 */
11388 	dw_hdmi_enable_video_path(hdmi);
11389@@ -2210,7 +2665,7 @@ static int dw_hdmi_setup(struct dw_hdmi *hdmi,
11390 	hdmi_video_packetize(hdmi);
11391 	hdmi_video_csc(hdmi);
11392 	hdmi_video_sample(hdmi);
11393-	hdmi_tx_hdcp_config(hdmi);
11394+	hdmi_tx_hdcp_config(hdmi, mode);
11395 
11396 	dw_hdmi_clear_overflow(hdmi);
11397 
11398@@ -2286,6 +2741,8 @@ static void dw_hdmi_poweroff(struct dw_hdmi *hdmi)
11399 		hdmi->phy.enabled = false;
11400 	}
11401 
11402+	if (hdmi->hdcp && hdmi->hdcp->hdcp_stop)
11403+		hdmi->hdcp->hdcp_stop(hdmi->hdcp);
11404 	hdmi->bridge_is_on = false;
11405 }
11406 
11407@@ -2303,6 +2760,10 @@ static void dw_hdmi_update_power(struct dw_hdmi *hdmi)
11408 	}
11409 
11410 	if (force == DRM_FORCE_OFF) {
11411+		if (hdmi->initialized) {
11412+			hdmi->initialized = false;
11413+			hdmi->disabled = true;
11414+		}
11415 		if (hdmi->bridge_is_on)
11416 			dw_hdmi_poweroff(hdmi);
11417 	} else {
11418@@ -2335,8 +2796,15 @@ static enum drm_connector_status dw_hdmi_detect(struct dw_hdmi *hdmi)
11419 {
11420 	enum drm_connector_status result;
11421 
11422-	result = hdmi->phy.ops->read_hpd(hdmi, hdmi->phy.data);
11423+	if (!hdmi->force_logo) {
11424+		mutex_lock(&hdmi->mutex);
11425+		hdmi->force = DRM_FORCE_UNSPECIFIED;
11426+		dw_hdmi_update_power(hdmi);
11427+		dw_hdmi_update_phy_mask(hdmi);
11428+		mutex_unlock(&hdmi->mutex);
11429+	}
11430 
11431+	result = hdmi->phy.ops->read_hpd(hdmi, hdmi->phy.data);
11432 	mutex_lock(&hdmi->mutex);
11433 	if (result != hdmi->last_connector_result) {
11434 		dev_dbg(hdmi->dev, "read_hpd result: %d", result);
11435@@ -2346,6 +2814,11 @@ static enum drm_connector_status dw_hdmi_detect(struct dw_hdmi *hdmi)
11436 	}
11437 	mutex_unlock(&hdmi->mutex);
11438 
11439+	if (result == connector_status_connected)
11440+		extcon_set_state_sync(hdmi->extcon, EXTCON_DISP_HDMI, true);
11441+	else
11442+		extcon_set_state_sync(hdmi->extcon, EXTCON_DISP_HDMI, false);
11443+
11444 	return result;
11445 }
11446 
11447@@ -2366,7 +2839,7 @@ static struct edid *dw_hdmi_get_edid(struct dw_hdmi *hdmi,
11448 	dev_dbg(hdmi->dev, "got edid: width[%d] x height[%d]\n",
11449 		edid->width_cm, edid->height_cm);
11450 
11451-	hdmi->sink_is_hdmi = drm_detect_hdmi_monitor(edid);
11452+	hdmi->support_hdmi = drm_detect_hdmi_monitor(edid);
11453 	hdmi->sink_has_audio = drm_detect_monitor_audio(edid);
11454 
11455 	return edid;
11456@@ -2384,21 +2857,105 @@ dw_hdmi_connector_detect(struct drm_connector *connector, bool force)
11457 	return dw_hdmi_detect(hdmi);
11458 }
11459 
11460+static int
11461+dw_hdmi_update_hdr_property(struct drm_connector *connector)
11462+{
11463+	struct drm_device *dev = connector->dev;
11464+	struct dw_hdmi *hdmi = container_of(connector, struct dw_hdmi,
11465+					    connector);
11466+	void *data = hdmi->plat_data->phy_data;
11467+	const struct hdr_static_metadata *metadata =
11468+		&connector->hdr_sink_metadata.hdmi_type1;
11469+	size_t size = sizeof(*metadata);
11470+	struct drm_property *property;
11471+	struct drm_property_blob *blob;
11472+	int ret;
11473+
11474+	if (hdmi->plat_data->get_hdr_property)
11475+		property = hdmi->plat_data->get_hdr_property(data);
11476+	else
11477+		return -EINVAL;
11478+
11479+	if (hdmi->plat_data->get_hdr_blob)
11480+		blob = hdmi->plat_data->get_hdr_blob(data);
11481+	else
11482+		return -EINVAL;
11483+
11484+	ret = drm_property_replace_global_blob(dev, &blob, size, metadata,
11485+					       &connector->base, property);
11486+	return ret;
11487+}
11488+
11489 static int dw_hdmi_connector_get_modes(struct drm_connector *connector)
11490 {
11491 	struct dw_hdmi *hdmi = container_of(connector, struct dw_hdmi,
11492 					     connector);
11493+	struct hdr_static_metadata *metadata =
11494+			&connector->hdr_sink_metadata.hdmi_type1;
11495 	struct edid *edid;
11496-	int ret;
11497+	struct drm_display_mode *mode;
11498+	struct drm_display_info *info = &connector->display_info;
11499+	int i, ret = 0;
11500 
11501+	memset(metadata, 0, sizeof(*metadata));
11502 	edid = dw_hdmi_get_edid(hdmi, connector);
11503-	if (!edid)
11504-		return 0;
11505+	if (edid) {
11506+		dev_dbg(hdmi->dev, "got edid: width[%d] x height[%d]\n",
11507+			edid->width_cm, edid->height_cm);
11508+		drm_connector_update_edid_property(connector, edid);
11509+		cec_notifier_set_phys_addr_from_edid(hdmi->cec_notifier, edid);
11510+		ret = drm_add_edid_modes(connector, edid);
11511+		if (hdmi->plat_data->get_color_changed)
11512+			hdmi->plat_data->get_yuv422_format(connector, edid);
11513+		dw_hdmi_update_hdr_property(connector);
11514+		kfree(edid);
11515+	} else {
11516+		hdmi->support_hdmi = true;
11517+		hdmi->sink_has_audio = true;
11518+		for (i = 0; i < ARRAY_SIZE(dw_hdmi_default_modes); i++) {
11519+			const struct drm_display_mode *ptr =
11520+				&dw_hdmi_default_modes[i];
11521+
11522+			mode = drm_mode_duplicate(connector->dev, ptr);
11523+			if (mode) {
11524+				if (!i) {
11525+					mode->type = DRM_MODE_TYPE_PREFERRED;
11526+					mode->picture_aspect_ratio =
11527+						HDMI_PICTURE_ASPECT_NONE;
11528+				}
11529+				drm_mode_probed_add(connector, mode);
11530+				ret++;
11531+			}
11532+		}
11533+		info->edid_hdmi_dc_modes = 0;
11534+		info->hdmi.y420_dc_modes = 0;
11535+		info->color_formats = 0;
11536+
11537+		dev_info(hdmi->dev, "failed to get edid\n");
11538+	}
11539+	dw_hdmi_check_output_type_changed(hdmi);
11540+
11541+	return ret;
11542+}
11543+
11544+static struct drm_encoder *
11545+dw_hdmi_connector_best_encoder(struct drm_connector *connector)
11546+{
11547+	struct dw_hdmi *hdmi = container_of(connector, struct dw_hdmi,
11548+					    connector);
11549+
11550+	return hdmi->bridge.encoder;
11551+}
11552+
11553+static bool dw_hdmi_color_changed(struct drm_connector *connector)
11554+{
11555+	struct dw_hdmi *hdmi = container_of(connector, struct dw_hdmi,
11556+					    connector);
11557+	void *data = hdmi->plat_data->phy_data;
11558+	bool ret = false;
11559 
11560-	drm_connector_update_edid_property(connector, edid);
11561-	cec_notifier_set_phys_addr_from_edid(hdmi->cec_notifier, edid);
11562-	ret = drm_add_edid_modes(connector, edid);
11563-	kfree(edid);
11564+	if (hdmi->plat_data->get_color_changed)
11565+		ret = hdmi->plat_data->get_color_changed(data);
11566 
11567 	return ret;
11568 }
11569@@ -2427,11 +2984,54 @@ static int dw_hdmi_connector_atomic_check(struct drm_connector *connector,
11570 		drm_atomic_get_new_connector_state(state, connector);
11571 	struct drm_crtc *crtc = new_state->crtc;
11572 	struct drm_crtc_state *crtc_state;
11573+	struct dw_hdmi *hdmi = container_of(connector, struct dw_hdmi,
11574+					    connector);
11575+	struct drm_display_mode *mode = NULL;
11576+	void *data = hdmi->plat_data->phy_data;
11577+	struct hdmi_vmode *vmode = &hdmi->hdmi_data.video_mode;
11578+	unsigned int in_bus_format = hdmi->hdmi_data.enc_in_bus_format;
11579+	unsigned int out_bus_format = hdmi->hdmi_data.enc_out_bus_format;
11580+	bool color_changed = false;
11581 
11582 	if (!crtc)
11583 		return 0;
11584 
11585-	if (!hdr_metadata_equal(old_state, new_state)) {
11586+	/*
11587+	 * If HDMI was already enabled in U-Boot, we need to record the
11588+	 * current drm_display_mode and mark the PHY status as enabled.
11589+	 */
11590+	if (!vmode->mpixelclock) {
11591+		crtc_state = drm_atomic_get_crtc_state(state, crtc);
11592+		if (hdmi->plat_data->get_enc_in_encoding)
11593+			hdmi->hdmi_data.enc_in_encoding =
11594+				hdmi->plat_data->get_enc_in_encoding(data);
11595+		if (hdmi->plat_data->get_enc_out_encoding)
11596+			hdmi->hdmi_data.enc_out_encoding =
11597+				hdmi->plat_data->get_enc_out_encoding(data);
11598+		if (hdmi->plat_data->get_input_bus_format)
11599+			hdmi->hdmi_data.enc_in_bus_format =
11600+				hdmi->plat_data->get_input_bus_format(data);
11601+		if (hdmi->plat_data->get_output_bus_format)
11602+			hdmi->hdmi_data.enc_out_bus_format =
11603+				hdmi->plat_data->get_output_bus_format(data);
11604+
11605+		mode = &crtc_state->mode;
11606+		memcpy(&hdmi->previous_mode, mode, sizeof(hdmi->previous_mode));
11607+		vmode->mpixelclock = mode->crtc_clock * 1000;
11608+		vmode->previous_pixelclock = mode->clock;
11609+		vmode->previous_tmdsclock = mode->clock;
11610+		vmode->mtmdsclock = hdmi_get_tmdsclock(hdmi,
11611+						       vmode->mpixelclock);
11612+		if (hdmi_bus_fmt_is_yuv420(hdmi->hdmi_data.enc_out_bus_format))
11613+			vmode->mtmdsclock /= 2;
11614+
11615+		if (in_bus_format != hdmi->hdmi_data.enc_in_bus_format ||
11616+		    out_bus_format != hdmi->hdmi_data.enc_out_bus_format)
11617+			color_changed = true;
11618+	}
11619+
11620+	if (!hdr_metadata_equal(old_state, new_state) ||
11621+	    dw_hdmi_color_changed(connector) || color_changed) {
11622 		crtc_state = drm_atomic_get_crtc_state(state, crtc);
11623 		if (IS_ERR(crtc_state))
11624 			return PTR_ERR(crtc_state);
11625@@ -2442,15 +3042,108 @@ static int dw_hdmi_connector_atomic_check(struct drm_connector *connector,
11626 	return 0;
11627 }
11628 
11629-static void dw_hdmi_connector_force(struct drm_connector *connector)
11630+static int
11631+dw_hdmi_atomic_connector_set_property(struct drm_connector *connector,
11632+				      struct drm_connector_state *state,
11633+				      struct drm_property *property,
11634+				      uint64_t val)
11635 {
11636 	struct dw_hdmi *hdmi = container_of(connector, struct dw_hdmi,
11637 					     connector);
11638+	const struct dw_hdmi_property_ops *ops =
11639+				hdmi->plat_data->property_ops;
11640 
11641-	mutex_lock(&hdmi->mutex);
11642-	hdmi->force = connector->force;
11643-	dw_hdmi_update_power(hdmi);
11644-	dw_hdmi_update_phy_mask(hdmi);
11645+	if (ops && ops->set_property)
11646+		return ops->set_property(connector, state, property,
11647+					 val, hdmi->plat_data->phy_data);
11648+	else
11649+		return -EINVAL;
11650+}
11651+
11652+static int
11653+dw_hdmi_atomic_connector_get_property(struct drm_connector *connector,
11654+				      const struct drm_connector_state *state,
11655+				      struct drm_property *property,
11656+				      uint64_t *val)
11657+{
11658+	struct dw_hdmi *hdmi = container_of(connector, struct dw_hdmi,
11659+					     connector);
11660+	const struct dw_hdmi_property_ops *ops =
11661+				hdmi->plat_data->property_ops;
11662+
11663+	if (ops && ops->get_property)
11664+		return ops->get_property(connector, state, property,
11665+					 val, hdmi->plat_data->phy_data);
11666+	else
11667+		return -EINVAL;
11668+}
11669+
11670+static int
11671+dw_hdmi_connector_set_property(struct drm_connector *connector,
11672+			       struct drm_property *property, uint64_t val)
11673+{
11674+	return dw_hdmi_atomic_connector_set_property(connector, NULL,
11675+						     property, val);
11676+}
11677+
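+/*
+ * Reapply the current configuration when the quantization range changes.
+ * AVMUTE is set via a General Control Packet around dw_hdmi_setup() so the
+ * sink does not show a partially reprogrammed frame.
+ */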
11678+void dw_hdmi_set_quant_range(struct dw_hdmi *hdmi)
11679+{
11680+	if (!hdmi->bridge_is_on)
11681+		return;
11682+
11683+	hdmi_writeb(hdmi, HDMI_FC_GCP_SET_AVMUTE, HDMI_FC_GCP);
11684+	dw_hdmi_setup(hdmi, hdmi->curr_conn, &hdmi->previous_mode);
11685+	hdmi_writeb(hdmi, HDMI_FC_GCP_CLEAR_AVMUTE, HDMI_FC_GCP);
11686+}
11687+EXPORT_SYMBOL_GPL(dw_hdmi_set_quant_range);
11688+
11689+void dw_hdmi_set_output_type(struct dw_hdmi *hdmi, u64 val)
11690+{
11691+	hdmi->force_output = val;
11692+
11693+	if (!dw_hdmi_check_output_type_changed(hdmi))
11694+		return;
11695+
11696+	if (!hdmi->bridge_is_on)
11697+		return;
11698+
11699+	hdmi_writeb(hdmi, HDMI_FC_GCP_SET_AVMUTE, HDMI_FC_GCP);
11700+	dw_hdmi_setup(hdmi, hdmi->curr_conn, &hdmi->previous_mode);
11701+	hdmi_writeb(hdmi, HDMI_FC_GCP_CLEAR_AVMUTE, HDMI_FC_GCP);
11702+}
11703+EXPORT_SYMBOL_GPL(dw_hdmi_set_output_type);
11704+
11705+bool dw_hdmi_get_output_whether_hdmi(struct dw_hdmi *hdmi)
11706+{
11707+	return hdmi->sink_is_hdmi;
11708+}
11709+EXPORT_SYMBOL_GPL(dw_hdmi_get_output_whether_hdmi);
11710+
11711+int dw_hdmi_get_output_type_cap(struct dw_hdmi *hdmi)
11712+{
11713+	return hdmi->support_hdmi;
11714+}
11715+EXPORT_SYMBOL_GPL(dw_hdmi_get_output_type_cap);
11716+
11717+static void dw_hdmi_connector_force(struct drm_connector *connector)
11718+{
11719+	struct dw_hdmi *hdmi = container_of(connector, struct dw_hdmi,
11720+					     connector);
11721+
11722+	mutex_lock(&hdmi->mutex);
11723+
11724+	if (hdmi->force != connector->force) {
11725+		if (!hdmi->disabled && connector->force == DRM_FORCE_OFF)
11726+			extcon_set_state_sync(hdmi->extcon, EXTCON_DISP_HDMI,
11727+					      false);
11728+		else if (hdmi->disabled && connector->force == DRM_FORCE_ON)
11729+			extcon_set_state_sync(hdmi->extcon, EXTCON_DISP_HDMI,
11730+					      true);
11731+	}
11732+
11733+	hdmi->force = connector->force;
11734+	dw_hdmi_update_power(hdmi);
11735+	dw_hdmi_update_phy_mask(hdmi);
11736 	mutex_unlock(&hdmi->mutex);
11737 }
11738 
11739@@ -2460,15 +3153,98 @@ static const struct drm_connector_funcs dw_hdmi_connector_funcs = {
11740 	.destroy = drm_connector_cleanup,
11741 	.force = dw_hdmi_connector_force,
11742 	.reset = drm_atomic_helper_connector_reset,
11743+	.set_property = dw_hdmi_connector_set_property,
11744 	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
11745 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
11746+	.atomic_set_property = dw_hdmi_atomic_connector_set_property,
11747+	.atomic_get_property = dw_hdmi_atomic_connector_get_property,
11748 };
11749 
11750 static const struct drm_connector_helper_funcs dw_hdmi_connector_helper_funcs = {
11751 	.get_modes = dw_hdmi_connector_get_modes,
11752+	.best_encoder = dw_hdmi_connector_best_encoder,
11753 	.atomic_check = dw_hdmi_connector_atomic_check,
11754 };
11755 
11756+static void dw_hdmi_attach_properties(struct dw_hdmi *hdmi)
11757+{
11758+	unsigned int color = MEDIA_BUS_FMT_RGB888_1X24;
11759+	int video_mapping, colorspace;
11760+	enum drm_connector_status connect_status =
11761+		hdmi->phy.ops->read_hpd(hdmi, hdmi->phy.data);
11762+	const struct dw_hdmi_property_ops *ops =
11763+				hdmi->plat_data->property_ops;
11764+
11765+	if (connect_status == connector_status_connected) {
11766+		video_mapping = (hdmi_readb(hdmi, HDMI_TX_INVID0) &
11767+				  HDMI_TX_INVID0_VIDEO_MAPPING_MASK);
11768+		colorspace = (hdmi_readb(hdmi, HDMI_FC_AVICONF0) &
11769+			      HDMI_FC_AVICONF0_PIX_FMT_MASK);
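+		/*
+		 * Translate the hardware video mapping code (TX_INVID0) and the
+		 * AVI colorspace field back into a media bus format, presumably
+		 * reflecting what the bootloader left configured.
+		 */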
11770+		switch (video_mapping) {
11771+		case 0x01:
11772+			color = MEDIA_BUS_FMT_RGB888_1X24;
11773+			break;
11774+		case 0x03:
11775+			color = MEDIA_BUS_FMT_RGB101010_1X30;
11776+			break;
11777+		case 0x09:
11778+			if (colorspace == HDMI_COLORSPACE_YUV420)
11779+				color = MEDIA_BUS_FMT_UYYVYY8_0_5X24;
11780+			else if (colorspace == HDMI_COLORSPACE_YUV422)
11781+				color = MEDIA_BUS_FMT_UYVY8_1X16;
11782+			else
11783+				color = MEDIA_BUS_FMT_YUV8_1X24;
11784+			break;
11785+		case 0x0b:
11786+			if (colorspace == HDMI_COLORSPACE_YUV420)
11787+				color = MEDIA_BUS_FMT_UYYVYY10_0_5X30;
11788+			else if (colorspace == HDMI_COLORSPACE_YUV422)
11789+				color = MEDIA_BUS_FMT_UYVY10_1X20;
11790+			else
11791+				color = MEDIA_BUS_FMT_YUV10_1X30;
11792+			break;
11793+		case 0x14:
11794+			color = MEDIA_BUS_FMT_UYVY10_1X20;
11795+			break;
11796+		case 0x16:
11797+			color = MEDIA_BUS_FMT_UYVY8_1X16;
11798+			break;
11799+		default:
11800+			color = MEDIA_BUS_FMT_RGB888_1X24;
11801+			dev_err(hdmi->dev, "unexpected mapping: 0x%x\n",
11802+				video_mapping);
11803+		}
11804+
11805+		hdmi->hdmi_data.enc_in_bus_format = color;
11806+		hdmi->hdmi_data.enc_out_bus_format = color;
11807+		/*
11808+		 * The input format is set to YUV444 when the output
11809+		 * format is YUV422.
11810+		 */
11811+		if (color == MEDIA_BUS_FMT_UYVY10_1X20)
11812+			hdmi->hdmi_data.enc_in_bus_format =
11813+				MEDIA_BUS_FMT_YUV10_1X30;
11814+		else if (color == MEDIA_BUS_FMT_UYVY8_1X16)
11815+			hdmi->hdmi_data.enc_in_bus_format =
11816+				MEDIA_BUS_FMT_YUV8_1X24;
11817+	}
11818+
11819+	if (ops && ops->attach_properties)
11820+		return ops->attach_properties(&hdmi->connector,
11821+					      color, hdmi->version,
11822+					      hdmi->plat_data->phy_data);
11823+}
11824+
11825+static void dw_hdmi_destroy_properties(struct dw_hdmi *hdmi)
11826+{
11827+	const struct dw_hdmi_property_ops *ops =
11828+				hdmi->plat_data->property_ops;
11829+
11830+	if (ops && ops->destroy_properties)
11831+		return ops->destroy_properties(&hdmi->connector,
11832+					       hdmi->plat_data->phy_data);
11833+}
11834+
11835 static int dw_hdmi_connector_create(struct dw_hdmi *hdmi)
11836 {
11837 	struct drm_connector *connector = &hdmi->connector;
11838@@ -2505,6 +3281,8 @@ static int dw_hdmi_connector_create(struct dw_hdmi *hdmi)
11839 
11840 	drm_connector_attach_encoder(connector, hdmi->bridge.encoder);
11841 
11842+	dw_hdmi_attach_properties(hdmi);
11843+
11844 	cec_fill_conn_info_from_drm(&conn_info, connector);
11845 
11846 	notifier = cec_notifier_conn_register(hdmi->dev, NULL, &conn_info);
11847@@ -2780,16 +3558,36 @@ static int dw_hdmi_bridge_atomic_check(struct drm_bridge *bridge,
11848 				       struct drm_connector_state *conn_state)
11849 {
11850 	struct dw_hdmi *hdmi = bridge->driver_private;
11851+	void *data = hdmi->plat_data->phy_data;
11852 
11853-	hdmi->hdmi_data.enc_out_bus_format =
11854-			bridge_state->output_bus_cfg.format;
11855+	if (bridge_state->output_bus_cfg.format == MEDIA_BUS_FMT_FIXED) {
11856+		if (hdmi->plat_data->get_output_bus_format)
11857+			hdmi->hdmi_data.enc_out_bus_format =
11858+				hdmi->plat_data->get_output_bus_format(data);
11859+		else
11860+			hdmi->hdmi_data.enc_out_bus_format =
11861+				MEDIA_BUS_FMT_RGB888_1X24;
11862+
11863+		if (hdmi->plat_data->get_input_bus_format)
11864+			hdmi->hdmi_data.enc_in_bus_format =
11865+				hdmi->plat_data->get_input_bus_format(data);
11866+		else if (hdmi->plat_data->input_bus_format)
11867+			hdmi->hdmi_data.enc_in_bus_format =
11868+				hdmi->plat_data->input_bus_format;
11869+		else
11870+			hdmi->hdmi_data.enc_in_bus_format =
11871+				MEDIA_BUS_FMT_RGB888_1X24;
11872+	} else {
11873+		hdmi->hdmi_data.enc_out_bus_format =
11874+				bridge_state->output_bus_cfg.format;
11875 
11876-	hdmi->hdmi_data.enc_in_bus_format =
11877-			bridge_state->input_bus_cfg.format;
11878+		hdmi->hdmi_data.enc_in_bus_format =
11879+				bridge_state->input_bus_cfg.format;
11880 
11881-	dev_dbg(hdmi->dev, "input format 0x%04x, output format 0x%04x\n",
11882-		bridge_state->input_bus_cfg.format,
11883-		bridge_state->output_bus_cfg.format);
11884+		dev_dbg(hdmi->dev, "input format 0x%04x, output format 0x%04x\n",
11885+			bridge_state->input_bus_cfg.format,
11886+			bridge_state->output_bus_cfg.format);
11887+	}
11888 
11889 	return 0;
11890 }
11891@@ -2798,10 +3596,22 @@ static int dw_hdmi_bridge_attach(struct drm_bridge *bridge,
11892 				 enum drm_bridge_attach_flags flags)
11893 {
11894 	struct dw_hdmi *hdmi = bridge->driver_private;
11895+	int ret;
11896 
11897 	if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)
11898 		return 0;
11899 
11900+	if (hdmi->next_bridge) {
11901+		hdmi->next_bridge->encoder = bridge->encoder;
11902+		ret = drm_bridge_attach(bridge->encoder, hdmi->next_bridge, bridge, flags);
11903+		if (ret) {
11904+			DRM_ERROR("Failed to attach next bridge to dw-hdmi\n");
11905+			return ret;
11906+		}
11907+
11908+		return 0;
11909+	}
11910+
11911 	return dw_hdmi_connector_create(hdmi);
11912 }
11913 
11914@@ -2821,17 +3631,16 @@ dw_hdmi_bridge_mode_valid(struct drm_bridge *bridge,
11915 			  const struct drm_display_mode *mode)
11916 {
11917 	struct dw_hdmi *hdmi = bridge->driver_private;
11918+	struct drm_connector *connector = &hdmi->connector;
11919 	const struct dw_hdmi_plat_data *pdata = hdmi->plat_data;
11920 	enum drm_mode_status mode_status = MODE_OK;
11921 
11922-	/* We don't support double-clocked modes */
11923-	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
11924-		return MODE_BAD;
11925+	if (hdmi->next_bridge)
11926+		return MODE_OK;
11927 
11928 	if (pdata->mode_valid)
11929-		mode_status = pdata->mode_valid(hdmi, pdata->priv_data, info,
11930-						mode);
11931-
11932+		mode_status = pdata->mode_valid(connector, pdata->priv_data,
11933+						info, mode);
11934 	return mode_status;
11935 }
11936 
11937@@ -2912,6 +3721,12 @@ static const struct drm_bridge_funcs dw_hdmi_bridge_funcs = {
11938 	.get_edid = dw_hdmi_bridge_get_edid,
11939 };
11940 
11941+void dw_hdmi_set_cec_adap(struct dw_hdmi *hdmi, struct cec_adapter *adap)
11942+{
11943+	hdmi->cec_adap = adap;
11944+}
11945+EXPORT_SYMBOL_GPL(dw_hdmi_set_cec_adap);
11946+
11947 /* -----------------------------------------------------------------------------
11948  * IRQ Handling
11949  */
11950@@ -2937,7 +3752,7 @@ static irqreturn_t dw_hdmi_i2c_irq(struct dw_hdmi *hdmi)
11951 static irqreturn_t dw_hdmi_hardirq(int irq, void *dev_id)
11952 {
11953 	struct dw_hdmi *hdmi = dev_id;
11954-	u8 intr_stat;
11955+	u8 intr_stat, hdcp_stat;
11956 	irqreturn_t ret = IRQ_NONE;
11957 
11958 	if (hdmi->i2c)
11959@@ -2949,6 +3764,13 @@ static irqreturn_t dw_hdmi_hardirq(int irq, void *dev_id)
11960 		return IRQ_WAKE_THREAD;
11961 	}
11962 
11963+	hdcp_stat = hdmi_readb(hdmi, HDMI_A_APIINTSTAT);
11964+	if (hdcp_stat) {
11965+		dev_dbg(hdmi->dev, "HDCP irq %#x\n", hdcp_stat);
11966+		hdmi_writeb(hdmi, 0xff, HDMI_A_APIINTMSK);
11967+		return IRQ_WAKE_THREAD;
11968+	}
11969+
11970 	return ret;
11971 }
11972 
11973@@ -2956,7 +3778,7 @@ void dw_hdmi_setup_rx_sense(struct dw_hdmi *hdmi, bool hpd, bool rx_sense)
11974 {
11975 	mutex_lock(&hdmi->mutex);
11976 
11977-	if (!hdmi->force) {
11978+	if (!hdmi->force && !hdmi->force_logo) {
11979 		/*
11980 		 * If the RX sense status indicates we're disconnected,
11981 		 * clear the software rxsense status.
11982@@ -2983,8 +3805,7 @@ EXPORT_SYMBOL_GPL(dw_hdmi_setup_rx_sense);
11983 static irqreturn_t dw_hdmi_irq(int irq, void *dev_id)
11984 {
11985 	struct dw_hdmi *hdmi = dev_id;
11986-	u8 intr_stat, phy_int_pol, phy_pol_mask, phy_stat;
11987-	enum drm_connector_status status = connector_status_unknown;
11988+	u8 intr_stat, phy_int_pol, phy_pol_mask, phy_stat, hdcp_stat;
11989 
11990 	intr_stat = hdmi_readb(hdmi, HDMI_IH_PHY_STAT0);
11991 	phy_int_pol = hdmi_readb(hdmi, HDMI_PHY_POL0);
11992@@ -3023,29 +3844,23 @@ static irqreturn_t dw_hdmi_irq(int irq, void *dev_id)
11993 			cec_notifier_phys_addr_invalidate(hdmi->cec_notifier);
11994 			mutex_unlock(&hdmi->cec_notifier_mutex);
11995 		}
11996-
11997-		if (phy_stat & HDMI_PHY_HPD)
11998-			status = connector_status_connected;
11999-
12000-		if (!(phy_stat & (HDMI_PHY_HPD | HDMI_PHY_RX_SENSE)))
12001-			status = connector_status_disconnected;
12002 	}
12003 
12004-	if (status != connector_status_unknown) {
12005-		dev_dbg(hdmi->dev, "EVENT=%s\n",
12006-			status == connector_status_connected ?
12007-			"plugin" : "plugout");
12008-
12009-		if (hdmi->bridge.dev) {
12010-			drm_helper_hpd_irq_event(hdmi->bridge.dev);
12011-			drm_bridge_hpd_notify(&hdmi->bridge, status);
12012-		}
12013-	}
12014+	check_hdmi_irq(hdmi, intr_stat, phy_int_pol);
12015 
12016 	hdmi_writeb(hdmi, intr_stat, HDMI_IH_PHY_STAT0);
12017-	hdmi_writeb(hdmi, ~(HDMI_IH_PHY_STAT0_HPD | HDMI_IH_PHY_STAT0_RX_SENSE),
12018-		    HDMI_IH_MUTE_PHY_STAT0);
12019-
12020+	if (!hdmi->next_bridge)
12021+		hdmi_writeb(hdmi, ~(HDMI_IH_PHY_STAT0_HPD |
12022+			    HDMI_IH_PHY_STAT0_RX_SENSE),
12023+			    HDMI_IH_MUTE_PHY_STAT0);
12024+
12025+	hdcp_stat = hdmi_readb(hdmi, HDMI_A_APIINTSTAT);
12026+	if (hdcp_stat) {
12027+		if (hdmi->hdcp)
12028+			hdmi->hdcp->hdcp_isr(hdmi->hdcp, hdcp_stat);
12029+		hdmi_writeb(hdmi, hdcp_stat, HDMI_A_APIINTCLR);
12030+		hdmi_writeb(hdmi, 0x00, HDMI_A_APIINTMSK);
12031+	}
12032 	return IRQ_HANDLED;
12033 }
12034 
12035@@ -3179,12 +3994,363 @@ static void dw_hdmi_init_hw(struct dw_hdmi *hdmi)
12036 	 * Even if we are using a separate i2c adapter doing this doesn't
12037 	 * hurt.
12038 	 */
12039-	dw_hdmi_i2c_init(hdmi);
12040+	if (hdmi->i2c)
12041+		dw_hdmi_i2c_init(hdmi);
12042 
12043 	if (hdmi->phy.ops->setup_hpd)
12044 		hdmi->phy.ops->setup_hpd(hdmi, hdmi->phy.data);
12045 }
12046 
12047+static int dw_hdmi_status_show(struct seq_file *s, void *v)
12048+{
12049+	struct dw_hdmi *hdmi = s->private;
12050+	u32 val;
12051+
12052+	seq_puts(s, "PHY: ");
12053+	if (!hdmi->phy.enabled) {
12054+		seq_puts(s, "disabled\n");
12055+		return 0;
12056+	}
12057+	seq_puts(s, "enabled\t\t\tMode: ");
12058+	if (hdmi->sink_is_hdmi)
12059+		seq_puts(s, "HDMI\n");
12060+	else
12061+		seq_puts(s, "DVI\n");
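+	/*
+	 * Above 340 MHz the link runs in HDMI 2.0 scrambled mode, where the
+	 * TMDS clock toggles at 1/4 of the TMDS character rate.
+	 */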
12062+	if (hdmi->hdmi_data.video_mode.mtmdsclock > 340000000)
12063+		val = hdmi->hdmi_data.video_mode.mtmdsclock / 4;
12064+	else
12065+		val = hdmi->hdmi_data.video_mode.mtmdsclock;
12066+	seq_printf(s, "Pixel Clk: %uHz\t\tTMDS Clk: %uHz\n",
12067+		   hdmi->hdmi_data.video_mode.mpixelclock, val);
12068+	seq_puts(s, "Color Format: ");
12069+	if (hdmi_bus_fmt_is_rgb(hdmi->hdmi_data.enc_out_bus_format))
12070+		seq_puts(s, "RGB");
12071+	else if (hdmi_bus_fmt_is_yuv444(hdmi->hdmi_data.enc_out_bus_format))
12072+		seq_puts(s, "YUV444");
12073+	else if (hdmi_bus_fmt_is_yuv422(hdmi->hdmi_data.enc_out_bus_format))
12074+		seq_puts(s, "YUV422");
12075+	else if (hdmi_bus_fmt_is_yuv420(hdmi->hdmi_data.enc_out_bus_format))
12076+		seq_puts(s, "YUV420");
12077+	else
12078+		seq_puts(s, "UNKNOWN");
12079+	val = hdmi_bus_fmt_color_depth(hdmi->hdmi_data.enc_out_bus_format);
12080+	seq_printf(s, "\t\tColor Depth: %d bit\n", val);
12081+	seq_puts(s, "Colorimetry: ");
12082+	switch (hdmi->hdmi_data.enc_out_encoding) {
12083+	case V4L2_YCBCR_ENC_601:
12084+		seq_puts(s, "ITU.BT601");
12085+		break;
12086+	case V4L2_YCBCR_ENC_709:
12087+		seq_puts(s, "ITU.BT709");
12088+		break;
12089+	case V4L2_YCBCR_ENC_BT2020:
12090+		seq_puts(s, "ITU.BT2020");
12091+		break;
12092+	default: /* Carries no data */
12093+		seq_puts(s, "ITU.BT601");
12094+		break;
12095+	}
12096+
12097+	seq_puts(s, "\t\tEOTF: ");
12098+
12099+	if (hdmi->version < 0x211a) {
12100+		seq_puts(s, "Unsupported\n");
12101+		return 0;
12102+	}
12103+
12104+	val = hdmi_readb(hdmi, HDMI_FC_PACKET_TX_EN);
12105+	if (!(val & HDMI_FC_PACKET_TX_EN_DRM_MASK)) {
12106+		seq_puts(s, "Off\n");
12107+		return 0;
12108+	}
12109+
12110+	switch (hdmi_readb(hdmi, HDMI_FC_DRM_PB0)) {
12111+	case HDMI_EOTF_TRADITIONAL_GAMMA_SDR:
12112+		seq_puts(s, "SDR");
12113+		break;
12114+	case HDMI_EOTF_TRADITIONAL_GAMMA_HDR:
12115+		seq_puts(s, "HDR");
12116+		break;
12117+	case HDMI_EOTF_SMPTE_ST2084:
12118+		seq_puts(s, "ST2084");
12119+		break;
12120+	case HDMI_EOTF_BT_2100_HLG:
12121+		seq_puts(s, "HLG");
12122+		break;
12123+	default:
12124+		seq_puts(s, "Not Defined\n");
12125+		return 0;
12126+	}
12127+
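+	/*
+	 * Dump the HDR static metadata (DRM infoframe) payload: display
+	 * primaries, white point, mastering luminance, MaxCLL and MaxFALL.
+	 */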
12128+	val = hdmi_readb(hdmi, HDMI_FC_DRM_PB3) << 8;
12129+	val |= hdmi_readb(hdmi, HDMI_FC_DRM_PB2);
12130+	seq_printf(s, "\nx0: %d", val);
12131+	val = hdmi_readb(hdmi, HDMI_FC_DRM_PB5) << 8;
12132+	val |= hdmi_readb(hdmi, HDMI_FC_DRM_PB4);
12133+	seq_printf(s, "\t\t\t\ty0: %d\n", val);
12134+	val = hdmi_readb(hdmi, HDMI_FC_DRM_PB7) << 8;
12135+	val |= hdmi_readb(hdmi, HDMI_FC_DRM_PB6);
12136+	seq_printf(s, "x1: %d", val);
12137+	val = hdmi_readb(hdmi, HDMI_FC_DRM_PB9) << 8;
12138+	val |= hdmi_readb(hdmi, HDMI_FC_DRM_PB8);
12139+	seq_printf(s, "\t\t\t\ty1: %d\n", val);
12140+	val = hdmi_readb(hdmi, HDMI_FC_DRM_PB11) << 8;
12141+	val |= hdmi_readb(hdmi, HDMI_FC_DRM_PB10);
12142+	seq_printf(s, "x2: %d", val);
12143+	val = hdmi_readb(hdmi, HDMI_FC_DRM_PB13) << 8;
12144+	val |= hdmi_readb(hdmi, HDMI_FC_DRM_PB12);
12145+	seq_printf(s, "\t\t\t\ty2: %d\n", val);
12146+	val = hdmi_readb(hdmi, HDMI_FC_DRM_PB15) << 8;
12147+	val |= hdmi_readb(hdmi, HDMI_FC_DRM_PB14);
12148+	seq_printf(s, "white x: %d", val);
12149+	val = hdmi_readb(hdmi, HDMI_FC_DRM_PB17) << 8;
12150+	val |= hdmi_readb(hdmi, HDMI_FC_DRM_PB16);
12151+	seq_printf(s, "\t\t\twhite y: %d\n", val);
12152+	val = hdmi_readb(hdmi, HDMI_FC_DRM_PB19) << 8;
12153+	val |= hdmi_readb(hdmi, HDMI_FC_DRM_PB18);
12154+	seq_printf(s, "max lum: %d", val);
12155+	val = hdmi_readb(hdmi, HDMI_FC_DRM_PB21) << 8;
12156+	val |= hdmi_readb(hdmi, HDMI_FC_DRM_PB20);
12157+	seq_printf(s, "\t\t\tmin lum: %d\n", val);
12158+	val = hdmi_readb(hdmi, HDMI_FC_DRM_PB23) << 8;
12159+	val |= hdmi_readb(hdmi, HDMI_FC_DRM_PB22);
12160+	seq_printf(s, "max cll: %d", val);
12161+	val = hdmi_readb(hdmi, HDMI_FC_DRM_PB25) << 8;
12162+	val |= hdmi_readb(hdmi, HDMI_FC_DRM_PB24);
12163+	seq_printf(s, "\t\t\tmax fall: %d\n", val);
12164+	return 0;
12165+}
12166+
12167+static int dw_hdmi_status_open(struct inode *inode, struct file *file)
12168+{
12169+	return single_open(file, dw_hdmi_status_show, inode->i_private);
12170+}
12171+
12172+static const struct file_operations dw_hdmi_status_fops = {
12173+	.owner = THIS_MODULE,
12174+	.open = dw_hdmi_status_open,
12175+	.read = seq_read,
12176+	.llseek = seq_lseek,
12177+	.release = single_release,
12178+};
12179+
12180+#include <linux/fs.h>
12181+#include <linux/debugfs.h>
12182+#include <linux/seq_file.h>
12183+
12184+struct dw_hdmi_reg_table {
12185+	int reg_base;
12186+	int reg_end;
12187+};
12188+
12189+static const struct dw_hdmi_reg_table hdmi_reg_table[] = {
12190+	{HDMI_DESIGN_ID, HDMI_CONFIG3_ID},
12191+	{HDMI_IH_FC_STAT0, HDMI_IH_MUTE},
12192+	{HDMI_TX_INVID0, HDMI_TX_BCBDATA1},
12193+	{HDMI_VP_STATUS, HDMI_VP_POL},
12194+	{HDMI_FC_INVIDCONF, HDMI_FC_DBGTMDS2},
12195+	{HDMI_PHY_CONF0, HDMI_PHY_POL0},
12196+	{HDMI_PHY_I2CM_SLAVE_ADDR, HDMI_PHY_I2CM_FS_SCL_LCNT_0_ADDR},
12197+	{HDMI_AUD_CONF0, 0x3624},
12198+	{HDMI_MC_SFRDIV, HDMI_MC_HEACPHY_RST},
12199+	{HDMI_CSC_CFG, HDMI_CSC_COEF_C4_LSB},
12200+	{HDMI_A_HDCPCFG0, 0x52bb},
12201+	{0x7800, 0x7818},
12202+	{0x7900, 0x790e},
12203+	{HDMI_CEC_CTRL, HDMI_CEC_WKUPCTRL},
12204+	{HDMI_I2CM_SLAVE, 0x7e31},
12205+};
12206+
12207+static int dw_hdmi_ctrl_show(struct seq_file *s, void *v)
12208+{
12209+	struct dw_hdmi *hdmi = s->private;
12210+	u32 i = 0, j = 0, val = 0;
12211+
12212+	seq_puts(s, "\n>>>hdmi_ctl reg ");
12213+	for (i = 0; i < 16; i++)
12214+		seq_printf(s, " %2x", i);
12215+	seq_puts(s, "\n---------------------------------------------------");
12216+
12217+	for (i = 0; i < ARRAY_SIZE(hdmi_reg_table); i++) {
12218+		for (j = hdmi_reg_table[i].reg_base;
12219+		     j <= hdmi_reg_table[i].reg_end; j++) {
12220+			val = hdmi_readb(hdmi, j);
12221+			if ((j - hdmi_reg_table[i].reg_base) % 16 == 0)
12222+				seq_printf(s, "\n>>>hdmi_ctl %04x:", j);
12223+			seq_printf(s, " %02x", val);
12224+		}
12225+	}
12226+	seq_puts(s, "\n---------------------------------------------------\n");
12227+
12228+	return 0;
12229+}
12230+
12231+static int dw_hdmi_ctrl_open(struct inode *inode, struct file *file)
12232+{
12233+	return single_open(file, dw_hdmi_ctrl_show, inode->i_private);
12234+}
12235+
12236+static ssize_t
12237+dw_hdmi_ctrl_write(struct file *file, const char __user *buf,
12238+		   size_t count, loff_t *ppos)
12239+{
12240+	struct dw_hdmi *hdmi =
12241+		((struct seq_file *)file->private_data)->private;
12242+	u32 reg, val;
12243+	char kbuf[25] = "";
12244+
12245+	if (count >= sizeof(kbuf) || copy_from_user(kbuf, buf, count))
12246+		return -EFAULT;
12247+	if (sscanf(kbuf, "%x%x", &reg, &val) != 2)
12248+		return -EFAULT;
12249+	if (reg > HDMI_I2CM_FS_SCL_LCNT_0_ADDR) {
12250+		dev_err(hdmi->dev, "it is not an hdmi register\n");
12251+		return count;
12252+	}
12253+	dev_info(hdmi->dev, "/**********hdmi register config******/");
12254+	dev_info(hdmi->dev, "\n reg=%x val=%x\n", reg, val);
12255+	hdmi_writeb(hdmi, val, reg);
12256+	return count;
12257+}
12258+
12259+static const struct file_operations dw_hdmi_ctrl_fops = {
12260+	.owner = THIS_MODULE,
12261+	.open = dw_hdmi_ctrl_open,
12262+	.read = seq_read,
12263+	.write = dw_hdmi_ctrl_write,
12264+	.llseek = seq_lseek,
12265+	.release = single_release,
12266+};
12267+
12268+static int dw_hdmi_phy_show(struct seq_file *s, void *v)
12269+{
12270+	struct dw_hdmi *hdmi = s->private;
12271+	u32 i;
12272+
12273+	seq_puts(s, "\n>>>hdmi_phy reg ");
12274+	for (i = 0; i < 0x28; i++)
12275+		seq_printf(s, "regs %02x val %04x\n",
12276+			   i, hdmi_phy_i2c_read(hdmi, i));
12277+	return 0;
12278+}
12279+
12280+static int dw_hdmi_phy_open(struct inode *inode, struct file *file)
12281+{
12282+	return single_open(file, dw_hdmi_phy_show, inode->i_private);
12283+}
12284+
12285+static ssize_t
12286+dw_hdmi_phy_write(struct file *file, const char __user *buf,
12287+		  size_t count, loff_t *ppos)
12288+{
12289+	struct dw_hdmi *hdmi =
12290+		((struct seq_file *)file->private_data)->private;
12291+	u32 reg, val;
12292+	char kbuf[25];
12293+	char kbuf[25] = "";
12294+
12295+	if (count >= sizeof(kbuf) || copy_from_user(kbuf, buf, count))
12296+		return -EFAULT;
12297+	if (sscanf(kbuf, "%x%x", &reg, &val) != 2)
12298+		return -EFAULT;
12299+		dev_err(hdmi->dev, "it is not a hdmi phy register\n");
12300+		dev_err(hdmi->dev, "it is not an hdmi phy register\n");
12301+	}
12302+	dev_info(hdmi->dev, "/*******hdmi phy register config******/");
12303+	dev_info(hdmi->dev, "\n reg=%x val=%x\n", reg, val);
12304+	dw_hdmi_phy_i2c_write(hdmi, val, reg);
12305+	return count;
12306+}
12307+
12308+static const struct file_operations dw_hdmi_phy_fops = {
12309+	.owner = THIS_MODULE,
12310+	.open = dw_hdmi_phy_open,
12311+	.read = seq_read,
12312+	.write = dw_hdmi_phy_write,
12313+	.llseek = seq_lseek,
12314+	.release = single_release,
12315+};
12316+
12317+static void dw_hdmi_register_debugfs(struct device *dev, struct dw_hdmi *hdmi)
12318+{
12319+	hdmi->debugfs_dir = debugfs_create_dir("dw-hdmi", NULL);
12320+	if (IS_ERR(hdmi->debugfs_dir)) {
12321+		dev_err(dev, "failed to create debugfs dir!\n");
12322+		return;
12323+	}
12324+	debugfs_create_file("status", 0400, hdmi->debugfs_dir,
12325+			    hdmi, &dw_hdmi_status_fops);
12326+	debugfs_create_file("ctrl", 0400, hdmi->debugfs_dir,
12327+			    hdmi, &dw_hdmi_ctrl_fops);
12328+	debugfs_create_file("phy", 0400, hdmi->debugfs_dir,
12329+			    hdmi, &dw_hdmi_phy_fops);
12330+}
12331+
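+/*
+ * Register a child platform device for the controller's HDCP block; the
+ * dw-hdcp driver reuses this controller's register accessors through the
+ * platform data passed here.
+ */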
12332+static void dw_hdmi_register_hdcp(struct device *dev, struct dw_hdmi *hdmi,
12333+				  u32 val, bool hdcp1x_enable)
12334+{
12335+	struct dw_hdcp hdmi_hdcp = {
12336+		.hdmi = hdmi,
12337+		.write = hdmi_writeb,
12338+		.read = hdmi_readb,
12339+		.regs = hdmi->regs,
12340+		.reg_io_width = val,
12341+		.enable = hdcp1x_enable,
12342+	};
12343+	struct platform_device_info hdcp_device_info = {
12344+		.parent = dev,
12345+		.id = PLATFORM_DEVID_AUTO,
12346+		.res = NULL,
12347+		.num_res = 0,
12348+		.name = DW_HDCP_DRIVER_NAME,
12349+		.data = &hdmi_hdcp,
12350+		.size_data = sizeof(hdmi_hdcp),
12351+		.dma_mask = DMA_BIT_MASK(32),
12352+	};
12353+
12354+	hdmi->hdcp_dev = platform_device_register_full(&hdcp_device_info);
12355+	if (IS_ERR(hdmi->hdcp_dev))
12356+		dev_err(dev, "failed to register hdcp!\n");
12357+	else
12358+		hdmi->hdcp = hdmi->hdcp_dev->dev.platform_data;
12359+}
12360+
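+/*
+ * Look up the "force-output" property in the display-subsystem/route/
+ * route-hdmi device tree node, which is used to keep the HDMI output
+ * forced on (e.g. to preserve the boot logo).
+ */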
12361+static int get_force_logo_property(struct dw_hdmi *hdmi)
12362+{
12363+	struct device_node *dss;
12364+	struct device_node *route;
12365+	struct device_node *route_hdmi;
12366+
12367+	dss = of_find_node_by_name(NULL, "display-subsystem");
12368+	if (!dss) {
12369+		dev_err(hdmi->dev, "can't find display-subsystem\n");
12370+		return -ENODEV;
12371+	}
12372+
12373+	route = of_find_node_by_name(dss, "route");
12374+	if (!route) {
12375+		dev_err(hdmi->dev, "can't find route\n");
12376+		of_node_put(dss);
12377+		return -ENODEV;
12378+	}
12379+	of_node_put(dss);
12380+
12381+	route_hdmi = of_find_node_by_name(route, "route-hdmi");
12382+	if (!route_hdmi) {
12383+		dev_err(hdmi->dev, "can't find route-hdmi\n");
12384+		of_node_put(route);
12385+		return -ENODEV;
12386+	}
12387+	of_node_put(route);
12388+
12389+	hdmi->force_logo =
12390+		of_property_read_bool(route_hdmi, "force-output");
12391+
12392+	of_node_put(route_hdmi);
12393+
12394+	return 0;
12395+}
12396+
12397 /* -----------------------------------------------------------------------------
12398  * Probe/remove API, used from platforms based on the DRM bridge API.
12399  */
12400@@ -3193,6 +4359,7 @@ struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev,
12401 {
12402 	struct device *dev = &pdev->dev;
12403 	struct device_node *np = dev->of_node;
12404+	struct device_node *endpoint;
12405 	struct platform_device_info pdevinfo;
12406 	struct device_node *ddc_node;
12407 	struct dw_hdmi_cec_data cec;
12408@@ -3205,11 +4372,13 @@ struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev,
12409 	u8 prod_id1;
12410 	u8 config0;
12411 	u8 config3;
12412+	bool hdcp1x_enable = false;
12413 
12414 	hdmi = devm_kzalloc(dev, sizeof(*hdmi), GFP_KERNEL);
12415 	if (!hdmi)
12416 		return ERR_PTR(-ENOMEM);
12417 
12418+	hdmi->connector.stereo_allowed = 1;
12419 	hdmi->plat_data = plat_data;
12420 	hdmi->dev = dev;
12421 	hdmi->sample_rate = 48000;
12422@@ -3340,7 +4509,24 @@ struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev,
12423 		 prod_id1 & HDMI_PRODUCT_ID1_HDCP ? "with" : "without",
12424 		 hdmi->phy.name);
12425 
12426-	dw_hdmi_init_hw(hdmi);
12427+	ret = get_force_logo_property(hdmi);
12428+	if (ret)
12429+		goto err_iahb;
12430+
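+	/*
+	 * If the PHY is already locked with HPD asserted and the frame
+	 * composer programmed, or force_logo is set, the bootloader has
+	 * already enabled HDMI; keep it running instead of resetting it.
+	 */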
12431+	hdmi->initialized = false;
12432+	ret = hdmi_readb(hdmi, HDMI_PHY_STAT0);
12433+	if (((ret & HDMI_PHY_TX_PHY_LOCK) && (ret & HDMI_PHY_HPD) &&
12434+	     hdmi_readb(hdmi, HDMI_FC_EXCTRLDUR)) || hdmi->force_logo) {
12435+		hdmi->mc_clkdis = hdmi_readb(hdmi, HDMI_MC_CLKDIS);
12436+		hdmi->disabled = false;
12437+		hdmi->bridge_is_on = true;
12438+		hdmi->phy.enabled = true;
12439+		hdmi->initialized = true;
12440+	} else if (ret & HDMI_PHY_TX_PHY_LOCK) {
12441+		hdmi->phy.ops->disable(hdmi, hdmi->phy.data);
12442+	}
12443+
12444+	init_hpd_work(hdmi);
12445 
12446 	irq = platform_get_irq(pdev, 0);
12447 	if (irq < 0) {
12448@@ -3348,6 +4534,7 @@ struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev,
12449 		goto err_iahb;
12450 	}
12451 
12452+	hdmi->irq = irq;
12453 	ret = devm_request_threaded_irq(dev, irq, dw_hdmi_hardirq,
12454 					dw_hdmi_irq, IRQF_SHARED,
12455 					dev_name(dev), hdmi);
12456@@ -3383,8 +4570,20 @@ struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev,
12457 		hdmi->ddc = dw_hdmi_i2c_adapter(hdmi);
12458 		if (IS_ERR(hdmi->ddc))
12459 			hdmi->ddc = NULL;
12460+		/*
12461+		 * Read the SCL high and low times from the device tree. If they
12462+		 * are absent, use defaults giving an SCL rate of about 99.6 kHz.
12463+		 */
12464+		if (of_property_read_u32(np, "ddc-i2c-scl-high-time-ns",
12465+					 &hdmi->i2c->scl_high_ns))
12466+			hdmi->i2c->scl_high_ns = 4708;
12467+		if (of_property_read_u32(np, "ddc-i2c-scl-low-time-ns",
12468+					 &hdmi->i2c->scl_low_ns))
12469+			hdmi->i2c->scl_low_ns = 4916;
12470 	}
12471 
12472+	dw_hdmi_init_hw(hdmi);
12473+
12474 	hdmi->bridge.driver_private = hdmi;
12475 	hdmi->bridge.funcs = &dw_hdmi_bridge_funcs;
12476 	hdmi->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID
12477@@ -3393,6 +4592,30 @@ struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev,
12478 	hdmi->bridge.of_node = pdev->dev.of_node;
12479 #endif
12480 
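+	/*
+	 * If a second output port endpoint points at another bridge, chain to
+	 * it and let that bridge create the connector instead of dw-hdmi.
+	 */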
12481+	endpoint = of_graph_get_endpoint_by_regs(hdmi->dev->of_node, 1, -1);
12482+	if (endpoint && of_device_is_available(endpoint)) {
12483+		struct device_node *remote;
12484+
12485+		remote = of_graph_get_remote_port_parent(endpoint);
12486+		of_node_put(endpoint);
12487+		if (!remote || !of_device_is_available(remote)) {
12488+			of_node_put(remote);
12489+			ret = -ENODEV;
12490+			goto err_iahb;
12491+		}
12492+
12493+		hdmi->next_bridge = of_drm_find_bridge(remote);
12494+		of_node_put(remote);
12495+		if (!hdmi->next_bridge) {
12496+			dev_err(hdmi->dev, "can't find next bridge\n");
12497+			ret = -EPROBE_DEFER;
12498+			goto err_iahb;
12499+		}
12500+
12501+		hdmi->sink_is_hdmi = true;
12502+		hdmi->sink_has_audio = true;
12503+	}
12504+
12505 	memset(&pdevinfo, 0, sizeof(pdevinfo));
12506 	pdevinfo.parent = dev;
12507 	pdevinfo.id = PLATFORM_DEVID_AUTO;
12508@@ -3407,7 +4630,7 @@ struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev,
12509 		audio.base = hdmi->regs;
12510 		audio.irq = irq;
12511 		audio.hdmi = hdmi;
12512-		audio.get_eld = hdmi_audio_get_eld;
12513+		audio.eld = hdmi->connector.eld;
12514 		hdmi->enable_audio = dw_hdmi_ahb_audio_enable;
12515 		hdmi->disable_audio = dw_hdmi_ahb_audio_disable;
12516 
12517@@ -3420,7 +4643,7 @@ struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev,
12518 		struct dw_hdmi_i2s_audio_data audio;
12519 
12520 		audio.hdmi	= hdmi;
12521-		audio.get_eld	= hdmi_audio_get_eld;
12522+		audio.eld	= hdmi->connector.eld;
12523 		audio.write	= hdmi_writeb;
12524 		audio.read	= hdmi_readb;
12525 		hdmi->enable_audio = dw_hdmi_i2s_audio_enable;
12526@@ -3446,8 +4669,40 @@ struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev,
12527 		hdmi->cec = platform_device_register_full(&pdevinfo);
12528 	}
12529 
12530+	hdmi->extcon = devm_extcon_dev_allocate(hdmi->dev, dw_hdmi_cable);
12531+	if (IS_ERR(hdmi->extcon)) {
12532+		ret = PTR_ERR(hdmi->extcon);
12533+		dev_err(hdmi->dev, "allocate extcon failed: %d\n", ret);
12534+		goto err_iahb;
12535+	}
12536+
12537+	ret = devm_extcon_dev_register(hdmi->dev, hdmi->extcon);
12538+	if (ret) {
12539+		dev_err(hdmi->dev, "failed to register extcon: %d\n",
12540+			ret);
12541+		goto err_iahb;
12542+	}
12543+
12544+	ret = extcon_set_property_capability(hdmi->extcon, EXTCON_DISP_HDMI,
12545+					     EXTCON_PROP_DISP_HPD);
12546+	if (ret) {
12547+		dev_err(hdmi->dev,
12548+			"failed to set HDMI property capability: %d\n",
12549+			ret);
12550+		goto err_iahb;
12551+	}
12552+
12553 	drm_bridge_add(&hdmi->bridge);
12554 
12555+	dw_hdmi_register_debugfs(dev, hdmi);
12556+
12557+	if (of_property_read_bool(np, "scramble-low-rates"))
12558+		hdmi->scramble_low_rates = true;
12559+
12560+	if (of_property_read_bool(np, "hdcp1x-enable"))
12561+		hdcp1x_enable = true;
12562+	dw_hdmi_register_hdcp(dev, hdmi, val, hdcp1x_enable);
12563+
12564 	return hdmi;
12565 
12566 err_iahb:
12567@@ -3457,7 +4712,10 @@ struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev,
12568 err_isfr:
12569 	clk_disable_unprepare(hdmi->isfr_clk);
12570 err_res:
12571-	i2c_put_adapter(hdmi->ddc);
12572+	if (hdmi->i2c)
12573+		i2c_del_adapter(&hdmi->i2c->adap);
12574+	else
12575+		i2c_put_adapter(hdmi->ddc);
12576 
12577 	return ERR_PTR(ret);
12578 }
12579@@ -3465,16 +4723,35 @@ EXPORT_SYMBOL_GPL(dw_hdmi_probe);
12580 
12581 void dw_hdmi_remove(struct dw_hdmi *hdmi)
12582 {
12583+	if (hdmi->irq)
12584+		disable_irq(hdmi->irq);
12585+
12586+	cancel_delayed_work(&hdmi->work);
12587+	flush_workqueue(hdmi->workqueue);
12588+	destroy_workqueue(hdmi->workqueue);
12589+
12590+	debugfs_remove_recursive(hdmi->debugfs_dir);
12591+
12592 	drm_bridge_remove(&hdmi->bridge);
12593 
12594 	if (hdmi->audio && !IS_ERR(hdmi->audio))
12595 		platform_device_unregister(hdmi->audio);
12596+	if (hdmi->hdcp_dev && !IS_ERR(hdmi->hdcp_dev))
12597+		platform_device_unregister(hdmi->hdcp_dev);
12598 	if (!IS_ERR(hdmi->cec))
12599 		platform_device_unregister(hdmi->cec);
12600 
12601 	/* Disable all interrupts */
12602 	hdmi_writeb(hdmi, ~0, HDMI_IH_MUTE_PHY_STAT0);
12603 
12604+	if (!hdmi->next_bridge) {
12605+		dw_hdmi_destroy_properties(hdmi);
12606+		hdmi->connector.funcs->destroy(&hdmi->connector);
12607+	}
12608+
12609+	if (hdmi->bridge.encoder)
12610+		hdmi->bridge.encoder->funcs->destroy(hdmi->bridge.encoder);
12611+
12612 	clk_disable_unprepare(hdmi->iahb_clk);
12613 	clk_disable_unprepare(hdmi->isfr_clk);
12614 	if (hdmi->cec_clk)
12615@@ -3492,7 +4769,7 @@ EXPORT_SYMBOL_GPL(dw_hdmi_remove);
12616  */
12617 struct dw_hdmi *dw_hdmi_bind(struct platform_device *pdev,
12618 			     struct drm_encoder *encoder,
12619-			     const struct dw_hdmi_plat_data *plat_data)
12620+			     struct dw_hdmi_plat_data *plat_data)
12621 {
12622 	struct dw_hdmi *hdmi;
12623 	int ret;
12624@@ -3508,6 +4785,9 @@ struct dw_hdmi *dw_hdmi_bind(struct platform_device *pdev,
12625 		return ERR_PTR(ret);
12626 	}
12627 
12628+	if (!hdmi->next_bridge)
12629+		plat_data->connector = &hdmi->connector;
12630+
12631 	return hdmi;
12632 }
12633 EXPORT_SYMBOL_GPL(dw_hdmi_bind);
12634@@ -3518,9 +4798,87 @@ void dw_hdmi_unbind(struct dw_hdmi *hdmi)
12635 }
12636 EXPORT_SYMBOL_GPL(dw_hdmi_unbind);
12637 
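+/*
+ * Restore the interrupt mutes, PHY polarity/mask and PHY I2CM interrupt
+ * settings when the controller has lost them (the IH mute register reads
+ * back non-zero), e.g. after a suspend/resume cycle.
+ */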
12638+static void dw_hdmi_reg_initial(struct dw_hdmi *hdmi)
12639+{
12640+	if (hdmi_readb(hdmi, HDMI_IH_MUTE)) {
12641+		initialize_hdmi_ih_mutes(hdmi);
12642+		/* unmute cec irq */
12643+		hdmi_writeb(hdmi, 0x68, HDMI_IH_MUTE_CEC_STAT0);
12644+
12645+		hdmi_writeb(hdmi, HDMI_PHY_I2CM_INT_ADDR_DONE_POL,
12646+			    HDMI_PHY_I2CM_INT_ADDR);
12647+
12648+		hdmi_writeb(hdmi, HDMI_PHY_I2CM_CTLINT_ADDR_NAC_POL |
12649+			    HDMI_PHY_I2CM_CTLINT_ADDR_ARBITRATION_POL,
12650+			    HDMI_PHY_I2CM_CTLINT_ADDR);
12651+
12652+		if (!hdmi->next_bridge) {
12653+			hdmi_writeb(hdmi, HDMI_PHY_HPD | HDMI_PHY_RX_SENSE,
12654+				    HDMI_PHY_POL0);
12655+			hdmi_writeb(hdmi, hdmi->phy_mask, HDMI_PHY_MASK0);
12656+			hdmi_writeb(hdmi, ~(HDMI_IH_PHY_STAT0_HPD |
12657+				    HDMI_IH_PHY_STAT0_RX_SENSE),
12658+				    HDMI_IH_MUTE_PHY_STAT0);
12659+		}
12660+	}
12661+}
12662+
12663+void dw_hdmi_suspend(struct dw_hdmi *hdmi)
12664+{
12665+	if (!hdmi)
12666+		return;
12667+
12668+	mutex_lock(&hdmi->mutex);
12669+
12670+	/*
12671+	 * On system shutdown, HDMI should be disabled.
12672+	 * On system suspend, dw_hdmi_bridge_disable() disables HDMI first.
12673+	 * To avoid disabling it twice, check whether HDMI has already
12674+	 * been disabled.
12675+	 */
12676+	if (!hdmi->disabled) {
12677+		hdmi->disabled = true;
12678+		dw_hdmi_update_power(hdmi);
12679+		dw_hdmi_update_phy_mask(hdmi);
12680+	}
12681+	mutex_unlock(&hdmi->mutex);
12682+
12683+	if (hdmi->irq)
12684+		disable_irq(hdmi->irq);
12685+	cancel_delayed_work(&hdmi->work);
12686+	flush_workqueue(hdmi->workqueue);
12687+	pinctrl_pm_select_sleep_state(hdmi->dev);
12688+}
12689+EXPORT_SYMBOL_GPL(dw_hdmi_suspend);
12690+
12691 void dw_hdmi_resume(struct dw_hdmi *hdmi)
12692 {
12693-	dw_hdmi_init_hw(hdmi);
12694+	if (!hdmi)
12695+		return;
12696+
12697+	pinctrl_pm_select_default_state(hdmi->dev);
12698+	mutex_lock(&hdmi->mutex);
12699+	dw_hdmi_reg_initial(hdmi);
12700+	if (hdmi->i2c)
12701+		dw_hdmi_i2c_init(hdmi);
12702+	if (hdmi->irq)
12703+		enable_irq(hdmi->irq);
12704+	/*
12705+	 * The HDMI status may be stale in the following sequence:
12706+	 * HDMI plug in -> system sleep -> HDMI plug out -> system wake up.
12707+	 * /sys/class/drm/card0-HDMI-A-1/status then still reads "connected":
12708+	 * no HPD interrupt fired because HDMI was powered down during
12709+	 * suspend, so re-check the current HDMI status here.
12710+	 */
12711+	if (hdmi->connector.status == connector_status_connected) {
12712+		if (hdmi->phy.ops->read_hpd(hdmi, hdmi->phy.data) ==
12713+		    connector_status_disconnected) {
12714+			hdmi->hpd_state = false;
12715+			mod_delayed_work(hdmi->workqueue, &hdmi->work,
12716+					 msecs_to_jiffies(20));
12717+		}
12718+	}
12719+	mutex_unlock(&hdmi->mutex);
12720 }
12721 EXPORT_SYMBOL_GPL(dw_hdmi_resume);
12722 
12723diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.h b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.h
12724index 1999db05b..509732800 100644
12725--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.h
12726+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.h
12727@@ -509,6 +509,51 @@
12728 #define HDMI_A_PRESETUP                         0x501A
12729 #define HDMI_A_SRM_BASE                         0x5020
12730 
12731+/* CEC Engine Registers */
12732+#define HDMI_CEC_CTRL                           0x7D00
12733+#define HDMI_CEC_STAT                           0x7D01
12734+#define HDMI_CEC_MASK                           0x7D02
12735+#define HDMI_CEC_POLARITY                       0x7D03
12736+#define HDMI_CEC_INT                            0x7D04
12737+#define HDMI_CEC_ADDR_L                         0x7D05
12738+#define HDMI_CEC_ADDR_H                         0x7D06
12739+#define HDMI_CEC_TX_CNT                         0x7D07
12740+#define HDMI_CEC_RX_CNT                         0x7D08
12741+#define HDMI_CEC_TX_DATA0                       0x7D10
12742+#define HDMI_CEC_TX_DATA1                       0x7D11
12743+#define HDMI_CEC_TX_DATA2                       0x7D12
12744+#define HDMI_CEC_TX_DATA3                       0x7D13
12745+#define HDMI_CEC_TX_DATA4                       0x7D14
12746+#define HDMI_CEC_TX_DATA5                       0x7D15
12747+#define HDMI_CEC_TX_DATA6                       0x7D16
12748+#define HDMI_CEC_TX_DATA7                       0x7D17
12749+#define HDMI_CEC_TX_DATA8                       0x7D18
12750+#define HDMI_CEC_TX_DATA9                       0x7D19
12751+#define HDMI_CEC_TX_DATA10                      0x7D1a
12752+#define HDMI_CEC_TX_DATA11                      0x7D1b
12753+#define HDMI_CEC_TX_DATA12                      0x7D1c
12754+#define HDMI_CEC_TX_DATA13                      0x7D1d
12755+#define HDMI_CEC_TX_DATA14                      0x7D1e
12756+#define HDMI_CEC_TX_DATA15                      0x7D1f
12757+#define HDMI_CEC_RX_DATA0                       0x7D20
12758+#define HDMI_CEC_RX_DATA1                       0x7D21
12759+#define HDMI_CEC_RX_DATA2                       0x7D22
12760+#define HDMI_CEC_RX_DATA3                       0x7D23
12761+#define HDMI_CEC_RX_DATA4                       0x7D24
12762+#define HDMI_CEC_RX_DATA5                       0x7D25
12763+#define HDMI_CEC_RX_DATA6                       0x7D26
12764+#define HDMI_CEC_RX_DATA7                       0x7D27
12765+#define HDMI_CEC_RX_DATA8                       0x7D28
12766+#define HDMI_CEC_RX_DATA9                       0x7D29
12767+#define HDMI_CEC_RX_DATA10                      0x7D2a
12768+#define HDMI_CEC_RX_DATA11                      0x7D2b
12769+#define HDMI_CEC_RX_DATA12                      0x7D2c
12770+#define HDMI_CEC_RX_DATA13                      0x7D2d
12771+#define HDMI_CEC_RX_DATA14                      0x7D2e
12772+#define HDMI_CEC_RX_DATA15                      0x7D2f
12773+#define HDMI_CEC_LOCK                           0x7D30
12774+#define HDMI_CEC_WKUPCTRL                       0x7D31
12775+
12776 /* I2C Master Registers (E-DDC) */
12777 #define HDMI_I2CM_SLAVE                         0x7E00
12778 #define HDMI_I2CM_ADDRESS                       0x7E01
12779@@ -529,6 +574,7 @@
12780 #define HDMI_I2CM_FS_SCL_HCNT_0_ADDR            0x7E10
12781 #define HDMI_I2CM_FS_SCL_LCNT_1_ADDR            0x7E11
12782 #define HDMI_I2CM_FS_SCL_LCNT_0_ADDR            0x7E12
12783+#define HDMI_I2CM_SDA_HOLD                      0x7E13
12784 
12785 enum {
12786 /* PRODUCT_ID0 field values */
12787@@ -842,6 +888,10 @@ enum {
12788 	HDMI_FC_AVICONF3_QUANT_RANGE_LIMITED = 0x00,
12789 	HDMI_FC_AVICONF3_QUANT_RANGE_FULL = 0x04,
12790 
12791+/* HDMI_FC_GCP */
12792+	HDMI_FC_GCP_SET_AVMUTE = 0x2,
12793+	HDMI_FC_GCP_CLEAR_AVMUTE = 0x1,
12794+
12795 /* FC_DBGFORCE field values */
12796 	HDMI_FC_DBGFORCE_FORCEAUDIO = 0x10,
12797 	HDMI_FC_DBGFORCE_FORCEVIDEO = 0x1,
12798@@ -1085,6 +1135,11 @@ enum {
12799 	HDMI_I2CM_CTLINT_NAC_MASK = 0x40,
12800 	HDMI_I2CM_CTLINT_ARB_POL = 0x8,
12801 	HDMI_I2CM_CTLINT_ARB_MASK = 0x4,
12802+
12803+/* I2CM_DIV field values */
12804+	HDMI_I2CM_DIV_FAST_STD_MODE = 0x8,
12805+	HDMI_I2CM_DIV_FAST_MODE = 0x8,
12806+	HDMI_I2CM_DIV_STD_MODE = 0,
12807 };
12808 
12809 /*
12810diff --git a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
12811index 376fa6eb4..163dcc03b 100644
12812--- a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
12813+++ b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
12814@@ -244,7 +244,7 @@ struct dw_mipi_dsi {
12815 	struct device *dev;
12816 	void __iomem *base;
12817 
12818-	struct clk *pclk;
12819+	struct reset_control *apb_rst;
12820 
12821 	unsigned int lane_mbps; /* per lane */
12822 	u32 channel;
12823@@ -316,15 +316,10 @@ static int dw_mipi_dsi_host_attach(struct mipi_dsi_host *host,
12824 	const struct dw_mipi_dsi_plat_data *pdata = dsi->plat_data;
12825 	struct drm_bridge *bridge;
12826 	struct drm_panel *panel;
12827+	int max_data_lanes = dsi->plat_data->max_data_lanes;
12828 	int ret;
12829 
12830-	if (device->lanes > dsi->plat_data->max_data_lanes) {
12831-		dev_err(dsi->dev, "the number of data lanes(%u) is too many\n",
12832-			device->lanes);
12833-		return -EINVAL;
12834-	}
12835-
12836-	dsi->lanes = device->lanes;
12837+	dsi->lanes = (device->lanes > max_data_lanes) ? device->lanes / 2 : device->lanes;
12838 	dsi->channel = device->channel;
12839 	dsi->format = device->format;
12840 	dsi->mode_flags = device->mode_flags;
12841@@ -599,8 +594,14 @@ static void dw_mipi_dsi_set_mode(struct dw_mipi_dsi *dsi,
12842 
12843 static void dw_mipi_dsi_disable(struct dw_mipi_dsi *dsi)
12844 {
12845+	const struct dw_mipi_dsi_phy_ops *phy_ops = dsi->plat_data->phy_ops;
12846+
12847+	if (phy_ops->power_off)
12848+		phy_ops->power_off(dsi->plat_data->priv_data);
12849+
12850 	dsi_write(dsi, DSI_PWR_UP, RESET);
12851 	dsi_write(dsi, DSI_PHY_RSTZ, PHY_RSTZ);
12852+	pm_runtime_put(dsi->dev);
12853 }
12854 
12855 static void dw_mipi_dsi_init(struct dw_mipi_dsi *dsi)
12856@@ -715,16 +716,16 @@ static u32 dw_mipi_dsi_get_hcomponent_lbcc(struct dw_mipi_dsi *dsi,
12857 					   const struct drm_display_mode *mode,
12858 					   u32 hcomponent)
12859 {
12860-	u32 frac, lbcc;
12861+	u32 lbcc;
12862 
12863 	lbcc = hcomponent * dsi->lane_mbps * MSEC_PER_SEC / 8;
12864 
12865-	frac = lbcc % mode->clock;
12866-	lbcc = lbcc / mode->clock;
12867-	if (frac)
12868-		lbcc++;
12869+	if (mode->clock == 0) {
12870+		DRM_ERROR("dsi mode clock is 0!\n");
12871+		return 0;
12872+	}
12873 
12874-	return lbcc;
12875+	return DIV_ROUND_CLOSEST_ULL(lbcc, mode->clock);
12876 }
12877 
12878 static void dw_mipi_dsi_line_timer_config(struct dw_mipi_dsi *dsi,
12879@@ -837,13 +838,13 @@ static void dw_mipi_dsi_dphy_enable(struct dw_mipi_dsi *dsi)
12880 	ret = readl_poll_timeout(dsi->base + DSI_PHY_STATUS, val,
12881 				 val & PHY_LOCK, 1000, PHY_STATUS_TIMEOUT_US);
12882 	if (ret)
12883-		DRM_DEBUG_DRIVER("failed to wait phy lock state\n");
12884+		DRM_ERROR("failed to wait phy lock state\n");
12885 
12886 	ret = readl_poll_timeout(dsi->base + DSI_PHY_STATUS,
12887 				 val, val & PHY_STOP_STATE_CLK_LANE, 1000,
12888 				 PHY_STATUS_TIMEOUT_US);
12889 	if (ret)
12890-		DRM_DEBUG_DRIVER("failed to wait phy clk lane stop state\n");
12891+		DRM_ERROR("failed to wait phy clk lane stop state\n");
12892 }
12893 
12894 static void dw_mipi_dsi_clear_err(struct dw_mipi_dsi *dsi)
12895@@ -857,7 +858,6 @@ static void dw_mipi_dsi_clear_err(struct dw_mipi_dsi *dsi)
12896 static void dw_mipi_dsi_bridge_post_disable(struct drm_bridge *bridge)
12897 {
12898 	struct dw_mipi_dsi *dsi = bridge_to_dsi(bridge);
12899-	const struct dw_mipi_dsi_phy_ops *phy_ops = dsi->plat_data->phy_ops;
12900 
12901 	/*
12902 	 * Switch to command mode before panel-bridge post_disable &
12903@@ -866,6 +866,8 @@ static void dw_mipi_dsi_bridge_post_disable(struct drm_bridge *bridge)
12904 	 * before by the drm framework.
12905 	 */
12906 	dw_mipi_dsi_set_mode(dsi, 0);
12907+	if (dsi->slave)
12908+		dw_mipi_dsi_set_mode(dsi->slave, 0);
12909 
12910 	/*
12911 	 * TODO Only way found to call panel-bridge post_disable &
12912@@ -876,18 +878,10 @@ static void dw_mipi_dsi_bridge_post_disable(struct drm_bridge *bridge)
12913 	if (dsi->panel_bridge->funcs->post_disable)
12914 		dsi->panel_bridge->funcs->post_disable(dsi->panel_bridge);
12915 
12916-	if (phy_ops->power_off)
12917-		phy_ops->power_off(dsi->plat_data->priv_data);
12918-
12919-	if (dsi->slave) {
12920+	if (dsi->slave)
12921 		dw_mipi_dsi_disable(dsi->slave);
12922-		clk_disable_unprepare(dsi->slave->pclk);
12923-		pm_runtime_put(dsi->slave->dev);
12924-	}
12925-	dw_mipi_dsi_disable(dsi);
12926 
12927-	clk_disable_unprepare(dsi->pclk);
12928-	pm_runtime_put(dsi->dev);
12929+	dw_mipi_dsi_disable(dsi);
12930 }
12931 
12932 static unsigned int dw_mipi_dsi_get_lanes(struct dw_mipi_dsi *dsi)
12933@@ -912,7 +906,11 @@ static void dw_mipi_dsi_mode_set(struct dw_mipi_dsi *dsi,
12934 	int ret;
12935 	u32 lanes = dw_mipi_dsi_get_lanes(dsi);
12936 
12937-	clk_prepare_enable(dsi->pclk);
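+	/* Pulse the optional APB reset so the DSI host starts from a clean state */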
12938+	if (dsi->apb_rst) {
12939+		reset_control_assert(dsi->apb_rst);
12940+		usleep_range(10, 20);
12941+		reset_control_deassert(dsi->apb_rst);
12942+	}
12943 
12944 	ret = phy_ops->get_lane_mbps(priv_data, adjusted_mode, dsi->mode_flags,
12945 				     lanes, dsi->format, &dsi->lane_mbps);
12946@@ -939,15 +937,15 @@ static void dw_mipi_dsi_mode_set(struct dw_mipi_dsi *dsi,
12947 	if (ret)
12948 		DRM_DEBUG_DRIVER("Phy init() failed\n");
12949 
12950+	if (phy_ops->power_on)
12951+		phy_ops->power_on(dsi->plat_data->priv_data);
12952+
12953 	dw_mipi_dsi_dphy_enable(dsi);
12954 
12955 	dw_mipi_dsi_wait_for_two_frames(adjusted_mode);
12956 
12957 	/* Switch to cmd mode for panel-bridge pre_enable & panel prepare */
12958 	dw_mipi_dsi_set_mode(dsi, 0);
12959-
12960-	if (phy_ops->power_on)
12961-		phy_ops->power_on(dsi->plat_data->priv_data);
12962 }
12963 
12964 static void dw_mipi_dsi_bridge_mode_set(struct drm_bridge *bridge,
12965@@ -959,16 +957,25 @@ static void dw_mipi_dsi_bridge_mode_set(struct drm_bridge *bridge,
12966 	dw_mipi_dsi_mode_set(dsi, adjusted_mode);
12967 	if (dsi->slave)
12968 		dw_mipi_dsi_mode_set(dsi->slave, adjusted_mode);
12969+
12970+	DRM_DEV_INFO(dsi->dev, "final DSI-Link bandwidth: %u x %d Mbps\n",
12971+		     dsi->lane_mbps, dsi->slave ? dsi->lanes * 2 : dsi->lanes);
12972 }
12973 
12974 static void dw_mipi_dsi_bridge_enable(struct drm_bridge *bridge)
12975 {
12976 	struct dw_mipi_dsi *dsi = bridge_to_dsi(bridge);
12977 
12978-	/* Switch to video mode for panel-bridge enable & panel enable */
12979-	dw_mipi_dsi_set_mode(dsi, MIPI_DSI_MODE_VIDEO);
12980-	if (dsi->slave)
12981-		dw_mipi_dsi_set_mode(dsi->slave, MIPI_DSI_MODE_VIDEO);
12982+	/* Switch to video/cmd mode for panel-bridge enable & panel enable */
12983+	if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO) {
12984+		dw_mipi_dsi_set_mode(dsi, MIPI_DSI_MODE_VIDEO);
12985+		if (dsi->slave)
12986+			dw_mipi_dsi_set_mode(dsi->slave, MIPI_DSI_MODE_VIDEO);
12987+	} else {
12988+		dw_mipi_dsi_set_mode(dsi, 0);
12989+		if (dsi->slave)
12990+			dw_mipi_dsi_set_mode(dsi->slave, 0);
12991+	}
12992 }
12993 
12994 static enum drm_mode_status
12995@@ -1103,7 +1110,6 @@ __dw_mipi_dsi_probe(struct platform_device *pdev,
12996 		    const struct dw_mipi_dsi_plat_data *plat_data)
12997 {
12998 	struct device *dev = &pdev->dev;
12999-	struct reset_control *apb_rst;
13000 	struct dw_mipi_dsi *dsi;
13001 	int ret;
13002 
13003@@ -1129,20 +1135,13 @@ __dw_mipi_dsi_probe(struct platform_device *pdev,
13004 		dsi->base = plat_data->base;
13005 	}
13006 
13007-	dsi->pclk = devm_clk_get(dev, "pclk");
13008-	if (IS_ERR(dsi->pclk)) {
13009-		ret = PTR_ERR(dsi->pclk);
13010-		dev_err(dev, "Unable to get pclk: %d\n", ret);
13011-		return ERR_PTR(ret);
13012-	}
13013-
13014 	/*
13015 	 * Note that the reset was not defined in the initial device tree, so
13016 	 * we have to be prepared for it not being found.
13017 	 */
13018-	apb_rst = devm_reset_control_get_optional_exclusive(dev, "apb");
13019-	if (IS_ERR(apb_rst)) {
13020-		ret = PTR_ERR(apb_rst);
13021+	dsi->apb_rst = devm_reset_control_get_optional_exclusive(dev, "apb");
13022+	if (IS_ERR(dsi->apb_rst)) {
13023+		ret = PTR_ERR(dsi->apb_rst);
13024 
13025 		if (ret != -EPROBE_DEFER)
13026 			dev_err(dev, "Unable to get reset control: %d\n", ret);
13027@@ -1150,20 +1149,6 @@ __dw_mipi_dsi_probe(struct platform_device *pdev,
13028 		return ERR_PTR(ret);
13029 	}
13030 
13031-	if (apb_rst) {
13032-		ret = clk_prepare_enable(dsi->pclk);
13033-		if (ret) {
13034-			dev_err(dev, "%s: Failed to enable pclk\n", __func__);
13035-			return ERR_PTR(ret);
13036-		}
13037-
13038-		reset_control_assert(apb_rst);
13039-		usleep_range(10, 20);
13040-		reset_control_deassert(apb_rst);
13041-
13042-		clk_disable_unprepare(dsi->pclk);
13043-	}
13044-
13045 	dw_mipi_dsi_debugfs_init(dsi);
13046 	pm_runtime_enable(dev);
13047 
13048@@ -1247,6 +1232,12 @@ void dw_mipi_dsi_unbind(struct dw_mipi_dsi *dsi)
13049 }
13050 EXPORT_SYMBOL_GPL(dw_mipi_dsi_unbind);
13051 
13052+struct drm_connector *dw_mipi_dsi_get_connector(struct dw_mipi_dsi *dsi)
13053+{
13054+	return drm_panel_bridge_connector(dsi->panel_bridge);
13055+}
13056+EXPORT_SYMBOL_GPL(dw_mipi_dsi_get_connector);
13057+
13058 MODULE_AUTHOR("Chris Zhong <zyw@rock-chips.com>");
13059 MODULE_AUTHOR("Philippe Cornu <philippe.cornu@st.com>");
13060 MODULE_DESCRIPTION("DW MIPI DSI host controller driver");
13061diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
13062index 7fc8e7000..4108c7265 100644
13063--- a/drivers/gpu/drm/drm_atomic_helper.c
13064+++ b/drivers/gpu/drm/drm_atomic_helper.c
13065@@ -296,12 +296,14 @@ update_connector_routing(struct drm_atomic_state *state,
13066 	if (old_connector_state->crtc != new_connector_state->crtc) {
13067 		if (old_connector_state->crtc) {
13068 			crtc_state = drm_atomic_get_new_crtc_state(state, old_connector_state->crtc);
13069-			crtc_state->connectors_changed = true;
13070+			if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK)
13071+				crtc_state->connectors_changed = true;
13072 		}
13073 
13074 		if (new_connector_state->crtc) {
13075 			crtc_state = drm_atomic_get_new_crtc_state(state, new_connector_state->crtc);
13076-			crtc_state->connectors_changed = true;
13077+			if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK)
13078+				crtc_state->connectors_changed = true;
13079 		}
13080 	}
13081 
13082@@ -386,7 +388,8 @@ update_connector_routing(struct drm_atomic_state *state,
13083 
13084 	set_best_encoder(state, new_connector_state, new_encoder);
13085 
13086-	crtc_state->connectors_changed = true;
13087+	if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK)
13088+		crtc_state->connectors_changed = true;
13089 
13090 	DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] using [ENCODER:%d:%s] on [CRTC:%d:%s]\n",
13091 			 connector->base.id,
13092@@ -3554,6 +3557,9 @@ int drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
13093 	replaced  = drm_property_replace_blob(&crtc_state->degamma_lut, NULL);
13094 	replaced |= drm_property_replace_blob(&crtc_state->ctm, NULL);
13095 	replaced |= drm_property_replace_blob(&crtc_state->gamma_lut, blob);
13096+#if defined(CONFIG_ROCKCHIP_DRM_CUBIC_LUT)
13097+	replaced |= drm_property_replace_blob(&crtc_state->cubic_lut, NULL);
13098+#endif
13099 	crtc_state->color_mgmt_changed |= replaced;
13100 
13101 	ret = drm_atomic_commit(state);
13102diff --git a/drivers/gpu/drm/drm_atomic_state_helper.c b/drivers/gpu/drm/drm_atomic_state_helper.c
13103index 9ad740451..c29183d2a 100644
13104--- a/drivers/gpu/drm/drm_atomic_state_helper.c
13105+++ b/drivers/gpu/drm/drm_atomic_state_helper.c
13106@@ -141,6 +141,10 @@ void __drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc,
13107 		drm_property_blob_get(state->ctm);
13108 	if (state->gamma_lut)
13109 		drm_property_blob_get(state->gamma_lut);
13110+#if defined(CONFIG_ROCKCHIP_DRM_CUBIC_LUT)
13111+	if (state->cubic_lut)
13112+		drm_property_blob_get(state->cubic_lut);
13113+#endif
13114 	state->mode_changed = false;
13115 	state->active_changed = false;
13116 	state->planes_changed = false;
13117@@ -213,6 +217,9 @@ void __drm_atomic_helper_crtc_destroy_state(struct drm_crtc_state *state)
13118 	drm_property_blob_put(state->degamma_lut);
13119 	drm_property_blob_put(state->ctm);
13120 	drm_property_blob_put(state->gamma_lut);
13121+#if defined(CONFIG_ROCKCHIP_DRM_CUBIC_LUT)
13122+	drm_property_blob_put(state->cubic_lut);
13123+#endif
13124 }
13125 EXPORT_SYMBOL(__drm_atomic_helper_crtc_destroy_state);
13126 
13127diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c
13128index 25c269bc4..975ece7e0 100644
13129--- a/drivers/gpu/drm/drm_atomic_uapi.c
13130+++ b/drivers/gpu/drm/drm_atomic_uapi.c
13131@@ -459,6 +459,16 @@ static int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
13132 					&replaced);
13133 		state->color_mgmt_changed |= replaced;
13134 		return ret;
13135+#if defined(CONFIG_ROCKCHIP_DRM_CUBIC_LUT)
13136+	} else if (property == config->cubic_lut_property) {
13137+		ret = drm_atomic_replace_property_blob_from_id(dev,
13138+					&state->cubic_lut,
13139+					val,
13140+					-1, sizeof(struct drm_color_lut),
13141+					&replaced);
13142+		state->color_mgmt_changed |= replaced;
13143+		return ret;
13144+#endif
13145 	} else if (property == config->prop_out_fence_ptr) {
13146 		s32 __user *fence_ptr = u64_to_user_ptr(val);
13147 
13148@@ -501,6 +511,10 @@ drm_atomic_crtc_get_property(struct drm_crtc *crtc,
13149 		*val = (state->ctm) ? state->ctm->base.id : 0;
13150 	else if (property == config->gamma_lut_property)
13151 		*val = (state->gamma_lut) ? state->gamma_lut->base.id : 0;
13152+#if defined(CONFIG_ROCKCHIP_DRM_CUBIC_LUT)
13153+	else if (property == config->cubic_lut_property)
13154+		*val = (state->cubic_lut) ? state->cubic_lut->base.id : 0;
13155+#endif
13156 	else if (property == config->prop_out_fence_ptr)
13157 		*val = 0;
13158 	else if (crtc->funcs->atomic_get_property)
13159diff --git a/drivers/gpu/drm/drm_auth.c b/drivers/gpu/drm/drm_auth.c
13160index c7adbeaf1..232abbba3 100644
13161--- a/drivers/gpu/drm/drm_auth.c
13162+++ b/drivers/gpu/drm/drm_auth.c
13163@@ -135,18 +135,16 @@ static void drm_set_master(struct drm_device *dev, struct drm_file *fpriv,
13164 static int drm_new_set_master(struct drm_device *dev, struct drm_file *fpriv)
13165 {
13166 	struct drm_master *old_master;
13167-	struct drm_master *new_master;
13168 
13169 	lockdep_assert_held_once(&dev->master_mutex);
13170 
13171 	WARN_ON(fpriv->is_master);
13172 	old_master = fpriv->master;
13173-	new_master = drm_master_create(dev);
13174-	if (!new_master)
13175+	fpriv->master = drm_master_create(dev);
13176+	if (!fpriv->master) {
13177+		fpriv->master = old_master;
13178 		return -ENOMEM;
13179-	spin_lock(&fpriv->master_lookup_lock);
13180-	fpriv->master = new_master;
13181-	spin_unlock(&fpriv->master_lookup_lock);
13182+	}
13183 
13184 	fpriv->is_master = 1;
13185 	fpriv->authenticated = 1;
13186@@ -304,13 +302,10 @@ int drm_master_open(struct drm_file *file_priv)
13187 	/* if there is no current master make this fd it, but do not create
13188 	 * any master object for render clients */
13189 	mutex_lock(&dev->master_mutex);
13190-	if (!dev->master) {
13191+	if (!dev->master)
13192 		ret = drm_new_set_master(dev, file_priv);
13193-	} else {
13194-		spin_lock(&file_priv->master_lookup_lock);
13195+	else
13196 		file_priv->master = drm_master_get(dev->master);
13197-		spin_unlock(&file_priv->master_lookup_lock);
13198-	}
13199 	mutex_unlock(&dev->master_mutex);
13200 
13201 	return ret;
13202@@ -376,31 +371,6 @@ struct drm_master *drm_master_get(struct drm_master *master)
13203 }
13204 EXPORT_SYMBOL(drm_master_get);
13205 
13206-/**
13207- * drm_file_get_master - reference &drm_file.master of @file_priv
13208- * @file_priv: DRM file private
13209- *
13210- * Increments the reference count of @file_priv's &drm_file.master and returns
13211- * the &drm_file.master. If @file_priv has no &drm_file.master, returns NULL.
13212- *
13213- * Master pointers returned from this function should be unreferenced using
13214- * drm_master_put().
13215- */
13216-struct drm_master *drm_file_get_master(struct drm_file *file_priv)
13217-{
13218-	struct drm_master *master = NULL;
13219-
13220-	spin_lock(&file_priv->master_lookup_lock);
13221-	if (!file_priv->master)
13222-		goto unlock;
13223-	master = drm_master_get(file_priv->master);
13224-
13225-unlock:
13226-	spin_unlock(&file_priv->master_lookup_lock);
13227-	return master;
13228-}
13229-EXPORT_SYMBOL(drm_file_get_master);
13230-
13231 static void drm_master_destroy(struct kref *kref)
13232 {
13233 	struct drm_master *master = container_of(kref, struct drm_master, refcount);
13234diff --git a/drivers/gpu/drm/drm_color_mgmt.c b/drivers/gpu/drm/drm_color_mgmt.c
13235index 138ff34b3..7b270b68a 100644
13236--- a/drivers/gpu/drm/drm_color_mgmt.c
13237+++ b/drivers/gpu/drm/drm_color_mgmt.c
13238@@ -33,7 +33,7 @@
13239 /**
13240  * DOC: overview
13241  *
13242- * Color management or color space adjustments is supported through a set of 5
13243+ * Color management or color space adjustments is supported through a set of 7
13244  * properties on the &drm_crtc object. They are set up by calling
13245  * drm_crtc_enable_color_mgmt().
13246  *
13247@@ -60,7 +60,7 @@
13248  * “CTM”:
13249  *	Blob property to set the current transformation matrix (CTM) apply to
13250  *	pixel data after the lookup through the degamma LUT and before the
13251- *	lookup through the gamma LUT. The data is interpreted as a struct
13252+ *	lookup through the cubic LUT. The data is interpreted as a struct
13253  *	&drm_color_ctm.
13254  *
13255  *	Setting this to NULL (blob property value set to 0) means a
13256@@ -68,13 +68,40 @@
13257  *	boot-up state too. Drivers can access the blob for the color conversion
13258  *	matrix through &drm_crtc_state.ctm.
13259  *
13260+ * “CUBIC_LUT”:
13261+ *	Blob property to set the cubic (3D) lookup table performing color
13262+ *	mapping after the transformation matrix and before the lookup through
13263+ *	the gamma LUT. Unlike the degamma and gamma LUTs that map color
13264+ *	components independently, the 3D LUT converts an input color to an
13265+ *	output color by indexing into the 3D table using the color components
13266+ *	as a 3D coordinate. The LUT is subsampled as 8-bit (or more) precision
13267+ *	would require too much storage space in the hardware, so the precision
13268+ *	of the color components is reduced before the look up, and the low
13269+ *	order bits may be used to interpolate between the nearest points in 3D
13270+ *	space.
13271+ *
13272+ *	The data is interpreted as an array of &struct drm_color_lut elements.
13273+ *	Hardware might choose not to use the full precision of the LUT
13274+ *	elements.
13275+ *
13276+ *	Setting this to NULL (blob property value set to 0) means the output
13277+ *	color is identical to the input color. This is generally the driver
13278+ *	boot-up state too. Drivers can access this blob through
13279+ *	&drm_crtc_state.cubic_lut.
13280+ *
13281+ * “CUBIC_LUT_SIZE”:
13282+ *	Unsigned range property to give the size of the lookup table to be set
13283+ *	on the CUBIC_LUT property (the size depends on the underlying hardware).
13284+ *	If drivers support multiple LUT sizes then they should publish the
13285+ *	largest size, and sub-sample smaller sized LUTs appropriately.
13286+ *
13287  * “GAMMA_LUT”:
13288  *	Blob property to set the gamma lookup table (LUT) mapping pixel data
13289- *	after the transformation matrix to data sent to the connector. The
13290- *	data is interpreted as an array of &struct drm_color_lut elements.
13291- *	Hardware might choose not to use the full precision of the LUT elements
13292- *	nor use all the elements of the LUT (for example the hardware might
13293- *	choose to interpolate between LUT[0] and LUT[4]).
13294+ *	after the cubic LUT to data sent to the connector. The data is
13295+ *	interpreted as an array of &struct drm_color_lut elements. Hardware
13296+ *	might choose not to use the full precision of the LUT elements nor use
13297+ *	all the elements of the LUT (for example the hardware might choose to
13298+ *	interpolate between LUT[0] and LUT[4]).
13299  *
13300  *	Setting this to NULL (blob property value set to 0) means a
13301  *	linear/pass-thru gamma table should be used. This is generally the
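
For reference only (not part of the patch), a minimal userspace sketch of programming the CUBIC_LUT blob documented above, assuming the CRTC id and the CUBIC_LUT property id have already been looked up (e.g. via drmModeObjectGetProperties()) and that lut_size was read from CUBIC_LUT_SIZE:

#include <errno.h>
#include <stdint.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

/* Illustrative helper: fd, crtc_id, cubic_lut_prop_id and lut are assumed
 * to have been obtained elsewhere; lut has lut_size drm_color_lut entries. */
static int set_cubic_lut(int fd, uint32_t crtc_id, uint32_t cubic_lut_prop_id,
			 const struct drm_color_lut *lut, uint32_t lut_size)
{
	drmModeAtomicReq *req;
	uint32_t blob_id = 0;
	int ret;

	/* Wrap the 3D LUT entries in a property blob. */
	ret = drmModeCreatePropertyBlob(fd, lut, lut_size * sizeof(*lut), &blob_id);
	if (ret)
		return ret;

	req = drmModeAtomicAlloc();
	if (!req) {
		drmModeDestroyPropertyBlob(fd, blob_id);
		return -ENOMEM;
	}

	/* Point the CRTC's CUBIC_LUT property at the new blob and commit. */
	drmModeAtomicAddProperty(req, crtc_id, cubic_lut_prop_id, blob_id);
	ret = drmModeAtomicCommit(fd, req, DRM_MODE_ATOMIC_ALLOW_MODESET, NULL);

	drmModeAtomicFree(req);
	drmModeDestroyPropertyBlob(fd, blob_id);
	return ret;
}
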
13302diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
13303index b0a826489..3d7182001 100644
13304--- a/drivers/gpu/drm/drm_debugfs.c
13305+++ b/drivers/gpu/drm/drm_debugfs.c
13306@@ -91,7 +91,6 @@ static int drm_clients_info(struct seq_file *m, void *data)
13307 	mutex_lock(&dev->filelist_mutex);
13308 	list_for_each_entry_reverse(priv, &dev->filelist, lhead) {
13309 		struct task_struct *task;
13310-		bool is_current_master = drm_is_current_master(priv);
13311 
13312 		rcu_read_lock(); /* locks pid_task()->comm */
13313 		task = pid_task(priv->pid, PIDTYPE_PID);
13314@@ -100,7 +99,7 @@ static int drm_clients_info(struct seq_file *m, void *data)
13315 			   task ? task->comm : "<unknown>",
13316 			   pid_vnr(priv->pid),
13317 			   priv->minor->index,
13318-			   is_current_master ? 'y' : 'n',
13319+			   drm_is_current_master(priv) ? 'y' : 'n',
13320 			   priv->authenticated ? 'y' : 'n',
13321 			   from_kuid_munged(seq_user_ns(m), uid),
13322 			   priv->magic);
13323diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
13324index 4334e466b..ab52f7fed 100644
13325--- a/drivers/gpu/drm/drm_edid.c
13326+++ b/drivers/gpu/drm/drm_edid.c
13327@@ -1835,20 +1835,11 @@ static void connector_bad_edid(struct drm_connector *connector,
13328 			       u8 *edid, int num_blocks)
13329 {
13330 	int i;
13331-	u8 last_block;
13332-
13333-	/*
13334-	 * 0x7e in the EDID is the number of extension blocks. The EDID
13335-	 * is 1 (base block) + num_ext_blocks big. That means we can think
13336-	 * of 0x7e in the EDID of the _index_ of the last block in the
13337-	 * combined chunk of memory.
13338-	 */
13339-	last_block = edid[0x7e];
13340+	u8 num_of_ext = edid[0x7e];
13341 
13342 	/* Calculate real checksum for the last edid extension block data */
13343-	if (last_block < num_blocks)
13344-		connector->real_edid_checksum =
13345-			drm_edid_block_checksum(edid + last_block * EDID_LENGTH);
13346+	connector->real_edid_checksum =
13347+		drm_edid_block_checksum(edid + num_of_ext * EDID_LENGTH);
13348 
13349 	if (connector->bad_edid_counter++ && !drm_debug_enabled(DRM_UT_KMS))
13350 		return;
13351@@ -4861,6 +4852,43 @@ static void drm_parse_vcdb(struct drm_connector *connector, const u8 *db)
13352 		info->rgb_quant_range_selectable = true;
13353 }
13354 
13355+#ifdef CONFIG_NO_GKI
13356+static
13357+void drm_get_max_frl_rate(int max_frl_rate, u8 *max_lanes, u8 *max_rate_per_lane)
13358+{
13359+	switch (max_frl_rate) {
13360+	case 1:
13361+		*max_lanes = 3;
13362+		*max_rate_per_lane = 3;
13363+		break;
13364+	case 2:
13365+		*max_lanes = 3;
13366+		*max_rate_per_lane = 6;
13367+		break;
13368+	case 3:
13369+		*max_lanes = 4;
13370+		*max_rate_per_lane = 6;
13371+		break;
13372+	case 4:
13373+		*max_lanes = 4;
13374+		*max_rate_per_lane = 8;
13375+		break;
13376+	case 5:
13377+		*max_lanes = 4;
13378+		*max_rate_per_lane = 10;
13379+		break;
13380+	case 6:
13381+		*max_lanes = 4;
13382+		*max_rate_per_lane = 12;
13383+		break;
13384+	case 0:
13385+	default:
13386+		*max_lanes = 0;
13387+		*max_rate_per_lane = 0;
13388+	}
13389+}
13390+#endif
13391+
13392 static void drm_parse_ycbcr420_deep_color_info(struct drm_connector *connector,
13393 					       const u8 *db)
13394 {
13395@@ -4914,6 +4942,76 @@ static void drm_parse_hdmi_forum_vsdb(struct drm_connector *connector,
13396 		}
13397 	}
13398 
13399+#ifdef CONFIG_NO_GKI
13400+	if (hf_vsdb[7]) {
13401+		u8 max_frl_rate;
13402+		u8 dsc_max_frl_rate;
13403+		u8 dsc_max_slices;
13404+		struct drm_hdmi_dsc_cap *hdmi_dsc = &hdmi->dsc_cap;
13405+
13406+		DRM_DEBUG_KMS("hdmi_21 sink detected. parsing edid\n");
13407+		max_frl_rate = (hf_vsdb[7] & DRM_EDID_MAX_FRL_RATE_MASK) >> 4;
13408+		drm_get_max_frl_rate(max_frl_rate, &hdmi->max_lanes,
13409+				&hdmi->max_frl_rate_per_lane);
13410+		hdmi_dsc->v_1p2 = hf_vsdb[11] & DRM_EDID_DSC_1P2;
13411+
13412+		if (hdmi_dsc->v_1p2) {
13413+			hdmi_dsc->native_420 = hf_vsdb[11] & DRM_EDID_DSC_NATIVE_420;
13414+			hdmi_dsc->all_bpp = hf_vsdb[11] & DRM_EDID_DSC_ALL_BPP;
13415+
13416+			if (hf_vsdb[11] & DRM_EDID_DSC_16BPC)
13417+				hdmi_dsc->bpc_supported = 16;
13418+			else if (hf_vsdb[11] & DRM_EDID_DSC_12BPC)
13419+				hdmi_dsc->bpc_supported = 12;
13420+			else if (hf_vsdb[11] & DRM_EDID_DSC_10BPC)
13421+				hdmi_dsc->bpc_supported = 10;
13422+			else
13423+				hdmi_dsc->bpc_supported = 0;
13424+
13425+			dsc_max_frl_rate = (hf_vsdb[12] & DRM_EDID_DSC_MAX_FRL_RATE_MASK) >> 4;
13426+			drm_get_max_frl_rate(dsc_max_frl_rate, &hdmi_dsc->max_lanes,
13427+					&hdmi_dsc->max_frl_rate_per_lane);
13428+			hdmi_dsc->total_chunk_kbytes = hf_vsdb[13] & DRM_EDID_DSC_TOTAL_CHUNK_KBYTES;
13429+
13430+			dsc_max_slices = hf_vsdb[12] & DRM_EDID_DSC_MAX_SLICES;
13431+			switch (dsc_max_slices) {
13432+			case 1:
13433+				hdmi_dsc->max_slices = 1;
13434+				hdmi_dsc->clk_per_slice = 340;
13435+				break;
13436+			case 2:
13437+				hdmi_dsc->max_slices = 2;
13438+				hdmi_dsc->clk_per_slice = 340;
13439+				break;
13440+			case 3:
13441+				hdmi_dsc->max_slices = 4;
13442+				hdmi_dsc->clk_per_slice = 340;
13443+				break;
13444+			case 4:
13445+				hdmi_dsc->max_slices = 8;
13446+				hdmi_dsc->clk_per_slice = 340;
13447+				break;
13448+			case 5:
13449+				hdmi_dsc->max_slices = 8;
13450+				hdmi_dsc->clk_per_slice = 400;
13451+				break;
13452+			case 6:
13453+				hdmi_dsc->max_slices = 12;
13454+				hdmi_dsc->clk_per_slice = 400;
13455+				break;
13456+			case 7:
13457+				hdmi_dsc->max_slices = 16;
13458+				hdmi_dsc->clk_per_slice = 400;
13459+				break;
13460+			case 0:
13461+			default:
13462+				hdmi_dsc->max_slices = 0;
13463+				hdmi_dsc->clk_per_slice = 0;
13464+			}
13465+		}
13466+	}
13467+#endif
13468+
13469 	drm_parse_ycbcr420_deep_color_info(connector, hf_vsdb);
13470 }
13471 
13472diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
13473index 537e7de8e..01670305d 100644
13474--- a/drivers/gpu/drm/drm_file.c
13475+++ b/drivers/gpu/drm/drm_file.c
13476@@ -177,7 +177,6 @@ struct drm_file *drm_file_alloc(struct drm_minor *minor)
13477 	init_waitqueue_head(&file->event_wait);
13478 	file->event_space = 4096; /* set aside 4k for event buffer */
13479 
13480-	spin_lock_init(&file->master_lookup_lock);
13481 	mutex_init(&file->event_read_lock);
13482 
13483 	if (drm_core_check_feature(dev, DRIVER_GEM))
13484@@ -776,20 +775,19 @@ void drm_event_cancel_free(struct drm_device *dev,
13485 EXPORT_SYMBOL(drm_event_cancel_free);
13486 
13487 /**
13488- * drm_send_event_locked - send DRM event to file descriptor
13489+ * drm_send_event_helper - send DRM event to file descriptor
13490  * @dev: DRM device
13491  * @e: DRM event to deliver
13492+ * @timestamp: timestamp to set for the fence event in kernel's CLOCK_MONOTONIC
13493+ * time domain
13494  *
13495- * This function sends the event @e, initialized with drm_event_reserve_init(),
13496- * to its associated userspace DRM file. Callers must already hold
13497- * &drm_device.event_lock, see drm_send_event() for the unlocked version.
13498- *
13499- * Note that the core will take care of unlinking and disarming events when the
13500- * corresponding DRM file is closed. Drivers need not worry about whether the
13501- * DRM file for this event still exists and can call this function upon
13502- * completion of the asynchronous work unconditionally.
13503+ * This helper function sends the event @e, initialized with
13504+ * drm_event_reserve_init(), to its associated userspace DRM file.
13505+ * The timestamp variant of dma_fence_signal is used when the caller
13506+ * sends a valid timestamp.
13507  */
13508-void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e)
13509+void drm_send_event_helper(struct drm_device *dev,
13510+			   struct drm_pending_event *e, ktime_t timestamp)
13511 {
13512 	assert_spin_locked(&dev->event_lock);
13513 
13514@@ -800,7 +798,10 @@ void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e)
13515 	}
13516 
13517 	if (e->fence) {
13518-		dma_fence_signal(e->fence);
13519+		if (timestamp)
13520+			dma_fence_signal_timestamp(e->fence, timestamp);
13521+		else
13522+			dma_fence_signal(e->fence);
13523 		dma_fence_put(e->fence);
13524 	}
13525 
13526@@ -815,6 +816,48 @@ void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e)
13527 	wake_up_interruptible_poll(&e->file_priv->event_wait,
13528 		EPOLLIN | EPOLLRDNORM);
13529 }
13530+
13531+/**
13532+ * drm_send_event_timestamp_locked - send DRM event to file descriptor
13533+ * @dev: DRM device
13534+ * @e: DRM event to deliver
13535+ * @timestamp: timestamp to set for the fence event in kernel's CLOCK_MONOTONIC
13536+ * time domain
13537+ *
13538+ * This function sends the event @e, initialized with drm_event_reserve_init(),
13539+ * to its associated userspace DRM file. Callers must already hold
13540+ * &drm_device.event_lock.
13541+ *
13542+ * Note that the core will take care of unlinking and disarming events when the
13543+ * corresponding DRM file is closed. Drivers need not worry about whether the
13544+ * DRM file for this event still exists and can call this function upon
13545+ * completion of the asynchronous work unconditionally.
13546+ */
13547+void drm_send_event_timestamp_locked(struct drm_device *dev,
13548+				     struct drm_pending_event *e, ktime_t timestamp)
13549+{
13550+	drm_send_event_helper(dev, e, timestamp);
13551+}
13552+EXPORT_SYMBOL(drm_send_event_timestamp_locked);
13553+
13554+/**
13555+ * drm_send_event_locked - send DRM event to file descriptor
13556+ * @dev: DRM device
13557+ * @e: DRM event to deliver
13558+ *
13559+ * This function sends the event @e, initialized with drm_event_reserve_init(),
13560+ * to its associated userspace DRM file. Callers must already hold
13561+ * &drm_device.event_lock, see drm_send_event() for the unlocked version.
13562+ *
13563+ * Note that the core will take care of unlinking and disarming events when the
13564+ * corresponding DRM file is closed. Drivers need not worry about whether the
13565+ * DRM file for this event still exists and can call this function upon
13566+ * completion of the asynchronous work unconditionally.
13567+ */
13568+void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e)
13569+{
13570+	drm_send_event_helper(dev, e, 0);
13571+}
13572 EXPORT_SYMBOL(drm_send_event_locked);
13573 
13574 /**
13575@@ -837,7 +880,7 @@ void drm_send_event(struct drm_device *dev, struct drm_pending_event *e)
13576 	unsigned long irqflags;
13577 
13578 	spin_lock_irqsave(&dev->event_lock, irqflags);
13579-	drm_send_event_locked(dev, e);
13580+	drm_send_event_helper(dev, e, 0);
13581 	spin_unlock_irqrestore(&dev->event_lock, irqflags);
13582 }
13583 EXPORT_SYMBOL(drm_send_event);
13584diff --git a/drivers/gpu/drm/drm_fourcc.c b/drivers/gpu/drm/drm_fourcc.c
13585index 8d1064061..e4c8aa361 100644
13586--- a/drivers/gpu/drm/drm_fourcc.c
13587+++ b/drivers/gpu/drm/drm_fourcc.c
13588@@ -282,6 +282,16 @@ const struct drm_format_info *__drm_format_info(u32 format)
13589 		  .num_planes = 2, .char_per_block = { 5, 5, 0 },
13590 		  .block_w = { 4, 2, 0 }, .block_h = { 1, 1, 0 }, .hsub = 2,
13591 		  .vsub = 2, .is_yuv = true },
13592+#ifdef CONFIG_NO_GKI
13593+		{ .format = DRM_FORMAT_NV20,		.depth = 0,
13594+		  .num_planes = 2, .char_per_block = { 5, 5, 0 },
13595+		  .block_w = { 4, 2, 0 }, .block_h = { 1, 1, 0 }, .hsub = 2,
13596+		  .vsub = 1, .is_yuv = true },
13597+		{ .format = DRM_FORMAT_NV30,		.depth = 0,
13598+		  .num_planes = 2, .char_per_block = { 5, 5, 0 },
13599+		  .block_w = { 4, 2, 0 }, .block_h = { 1, 1, 0 }, .hsub = 1,
13600+		  .vsub = 1, .is_yuv = true },
13601+#endif
13602 		{ .format = DRM_FORMAT_Q410,		.depth = 0,
13603 		  .num_planes = 3, .char_per_block = { 2, 2, 2 },
13604 		  .block_w = { 1, 1, 1 }, .block_h = { 1, 1, 1 }, .hsub = 1,
13605diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
13606index c160a45a4..73818ffa0 100644
13607--- a/drivers/gpu/drm/drm_ioctl.c
13608+++ b/drivers/gpu/drm/drm_ioctl.c
13609@@ -543,6 +543,7 @@ int drm_version(struct drm_device *dev, void *data,
13610  */
13611 int drm_ioctl_permit(u32 flags, struct drm_file *file_priv)
13612 {
13613+#ifndef CONFIG_DRM_IGNORE_IOTCL_PERMIT
13614 	/* ROOT_ONLY is only for CAP_SYS_ADMIN */
13615 	if (unlikely((flags & DRM_ROOT_ONLY) && !capable(CAP_SYS_ADMIN)))
13616 		return -EACCES;
13617@@ -561,6 +562,7 @@ int drm_ioctl_permit(u32 flags, struct drm_file *file_priv)
13618 	if (unlikely(!(flags & DRM_RENDER_ALLOW) &&
13619 		     drm_is_render_client(file_priv)))
13620 		return -EACCES;
13621+#endif
13622 
13623 	return 0;
13624 }
13625@@ -684,9 +686,9 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
13626 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb_ioctl, 0),
13627 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER),
13628 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER),
13629-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, 0),
13630-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_MAP_DUMB, drm_mode_mmap_dumb_ioctl, 0),
13631-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, 0),
13632+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, DRM_RENDER_ALLOW),
13633+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_MAP_DUMB, drm_mode_mmap_dumb_ioctl, DRM_RENDER_ALLOW),
13634+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, DRM_RENDER_ALLOW),
13635 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_GETPROPERTIES, drm_mode_obj_get_properties_ioctl, 0),
13636 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_SETPROPERTY, drm_mode_obj_set_property_ioctl, DRM_MASTER),
13637 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR2, drm_mode_cursor2_ioctl, DRM_MASTER),
13638diff --git a/drivers/gpu/drm/drm_lease.c b/drivers/gpu/drm/drm_lease.c
13639index aef226340..da4f085fc 100644
13640--- a/drivers/gpu/drm/drm_lease.c
13641+++ b/drivers/gpu/drm/drm_lease.c
13642@@ -107,19 +107,10 @@ static bool _drm_has_leased(struct drm_master *master, int id)
13643  */
13644 bool _drm_lease_held(struct drm_file *file_priv, int id)
13645 {
13646-	bool ret;
13647-	struct drm_master *master;
13648-
13649-	if (!file_priv)
13650+	if (!file_priv || !file_priv->master)
13651 		return true;
13652 
13653-	master = drm_file_get_master(file_priv);
13654-	if (!master)
13655-		return true;
13656-	ret = _drm_lease_held_master(master, id);
13657-	drm_master_put(&master);
13658-
13659-	return ret;
13660+	return _drm_lease_held_master(file_priv->master, id);
13661 }
13662 
13663 /**
13664@@ -138,22 +129,13 @@ bool drm_lease_held(struct drm_file *file_priv, int id)
13665 	struct drm_master *master;
13666 	bool ret;
13667 
13668-	if (!file_priv)
13669+	if (!file_priv || !file_priv->master || !file_priv->master->lessor)
13670 		return true;
13671 
13672-	master = drm_file_get_master(file_priv);
13673-	if (!master)
13674-		return true;
13675-	if (!master->lessor) {
13676-		ret = true;
13677-		goto out;
13678-	}
13679+	master = file_priv->master;
13680 	mutex_lock(&master->dev->mode_config.idr_mutex);
13681 	ret = _drm_lease_held_master(master, id);
13682 	mutex_unlock(&master->dev->mode_config.idr_mutex);
13683-
13684-out:
13685-	drm_master_put(&master);
13686 	return ret;
13687 }
13688 
13689@@ -173,16 +155,10 @@ uint32_t drm_lease_filter_crtcs(struct drm_file *file_priv, uint32_t crtcs_in)
13690 	int count_in, count_out;
13691 	uint32_t crtcs_out = 0;
13692 
13693-	if (!file_priv)
13694+	if (!file_priv || !file_priv->master || !file_priv->master->lessor)
13695 		return crtcs_in;
13696 
13697-	master = drm_file_get_master(file_priv);
13698-	if (!master)
13699-		return crtcs_in;
13700-	if (!master->lessor) {
13701-		crtcs_out = crtcs_in;
13702-		goto out;
13703-	}
13704+	master = file_priv->master;
13705 	dev = master->dev;
13706 
13707 	count_in = count_out = 0;
13708@@ -201,9 +177,6 @@ uint32_t drm_lease_filter_crtcs(struct drm_file *file_priv, uint32_t crtcs_in)
13709 		count_in++;
13710 	}
13711 	mutex_unlock(&master->dev->mode_config.idr_mutex);
13712-
13713-out:
13714-	drm_master_put(&master);
13715 	return crtcs_out;
13716 }
13717 
13718@@ -517,7 +490,7 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
13719 	size_t object_count;
13720 	int ret = 0;
13721 	struct idr leases;
13722-	struct drm_master *lessor;
13723+	struct drm_master *lessor = lessor_priv->master;
13724 	struct drm_master *lessee = NULL;
13725 	struct file *lessee_file = NULL;
13726 	struct file *lessor_file = lessor_priv->filp;
13727@@ -529,6 +502,12 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
13728 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
13729 		return -EOPNOTSUPP;
13730 
13731+	/* Do not allow sub-leases */
13732+	if (lessor->lessor) {
13733+		DRM_DEBUG_LEASE("recursive leasing not allowed\n");
13734+		return -EINVAL;
13735+	}
13736+
13737 	/* need some objects */
13738 	if (cl->object_count == 0) {
13739 		DRM_DEBUG_LEASE("no objects in lease\n");
13740@@ -540,22 +519,12 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
13741 		return -EINVAL;
13742 	}
13743 
13744-	lessor = drm_file_get_master(lessor_priv);
13745-	/* Do not allow sub-leases */
13746-	if (lessor->lessor) {
13747-		DRM_DEBUG_LEASE("recursive leasing not allowed\n");
13748-		ret = -EINVAL;
13749-		goto out_lessor;
13750-	}
13751-
13752 	object_count = cl->object_count;
13753 
13754 	object_ids = memdup_user(u64_to_user_ptr(cl->object_ids),
13755 			array_size(object_count, sizeof(__u32)));
13756-	if (IS_ERR(object_ids)) {
13757-		ret = PTR_ERR(object_ids);
13758-		goto out_lessor;
13759-	}
13760+	if (IS_ERR(object_ids))
13761+		return PTR_ERR(object_ids);
13762 
13763 	idr_init(&leases);
13764 
13765@@ -566,15 +535,14 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
13766 	if (ret) {
13767 		DRM_DEBUG_LEASE("lease object lookup failed: %i\n", ret);
13768 		idr_destroy(&leases);
13769-		goto out_lessor;
13770+		return ret;
13771 	}
13772 
13773 	/* Allocate a file descriptor for the lease */
13774 	fd = get_unused_fd_flags(cl->flags & (O_CLOEXEC | O_NONBLOCK));
13775 	if (fd < 0) {
13776 		idr_destroy(&leases);
13777-		ret = fd;
13778-		goto out_lessor;
13779+		return fd;
13780 	}
13781 
13782 	DRM_DEBUG_LEASE("Creating lease\n");
13783@@ -610,7 +578,6 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
13784 	/* Hook up the fd */
13785 	fd_install(fd, lessee_file);
13786 
13787-	drm_master_put(&lessor);
13788 	DRM_DEBUG_LEASE("drm_mode_create_lease_ioctl succeeded\n");
13789 	return 0;
13790 
13791@@ -620,8 +587,6 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
13792 out_leases:
13793 	put_unused_fd(fd);
13794 
13795-out_lessor:
13796-	drm_master_put(&lessor);
13797 	DRM_DEBUG_LEASE("drm_mode_create_lease_ioctl failed: %d\n", ret);
13798 	return ret;
13799 }
13800@@ -644,7 +609,7 @@ int drm_mode_list_lessees_ioctl(struct drm_device *dev,
13801 	struct drm_mode_list_lessees *arg = data;
13802 	__u32 __user *lessee_ids = (__u32 __user *) (uintptr_t) (arg->lessees_ptr);
13803 	__u32 count_lessees = arg->count_lessees;
13804-	struct drm_master *lessor, *lessee;
13805+	struct drm_master *lessor = lessor_priv->master, *lessee;
13806 	int count;
13807 	int ret = 0;
13808 
13809@@ -655,7 +620,6 @@ int drm_mode_list_lessees_ioctl(struct drm_device *dev,
13810 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
13811 		return -EOPNOTSUPP;
13812 
13813-	lessor = drm_file_get_master(lessor_priv);
13814 	DRM_DEBUG_LEASE("List lessees for %d\n", lessor->lessee_id);
13815 
13816 	mutex_lock(&dev->mode_config.idr_mutex);
13817@@ -679,7 +643,6 @@ int drm_mode_list_lessees_ioctl(struct drm_device *dev,
13818 		arg->count_lessees = count;
13819 
13820 	mutex_unlock(&dev->mode_config.idr_mutex);
13821-	drm_master_put(&lessor);
13822 
13823 	return ret;
13824 }
13825@@ -699,7 +662,7 @@ int drm_mode_get_lease_ioctl(struct drm_device *dev,
13826 	struct drm_mode_get_lease *arg = data;
13827 	__u32 __user *object_ids = (__u32 __user *) (uintptr_t) (arg->objects_ptr);
13828 	__u32 count_objects = arg->count_objects;
13829-	struct drm_master *lessee;
13830+	struct drm_master *lessee = lessee_priv->master;
13831 	struct idr *object_idr;
13832 	int count;
13833 	void *entry;
13834@@ -713,7 +676,6 @@ int drm_mode_get_lease_ioctl(struct drm_device *dev,
13835 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
13836 		return -EOPNOTSUPP;
13837 
13838-	lessee = drm_file_get_master(lessee_priv);
13839 	DRM_DEBUG_LEASE("get lease for %d\n", lessee->lessee_id);
13840 
13841 	mutex_lock(&dev->mode_config.idr_mutex);
13842@@ -741,7 +703,6 @@ int drm_mode_get_lease_ioctl(struct drm_device *dev,
13843 		arg->count_objects = count;
13844 
13845 	mutex_unlock(&dev->mode_config.idr_mutex);
13846-	drm_master_put(&lessee);
13847 
13848 	return ret;
13849 }
13850@@ -760,7 +721,7 @@ int drm_mode_revoke_lease_ioctl(struct drm_device *dev,
13851 				void *data, struct drm_file *lessor_priv)
13852 {
13853 	struct drm_mode_revoke_lease *arg = data;
13854-	struct drm_master *lessor;
13855+	struct drm_master *lessor = lessor_priv->master;
13856 	struct drm_master *lessee;
13857 	int ret = 0;
13858 
13859@@ -770,7 +731,6 @@ int drm_mode_revoke_lease_ioctl(struct drm_device *dev,
13860 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
13861 		return -EOPNOTSUPP;
13862 
13863-	lessor = drm_file_get_master(lessor_priv);
13864 	mutex_lock(&dev->mode_config.idr_mutex);
13865 
13866 	lessee = _drm_find_lessee(lessor, arg->lessee_id);
13867@@ -791,7 +751,6 @@ int drm_mode_revoke_lease_ioctl(struct drm_device *dev,
13868 
13869 fail:
13870 	mutex_unlock(&dev->mode_config.idr_mutex);
13871-	drm_master_put(&lessor);
13872 
13873 	return ret;
13874 }
13875diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c
13876index 19fb1d93a..3caf9ff34 100644
13877--- a/drivers/gpu/drm/drm_mipi_dsi.c
13878+++ b/drivers/gpu/drm/drm_mipi_dsi.c
13879@@ -356,6 +356,7 @@ static ssize_t mipi_dsi_device_transfer(struct mipi_dsi_device *dsi,
13880 
13881 	if (dsi->mode_flags & MIPI_DSI_MODE_LPM)
13882 		msg->flags |= MIPI_DSI_MSG_USE_LPM;
13883+	msg->flags |= MIPI_DSI_MSG_LASTCOMMAND;
13884 
13885 	return ops->transfer(dsi->host, msg);
13886 }
13887diff --git a/drivers/gpu/drm/drm_mode_config.c b/drivers/gpu/drm/drm_mode_config.c
13888index fad2c1181..58050d4ae 100644
13889--- a/drivers/gpu/drm/drm_mode_config.c
13890+++ b/drivers/gpu/drm/drm_mode_config.c
13891@@ -364,6 +364,22 @@ static int drm_mode_create_standard_properties(struct drm_device *dev)
13892 		return -ENOMEM;
13893 	dev->mode_config.gamma_lut_size_property = prop;
13894 
13895+#if defined(CONFIG_ROCKCHIP_DRM_CUBIC_LUT)
13896+	prop = drm_property_create(dev,
13897+			DRM_MODE_PROP_BLOB,
13898+			"CUBIC_LUT", 0);
13899+	if (!prop)
13900+		return -ENOMEM;
13901+	dev->mode_config.cubic_lut_property = prop;
13902+
13903+	prop = drm_property_create_range(dev,
13904+			DRM_MODE_PROP_IMMUTABLE,
13905+			"CUBIC_LUT_SIZE", 0, UINT_MAX);
13906+	if (!prop)
13907+		return -ENOMEM;
13908+	dev->mode_config.cubic_lut_size_property = prop;
13909+#endif
13910+
13911 	prop = drm_property_create(dev,
13912 				   DRM_MODE_PROP_IMMUTABLE | DRM_MODE_PROP_BLOB,
13913 				   "IN_FORMATS", 0);
13914diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
13915index 0f99e5453..d42c7310b 100644
13916--- a/drivers/gpu/drm/drm_modes.c
13917+++ b/drivers/gpu/drm/drm_modes.c
13918@@ -1940,6 +1940,7 @@ void drm_mode_convert_to_umode(struct drm_mode_modeinfo *out,
13919 	strncpy(out->name, in->name, DRM_DISPLAY_MODE_LEN);
13920 	out->name[DRM_DISPLAY_MODE_LEN-1] = 0;
13921 }
13922+EXPORT_SYMBOL_GPL(drm_mode_convert_to_umode);
13923 
13924 /**
13925  * drm_crtc_convert_umode - convert a modeinfo into a drm_display_mode
13926@@ -2016,6 +2017,7 @@ int drm_mode_convert_umode(struct drm_device *dev,
13927 
13928 	return 0;
13929 }
13930+EXPORT_SYMBOL_GPL(drm_mode_convert_umode);
13931 
13932 /**
13933  * drm_mode_is_420_only - if a given videomode can be only supported in YCBCR420
13934diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
13935index 825499ea3..272e5cdd6 100644
13936--- a/drivers/gpu/drm/drm_prime.c
13937+++ b/drivers/gpu/drm/drm_prime.c
13938@@ -784,6 +784,28 @@ int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
13939 }
13940 EXPORT_SYMBOL(drm_gem_dmabuf_mmap);
13941 
13942+/**
13943+ * drm_gem_dmabuf_get_uuid - dma_buf get_uuid implementation for GEM
13944+ * @dma_buf: buffer to query
13945+ * @uuid: uuid outparam
13946+ *
13947+ * Queries the buffer's virtio UUID. This can be used as the
13948+ * &dma_buf_ops.get_uuid callback. Calls into &drm_driver.gem_prime_get_uuid.
13949+ *
13950+ * Returns 0 on success or a negative error code on failure.
13951+ */
13952+int drm_gem_dmabuf_get_uuid(struct dma_buf *dma_buf, uuid_t *uuid)
13953+{
13954+	struct drm_gem_object *obj = dma_buf->priv;
13955+	struct drm_device *dev = obj->dev;
13956+
13957+	if (!dev->driver->gem_prime_get_uuid)
13958+		return -ENODEV;
13959+
13960+	return dev->driver->gem_prime_get_uuid(obj, uuid);
13961+}
13962+EXPORT_SYMBOL(drm_gem_dmabuf_get_uuid);
13963+
13964 static const struct dma_buf_ops drm_gem_prime_dmabuf_ops =  {
13965 	.cache_sgt_mapping = true,
13966 	.attach = drm_gem_map_attach,
13967@@ -794,6 +816,7 @@ static const struct dma_buf_ops drm_gem_prime_dmabuf_ops =  {
13968 	.mmap = drm_gem_dmabuf_mmap,
13969 	.vmap = drm_gem_dmabuf_vmap,
13970 	.vunmap = drm_gem_dmabuf_vunmap,
13971+	.get_uuid = drm_gem_dmabuf_get_uuid,
13972 };
13973 
13974 /**
13975diff --git a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c
13976index f135b7959..286edbe1b 100644
13977--- a/drivers/gpu/drm/drm_vblank.c
13978+++ b/drivers/gpu/drm/drm_vblank.c
13979@@ -1000,7 +1000,14 @@ static void send_vblank_event(struct drm_device *dev,
13980 		break;
13981 	}
13982 	trace_drm_vblank_event_delivered(e->base.file_priv, e->pipe, seq);
13983-	drm_send_event_locked(dev, &e->base);
13984+	/*
13985+	 * Use the same timestamp for any associated fence signal to avoid
13986+	 * mismatch in timestamps for vsync & fence events triggered by the
13987+	 * same HW event. Frameworks like SurfaceFlinger in Android expect the
13988+	 * retire-fence timestamp to match the HW vsync exactly, as they use it
13989+	 * for their software vsync modeling.
13990+	 */
13991+	drm_send_event_timestamp_locked(dev, &e->base, now);
13992 }
13993 
13994 /**
13995diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
13996index 1a87cc445..7c7d10225 100644
13997--- a/drivers/gpu/drm/panel/panel-simple.c
13998+++ b/drivers/gpu/drm/panel/panel-simple.c
13999@@ -30,6 +30,7 @@
14000 #include <linux/regulator/consumer.h>
14001 
14002 #include <video/display_timing.h>
14003+#include <video/mipi_display.h>
14004 #include <video/of_display_timing.h>
14005 #include <video/videomode.h>
14006 
14007@@ -37,6 +38,25 @@
14008 #include <drm/drm_device.h>
14009 #include <drm/drm_mipi_dsi.h>
14010 #include <drm/drm_panel.h>
14011+#include <drm/drm_dsc.h>
14012+
14013+#include <linux/panel-simple.h>
14014+
14015+struct panel_cmd_header {
14016+	u8 data_type;
14017+	u8 delay;
14018+	u8 payload_length;
14019+} __packed;
14020+
14021+struct panel_cmd_desc {
14022+	struct panel_cmd_header header;
14023+	u8 *payload;
14024+};
14025+
14026+struct panel_cmd_seq {
14027+	struct panel_cmd_desc *cmds;
14028+	unsigned int cmd_cnt;
14029+};
14030 
14031 /**
14032  * @modes: Pointer to array of fixed modes appropriate for this panel.  If
14033@@ -83,6 +103,10 @@ struct panel_desc {
14034 	 *           turn the display off (no content is visible)
14035 	 * @unprepare: the time (in milliseconds) that it takes for the panel
14036 	 *             to power itself down completely
14037+	 * @reset: the time (in milliseconds) that it takes for the panel
14038+	 *         to reset itself completely
14039+	 * @init: the time (in milliseconds) that it takes for the panel to
14040+	 *	  send the init command sequence after reset is deasserted
14041 	 */
14042 	struct {
14043 		unsigned int prepare;
14044@@ -90,17 +114,24 @@ struct panel_desc {
14045 		unsigned int enable;
14046 		unsigned int disable;
14047 		unsigned int unprepare;
14048+		unsigned int reset;
14049+		unsigned int init;
14050 	} delay;
14051 
14052 	u32 bus_format;
14053 	u32 bus_flags;
14054 	int connector_type;
14055+
14056+	struct panel_cmd_seq *init_seq;
14057+	struct panel_cmd_seq *exit_seq;
14058 };
14059 
14060 struct panel_simple {
14061 	struct drm_panel base;
14062+	struct mipi_dsi_device *dsi;
14063 	bool prepared;
14064 	bool enabled;
14065+	bool power_invert;
14066 	bool no_hpd;
14067 
14068 	const struct panel_desc *desc;
14069@@ -109,10 +140,12 @@ struct panel_simple {
14070 	struct i2c_adapter *ddc;
14071 
14072 	struct gpio_desc *enable_gpio;
14073+	struct gpio_desc *reset_gpio;
14074 	struct gpio_desc *hpd_gpio;
14075 
14076 	struct drm_display_mode override_mode;
14077 
14078+	struct drm_dsc_picture_parameter_set *pps;
14079 	enum drm_panel_orientation orientation;
14080 };
14081 
14082@@ -121,6 +154,124 @@ static inline struct panel_simple *to_panel_simple(struct drm_panel *panel)
14083 	return container_of(panel, struct panel_simple, base);
14084 }
14085 
14086+static int panel_simple_parse_cmd_seq(struct device *dev,
14087+				      const u8 *data, int length,
14088+				      struct panel_cmd_seq *seq)
14089+{
14090+	struct panel_cmd_header *header;
14091+	struct panel_cmd_desc *desc;
14092+	char *buf, *d;
14093+	unsigned int i, cnt, len;
14094+
14095+	if (!seq)
14096+		return -EINVAL;
14097+
14098+	buf = devm_kmemdup(dev, data, length, GFP_KERNEL);
14099+	if (!buf)
14100+		return -ENOMEM;
14101+
14102+	d = buf;
14103+	len = length;
14104+	cnt = 0;
14105+	while (len > sizeof(*header)) {
14106+		header = (struct panel_cmd_header *)d;
14107+
14108+		d += sizeof(*header);
14109+		len -= sizeof(*header);
14110+
14111+		if (header->payload_length > len)
14112+			return -EINVAL;
14113+
14114+		d += header->payload_length;
14115+		len -= header->payload_length;
14116+		cnt++;
14117+	}
14118+
14119+	if (len)
14120+		return -EINVAL;
14121+
14122+	seq->cmd_cnt = cnt;
14123+	seq->cmds = devm_kcalloc(dev, cnt, sizeof(*desc), GFP_KERNEL);
14124+	if (!seq->cmds)
14125+		return -ENOMEM;
14126+
14127+	d = buf;
14128+	len = length;
14129+	for (i = 0; i < cnt; i++) {
14130+		header = (struct panel_cmd_header *)d;
14131+		len -= sizeof(*header);
14132+		d += sizeof(*header);
14133+
14134+		desc = &seq->cmds[i];
14135+		desc->header = *header;
14136+		desc->payload = d;
14137+
14138+		d += header->payload_length;
14139+		len -= header->payload_length;
14140+	}
14141+
14142+	return 0;
14143+}
14144+
14145+static int panel_simple_xfer_dsi_cmd_seq(struct panel_simple *panel,
14146+					 struct panel_cmd_seq *seq)
14147+{
14148+	struct device *dev = panel->base.dev;
14149+	struct mipi_dsi_device *dsi = panel->dsi;
14150+	unsigned int i;
14151+	int err;
14152+
14153+	if (!IS_ENABLED(CONFIG_DRM_MIPI_DSI))
14154+		return -EINVAL;
14155+	if (!seq)
14156+		return -EINVAL;
14157+
14158+	for (i = 0; i < seq->cmd_cnt; i++) {
14159+		struct panel_cmd_desc *cmd = &seq->cmds[i];
14160+
14161+		switch (cmd->header.data_type) {
14162+		case MIPI_DSI_COMPRESSION_MODE:
14163+			err = mipi_dsi_compression_mode(dsi, cmd->payload[0]);
14164+			break;
14165+		case MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM:
14166+		case MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM:
14167+		case MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM:
14168+		case MIPI_DSI_GENERIC_LONG_WRITE:
14169+			err = mipi_dsi_generic_write(dsi, cmd->payload,
14170+						     cmd->header.payload_length);
14171+			break;
14172+		case MIPI_DSI_DCS_SHORT_WRITE:
14173+		case MIPI_DSI_DCS_SHORT_WRITE_PARAM:
14174+		case MIPI_DSI_DCS_LONG_WRITE:
14175+			err = mipi_dsi_dcs_write_buffer(dsi, cmd->payload,
14176+							cmd->header.payload_length);
14177+			break;
14178+		case MIPI_DSI_PICTURE_PARAMETER_SET:
14179+			if (!panel->pps) {
14180+				panel->pps = devm_kzalloc(dev, sizeof(*panel->pps),
14181+							  GFP_KERNEL);
14182+				if (!panel->pps)
14183+					return -ENOMEM;
14184+
14185+				memcpy(panel->pps, cmd->payload, cmd->header.payload_length);
14186+			}
14187+
14188+			err = mipi_dsi_picture_parameter_set(dsi, panel->pps);
14189+			break;
14190+		default:
14191+			return -EINVAL;
14192+		}
14193+
14194+		if (err < 0)
14195+			dev_err(dev, "failed to write dcs cmd: %d\n", err);
14196+
14197+		if (cmd->header.delay)
14198+			msleep(cmd->header.delay);
14199+	}
14200+
14201+	return 0;
14202+}
14203+
14204 static unsigned int panel_simple_get_timings_modes(struct panel_simple *panel,
14205 						   struct drm_connector *connector)
14206 {
14207@@ -219,17 +370,72 @@ static int panel_simple_get_non_edid_modes(struct panel_simple *panel,
14208 	if (num == 0)
14209 		num = panel_simple_get_display_modes(panel, connector);
14210 
14211-	connector->display_info.bpc = panel->desc->bpc;
14212-	connector->display_info.width_mm = panel->desc->size.width;
14213-	connector->display_info.height_mm = panel->desc->size.height;
14214+	if (panel->desc->bpc)
14215+		connector->display_info.bpc = panel->desc->bpc;
14216+	if (panel->desc->size.width)
14217+		connector->display_info.width_mm = panel->desc->size.width;
14218+	if (panel->desc->size.height)
14219+		connector->display_info.height_mm = panel->desc->size.height;
14220 	if (panel->desc->bus_format)
14221 		drm_display_info_set_bus_formats(&connector->display_info,
14222 						 &panel->desc->bus_format, 1);
14223-	connector->display_info.bus_flags = panel->desc->bus_flags;
14224+	if (panel->desc->bus_flags)
14225+		connector->display_info.bus_flags = panel->desc->bus_flags;
14226 
14227 	return num;
14228 }
14229 
14230+static int panel_simple_regulator_enable(struct panel_simple *p)
14231+{
14232+	int err;
14233+
14234+	if (p->power_invert) {
14235+		if (regulator_is_enabled(p->supply) > 0)
14236+			regulator_disable(p->supply);
14237+	} else {
14238+		err = regulator_enable(p->supply);
14239+		if (err < 0)
14240+			return err;
14241+	}
14242+
14243+	return 0;
14244+}
14245+
14246+static int panel_simple_regulator_disable(struct panel_simple *p)
14247+{
14248+	int err;
14249+
14250+	if (p->power_invert) {
14251+		if (!regulator_is_enabled(p->supply)) {
14252+			err = regulator_enable(p->supply);
14253+			if (err < 0)
14254+				return err;
14255+		}
14256+	} else {
14257+		regulator_disable(p->supply);
14258+	}
14259+
14260+	return 0;
14261+}
14262+
14263+int panel_simple_loader_protect(struct drm_panel *panel)
14264+{
14265+	struct panel_simple *p = to_panel_simple(panel);
14266+	int err;
14267+
14268+	err = panel_simple_regulator_enable(p);
14269+	if (err < 0) {
14270+		dev_err(panel->dev, "failed to enable supply: %d\n", err);
14271+		return err;
14272+	}
14273+
14274+	p->prepared = true;
14275+	p->enabled = true;
14276+
14277+	return 0;
14278+}
14279+EXPORT_SYMBOL(panel_simple_loader_protect);
14280+
14281 static int panel_simple_disable(struct drm_panel *panel)
14282 {
14283 	struct panel_simple *p = to_panel_simple(panel);
14284@@ -252,9 +458,14 @@ static int panel_simple_unprepare(struct drm_panel *panel)
14285 	if (!p->prepared)
14286 		return 0;
14287 
14288-	gpiod_set_value_cansleep(p->enable_gpio, 0);
14289+	if (p->desc->exit_seq)
14290+		if (p->dsi)
14291+			panel_simple_xfer_dsi_cmd_seq(p, p->desc->exit_seq);
14292+
14293+	gpiod_direction_output(p->reset_gpio, 1);
14294+	gpiod_direction_output(p->enable_gpio, 0);
14295 
14296-	regulator_disable(p->supply);
14297+	panel_simple_regulator_disable(p);
14298 
14299 	if (p->desc->delay.unprepare)
14300 		msleep(p->desc->delay.unprepare);
14301@@ -299,13 +510,23 @@ static int panel_simple_prepare(struct drm_panel *panel)
14302 	if (p->prepared)
14303 		return 0;
14304 
14305-	err = regulator_enable(p->supply);
14306+	err = panel_simple_regulator_enable(p);
14307 	if (err < 0) {
14308 		dev_err(panel->dev, "failed to enable supply: %d\n", err);
14309 		return err;
14310 	}
14311 
14312-	gpiod_set_value_cansleep(p->enable_gpio, 1);
14313+	gpiod_direction_output(p->enable_gpio, 1);
14314+
14315+	if (p->desc->delay.prepare)
14316+		msleep(p->desc->delay.prepare);
14317+
14318+	gpiod_direction_output(p->reset_gpio, 1);
14319+
14320+	if (p->desc->delay.reset)
14321+		msleep(p->desc->delay.reset);
14322+
14323+	gpiod_direction_output(p->reset_gpio, 0);
14324 
14325 	delay = p->desc->delay.prepare;
14326 	if (p->no_hpd)
14327@@ -333,6 +554,13 @@ static int panel_simple_prepare(struct drm_panel *panel)
14328 		}
14329 	}
14330 
14331+	if (p->desc->init_seq)
14332+		if (p->dsi)
14333+			panel_simple_xfer_dsi_cmd_seq(p, p->desc->init_seq);
14334+
14335+	if (p->desc->delay.init)
14336+		msleep(p->desc->delay.init);
14337+
14338 	p->prepared = true;
14339 
14340 	return 0;
14341@@ -500,6 +728,52 @@ static void panel_simple_parse_panel_timing_node(struct device *dev,
14342 		dev_err(dev, "Reject override mode: No display_timing found\n");
14343 }
14344 
14345+static int dcs_bl_update_status(struct backlight_device *bl)
14346+{
14347+	struct panel_simple *p = bl_get_data(bl);
14348+	struct mipi_dsi_device *dsi = p->dsi;
14349+	int ret;
14350+
14351+	if (!p->prepared)
14352+		return 0;
14353+
14354+	dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
14355+
14356+	ret = mipi_dsi_dcs_set_display_brightness(dsi, bl->props.brightness);
14357+	if (ret < 0)
14358+		return ret;
14359+
14360+	dsi->mode_flags |= MIPI_DSI_MODE_LPM;
14361+
14362+	return 0;
14363+}
14364+
14365+static int dcs_bl_get_brightness(struct backlight_device *bl)
14366+{
14367+	struct panel_simple *p = bl_get_data(bl);
14368+	struct mipi_dsi_device *dsi = p->dsi;
14369+	u16 brightness = bl->props.brightness;
14370+	int ret;
14371+
14372+	if (!p->prepared)
14373+		return 0;
14374+
14375+	dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
14376+
14377+	ret = mipi_dsi_dcs_get_display_brightness(dsi, &brightness);
14378+	if (ret < 0)
14379+		return ret;
14380+
14381+	dsi->mode_flags |= MIPI_DSI_MODE_LPM;
14382+
14383+	return brightness & 0xff;
14384+}
14385+
14386+static const struct backlight_ops dcs_bl_ops = {
14387+	.update_status = dcs_bl_update_status,
14388+	.get_brightness = dcs_bl_get_brightness,
14389+};
14390+
14391 static int panel_simple_probe(struct device *dev, const struct panel_desc *desc)
14392 {
14393 	struct panel_simple *panel;
14394@@ -528,12 +802,19 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc)
14395 	if (IS_ERR(panel->supply))
14396 		return PTR_ERR(panel->supply);
14397 
14398-	panel->enable_gpio = devm_gpiod_get_optional(dev, "enable",
14399-						     GPIOD_OUT_LOW);
14400+	panel->enable_gpio = devm_gpiod_get_optional(dev, "enable", GPIOD_ASIS);
14401 	if (IS_ERR(panel->enable_gpio)) {
14402 		err = PTR_ERR(panel->enable_gpio);
14403 		if (err != -EPROBE_DEFER)
14404-			dev_err(dev, "failed to request GPIO: %d\n", err);
14405+			dev_err(dev, "failed to get enable GPIO: %d\n", err);
14406+		return err;
14407+	}
14408+
14409+	panel->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_ASIS);
14410+	if (IS_ERR(panel->reset_gpio)) {
14411+		err = PTR_ERR(panel->reset_gpio);
14412+		if (err != -EPROBE_DEFER)
14413+			dev_err(dev, "failed to get reset GPIO: %d\n", err);
14414 		return err;
14415 	}
14416 
14417@@ -543,6 +824,8 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc)
14418 		return err;
14419 	}
14420 
14421+	panel->power_invert = of_property_read_bool(dev->of_node, "power-invert");
14422+
14423 	ddc = of_parse_phandle(dev->of_node, "ddc-i2c-bus", 0);
14424 	if (ddc) {
14425 		panel->ddc = of_find_i2c_adapter_by_node(ddc);
14426@@ -557,7 +840,6 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc)
14427 		err = panel_dpi_probe(dev, panel);
14428 		if (err)
14429 			goto free_ddc;
14430-		desc = panel->desc;
14431 	} else {
14432 		if (!of_get_display_timing(dev->of_node, "panel-timing", &dt))
14433 			panel_simple_parse_panel_timing_node(dev, panel, &dt);
14434@@ -567,7 +849,7 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc)
14435 	/* Catch common mistakes for panels. */
14436 	switch (connector_type) {
14437 	case 0:
14438-		dev_warn(dev, "Specify missing connector_type\n");
14439+		dev_dbg(dev, "Specify missing connector_type\n");
14440 		connector_type = DRM_MODE_CONNECTOR_DPI;
14441 		break;
14442 	case DRM_MODE_CONNECTOR_LVDS:
14443@@ -3906,6 +4188,9 @@ static const struct panel_desc arm_rtsm = {
14444 
14445 static const struct of_device_id platform_of_match[] = {
14446 	{
14447+		.compatible = "simple-panel",
14448+		.data = NULL,
14449+	}, {
14450 		.compatible = "ampire,am-1280800n3tzqw-t00h",
14451 		.data = &ampire_am_1280800n3tzqw_t00h,
14452 	}, {
14453@@ -4314,15 +4599,132 @@ static const struct of_device_id platform_of_match[] = {
14454 };
14455 MODULE_DEVICE_TABLE(of, platform_of_match);
14456 
14457+static bool of_child_node_is_present(const struct device_node *node,
14458+				     const char *name)
14459+{
14460+	struct device_node *child;
14461+
14462+	child = of_get_child_by_name(node, name);
14463+	of_node_put(child);
14464+
14465+	return !!child;
14466+}
14467+
14468+static int panel_simple_of_get_desc_data(struct device *dev,
14469+					 struct panel_desc *desc)
14470+{
14471+	struct device_node *np = dev->of_node;
14472+	u32 bus_flags;
14473+	const void *data;
14474+	int len;
14475+	int err;
14476+
14477+	if (of_child_node_is_present(np, "display-timings")) {
14478+		struct drm_display_mode *mode;
14479+
14480+		mode = devm_kzalloc(dev, sizeof(*mode), GFP_KERNEL);
14481+		if (!mode)
14482+			return -ENOMEM;
14483+
14484+		if (!of_get_drm_display_mode(np, mode, &bus_flags,
14485+					     OF_USE_NATIVE_MODE)) {
14486+			desc->modes = mode;
14487+			desc->num_modes = 1;
14488+			desc->bus_flags = bus_flags;
14489+		}
14490+	} else if (of_child_node_is_present(np, "panel-timing")) {
14491+		struct display_timing *timing;
14492+		struct videomode vm;
14493+
14494+		timing = devm_kzalloc(dev, sizeof(*timing), GFP_KERNEL);
14495+		if (!timing)
14496+			return -ENOMEM;
14497+
14498+		if (!of_get_display_timing(np, "panel-timing", timing)) {
14499+			desc->timings = timing;
14500+			desc->num_timings = 1;
14501+
14502+			bus_flags = 0;
14503+			vm.flags = timing->flags;
14504+			drm_bus_flags_from_videomode(&vm, &bus_flags);
14505+			desc->bus_flags = bus_flags;
14506+		}
14507+	}
14508+
14509+	if (desc->num_modes || desc->num_timings) {
14510+		of_property_read_u32(np, "bpc", &desc->bpc);
14511+		of_property_read_u32(np, "bus-format", &desc->bus_format);
14512+		of_property_read_u32(np, "width-mm", &desc->size.width);
14513+		of_property_read_u32(np, "height-mm", &desc->size.height);
14514+	}
14515+
14516+	of_property_read_u32(np, "prepare-delay-ms", &desc->delay.prepare);
14517+	of_property_read_u32(np, "enable-delay-ms", &desc->delay.enable);
14518+	of_property_read_u32(np, "disable-delay-ms", &desc->delay.disable);
14519+	of_property_read_u32(np, "unprepare-delay-ms", &desc->delay.unprepare);
14520+	of_property_read_u32(np, "reset-delay-ms", &desc->delay.reset);
14521+	of_property_read_u32(np, "init-delay-ms", &desc->delay.init);
14522+
14523+	data = of_get_property(np, "panel-init-sequence", &len);
14524+	if (data) {
14525+		desc->init_seq = devm_kzalloc(dev, sizeof(*desc->init_seq),
14526+					      GFP_KERNEL);
14527+		if (!desc->init_seq)
14528+			return -ENOMEM;
14529+
14530+		err = panel_simple_parse_cmd_seq(dev, data, len,
14531+						 desc->init_seq);
14532+		if (err) {
14533+			dev_err(dev, "failed to parse init sequence\n");
14534+			return err;
14535+		}
14536+	}
14537+
14538+	data = of_get_property(np, "panel-exit-sequence", &len);
14539+	if (data) {
14540+		desc->exit_seq = devm_kzalloc(dev, sizeof(*desc->exit_seq),
14541+					      GFP_KERNEL);
14542+		if (!desc->exit_seq)
14543+			return -ENOMEM;
14544+
14545+		err = panel_simple_parse_cmd_seq(dev, data, len,
14546+						 desc->exit_seq);
14547+		if (err) {
14548+			dev_err(dev, "failed to parse exit sequence\n");
14549+			return err;
14550+		}
14551+	}
14552+
14553+	return 0;
14554+}
14555+
14556 static int panel_simple_platform_probe(struct platform_device *pdev)
14557 {
14558+	struct device *dev = &pdev->dev;
14559 	const struct of_device_id *id;
14560+	const struct panel_desc *desc;
14561+	struct panel_desc *d;
14562+	int err;
14563 
14564 	id = of_match_node(platform_of_match, pdev->dev.of_node);
14565 	if (!id)
14566 		return -ENODEV;
14567 
14568-	return panel_simple_probe(&pdev->dev, id->data);
14569+	if (!id->data) {
14570+		d = devm_kzalloc(dev, sizeof(*d), GFP_KERNEL);
14571+		if (!d)
14572+			return -ENOMEM;
14573+
14574+		err = panel_simple_of_get_desc_data(dev, d);
14575+		if (err) {
14576+			dev_err(dev, "failed to get desc data: %d\n", err);
14577+			return err;
14578+		}
14579+	}
14580+
14581+	desc = id->data ? id->data : d;
14582+
14583+	return panel_simple_probe(&pdev->dev, desc);
14584 }
14585 
14586 static int panel_simple_platform_remove(struct platform_device *pdev)
14587@@ -4557,6 +4959,9 @@ static const struct panel_desc_dsi osd101t2045_53ts = {
14588 
14589 static const struct of_device_id dsi_of_match[] = {
14590 	{
14591+		.compatible = "simple-panel-dsi",
14592+		.data = NULL,
14593+	}, {
14594 		.compatible = "auo,b080uan01",
14595 		.data = &auo_b080uan01
14596 	}, {
14597@@ -4583,9 +4988,33 @@ static const struct of_device_id dsi_of_match[] = {
14598 };
14599 MODULE_DEVICE_TABLE(of, dsi_of_match);
14600 
14601+static int panel_simple_dsi_of_get_desc_data(struct device *dev,
14602+					     struct panel_desc_dsi *desc)
14603+{
14604+	struct device_node *np = dev->of_node;
14605+	u32 val;
14606+	int err;
14607+
14608+	err = panel_simple_of_get_desc_data(dev, &desc->desc);
14609+	if (err)
14610+		return err;
14611+
14612+	if (!of_property_read_u32(np, "dsi,flags", &val))
14613+		desc->flags = val;
14614+	if (!of_property_read_u32(np, "dsi,format", &val))
14615+		desc->format = val;
14616+	if (!of_property_read_u32(np, "dsi,lanes", &val))
14617+		desc->lanes = val;
14618+
14619+	return 0;
14620+}
14621+
14622 static int panel_simple_dsi_probe(struct mipi_dsi_device *dsi)
14623 {
14624+	struct panel_simple *panel;
14625+	struct device *dev = &dsi->dev;
14626 	const struct panel_desc_dsi *desc;
14627+	struct panel_desc_dsi *d;
14628 	const struct of_device_id *id;
14629 	int err;
14630 
14631@@ -4593,12 +5022,47 @@ static int panel_simple_dsi_probe(struct mipi_dsi_device *dsi)
14632 	if (!id)
14633 		return -ENODEV;
14634 
14635-	desc = id->data;
14636+	if (!id->data) {
14637+		d = devm_kzalloc(dev, sizeof(*d), GFP_KERNEL);
14638+		if (!d)
14639+			return -ENOMEM;
14640+
14641+		err = panel_simple_dsi_of_get_desc_data(dev, d);
14642+		if (err) {
14643+			dev_err(dev, "failed to get desc data: %d\n", err);
14644+			return err;
14645+		}
14646+	}
14647+
14648+	desc = id->data ? id->data : d;
14649 
14650 	err = panel_simple_probe(&dsi->dev, &desc->desc);
14651 	if (err < 0)
14652 		return err;
14653 
14654+	panel = dev_get_drvdata(dev);
14655+	panel->dsi = dsi;
14656+
14657+	if (!panel->base.backlight) {
14658+		struct backlight_properties props;
14659+
14660+		memset(&props, 0, sizeof(props));
14661+		props.type = BACKLIGHT_RAW;
14662+		props.brightness = 255;
14663+		props.max_brightness = 255;
14664+
14665+		panel->base.backlight =
14666+			devm_backlight_device_register(dev, "dcs-backlight",
14667+						       dev, panel, &dcs_bl_ops,
14668+						       &props);
14669+		if (IS_ERR(panel->base.backlight)) {
14670+			err = PTR_ERR(panel->base.backlight);
14671+			dev_err(dev, "failed to register dcs backlight: %d\n",
14672+				err);
14673+			return err;
14674+		}
14675+	}
14676+
14677 	dsi->mode_flags = desc->flags;
14678 	dsi->format = desc->format;
14679 	dsi->lanes = desc->lanes;
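
For context (not part of the patch), the command stream consumed by panel_simple_parse_cmd_seq() above is a flat byte array, typically supplied through the panel-init-sequence / panel-exit-sequence properties: each command is a 3-byte header (data type, post-command delay in milliseconds, payload length) followed by that many payload bytes. A hypothetical two-command sequence, purely illustrative, could look like this:

#include <linux/types.h>	/* u8 (kernel context) */

/*
 * Illustrative only: two DCS commands in the header+payload layout parsed
 * by panel_simple_parse_cmd_seq().
 */
static const u8 example_init_seq[] = {
	/* MIPI_DSI_DCS_SHORT_WRITE (0x05), delay 120 ms, 1-byte payload: exit_sleep_mode (0x11) */
	0x05, 0x78, 0x01, 0x11,
	/* MIPI_DSI_DCS_SHORT_WRITE (0x05), delay 20 ms, 1-byte payload: set_display_on (0x29) */
	0x05, 0x14, 0x01, 0x29,
};
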
14680diff --git a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
14681index 512581698..4726b16a3 100644
14682--- a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
14683+++ b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
14684@@ -16,6 +16,7 @@
14685 #include <linux/reset.h>
14686 #include <linux/clk.h>
14687 
14688+#include <uapi/linux/videodev2.h>
14689 #include <video/of_videomode.h>
14690 #include <video/videomode.h>
14691 
14692@@ -31,54 +32,163 @@
14693 #include "rockchip_drm_drv.h"
14694 #include "rockchip_drm_vop.h"
14695 
14696-#define RK3288_GRF_SOC_CON6		0x25c
14697-#define RK3288_EDP_LCDC_SEL		BIT(5)
14698-#define RK3399_GRF_SOC_CON20		0x6250
14699-#define RK3399_EDP_LCDC_SEL		BIT(5)
14700-
14701-#define HIWORD_UPDATE(val, mask)	(val | (mask) << 16)
14702-
14703 #define PSR_WAIT_LINE_FLAG_TIMEOUT_MS	100
14704 
14705 #define to_dp(nm)	container_of(nm, struct rockchip_dp_device, nm)
14706 
14707+#define GRF_REG_FIELD(_reg, _lsb, _msb) {	\
14708+				.reg = _reg,	\
14709+				.lsb = _lsb,	\
14710+				.msb = _msb,	\
14711+				.valid = true,	\
14712+				}
14713+
14714+struct rockchip_grf_reg_field {
14715+	unsigned int reg;
14716+	unsigned int lsb;
14717+	unsigned int msb;
14718+	bool valid;
14719+};
14720+
14721 /**
14722  * struct rockchip_dp_chip_data - splite the grf setting of kind of chips
14723- * @lcdsel_grf_reg: grf register offset of lcdc select
14724- * @lcdsel_big: reg value of selecting vop big for eDP
14725- * @lcdsel_lit: reg value of selecting vop little for eDP
14726+ * @lcdc_sel: grf register field of lcdc_sel
14727+ * @spdif_sel: grf register field of spdif_sel
14728+ * @i2s_sel: grf register field of i2s_sel
14729+ * @edp_mode: grf register field of edp_mode
14730  * @chip_type: specific chip type
14731+ * @ssc: check if SSC is supported by source
14732+ * @audio: check if audio is supported by source
14733+ * @split_mode: check if split mode is supported
14734  */
14735 struct rockchip_dp_chip_data {
14736-	u32	lcdsel_grf_reg;
14737-	u32	lcdsel_big;
14738-	u32	lcdsel_lit;
14739+	const struct rockchip_grf_reg_field lcdc_sel;
14740+	const struct rockchip_grf_reg_field spdif_sel;
14741+	const struct rockchip_grf_reg_field i2s_sel;
14742+	const struct rockchip_grf_reg_field edp_mode;
14743 	u32	chip_type;
14744+	bool	ssc;
14745+	bool	audio;
14746+	bool	split_mode;
14747 };
14748 
14749 struct rockchip_dp_device {
14750 	struct drm_device        *drm_dev;
14751 	struct device            *dev;
14752 	struct drm_encoder       encoder;
14753+	struct drm_bridge	 *bridge;
14754 	struct drm_display_mode  mode;
14755 
14756-	struct clk               *pclk;
14757-	struct clk               *grfclk;
14758 	struct regmap            *grf;
14759 	struct reset_control     *rst;
14760+	struct reset_control     *apb_reset;
14761 
14762+	struct platform_device *audio_pdev;
14763 	const struct rockchip_dp_chip_data *data;
14764+	int id;
14765 
14766 	struct analogix_dp_device *adp;
14767 	struct analogix_dp_plat_data plat_data;
14768+	struct rockchip_drm_sub_dev sub_dev;
14769 };
14770 
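+/*
+ * Rockchip GRF registers use a hiword-mask scheme: the upper 16 bits of a
+ * write select which of the lower 16 data bits take effect, so individual
+ * fields can be updated without a read-modify-write cycle.
+ */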
14771+static int rockchip_grf_write(struct regmap *grf, unsigned int reg,
14772+			      unsigned int mask, unsigned int val)
14773+{
14774+	return regmap_write(grf, reg, (mask << 16) | (val & mask));
14775+}
14776+
14777+static int rockchip_grf_field_write(struct regmap *grf,
14778+				    const struct rockchip_grf_reg_field *field,
14779+				    unsigned int val)
14780+{
14781+	unsigned int mask;
14782+
14783+	if (!field->valid)
14784+		return 0;
14785+
14786+	mask = GENMASK(field->msb, field->lsb);
14787+	val <<= field->lsb;
14788+
14789+	return rockchip_grf_write(grf, field->reg, mask, val);
14790+}
14791+
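+/* route either the SPDIF or the I2S input to the eDP audio block via the GRF */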
14792+static int rockchip_dp_audio_hw_params(struct device *dev, void *data,
14793+				       struct hdmi_codec_daifmt *daifmt,
14794+				       struct hdmi_codec_params *params)
14795+{
14796+	struct rockchip_dp_device *dp = dev_get_drvdata(dev);
14797+
14798+	rockchip_grf_field_write(dp->grf, &dp->data->spdif_sel,
14799+				 daifmt->fmt == HDMI_SPDIF);
14800+	rockchip_grf_field_write(dp->grf, &dp->data->i2s_sel,
14801+				 daifmt->fmt == HDMI_I2S);
14802+
14803+	return analogix_dp_audio_hw_params(dp->adp, daifmt, params);
14804+}
14805+
14806+static void rockchip_dp_audio_shutdown(struct device *dev, void *data)
14807+{
14808+	struct rockchip_dp_device *dp = dev_get_drvdata(dev);
14809+
14810+	analogix_dp_audio_shutdown(dp->adp);
14811+
14812+	rockchip_grf_field_write(dp->grf, &dp->data->spdif_sel, 0);
14813+	rockchip_grf_field_write(dp->grf, &dp->data->i2s_sel, 0);
14814+}
14815+
14816+static int rockchip_dp_audio_startup(struct device *dev, void *data)
14817+{
14818+	struct rockchip_dp_device *dp = dev_get_drvdata(dev);
14819+
14820+	return analogix_dp_audio_startup(dp->adp);
14821+}
14822+
14823+static int rockchip_dp_audio_get_eld(struct device *dev, void *data,
14824+				     u8 *buf, size_t len)
14825+{
14826+	struct rockchip_dp_device *dp = dev_get_drvdata(dev);
14827+
14828+	return analogix_dp_audio_get_eld(dp->adp, buf, len);
14829+}
14830+
14831+static const struct hdmi_codec_ops rockchip_dp_audio_codec_ops = {
14832+	.hw_params = rockchip_dp_audio_hw_params,
14833+	.audio_startup = rockchip_dp_audio_startup,
14834+	.audio_shutdown = rockchip_dp_audio_shutdown,
14835+	.get_eld = rockchip_dp_audio_get_eld,
14836+};
14837+
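+/*
+ * In split (dual-channel) mode the two eDP controllers drive one panel
+ * together, so a probing instance has to find its sibling; match it by the
+ * alias id assigned in rockchip_dp_probe().
+ */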
14838+static int rockchip_dp_match_by_id(struct device *dev, const void *data)
14839+{
14840+	struct rockchip_dp_device *dp = dev_get_drvdata(dev);
14841+	const unsigned int *id = data;
14842+
14843+	return dp->id == *id;
14844+}
14845+
14846+static struct rockchip_dp_device *
14847+rockchip_dp_find_by_id(struct device_driver *drv, unsigned int id)
14848+{
14849+	struct device *dev;
14850+
14851+	dev = driver_find_device(drv, NULL, &id, rockchip_dp_match_by_id);
14852+	if (!dev)
14853+		return NULL;
14854+
14855+	return dev_get_drvdata(dev);
14856+}
14857+
14858 static int rockchip_dp_pre_init(struct rockchip_dp_device *dp)
14859 {
14860 	reset_control_assert(dp->rst);
14861 	usleep_range(10, 20);
14862 	reset_control_deassert(dp->rst);
14863 
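+	/* the APB reset is optional; the reset_control helpers ignore a NULL handle */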
14864+	reset_control_assert(dp->apb_reset);
14865+	usleep_range(10, 20);
14866+	reset_control_deassert(dp->apb_reset);
14867+
14868 	return 0;
14869 }
14870 
14871@@ -87,29 +197,20 @@ static int rockchip_dp_poweron_start(struct analogix_dp_plat_data *plat_data)
14872 	struct rockchip_dp_device *dp = to_dp(plat_data);
14873 	int ret;
14874 
14875-	ret = clk_prepare_enable(dp->pclk);
14876-	if (ret < 0) {
14877-		DRM_DEV_ERROR(dp->dev, "failed to enable pclk %d\n", ret);
14878-		return ret;
14879-	}
14880-
14881 	ret = rockchip_dp_pre_init(dp);
14882 	if (ret < 0) {
14883 		DRM_DEV_ERROR(dp->dev, "failed to dp pre init %d\n", ret);
14884-		clk_disable_unprepare(dp->pclk);
14885 		return ret;
14886 	}
14887 
14888-	return ret;
14889+	return rockchip_grf_field_write(dp->grf, &dp->data->edp_mode, 1);
14890 }
14891 
14892 static int rockchip_dp_powerdown(struct analogix_dp_plat_data *plat_data)
14893 {
14894 	struct rockchip_dp_device *dp = to_dp(plat_data);
14895 
14896-	clk_disable_unprepare(dp->pclk);
14897-
14898-	return 0;
14899+	return rockchip_grf_field_write(dp->grf, &dp->data->edp_mode, 0);
14900 }
14901 
14902 static int rockchip_dp_get_modes(struct analogix_dp_plat_data *plat_data,
14903@@ -129,6 +230,56 @@ static int rockchip_dp_get_modes(struct analogix_dp_plat_data *plat_data,
14904 	return 0;
14905 }
14906 
14907+static void rockchip_dp_loader_protect(struct drm_encoder *encoder, bool on)
14908+{
14909+	struct rockchip_dp_device *dp = to_dp(encoder);
14910+	struct analogix_dp_plat_data *plat_data = &dp->plat_data;
14911+
14912+	if (!on)
14913+		return;
14914+
14915+	if (plat_data->panel)
14916+		panel_simple_loader_protect(plat_data->panel);
14917+
14918+	analogix_dp_loader_protect(dp->adp);
14919+}
14920+
14921+static int rockchip_dp_bridge_attach(struct analogix_dp_plat_data *plat_data,
14922+				     struct drm_bridge *bridge,
14923+				     struct drm_connector *connector)
14924+{
14925+	struct rockchip_dp_device *dp = to_dp(plat_data);
14926+	struct rockchip_drm_sub_dev *sdev = &dp->sub_dev;
14927+	int ret;
14928+
14929+	if (dp->bridge) {
14930+		ret = drm_bridge_attach(&dp->encoder, dp->bridge, bridge, 0);
14931+		if (ret) {
14932+			DRM_ERROR("Failed to attach bridge to drm: %d\n", ret);
14933+			return ret;
14934+		}
14935+	}
14936+
14937+	if (connector) {
14938+		sdev->connector = connector;
14939+		sdev->of_node = dp->dev->of_node;
14940+		sdev->loader_protect = rockchip_dp_loader_protect;
14941+		rockchip_drm_register_sub_dev(sdev);
14942+	}
14943+
14944+	return 0;
14945+}
14946+
14947+static void rockchip_dp_bridge_detach(struct analogix_dp_plat_data *plat_data,
14948+				      struct drm_bridge *bridge)
14949+{
14950+	struct rockchip_dp_device *dp = to_dp(plat_data);
14951+	struct rockchip_drm_sub_dev *sdev = &dp->sub_dev;
14952+
14953+	if (sdev->connector)
14954+		rockchip_drm_unregister_sub_dev(sdev);
14955+}
14956+
14957 static bool
14958 rockchip_dp_drm_encoder_mode_fixup(struct drm_encoder *encoder,
14959 				   const struct drm_display_mode *mode,
14960@@ -170,7 +321,6 @@ static void rockchip_dp_drm_encoder_enable(struct drm_encoder *encoder,
14961 	struct drm_crtc *crtc;
14962 	struct drm_crtc_state *old_crtc_state;
14963 	int ret;
14964-	u32 val;
14965 
14966 	crtc = rockchip_dp_drm_get_new_crtc(encoder, state);
14967 	if (!crtc)
14968@@ -185,24 +335,11 @@ static void rockchip_dp_drm_encoder_enable(struct drm_encoder *encoder,
14969 	if (ret < 0)
14970 		return;
14971 
14972-	if (ret)
14973-		val = dp->data->lcdsel_lit;
14974-	else
14975-		val = dp->data->lcdsel_big;
14976-
14977 	DRM_DEV_DEBUG(dp->dev, "vop %s output to dp\n", (ret) ? "LIT" : "BIG");
14978 
14979-	ret = clk_prepare_enable(dp->grfclk);
14980-	if (ret < 0) {
14981-		DRM_DEV_ERROR(dp->dev, "failed to enable grfclk %d\n", ret);
14982-		return;
14983-	}
14984-
14985-	ret = regmap_write(dp->grf, dp->data->lcdsel_grf_reg, val);
14986+	ret = rockchip_grf_field_write(dp->grf, &dp->data->lcdc_sel, ret);
14987 	if (ret != 0)
14988 		DRM_DEV_ERROR(dp->dev, "Could not write to GRF: %d\n", ret);
14989-
14990-	clk_disable_unprepare(dp->grfclk);
14991 }
14992 
14993 static void rockchip_dp_drm_encoder_disable(struct drm_encoder *encoder,
14994@@ -233,9 +370,15 @@ rockchip_dp_drm_encoder_atomic_check(struct drm_encoder *encoder,
14995 				      struct drm_crtc_state *crtc_state,
14996 				      struct drm_connector_state *conn_state)
14997 {
14998+	struct rockchip_dp_device *dp = to_dp(encoder);
14999 	struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state);
15000 	struct drm_display_info *di = &conn_state->connector->display_info;
15001 
15002+	if (di->num_bus_formats)
15003+		s->bus_format = di->bus_formats[0];
15004+	else
15005+		s->bus_format = MEDIA_BUS_FMT_RGB888_1X24;
15006+
15007 	/*
15008 	 * The hardware IC designed that VOP must output the RGB10 video
15009 	 * format to eDP controller, and if eDP panel only support RGB8,
15010@@ -246,7 +389,18 @@ rockchip_dp_drm_encoder_atomic_check(struct drm_encoder *encoder,
15011 
15012 	s->output_mode = ROCKCHIP_OUT_MODE_AAAA;
15013 	s->output_type = DRM_MODE_CONNECTOR_eDP;
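+	/* in split mode both VOP eDP interfaces drive the panel as left/right halves */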
15014+	if (dp->plat_data.split_mode) {
15015+		s->output_flags |= ROCKCHIP_OUTPUT_DUAL_CHANNEL_LEFT_RIGHT_MODE;
15016+		s->output_flags |= dp->id ? ROCKCHIP_OUTPUT_DATA_SWAP : 0;
15017+		s->output_if |= VOP_OUTPUT_IF_eDP0 | VOP_OUTPUT_IF_eDP1;
15018+	} else {
15019+		s->output_if |= dp->id ? VOP_OUTPUT_IF_eDP1 : VOP_OUTPUT_IF_eDP0;
15020+	}
15021 	s->output_bpc = di->bpc;
15022+	s->bus_flags = di->bus_flags;
15023+	s->tv_state = &conn_state->tv;
15024+	s->eotf = HDMI_EOTF_TRADITIONAL_GAMMA_SDR;
15025+	s->color_space = V4L2_COLORSPACE_DEFAULT;
15026 
15027 	return 0;
15028 }
15029@@ -264,26 +418,12 @@ static int rockchip_dp_of_probe(struct rockchip_dp_device *dp)
15030 	struct device *dev = dp->dev;
15031 	struct device_node *np = dev->of_node;
15032 
15033-	dp->grf = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
15034-	if (IS_ERR(dp->grf)) {
15035-		DRM_DEV_ERROR(dev, "failed to get rockchip,grf property\n");
15036-		return PTR_ERR(dp->grf);
15037-	}
15038-
15039-	dp->grfclk = devm_clk_get(dev, "grf");
15040-	if (PTR_ERR(dp->grfclk) == -ENOENT) {
15041-		dp->grfclk = NULL;
15042-	} else if (PTR_ERR(dp->grfclk) == -EPROBE_DEFER) {
15043-		return -EPROBE_DEFER;
15044-	} else if (IS_ERR(dp->grfclk)) {
15045-		DRM_DEV_ERROR(dev, "failed to get grf clock\n");
15046-		return PTR_ERR(dp->grfclk);
15047-	}
15048-
15049-	dp->pclk = devm_clk_get(dev, "pclk");
15050-	if (IS_ERR(dp->pclk)) {
15051-		DRM_DEV_ERROR(dev, "failed to get pclk property\n");
15052-		return PTR_ERR(dp->pclk);
15053+	if (of_property_read_bool(np, "rockchip,grf")) {
15054+		dp->grf = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
15055+		if (IS_ERR(dp->grf)) {
15056+			DRM_DEV_ERROR(dev, "failed to get rockchip,grf\n");
15057+			return PTR_ERR(dp->grf);
15058+		}
15059 	}
15060 
15061 	dp->rst = devm_reset_control_get(dev, "dp");
15062@@ -292,6 +432,12 @@ static int rockchip_dp_of_probe(struct rockchip_dp_device *dp)
15063 		return PTR_ERR(dp->rst);
15064 	}
15065 
15066+	dp->apb_reset = devm_reset_control_get_optional(dev, "apb");
15067+	if (IS_ERR(dp->apb_reset)) {
15068+		DRM_DEV_ERROR(dev, "failed to get apb reset control\n");
15069+		return PTR_ERR(dp->apb_reset);
15070+	}
15071+
15072 	return 0;
15073 }
15074 
15075@@ -302,8 +448,8 @@ static int rockchip_dp_drm_create_encoder(struct rockchip_dp_device *dp)
15076 	struct device *dev = dp->dev;
15077 	int ret;
15078 
15079-	encoder->possible_crtcs = drm_of_find_possible_crtcs(drm_dev,
15080-							     dev->of_node);
15081+	encoder->possible_crtcs = rockchip_drm_of_find_possible_crtcs(drm_dev,
15082+								      dev->of_node);
15083 	DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);
15084 
15085 	ret = drm_simple_encoder_init(drm_dev, encoder,
15086@@ -327,19 +473,44 @@ static int rockchip_dp_bind(struct device *dev, struct device *master,
15087 
15088 	dp->drm_dev = drm_dev;
15089 
15090-	ret = rockchip_dp_drm_create_encoder(dp);
15091-	if (ret) {
15092-		DRM_ERROR("failed to create drm encoder\n");
15093-		return ret;
15094+	if (!dp->plat_data.left) {
15095+		ret = rockchip_dp_drm_create_encoder(dp);
15096+		if (ret) {
15097+			DRM_ERROR("failed to create drm encoder\n");
15098+			return ret;
15099+		}
15100+
15101+		dp->plat_data.encoder = &dp->encoder;
15102 	}
15103 
15104-	dp->plat_data.encoder = &dp->encoder;
15105+	if (dp->data->audio) {
15106+		struct hdmi_codec_pdata codec_data = {
15107+			.ops = &rockchip_dp_audio_codec_ops,
15108+			.spdif = 1,
15109+			.i2s = 1,
15110+			.max_i2s_channels = 2,
15111+		};
15112+
15113+		dp->audio_pdev =
15114+			platform_device_register_data(dev, HDMI_CODEC_DRV_NAME,
15115+						      PLATFORM_DEVID_AUTO,
15116+						      &codec_data,
15117+						      sizeof(codec_data));
15118+		if (IS_ERR(dp->audio_pdev)) {
15119+			ret = PTR_ERR(dp->audio_pdev);
15120+			goto err_cleanup_encoder;
15121+		}
15122+	}
15123 
15124 	ret = analogix_dp_bind(dp->adp, drm_dev);
15125 	if (ret)
15126-		goto err_cleanup_encoder;
15127+		goto err_unregister_audio_pdev;
15128 
15129 	return 0;
15130+
15131+err_unregister_audio_pdev:
15132+	if (dp->audio_pdev)
15133+		platform_device_unregister(dp->audio_pdev);
15134 err_cleanup_encoder:
15135 	dp->encoder.funcs->destroy(&dp->encoder);
15136 	return ret;
15137@@ -350,6 +521,8 @@ static void rockchip_dp_unbind(struct device *dev, struct device *master,
15138 {
15139 	struct rockchip_dp_device *dp = dev_get_drvdata(dev);
15140 
15141+	if (dp->audio_pdev)
15142+		platform_device_unregister(dp->audio_pdev);
15143 	analogix_dp_unbind(dp->adp);
15144 	dp->encoder.funcs->destroy(&dp->encoder);
15145 }
15146@@ -364,29 +537,51 @@ static int rockchip_dp_probe(struct platform_device *pdev)
15147 	struct device *dev = &pdev->dev;
15148 	const struct rockchip_dp_chip_data *dp_data;
15149 	struct drm_panel *panel = NULL;
15150+	struct drm_bridge *bridge = NULL;
15151 	struct rockchip_dp_device *dp;
15152-	int ret;
15153+	int id, i, ret;
15154 
15155 	dp_data = of_device_get_match_data(dev);
15156 	if (!dp_data)
15157 		return -ENODEV;
15158 
15159-	ret = drm_of_find_panel_or_bridge(dev->of_node, 1, 0, &panel, NULL);
15160-	if (ret < 0)
15161+	ret = drm_of_find_panel_or_bridge(dev->of_node, 1, 0, &panel, &bridge);
15162+	if (ret < 0 && ret != -ENODEV)
15163 		return ret;
15164 
15165 	dp = devm_kzalloc(dev, sizeof(*dp), GFP_KERNEL);
15166 	if (!dp)
15167 		return -ENOMEM;
15168 
15169+	id = of_alias_get_id(dev->of_node, "edp");
15170+	if (id < 0)
15171+		id = 0;
15172+
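+	/* count the per-controller chip-data entries up to the sentinel */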
15173+	i = 0;
15174+	while (is_rockchip(dp_data[i].chip_type))
15175+		i++;
15176+
15177+	if (id >= i) {
15178+		dev_err(dev, "invalid id: %d\n", id);
15179+		return -ENODEV;
15180+	}
15181+
15182 	dp->dev = dev;
15183+	dp->id = id;
15184 	dp->adp = ERR_PTR(-ENODEV);
15185-	dp->data = dp_data;
15186+	dp->data = &dp_data[id];
15187+	dp->plat_data.ssc = dp->data->ssc;
15188 	dp->plat_data.panel = panel;
15189 	dp->plat_data.dev_type = dp->data->chip_type;
15190 	dp->plat_data.power_on_start = rockchip_dp_poweron_start;
15191 	dp->plat_data.power_off = rockchip_dp_powerdown;
15192 	dp->plat_data.get_modes = rockchip_dp_get_modes;
15193+	dp->plat_data.attach = rockchip_dp_bridge_attach;
15194+	dp->plat_data.detach = rockchip_dp_bridge_detach;
15195+	dp->plat_data.convert_to_split_mode = drm_mode_convert_to_split_mode;
15196+	dp->plat_data.convert_to_origin_mode = drm_mode_convert_to_origin_mode;
15197+	dp->plat_data.skip_connector = !!bridge;
15198+	dp->bridge = bridge;
15199 
15200 	ret = rockchip_dp_of_probe(dp);
15201 	if (ret < 0)
15202@@ -398,15 +593,19 @@ static int rockchip_dp_probe(struct platform_device *pdev)
15203 	if (IS_ERR(dp->adp))
15204 		return PTR_ERR(dp->adp);
15205 
15206-	ret = component_add(dev, &rockchip_dp_component_ops);
15207-	if (ret)
15208-		goto err_dp_remove;
15209+	if (dp->data->split_mode && device_property_read_bool(dev, "split-mode")) {
15210+		struct rockchip_dp_device *secondary =
15211+				rockchip_dp_find_by_id(dev->driver, !dp->id);
15212+		if (!secondary)
15213+			return -EPROBE_DEFER;
15214 
15215-	return 0;
15216+		dp->plat_data.right = secondary->adp;
15217+		dp->plat_data.split_mode = true;
15218+		secondary->plat_data.left = dp->adp;
15219+		secondary->plat_data.split_mode = true;
15220+	}
15221 
15222-err_dp_remove:
15223-	analogix_dp_remove(dp->adp);
15224-	return ret;
15225+	return component_add(dev, &rockchip_dp_component_ops);
15226 }
15227 
15228 static int rockchip_dp_remove(struct platform_device *pdev)
15229@@ -419,52 +618,85 @@ static int rockchip_dp_remove(struct platform_device *pdev)
15230 	return 0;
15231 }
15232 
15233-#ifdef CONFIG_PM_SLEEP
15234-static int rockchip_dp_suspend(struct device *dev)
15235+static __maybe_unused int rockchip_dp_runtime_suspend(struct device *dev)
15236 {
15237 	struct rockchip_dp_device *dp = dev_get_drvdata(dev);
15238 
15239 	if (IS_ERR(dp->adp))
15240 		return 0;
15241 
15242-	return analogix_dp_suspend(dp->adp);
15243+	return analogix_dp_runtime_suspend(dp->adp);
15244 }
15245 
15246-static int rockchip_dp_resume(struct device *dev)
15247+static __maybe_unused int rockchip_dp_runtime_resume(struct device *dev)
15248 {
15249 	struct rockchip_dp_device *dp = dev_get_drvdata(dev);
15250 
15251 	if (IS_ERR(dp->adp))
15252 		return 0;
15253 
15254-	return analogix_dp_resume(dp->adp);
15255+	return analogix_dp_runtime_resume(dp->adp);
15256 }
15257-#endif
15258 
15259 static const struct dev_pm_ops rockchip_dp_pm_ops = {
15260-#ifdef CONFIG_PM_SLEEP
15261-	.suspend_late = rockchip_dp_suspend,
15262-	.resume_early = rockchip_dp_resume,
15263-#endif
15264+	SET_RUNTIME_PM_OPS(rockchip_dp_runtime_suspend,
15265+			   rockchip_dp_runtime_resume, NULL)
15266 };
15267 
15268-static const struct rockchip_dp_chip_data rk3399_edp = {
15269-	.lcdsel_grf_reg = RK3399_GRF_SOC_CON20,
15270-	.lcdsel_big = HIWORD_UPDATE(0, RK3399_EDP_LCDC_SEL),
15271-	.lcdsel_lit = HIWORD_UPDATE(RK3399_EDP_LCDC_SEL, RK3399_EDP_LCDC_SEL),
15272-	.chip_type = RK3399_EDP,
15273+static const struct rockchip_dp_chip_data rk3399_edp[] = {
15274+	{
15275+		.chip_type = RK3399_EDP,
15276+		.lcdc_sel = GRF_REG_FIELD(0x6250, 5, 5),
15277+		.ssc = true,
15278+	},
15279+	{ /* sentinel */ }
15280 };
15281 
15282-static const struct rockchip_dp_chip_data rk3288_dp = {
15283-	.lcdsel_grf_reg = RK3288_GRF_SOC_CON6,
15284-	.lcdsel_big = HIWORD_UPDATE(0, RK3288_EDP_LCDC_SEL),
15285-	.lcdsel_lit = HIWORD_UPDATE(RK3288_EDP_LCDC_SEL, RK3288_EDP_LCDC_SEL),
15286-	.chip_type = RK3288_DP,
15287+static const struct rockchip_dp_chip_data rk3288_dp[] = {
15288+	{
15289+		.chip_type = RK3288_DP,
15290+		.lcdc_sel = GRF_REG_FIELD(0x025c, 5, 5),
15291+		.ssc = true,
15292+	},
15293+	{ /* sentinel */ }
15294+};
15295+
15296+static const struct rockchip_dp_chip_data rk3568_edp[] = {
15297+	{
15298+		.chip_type = RK3568_EDP,
15299+		.ssc = true,
15300+		.audio = true,
15301+	},
15302+	{ /* sentinel */ }
15303+};
15304+
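+/* rk3588 has two eDP controllers: one chip-data entry each, indexed by alias id */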
15305+static const struct rockchip_dp_chip_data rk3588_edp[] = {
15306+	{
15307+		.chip_type = RK3588_EDP,
15308+		.spdif_sel = GRF_REG_FIELD(0x0000, 4, 4),
15309+		.i2s_sel = GRF_REG_FIELD(0x0000, 3, 3),
15310+		.edp_mode = GRF_REG_FIELD(0x0000, 0, 0),
15311+		.ssc = true,
15312+		.audio = true,
15313+		.split_mode = true,
15314+	},
15315+	{
15316+		.chip_type = RK3588_EDP,
15317+		.spdif_sel = GRF_REG_FIELD(0x0004, 4, 4),
15318+		.i2s_sel = GRF_REG_FIELD(0x0004, 3, 3),
15319+		.edp_mode = GRF_REG_FIELD(0x0004, 0, 0),
15320+		.ssc = true,
15321+		.audio = true,
15322+		.split_mode = true,
15323+	},
15324+	{ /* sentinel */ }
15325 };
15326 
15327 static const struct of_device_id rockchip_dp_dt_ids[] = {
15328 	{.compatible = "rockchip,rk3288-dp", .data = &rk3288_dp },
15329 	{.compatible = "rockchip,rk3399-edp", .data = &rk3399_edp },
15330+	{.compatible = "rockchip,rk3568-edp", .data = &rk3568_edp },
15331+	{.compatible = "rockchip,rk3588-edp", .data = &rk3588_edp },
15332 	{}
15333 };
15334 MODULE_DEVICE_TABLE(of, rockchip_dp_dt_ids);
15335diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c
15336index adeaa0140..d7cc434fa 100644
15337--- a/drivers/gpu/drm/rockchip/cdn-dp-core.c
15338+++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c
15339@@ -6,7 +6,6 @@
15340 
15341 #include <linux/clk.h>
15342 #include <linux/component.h>
15343-#include <linux/extcon.h>
15344 #include <linux/firmware.h>
15345 #include <linux/mfd/syscon.h>
15346 #include <linux/phy/phy.h>
15347@@ -143,24 +142,7 @@ static void cdn_dp_clk_disable(struct cdn_dp_device *dp)
15348 
15349 static int cdn_dp_get_port_lanes(struct cdn_dp_port *port)
15350 {
15351-	struct extcon_dev *edev = port->extcon;
15352-	union extcon_property_value property;
15353-	int dptx;
15354-	u8 lanes;
15355-
15356-	dptx = extcon_get_state(edev, EXTCON_DISP_DP);
15357-	if (dptx > 0) {
15358-		extcon_get_property(edev, EXTCON_DISP_DP,
15359-				    EXTCON_PROP_USB_SS, &property);
15360-		if (property.intval)
15361-			lanes = 2;
15362-		else
15363-			lanes = 4;
15364-	} else {
15365-		lanes = 0;
15366-	}
15367-
15368-	return lanes;
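+	/* the number of usable DP lanes now comes from the PHY's reported bus width */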
15369+	return phy_get_bus_width(port->phy);
15370 }
15371 
15372 static int cdn_dp_get_sink_count(struct cdn_dp_device *dp, u8 *sink_count)
15373@@ -194,7 +176,6 @@ static struct cdn_dp_port *cdn_dp_connected_port(struct cdn_dp_device *dp)
15374 static bool cdn_dp_check_sink_connection(struct cdn_dp_device *dp)
15375 {
15376 	unsigned long timeout = jiffies + msecs_to_jiffies(CDN_DPCD_TIMEOUT_MS);
15377-	struct cdn_dp_port *port;
15378 	u8 sink_count = 0;
15379 
15380 	if (dp->active_port < 0 || dp->active_port >= dp->ports) {
15381@@ -202,8 +183,6 @@ static bool cdn_dp_check_sink_connection(struct cdn_dp_device *dp)
15382 		return false;
15383 	}
15384 
15385-	port = dp->port[dp->active_port];
15386-
15387 	/*
15388 	 * Attempt to read sink count, retry in case the sink may not be ready.
15389 	 *
15390@@ -211,9 +190,6 @@ static bool cdn_dp_check_sink_connection(struct cdn_dp_device *dp)
15391 	 * some docks need more time to power up.
15392 	 */
15393 	while (time_before(jiffies, timeout)) {
15394-		if (!extcon_get_state(port->extcon, EXTCON_DISP_DP))
15395-			return false;
15396-
15397 		if (!cdn_dp_get_sink_count(dp, &sink_count))
15398 			return sink_count ? true : false;
15399 
15400@@ -244,6 +220,13 @@ static void cdn_dp_connector_destroy(struct drm_connector *connector)
15401 	drm_connector_cleanup(connector);
15402 }
15403 
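+/*
+ * Out-of-band hotplug replaces the old extcon notifier: just kick the
+ * event worker, with a short delay to debounce the notification.
+ */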
15404+static void cdn_dp_oob_hotplug_event(struct drm_connector *connector)
15405+{
15406+	struct cdn_dp_device *dp = connector_to_dp(connector);
15407+
15408+	schedule_delayed_work(&dp->event_work, msecs_to_jiffies(100));
15409+}
15410+
15411 static const struct drm_connector_funcs cdn_dp_atomic_connector_funcs = {
15412 	.detect = cdn_dp_connector_detect,
15413 	.destroy = cdn_dp_connector_destroy,
15414@@ -383,7 +366,6 @@ static int cdn_dp_get_sink_capability(struct cdn_dp_device *dp)
15415 
15416 static int cdn_dp_enable_phy(struct cdn_dp_device *dp, struct cdn_dp_port *port)
15417 {
15418-	union extcon_property_value property;
15419 	int ret;
15420 
15421 	if (!port->phy_enabled) {
15422@@ -410,15 +392,8 @@ static int cdn_dp_enable_phy(struct cdn_dp_device *dp, struct cdn_dp_port *port)
15423 		goto err_power_on;
15424 	}
15425 
15426-	ret = extcon_get_property(port->extcon, EXTCON_DISP_DP,
15427-				  EXTCON_PROP_USB_TYPEC_POLARITY, &property);
15428-	if (ret) {
15429-		DRM_DEV_ERROR(dp->dev, "get property failed\n");
15430-		goto err_power_on;
15431-	}
15432-
15433 	port->lanes = cdn_dp_get_port_lanes(port);
15434-	ret = cdn_dp_set_host_cap(dp, port->lanes, property.intval);
15435+	ret = cdn_dp_set_host_cap(dp, port->lanes, 0);
15436 	if (ret) {
15437 		DRM_DEV_ERROR(dp->dev, "set host capabilities failed: %d\n",
15438 			      ret);
15439@@ -670,7 +645,7 @@ static void cdn_dp_encoder_disable(struct drm_encoder *encoder)
15440 	 *    run the event_work to re-connect it.
15441 	 */
15442 	if (!dp->connected && cdn_dp_connected_port(dp))
15443-		schedule_work(&dp->event_work);
15444+		schedule_delayed_work(&dp->event_work, 0);
15445 }
15446 
15447 static int cdn_dp_encoder_atomic_check(struct drm_encoder *encoder,
15448@@ -681,6 +656,7 @@ static int cdn_dp_encoder_atomic_check(struct drm_encoder *encoder,
15449 
15450 	s->output_mode = ROCKCHIP_OUT_MODE_AAAA;
15451 	s->output_type = DRM_MODE_CONNECTOR_DisplayPort;
15452+	s->tv_state = &conn_state->tv;
15453 
15454 	return 0;
15455 }
15456@@ -913,7 +889,7 @@ static int cdn_dp_request_firmware(struct cdn_dp_device *dp)
15457 
15458 static void cdn_dp_pd_event_work(struct work_struct *work)
15459 {
15460-	struct cdn_dp_device *dp = container_of(work, struct cdn_dp_device,
15461+	struct cdn_dp_device *dp = container_of(to_delayed_work(work), struct cdn_dp_device,
15462 						event_work);
15463 	struct drm_connector *connector = &dp->connector;
15464 	enum drm_connector_status old_status;
15465@@ -986,31 +962,13 @@ static void cdn_dp_pd_event_work(struct work_struct *work)
15466 		drm_kms_helper_hotplug_event(dp->drm_dev);
15467 }
15468 
15469-static int cdn_dp_pd_event(struct notifier_block *nb,
15470-			   unsigned long event, void *priv)
15471-{
15472-	struct cdn_dp_port *port = container_of(nb, struct cdn_dp_port,
15473-						event_nb);
15474-	struct cdn_dp_device *dp = port->dp;
15475-
15476-	/*
15477-	 * It would be nice to be able to just do the work inline right here.
15478-	 * However, we need to make a bunch of calls that might sleep in order
15479-	 * to turn on the block/phy, so use a worker instead.
15480-	 */
15481-	schedule_work(&dp->event_work);
15482-
15483-	return NOTIFY_DONE;
15484-}
15485-
15486 static int cdn_dp_bind(struct device *dev, struct device *master, void *data)
15487 {
15488 	struct cdn_dp_device *dp = dev_get_drvdata(dev);
15489 	struct drm_encoder *encoder;
15490 	struct drm_connector *connector;
15491-	struct cdn_dp_port *port;
15492 	struct drm_device *drm_dev = data;
15493-	int ret, i;
15494+	int ret;
15495 
15496 	ret = cdn_dp_parse_dt(dp);
15497 	if (ret < 0)
15498@@ -1022,12 +980,12 @@ static int cdn_dp_bind(struct device *dev, struct device *master, void *data)
15499 	dp->active_port = -1;
15500 	dp->fw_loaded = false;
15501 
15502-	INIT_WORK(&dp->event_work, cdn_dp_pd_event_work);
15503+	INIT_DELAYED_WORK(&dp->event_work, cdn_dp_pd_event_work);
15504 
15505 	encoder = &dp->encoder;
15506 
15507-	encoder->possible_crtcs = drm_of_find_possible_crtcs(drm_dev,
15508-							     dev->of_node);
15509+	encoder->possible_crtcs = rockchip_drm_of_find_possible_crtcs(drm_dev,
15510+								      dev->of_node);
15511 	DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);
15512 
15513 	ret = drm_simple_encoder_init(drm_dev, encoder,
15514@@ -1059,23 +1017,14 @@ static int cdn_dp_bind(struct device *dev, struct device *master, void *data)
15515 		goto err_free_connector;
15516 	}
15517 
15518-	for (i = 0; i < dp->ports; i++) {
15519-		port = dp->port[i];
15520-
15521-		port->event_nb.notifier_call = cdn_dp_pd_event;
15522-		ret = devm_extcon_register_notifier(dp->dev, port->extcon,
15523-						    EXTCON_DISP_DP,
15524-						    &port->event_nb);
15525-		if (ret) {
15526-			DRM_DEV_ERROR(dev,
15527-				      "register EXTCON_DISP_DP notifier err\n");
15528-			goto err_free_connector;
15529-		}
15530-	}
15531+	dp->sub_dev.connector = &dp->connector;
15532+	dp->sub_dev.of_node = dev->of_node;
15533+	dp->sub_dev.oob_hotplug_event = cdn_dp_oob_hotplug_event;
15534+	rockchip_drm_register_sub_dev(&dp->sub_dev);
15535 
15536 	pm_runtime_enable(dev);
15537 
15538-	schedule_work(&dp->event_work);
15539+	schedule_delayed_work(&dp->event_work, 0);
15540 
15541 	return 0;
15542 
15543@@ -1092,7 +1041,7 @@ static void cdn_dp_unbind(struct device *dev, struct device *master, void *data)
15544 	struct drm_encoder *encoder = &dp->encoder;
15545 	struct drm_connector *connector = &dp->connector;
15546 
15547-	cancel_work_sync(&dp->event_work);
15548+	cancel_delayed_work_sync(&dp->event_work);
15549 	cdn_dp_encoder_disable(encoder);
15550 	encoder->funcs->destroy(encoder);
15551 	connector->funcs->destroy(connector);
15552@@ -1123,14 +1072,14 @@ static int cdn_dp_suspend(struct device *dev)
15553 	return ret;
15554 }
15555 
15556-static __maybe_unused int cdn_dp_resume(struct device *dev)
15557+static int cdn_dp_resume(struct device *dev)
15558 {
15559 	struct cdn_dp_device *dp = dev_get_drvdata(dev);
15560 
15561 	mutex_lock(&dp->lock);
15562 	dp->suspended = false;
15563 	if (dp->fw_loaded)
15564-		schedule_work(&dp->event_work);
15565+		schedule_delayed_work(&dp->event_work, 0);
15566 	mutex_unlock(&dp->lock);
15567 
15568 	return 0;
15569@@ -1143,7 +1092,6 @@ static int cdn_dp_probe(struct platform_device *pdev)
15570 	struct cdn_dp_data *dp_data;
15571 	struct cdn_dp_port *port;
15572 	struct cdn_dp_device *dp;
15573-	struct extcon_dev *extcon;
15574 	struct phy *phy;
15575 	int i;
15576 
15577@@ -1156,21 +1104,18 @@ static int cdn_dp_probe(struct platform_device *pdev)
15578 	dp_data = (struct cdn_dp_data *)match->data;
15579 
15580 	for (i = 0; i < dp_data->max_phy; i++) {
15581-		extcon = extcon_get_edev_by_phandle(dev, i);
15582 		phy = devm_of_phy_get_by_index(dev, dev->of_node, i);
15583 
15584-		if (PTR_ERR(extcon) == -EPROBE_DEFER ||
15585-		    PTR_ERR(phy) == -EPROBE_DEFER)
15586+		if (PTR_ERR(phy) == -EPROBE_DEFER)
15587 			return -EPROBE_DEFER;
15588 
15589-		if (IS_ERR(extcon) || IS_ERR(phy))
15590+		if (IS_ERR(phy))
15591 			continue;
15592 
15593 		port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
15594 		if (!port)
15595 			return -ENOMEM;
15596 
15597-		port->extcon = extcon;
15598 		port->phy = phy;
15599 		port->dp = dp;
15600 		port->id = i;
15601@@ -1178,7 +1123,7 @@ static int cdn_dp_probe(struct platform_device *pdev)
15602 	}
15603 
15604 	if (!dp->ports) {
15605-		DRM_DEV_ERROR(dev, "missing extcon or phy\n");
15606+		DRM_DEV_ERROR(dev, "missing phy\n");
15607 		return -EINVAL;
15608 	}
15609 
15610diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.h b/drivers/gpu/drm/rockchip/cdn-dp-core.h
15611index 81ac9b658..519900c67 100644
15612--- a/drivers/gpu/drm/rockchip/cdn-dp-core.h
15613+++ b/drivers/gpu/drm/rockchip/cdn-dp-core.h
15614@@ -53,8 +53,6 @@ struct cdn_firmware_header {
15615 
15616 struct cdn_dp_port {
15617 	struct cdn_dp_device *dp;
15618-	struct notifier_block event_nb;
15619-	struct extcon_dev *extcon;
15620 	struct phy *phy;
15621 	u8 lanes;
15622 	bool phy_enabled;
15623@@ -68,8 +66,9 @@ struct cdn_dp_device {
15624 	struct drm_encoder encoder;
15625 	struct drm_display_mode mode;
15626 	struct platform_device *audio_pdev;
15627-	struct work_struct event_work;
15628+	struct delayed_work event_work;
15629 	struct edid *edid;
15630+	struct rockchip_drm_sub_dev sub_dev;
15631 
15632 	struct mutex lock;
15633 	bool connected;
15634diff --git a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
15635index 82cbfccba..10dea26b0 100644
15636--- a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
15637+++ b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
15638@@ -16,8 +16,9 @@
15639 #include <linux/pm_runtime.h>
15640 #include <linux/regmap.h>
15641 
15642+#include <drm/drm_dsc.h>
15643 #include <video/mipi_display.h>
15644-
15645+#include <uapi/linux/videodev2.h>
15646 #include <drm/bridge/dw_mipi_dsi.h>
15647 #include <drm/drm_mipi_dsi.h>
15648 #include <drm/drm_of.h>
15649@@ -140,6 +141,7 @@
15650 
15651 #define DW_MIPI_NEEDS_PHY_CFG_CLK	BIT(0)
15652 #define DW_MIPI_NEEDS_GRF_CLK		BIT(1)
15653+#define DW_MIPI_NEEDS_HCLK		BIT(2)
15654 
15655 #define PX30_GRF_PD_VO_CON1		0x0438
15656 #define PX30_DSI_FORCETXSTOPMODE	(0xf << 7)
15657@@ -172,6 +174,12 @@
15658 #define RK3399_TXRX_ENABLECLK		BIT(6)
15659 #define RK3399_TXRX_BASEDIR		BIT(5)
15660 
15661+#define RK3568_GRF_VO_CON2		0x0368
15662+#define RK3568_GRF_VO_CON3		0x036c
15663+#define RK3568_DSI_FORCETXSTOPMODE	(0xf << 4)
15664+#define RK3568_DSI_TURNDISABLE		(0x1 << 2)
15665+#define RK3568_DSI_FORCERXMODE		(0x1 << 0)
15666+
15667 #define HIWORD_UPDATE(val, mask)	(val | (mask) << 16)
15668 
15669 #define to_dsi(nm)	container_of(nm, struct dw_mipi_dsi_rockchip, nm)
15670@@ -198,6 +206,19 @@ enum {
15671 	BIASEXTR_127_7,
15672 };
15673 
15674+enum soc_type {
15675+	PX30,
15676+	RK3288,
15677+	RK3399,
15678+	RK3568,
15679+};
15680+
15681+struct cmd_header {
15682+	u8 cmd_type;
15683+	u8 delay;
15684+	u8 payload_length;
15685+};
15686+
15687 struct rockchip_dw_dsi_chip_data {
15688 	u32 reg;
15689 
15690@@ -213,25 +234,42 @@ struct rockchip_dw_dsi_chip_data {
15691 	u32 lanecfg2_grf_reg;
15692 	u32 lanecfg2;
15693 
15694+	enum soc_type soc_type;
15695 	unsigned int flags;
15696 	unsigned int max_data_lanes;
15697+	unsigned long max_bit_rate_per_lane;
15698 };
15699 
15700 struct dw_mipi_dsi_rockchip {
15701 	struct device *dev;
15702 	struct drm_encoder encoder;
15703 	void __iomem *base;
15704-
15705+	int id;
15706+
15707+	bool c_option;
15708+	bool scrambling_en;
15709+	unsigned int slice_width;
15710+	unsigned int slice_height;
15711+	unsigned int slice_per_pkt;
15712+	bool block_pred_enable;
15713+	bool dsc_enable;
15714+	u8 version_major;
15715+	u8 version_minor;
15716+
15717+	struct drm_dsc_picture_parameter_set *pps;
15718 	struct regmap *grf_regmap;
15719 	struct clk *pllref_clk;
15720+	struct clk *pclk;
15721 	struct clk *grf_clk;
15722 	struct clk *phy_cfg_clk;
15723+	struct clk *hclk;
15724 
15725 	/* dual-channel */
15726 	bool is_slave;
15727 	struct dw_mipi_dsi_rockchip *slave;
15728 
15729 	/* optional external dphy */
15730+	bool phy_enabled;
15731 	struct phy *phy;
15732 	union phy_configure_opts phy_opts;
15733 
15734@@ -243,6 +281,9 @@ struct dw_mipi_dsi_rockchip {
15735 	struct dw_mipi_dsi *dmd;
15736 	const struct rockchip_dw_dsi_chip_data *cdata;
15737 	struct dw_mipi_dsi_plat_data pdata;
15738+	int devcnt;
15739+	struct rockchip_drm_sub_dev sub_dev;
15740+	struct drm_panel *panel;
15741 };
15742 
15743 struct dphy_pll_parameter_map {
15744@@ -365,10 +406,27 @@ static inline unsigned int ns2ui(struct dw_mipi_dsi_rockchip *dsi, int ns)
15745 	return DIV_ROUND_UP(ns * dsi->lane_mbps, 1000);
15746 }
15747 
15748+static void dw_mipi_dsi_phy_tx_config(struct dw_mipi_dsi_rockchip *dsi)
15749+{
15750+	if (dsi->cdata->lanecfg1_grf_reg)
15751+		regmap_write(dsi->grf_regmap, dsi->cdata->lanecfg1_grf_reg,
15752+					      dsi->cdata->lanecfg1);
15753+
15754+	if (dsi->cdata->lanecfg2_grf_reg)
15755+		regmap_write(dsi->grf_regmap, dsi->cdata->lanecfg2_grf_reg,
15756+					      dsi->cdata->lanecfg2);
15757+
15758+	if (dsi->cdata->enable_grf_reg)
15759+		regmap_write(dsi->grf_regmap, dsi->cdata->enable_grf_reg,
15760+					      dsi->cdata->enable);
15761+}
15762+
15763 static int dw_mipi_dsi_phy_init(void *priv_data)
15764 {
15765 	struct dw_mipi_dsi_rockchip *dsi = priv_data;
15766-	int ret, i, vco;
15767+	int i, vco;
15768+
15769+	dw_mipi_dsi_phy_tx_config(dsi);
15770 
15771 	if (dsi->phy)
15772 		return 0;
15773@@ -395,12 +453,6 @@ static int dw_mipi_dsi_phy_init(void *priv_data)
15774 		return i;
15775 	}
15776 
15777-	ret = clk_prepare_enable(dsi->phy_cfg_clk);
15778-	if (ret) {
15779-		DRM_DEV_ERROR(dsi->dev, "Failed to enable phy_cfg_clk\n");
15780-		return ret;
15781-	}
15782-
15783 	dw_mipi_dsi_phy_write(dsi, PLL_BIAS_CUR_SEL_CAP_VCO_CONTROL,
15784 			      BYPASS_VCO_RANGE |
15785 			      VCO_RANGE_CON_SEL(vco) |
15786@@ -453,7 +505,7 @@ static int dw_mipi_dsi_phy_init(void *priv_data)
15787 			      TER_RESISTORS_ON);
15788 
15789 	dw_mipi_dsi_phy_write(dsi, HS_TX_CLOCK_LANE_REQUEST_STATE_TIME_CONTROL,
15790-			      TLP_PROGRAM_EN | ns2bc(dsi, 500));
15791+			      TLP_PROGRAM_EN | ns2bc(dsi, 60));
15792 	dw_mipi_dsi_phy_write(dsi, HS_TX_CLOCK_LANE_PREPARE_STATE_TIME_CONTROL,
15793 			      THS_PRE_PROGRAM_EN | ns2ui(dsi, 40));
15794 	dw_mipi_dsi_phy_write(dsi, HS_TX_CLOCK_LANE_HS_ZERO_STATE_TIME_CONTROL,
15795@@ -466,7 +518,7 @@ static int dw_mipi_dsi_phy_init(void *priv_data)
15796 			      BIT(5) | (ns2bc(dsi, 60) + 7));
15797 
15798 	dw_mipi_dsi_phy_write(dsi, HS_TX_DATA_LANE_REQUEST_STATE_TIME_CONTROL,
15799-			      TLP_PROGRAM_EN | ns2bc(dsi, 500));
15800+			      TLP_PROGRAM_EN | ns2bc(dsi, 60));
15801 	dw_mipi_dsi_phy_write(dsi, HS_TX_DATA_LANE_PREPARE_STATE_TIME_CONTROL,
15802 			      THS_PRE_PROGRAM_EN | (ns2ui(dsi, 50) + 20));
15803 	dw_mipi_dsi_phy_write(dsi, HS_TX_DATA_LANE_HS_ZERO_STATE_TIME_CONTROL,
15804@@ -476,31 +528,29 @@ static int dw_mipi_dsi_phy_init(void *priv_data)
15805 	dw_mipi_dsi_phy_write(dsi, HS_TX_DATA_LANE_EXIT_STATE_TIME_CONTROL,
15806 			      BIT(5) | ns2bc(dsi, 100));
15807 
15808-	clk_disable_unprepare(dsi->phy_cfg_clk);
15809-
15810-	return ret;
15811+	return 0;
15812 }
15813 
15814 static void dw_mipi_dsi_phy_power_on(void *priv_data)
15815 {
15816 	struct dw_mipi_dsi_rockchip *dsi = priv_data;
15817-	int ret;
15818 
15819-	ret = phy_set_mode(dsi->phy, PHY_MODE_MIPI_DPHY);
15820-	if (ret) {
15821-		DRM_DEV_ERROR(dsi->dev, "failed to set phy mode: %d\n", ret);
15822+	if (dsi->phy_enabled)
15823 		return;
15824-	}
15825 
15826-	phy_configure(dsi->phy, &dsi->phy_opts);
15827 	phy_power_on(dsi->phy);
15828+	dsi->phy_enabled = true;
15829 }
15830 
15831 static void dw_mipi_dsi_phy_power_off(void *priv_data)
15832 {
15833 	struct dw_mipi_dsi_rockchip *dsi = priv_data;
15834 
15835+	if (!dsi->phy_enabled)
15836+		return;
15837+
15838 	phy_power_off(dsi->phy);
15839+	dsi->phy_enabled = false;
15840 }
15841 
15842 static int
15843@@ -509,17 +559,22 @@ dw_mipi_dsi_get_lane_mbps(void *priv_data, const struct drm_display_mode *mode,
15844 			  unsigned int *lane_mbps)
15845 {
15846 	struct dw_mipi_dsi_rockchip *dsi = priv_data;
15847+	struct device *dev = dsi->dev;
15848 	int bpp;
15849 	unsigned long mpclk, tmp;
15850 	unsigned int target_mbps = 1000;
15851-	unsigned int max_mbps = dppa_map[ARRAY_SIZE(dppa_map) - 1].max_mbps;
15852+	unsigned int max_mbps;
15853 	unsigned long best_freq = 0;
15854 	unsigned long fvco_min, fvco_max, fin, fout;
15855 	unsigned int min_prediv, max_prediv;
15856 	unsigned int _prediv, best_prediv;
15857 	unsigned long _fbdiv, best_fbdiv;
15858 	unsigned long min_delta = ULONG_MAX;
15859+	unsigned long target_pclk, hs_clk_rate;
15860+	unsigned int value;
15861+	int ret;
15862 
15863+	max_mbps = dsi->cdata->max_bit_rate_per_lane / USEC_PER_SEC;
15864 	dsi->format = format;
15865 	bpp = mipi_dsi_pixel_format_to_bpp(dsi->format);
15866 	if (bpp < 0) {
15867@@ -529,23 +584,40 @@ dw_mipi_dsi_get_lane_mbps(void *priv_data, const struct drm_display_mode *mode,
15868 		return bpp;
15869 	}
15870 
15871-	mpclk = DIV_ROUND_UP(mode->clock, MSEC_PER_SEC);
15872-	if (mpclk) {
15873-		/* take 1 / 0.8, since mbps must big than bandwidth of RGB */
15874-		tmp = mpclk * (bpp / lanes) * 10 / 8;
15875-		if (tmp < max_mbps)
15876-			target_mbps = tmp;
15877-		else
15878-			DRM_DEV_ERROR(dsi->dev,
15879-				      "DPHY clock frequency is out of range\n");
15880+	/* optional override of the desired bandwidth */
15881+	if (!of_property_read_u32(dev->of_node, "rockchip,lane-rate", &value)) {
15882+		target_mbps = value;
15883+	} else {
15884+		mpclk = DIV_ROUND_UP(mode->clock, MSEC_PER_SEC);
15885+		if (mpclk) {
15886+			/* take 1 / 0.9, since the lane rate must be higher than the RGB bandwidth */
15887+			tmp = mpclk * (bpp / lanes) * 10 / 9;
15888+			if (tmp < max_mbps)
15889+				target_mbps = tmp;
15890+			else {
15891+				DRM_DEV_ERROR(dsi->dev,
15892+					      "DPHY clock frequency is out of range\n");
15893+				target_mbps = max_mbps;
15894+			}
15895+		}
15896 	}
15897 
15898 	/* for external phy only a the mipi_dphy_config is necessary */
15899 	if (dsi->phy) {
15900-		phy_mipi_dphy_get_default_config(mode->clock * 1000 * 10 / 8,
15901+		target_pclk = DIV_ROUND_CLOSEST_ULL(target_mbps * lanes, bpp);
15902+		phy_mipi_dphy_get_default_config(target_pclk * USEC_PER_SEC,
15903 						 bpp, lanes,
15904 						 &dsi->phy_opts.mipi_dphy);
15905-		dsi->lane_mbps = target_mbps;
15906+		ret = phy_set_mode(dsi->phy, PHY_MODE_MIPI_DPHY);
15907+		if (ret) {
15908+			DRM_DEV_ERROR(dsi->dev,
15909+				      "failed to set phy mode: %d\n", ret);
15910+			return ret;
15911+		}
15912+
15913+		phy_configure(dsi->phy, &dsi->phy_opts);
15914+		hs_clk_rate = dsi->phy_opts.mipi_dphy.hs_clk_rate;
15915+		dsi->lane_mbps = DIV_ROUND_UP(hs_clk_rate, USEC_PER_SEC);
15916 		*lane_mbps = dsi->lane_mbps;
15917 
15918 		return 0;
15919@@ -611,74 +683,18 @@ struct hstt {
15920 	struct dw_mipi_dsi_dphy_timing timing;
15921 };
15922 
15923-#define HSTT(_maxfreq, _c_lp2hs, _c_hs2lp, _d_lp2hs, _d_hs2lp)	\
15924-{					\
15925-	.maxfreq = _maxfreq,		\
15926-	.timing = {			\
15927-		.clk_lp2hs = _c_lp2hs,	\
15928-		.clk_hs2lp = _c_hs2lp,	\
15929-		.data_lp2hs = _d_lp2hs,	\
15930-		.data_hs2lp = _d_hs2lp,	\
15931-	}				\
15932-}
15933-
15934-/* Table A-3 High-Speed Transition Times */
15935-struct hstt hstt_table[] = {
15936-	HSTT(  90,  32, 20,  26, 13),
15937-	HSTT( 100,  35, 23,  28, 14),
15938-	HSTT( 110,  32, 22,  26, 13),
15939-	HSTT( 130,  31, 20,  27, 13),
15940-	HSTT( 140,  33, 22,  26, 14),
15941-	HSTT( 150,  33, 21,  26, 14),
15942-	HSTT( 170,  32, 20,  27, 13),
15943-	HSTT( 180,  36, 23,  30, 15),
15944-	HSTT( 200,  40, 22,  33, 15),
15945-	HSTT( 220,  40, 22,  33, 15),
15946-	HSTT( 240,  44, 24,  36, 16),
15947-	HSTT( 250,  48, 24,  38, 17),
15948-	HSTT( 270,  48, 24,  38, 17),
15949-	HSTT( 300,  50, 27,  41, 18),
15950-	HSTT( 330,  56, 28,  45, 18),
15951-	HSTT( 360,  59, 28,  48, 19),
15952-	HSTT( 400,  61, 30,  50, 20),
15953-	HSTT( 450,  67, 31,  55, 21),
15954-	HSTT( 500,  73, 31,  59, 22),
15955-	HSTT( 550,  79, 36,  63, 24),
15956-	HSTT( 600,  83, 37,  68, 25),
15957-	HSTT( 650,  90, 38,  73, 27),
15958-	HSTT( 700,  95, 40,  77, 28),
15959-	HSTT( 750, 102, 40,  84, 28),
15960-	HSTT( 800, 106, 42,  87, 30),
15961-	HSTT( 850, 113, 44,  93, 31),
15962-	HSTT( 900, 118, 47,  98, 32),
15963-	HSTT( 950, 124, 47, 102, 34),
15964-	HSTT(1000, 130, 49, 107, 35),
15965-	HSTT(1050, 135, 51, 111, 37),
15966-	HSTT(1100, 139, 51, 114, 38),
15967-	HSTT(1150, 146, 54, 120, 40),
15968-	HSTT(1200, 153, 57, 125, 41),
15969-	HSTT(1250, 158, 58, 130, 42),
15970-	HSTT(1300, 163, 58, 135, 44),
15971-	HSTT(1350, 168, 60, 140, 45),
15972-	HSTT(1400, 172, 64, 144, 47),
15973-	HSTT(1450, 176, 65, 148, 48),
15974-	HSTT(1500, 181, 66, 153, 50)
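+/* a single, fixed set of high-speed transition times is used for all lane rates */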
15975+struct dw_mipi_dsi_dphy_timing dphy_hstt = {
15976+	.clk_lp2hs = 0x40,
15977+	.clk_hs2lp = 0x40,
15978+	.data_lp2hs = 0x10,
15979+	.data_hs2lp = 0x14,
15980 };
15981 
15982 static int
15983 dw_mipi_dsi_phy_get_timing(void *priv_data, unsigned int lane_mbps,
15984 			   struct dw_mipi_dsi_dphy_timing *timing)
15985 {
15986-	int i;
15987-
15988-	for (i = 0; i < ARRAY_SIZE(hstt_table); i++)
15989-		if (lane_mbps < hstt_table[i].maxfreq)
15990-			break;
15991-
15992-	if (i == ARRAY_SIZE(hstt_table))
15993-		i--;
15994-
15995-	*timing = hstt_table[i].timing;
15996+	*timing = dphy_hstt;
15997 
15998 	return 0;
15999 }
16000@@ -691,26 +707,25 @@ static const struct dw_mipi_dsi_phy_ops dw_mipi_dsi_rockchip_phy_ops = {
16001 	.get_timing = dw_mipi_dsi_phy_get_timing,
16002 };
16003 
16004-static void dw_mipi_dsi_rockchip_config(struct dw_mipi_dsi_rockchip *dsi)
16005+static void dw_mipi_dsi_rockchip_vop_routing(struct dw_mipi_dsi_rockchip *dsi)
16006 {
16007-	if (dsi->cdata->lanecfg1_grf_reg)
16008-		regmap_write(dsi->grf_regmap, dsi->cdata->lanecfg1_grf_reg,
16009-					      dsi->cdata->lanecfg1);
16010+	int mux;
16011 
16012-	if (dsi->cdata->lanecfg2_grf_reg)
16013-		regmap_write(dsi->grf_regmap, dsi->cdata->lanecfg2_grf_reg,
16014-					      dsi->cdata->lanecfg2);
16015+	mux = drm_of_encoder_active_endpoint_id(dsi->dev->of_node,
16016+						&dsi->encoder);
16017+	if (mux < 0)
16018+		return;
16019 
16020-	if (dsi->cdata->enable_grf_reg)
16021-		regmap_write(dsi->grf_regmap, dsi->cdata->enable_grf_reg,
16022-					      dsi->cdata->enable);
16023-}
16024+	if (dsi->cdata->lcdsel_grf_reg) {
16025+		regmap_write(dsi->grf_regmap, dsi->cdata->lcdsel_grf_reg,
16026+			mux ? dsi->cdata->lcdsel_lit : dsi->cdata->lcdsel_big);
16027 
16028-static void dw_mipi_dsi_rockchip_set_lcdsel(struct dw_mipi_dsi_rockchip *dsi,
16029-					    int mux)
16030-{
16031-	regmap_write(dsi->grf_regmap, dsi->cdata->lcdsel_grf_reg,
16032-		mux ? dsi->cdata->lcdsel_lit : dsi->cdata->lcdsel_big);
16033+		if (dsi->slave && dsi->slave->cdata->lcdsel_grf_reg)
16034+			regmap_write(dsi->slave->grf_regmap,
16035+				     dsi->slave->cdata->lcdsel_grf_reg,
16036+				     mux ? dsi->slave->cdata->lcdsel_lit :
16037+				     dsi->slave->cdata->lcdsel_big);
16038+	}
16039 }
16040 
16041 static int
16042@@ -720,6 +735,8 @@ dw_mipi_dsi_encoder_atomic_check(struct drm_encoder *encoder,
16043 {
16044 	struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state);
16045 	struct dw_mipi_dsi_rockchip *dsi = to_dsi(encoder);
16046+	struct drm_connector *connector = conn_state->connector;
16047+	struct drm_display_info *info = &connector->display_info;
16048 
16049 	switch (dsi->format) {
16050 	case MIPI_DSI_FMT_RGB888:
16051@@ -736,9 +753,42 @@ dw_mipi_dsi_encoder_atomic_check(struct drm_encoder *encoder,
16052 		return -EINVAL;
16053 	}
16054 
16055+	if (info->num_bus_formats)
16056+		s->bus_format = info->bus_formats[0];
16057+	else
16058+		s->bus_format = MEDIA_BUS_FMT_RGB888_1X24;
16059+
16060+	/* the rk356x series drives MIPI pixel data on the rising edge */
16061+	if (dsi->cdata->soc_type == RK3568) {
16062+		s->bus_flags &= ~DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE;
16063+		s->bus_flags |= DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE;
16064+	}
16065+
16066 	s->output_type = DRM_MODE_CONNECTOR_DSI;
16067-	if (dsi->slave)
16068-		s->output_flags = ROCKCHIP_OUTPUT_DSI_DUAL;
16069+	s->color_space = V4L2_COLORSPACE_DEFAULT;
16070+	s->output_if = dsi->id ? VOP_OUTPUT_IF_MIPI1 : VOP_OUTPUT_IF_MIPI0;
16071+	if (dsi->slave) {
16072+		s->output_flags |= ROCKCHIP_OUTPUT_DUAL_CHANNEL_LEFT_RIGHT_MODE;
16073+		s->output_if |= VOP_OUTPUT_IF_MIPI1;
16074+	}
16075+
16076+	/* dual link dsi for rk3399 */
16077+	if (dsi->id && dsi->cdata->soc_type == RK3399)
16078+		s->output_flags |= ROCKCHIP_OUTPUT_DATA_SWAP;
16079+
16080+	if (dsi->dsc_enable) {
16081+		s->dsc_enable = 1;
16082+		s->dsc_sink_cap.version_major = dsi->version_major;
16083+		s->dsc_sink_cap.version_minor = dsi->version_minor;
16084+		s->dsc_sink_cap.slice_width = dsi->slice_width;
16085+		s->dsc_sink_cap.slice_height = dsi->slice_height;
16086+		/* only RGB888 panels are supported for now */
16087+		s->dsc_sink_cap.target_bits_per_pixel_x16 = 8 << 4;
16088+		s->dsc_sink_cap.block_pred = dsi->block_pred_enable;
16089+		s->dsc_sink_cap.native_420 = 0;
16090+
16091+		memcpy(&s->pps, dsi->pps, sizeof(struct drm_dsc_picture_parameter_set));
16092+	}
16093 
16094 	return 0;
16095 }
16096@@ -746,42 +796,43 @@ dw_mipi_dsi_encoder_atomic_check(struct drm_encoder *encoder,
16097 static void dw_mipi_dsi_encoder_enable(struct drm_encoder *encoder)
16098 {
16099 	struct dw_mipi_dsi_rockchip *dsi = to_dsi(encoder);
16100-	int ret, mux;
16101 
16102-	mux = drm_of_encoder_active_endpoint_id(dsi->dev->of_node,
16103-						&dsi->encoder);
16104-	if (mux < 0)
16105-		return;
16106+	dw_mipi_dsi_rockchip_vop_routing(dsi);
16107+}
16108 
16109-	pm_runtime_get_sync(dsi->dev);
16110-	if (dsi->slave)
16111-		pm_runtime_get_sync(dsi->slave->dev);
16112+static void dw_mipi_dsi_encoder_disable(struct drm_encoder *encoder)
16113+{
16114+}
16115 
16116-	/*
16117-	 * For the RK3399, the clk of grf must be enabled before writing grf
16118-	 * register. And for RK3288 or other soc, this grf_clk must be NULL,
16119-	 * the clk_prepare_enable return true directly.
16120-	 */
16121-	ret = clk_prepare_enable(dsi->grf_clk);
16122-	if (ret) {
16123-		DRM_DEV_ERROR(dsi->dev, "Failed to enable grf_clk: %d\n", ret);
16124-		return;
16125+static void dw_mipi_dsi_rockchip_loader_protect(struct dw_mipi_dsi_rockchip *dsi, bool on)
16126+{
16127+	if (on) {
16128+		pm_runtime_get_sync(dsi->dev);
16129+		phy_init(dsi->phy);
16130+		dsi->phy_enabled = true;
16131+		if (dsi->phy)
16132+			dsi->phy->power_count++;
16133+	} else {
16134+		pm_runtime_put(dsi->dev);
16135+		phy_exit(dsi->phy);
16136+		dsi->phy_enabled = false;
16137+		if (dsi->phy)
16138+			dsi->phy->power_count--;
16139 	}
16140 
16141-	dw_mipi_dsi_rockchip_set_lcdsel(dsi, mux);
16142 	if (dsi->slave)
16143-		dw_mipi_dsi_rockchip_set_lcdsel(dsi->slave, mux);
16144-
16145-	clk_disable_unprepare(dsi->grf_clk);
16146+		dw_mipi_dsi_rockchip_loader_protect(dsi->slave, on);
16147 }
16148 
16149-static void dw_mipi_dsi_encoder_disable(struct drm_encoder *encoder)
16150+static void dw_mipi_dsi_rockchip_encoder_loader_protect(struct drm_encoder *encoder,
16151+					      bool on)
16152 {
16153 	struct dw_mipi_dsi_rockchip *dsi = to_dsi(encoder);
16154 
16155-	if (dsi->slave)
16156-		pm_runtime_put(dsi->slave->dev);
16157-	pm_runtime_put(dsi->dev);
16158+	if (dsi->panel)
16159+		panel_simple_loader_protect(dsi->panel);
16160+
16161+	dw_mipi_dsi_rockchip_loader_protect(dsi, on);
16162 }
16163 
16164 static const struct drm_encoder_helper_funcs
16165@@ -797,8 +848,8 @@ static int rockchip_dsi_drm_create_encoder(struct dw_mipi_dsi_rockchip *dsi,
16166 	struct drm_encoder *encoder = &dsi->encoder;
16167 	int ret;
16168 
16169-	encoder->possible_crtcs = drm_of_find_possible_crtcs(drm_dev,
16170-							     dsi->dev->of_node);
16171+	encoder->possible_crtcs = rockchip_drm_of_find_possible_crtcs(drm_dev,
16172+								      dsi->dev->of_node);
16173 
16174 	ret = drm_simple_encoder_init(drm_dev, encoder, DRM_MODE_ENCODER_DSI);
16175 	if (ret) {
16176@@ -814,61 +865,90 @@ static int rockchip_dsi_drm_create_encoder(struct dw_mipi_dsi_rockchip *dsi,
16177 static struct device
16178 *dw_mipi_dsi_rockchip_find_second(struct dw_mipi_dsi_rockchip *dsi)
16179 {
16180-	const struct of_device_id *match;
16181-	struct device_node *node = NULL, *local;
16182-
16183-	match = of_match_device(dsi->dev->driver->of_match_table, dsi->dev);
16184-
16185-	local = of_graph_get_remote_node(dsi->dev->of_node, 1, 0);
16186-	if (!local)
16187-		return NULL;
16188-
16189-	while ((node = of_find_compatible_node(node, NULL,
16190-					       match->compatible))) {
16191-		struct device_node *remote;
16192+	struct device_node *node = NULL;
16193+	struct platform_device *pdev;
16194+	struct dw_mipi_dsi_rockchip *dsi2;
16195+
16196+	node = of_parse_phandle(dsi->dev->of_node, "rockchip,dual-channel", 0);
16197+	if (node) {
16198+		pdev = of_find_device_by_node(node);
16199+		if (!pdev)
16200+			return ERR_PTR(-EPROBE_DEFER);
16201+
16202+		dsi2 = platform_get_drvdata(pdev);
16203+		if (!dsi2) {
16204+			platform_device_put(pdev);
16205+			return ERR_PTR(-EPROBE_DEFER);
16206+		}
16207 
16208-		/* found ourself */
16209-		if (node == dsi->dev->of_node)
16210-			continue;
16211+		return &pdev->dev;
16212+	}
16213 
16214-		remote = of_graph_get_remote_node(node, 1, 0);
16215-		if (!remote)
16216-			continue;
16217+	return NULL;
16218+}
16219 
16220-		/* same display device in port1-ep0 for both */
16221-		if (remote == local) {
16222-			struct dw_mipi_dsi_rockchip *dsi2;
16223-			struct platform_device *pdev;
16224+static int dw_mipi_dsi_get_dsc_info_from_sink(struct dw_mipi_dsi_rockchip *dsi,
16225+					      struct drm_panel *panel,
16226+					      struct drm_bridge *bridge)
16227+{
16228+	struct drm_dsc_picture_parameter_set *pps = NULL;
16229+	struct device_node *np = NULL;
16230+	struct cmd_header *header;
16231+	const void *data;
16232+	char *d;
16233+	uint8_t *dsc_packed_pps;
16234+	int len;
16235+
16236+	if (!panel && !bridge)
16237+		return -ENODEV;
16238+
16239+	if (panel)
16240+		np = panel->dev->of_node;
16241+	else
16242+		np = bridge->of_node;
16243+
16244+	dsi->c_option = of_property_read_bool(np, "phy-c-option");
16245+	dsi->scrambling_en = of_property_read_bool(np, "scrambling-enable");
16246+	dsi->dsc_enable = of_property_read_bool(np, "compressed-data");
16247+	dsi->block_pred_enable = of_property_read_bool(np, "blk-pred-enable");
16248+	of_property_read_u32(np, "slice-width", &dsi->slice_width);
16249+	of_property_read_u32(np, "slice-height", &dsi->slice_height);
16250+	of_property_read_u32(np, "slice-per-pkt", &dsi->slice_per_pkt);
16251+	of_property_read_u8(np, "version-major", &dsi->version_major);
16252+	of_property_read_u8(np, "version-minor", &dsi->version_minor);
16253+
16254+	data = of_get_property(np, "panel-init-sequence", &len);
16255+	if (!data)
16256+		return -EINVAL;
16257 
16258-			pdev = of_find_device_by_node(node);
16259+	d = devm_kmemdup(dsi->dev, data, len, GFP_KERNEL);
16260+	if (!d)
16261+		return -ENOMEM;
16262 
16263-			/*
16264-			 * we have found the second, so will either return it
16265-			 * or return with an error. In any case won't need the
16266-			 * nodes anymore nor continue the loop.
16267-			 */
16268-			of_node_put(remote);
16269-			of_node_put(node);
16270-			of_node_put(local);
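+	/* scan the panel init sequence for a DSC picture parameter set (PPS) */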
16271+	while (len > sizeof(*header)) {
16272+		header = (struct cmd_header *)d;
16273+		d += sizeof(*header);
16274+		len -= sizeof(*header);
16275 
16276-			if (!pdev)
16277-				return ERR_PTR(-EPROBE_DEFER);
16278+		if (header->payload_length > len)
16279+			return -EINVAL;
16280 
16281-			dsi2 = platform_get_drvdata(pdev);
16282-			if (!dsi2) {
16283-				platform_device_put(pdev);
16284-				return ERR_PTR(-EPROBE_DEFER);
16285-			}
16286+		if (header->cmd_type == MIPI_DSI_PICTURE_PARAMETER_SET) {
16287+			dsc_packed_pps = devm_kmemdup(dsi->dev, d,
16288+						      header->payload_length, GFP_KERNEL);
16289+			if (!dsc_packed_pps)
16290+				return -ENOMEM;
16291 
16292-			return &pdev->dev;
16293+			pps = (struct drm_dsc_picture_parameter_set *)dsc_packed_pps;
16294+			break;
16295 		}
16296 
16297-		of_node_put(remote);
16298+		d += header->payload_length;
16299+		len -= header->payload_length;
16300 	}
16301+	dsi->pps = pps;
16302 
16303-	of_node_put(local);
16304-
16305-	return NULL;
16306+	return 0;
16307 }
16308 
16309 static int dw_mipi_dsi_rockchip_bind(struct device *dev,
16310@@ -878,7 +958,6 @@ static int dw_mipi_dsi_rockchip_bind(struct device *dev,
16311 	struct dw_mipi_dsi_rockchip *dsi = dev_get_drvdata(dev);
16312 	struct drm_device *drm_dev = data;
16313 	struct device *second;
16314-	bool master1, master2;
16315 	int ret;
16316 
16317 	second = dw_mipi_dsi_rockchip_find_second(dsi);
16318@@ -886,27 +965,7 @@ static int dw_mipi_dsi_rockchip_bind(struct device *dev,
16319 		return PTR_ERR(second);
16320 
16321 	if (second) {
16322-		master1 = of_property_read_bool(dsi->dev->of_node,
16323-						"clock-master");
16324-		master2 = of_property_read_bool(second->of_node,
16325-						"clock-master");
16326-
16327-		if (master1 && master2) {
16328-			DRM_DEV_ERROR(dsi->dev, "only one clock-master allowed\n");
16329-			return -EINVAL;
16330-		}
16331-
16332-		if (!master1 && !master2) {
16333-			DRM_DEV_ERROR(dsi->dev, "no clock-master defined\n");
16334-			return -EINVAL;
16335-		}
16336-
16337 		/* we are the slave in dual-DSI */
16338-		if (!master1) {
16339-			dsi->is_slave = true;
16340-			return 0;
16341-		}
16342-
16343 		dsi->slave = dev_get_drvdata(second);
16344 		if (!dsi->slave) {
16345 			DRM_DEV_ERROR(dev, "could not get slaves data\n");
16346@@ -918,30 +977,15 @@ static int dw_mipi_dsi_rockchip_bind(struct device *dev,
16347 		put_device(second);
16348 	}
16349 
16350+	if (dsi->is_slave)
16351+		return 0;
16352+
16353 	ret = clk_prepare_enable(dsi->pllref_clk);
16354 	if (ret) {
16355 		DRM_DEV_ERROR(dev, "Failed to enable pllref_clk: %d\n", ret);
16356 		return ret;
16357 	}
16358 
16359-	/*
16360-	 * With the GRF clock running, write lane and dual-mode configurations
16361-	 * that won't change immediately. If we waited until enable() to do
16362-	 * this, things like panel preparation would not be able to send
16363-	 * commands over DSI.
16364-	 */
16365-	ret = clk_prepare_enable(dsi->grf_clk);
16366-	if (ret) {
16367-		DRM_DEV_ERROR(dsi->dev, "Failed to enable grf_clk: %d\n", ret);
16368-		return ret;
16369-	}
16370-
16371-	dw_mipi_dsi_rockchip_config(dsi);
16372-	if (dsi->slave)
16373-		dw_mipi_dsi_rockchip_config(dsi->slave);
16374-
16375-	clk_disable_unprepare(dsi->grf_clk);
16376-
16377 	ret = rockchip_dsi_drm_create_encoder(dsi, drm_dev);
16378 	if (ret) {
16379 		DRM_DEV_ERROR(dev, "Failed to create drm encoder\n");
16380@@ -954,6 +998,20 @@ static int dw_mipi_dsi_rockchip_bind(struct device *dev,
16381 		return ret;
16382 	}
16383 
16384+	ret = drm_of_find_panel_or_bridge(dsi->dev->of_node, 1, 0,
16385+					  &dsi->panel, NULL);
16386+	if (ret)
16387+		dev_err(dsi->dev, "failed to find panel\n");
16388+
16389+	dw_mipi_dsi_get_dsc_info_from_sink(dsi, dsi->panel, NULL);
16390+
16391+	dsi->sub_dev.connector = dw_mipi_dsi_get_connector(dsi->dmd);
16392+	if (dsi->sub_dev.connector) {
16393+		dsi->sub_dev.of_node = dev->of_node;
16394+		dsi->sub_dev.loader_protect = dw_mipi_dsi_rockchip_encoder_loader_protect;
16395+		rockchip_drm_register_sub_dev(&dsi->sub_dev);
16396+	}
16397+
16398 	return 0;
16399 }
16400 
16401@@ -966,6 +1024,9 @@ static void dw_mipi_dsi_rockchip_unbind(struct device *dev,
16402 	if (dsi->is_slave)
16403 		return;
16404 
16405+	if (dsi->sub_dev.connector)
16406+		rockchip_drm_unregister_sub_dev(&dsi->sub_dev);
16407+
16408 	dw_mipi_dsi_unbind(dsi->dmd);
16409 
16410 	clk_disable_unprepare(dsi->pllref_clk);
16411@@ -1051,6 +1112,7 @@ static int dw_mipi_dsi_rockchip_probe(struct platform_device *pdev)
16412 	while (cdata[i].reg) {
16413 		if (cdata[i].reg == res->start) {
16414 			dsi->cdata = &cdata[i];
16415+			dsi->id = i;
16416 			break;
16417 		}
16418 
16419@@ -1070,6 +1132,13 @@ static int dw_mipi_dsi_rockchip_probe(struct platform_device *pdev)
16420 		return ret;
16421 	}
16422 
16423+	dsi->pclk = devm_clk_get(dev, "pclk");
16424+	if (IS_ERR(dsi->pclk)) {
16425+		ret = PTR_ERR(dsi->pclk);
16426+		dev_err(dev, "Unable to get pclk: %d\n", ret);
16427+		return ret;
16428+	}
16429+
16430 	dsi->pllref_clk = devm_clk_get(dev, "ref");
16431 	if (IS_ERR(dsi->pllref_clk)) {
16432 		if (dsi->phy) {
16433@@ -1106,6 +1175,15 @@ static int dw_mipi_dsi_rockchip_probe(struct platform_device *pdev)
16434 		}
16435 	}
16436 
16437+	if (dsi->cdata->flags & DW_MIPI_NEEDS_HCLK) {
16438+		dsi->hclk = devm_clk_get(dev, "hclk");
16439+		if (IS_ERR(dsi->hclk)) {
16440+			ret = PTR_ERR(dsi->hclk);
16441+			DRM_DEV_ERROR(dev, "Unable to get hclk: %d\n", ret);
16442+			return ret;
16443+		}
16444+	}
16445+
16446 	dsi->grf_regmap = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
16447 	if (IS_ERR(dsi->grf_regmap)) {
16448 		DRM_DEV_ERROR(dsi->dev, "Unable to get rockchip,grf\n");
16449@@ -1140,11 +1218,43 @@ static int dw_mipi_dsi_rockchip_remove(struct platform_device *pdev)
16450 {
16451 	struct dw_mipi_dsi_rockchip *dsi = platform_get_drvdata(pdev);
16452 
16453+	if (dsi->devcnt == 0)
16454+		component_del(dsi->dev, &dw_mipi_dsi_rockchip_ops);
16455+
16456 	dw_mipi_dsi_remove(dsi->dmd);
16457 
16458 	return 0;
16459 }
16460 
16461+static __maybe_unused int dw_mipi_dsi_runtime_suspend(struct device *dev)
16462+{
16463+	struct dw_mipi_dsi_rockchip *dsi = dev_get_drvdata(dev);
16464+
16465+	clk_disable_unprepare(dsi->grf_clk);
16466+	clk_disable_unprepare(dsi->pclk);
16467+	clk_disable_unprepare(dsi->hclk);
16468+	clk_disable_unprepare(dsi->phy_cfg_clk);
16469+
16470+	return 0;
16471+}
16472+
16473+static __maybe_unused int dw_mipi_dsi_runtime_resume(struct device *dev)
16474+{
16475+	struct dw_mipi_dsi_rockchip *dsi = dev_get_drvdata(dev);
16476+
16477+	clk_prepare_enable(dsi->phy_cfg_clk);
16478+	clk_prepare_enable(dsi->hclk);
16479+	clk_prepare_enable(dsi->pclk);
16480+	clk_prepare_enable(dsi->grf_clk);
16481+
16482+	return 0;
16483+}
16484+
16485+static const struct dev_pm_ops dw_mipi_dsi_rockchip_pm_ops = {
16486+	SET_RUNTIME_PM_OPS(dw_mipi_dsi_runtime_suspend,
16487+			   dw_mipi_dsi_runtime_resume, NULL)
16488+};
16489+
16490 static const struct rockchip_dw_dsi_chip_data px30_chip_data[] = {
16491 	{
16492 		.reg = 0xff450000,
16493@@ -1159,6 +1269,8 @@ static const struct rockchip_dw_dsi_chip_data px30_chip_data[] = {
16494 					     PX30_DSI_FORCETXSTOPMODE),
16495 
16496 		.max_data_lanes = 4,
16497+		.max_bit_rate_per_lane = 1000000000UL,
16498+		.soc_type = PX30,
16499 	},
16500 	{ /* sentinel */ }
16501 };
16502@@ -1171,6 +1283,8 @@ static const struct rockchip_dw_dsi_chip_data rk3288_chip_data[] = {
16503 		.lcdsel_lit = HIWORD_UPDATE(RK3288_DSI0_LCDC_SEL, RK3288_DSI0_LCDC_SEL),
16504 
16505 		.max_data_lanes = 4,
16506+		.max_bit_rate_per_lane = 1500000000UL,
16507+		.soc_type = RK3288,
16508 	},
16509 	{
16510 		.reg = 0xff964000,
16511@@ -1179,6 +1293,8 @@ static const struct rockchip_dw_dsi_chip_data rk3288_chip_data[] = {
16512 		.lcdsel_lit = HIWORD_UPDATE(RK3288_DSI1_LCDC_SEL, RK3288_DSI1_LCDC_SEL),
16513 
16514 		.max_data_lanes = 4,
16515+		.max_bit_rate_per_lane = 1500000000UL,
16516+		.soc_type = RK3288,
16517 	},
16518 	{ /* sentinel */ }
16519 };
16520@@ -1199,6 +1315,8 @@ static const struct rockchip_dw_dsi_chip_data rk3399_chip_data[] = {
16521 
16522 		.flags = DW_MIPI_NEEDS_PHY_CFG_CLK | DW_MIPI_NEEDS_GRF_CLK,
16523 		.max_data_lanes = 4,
16524+		.max_bit_rate_per_lane = 1500000000UL,
16525+		.soc_type = RK3399,
16526 	},
16527 	{
16528 		.reg = 0xff968000,
16529@@ -1225,6 +1343,38 @@ static const struct rockchip_dw_dsi_chip_data rk3399_chip_data[] = {
16530 
16531 		.flags = DW_MIPI_NEEDS_PHY_CFG_CLK | DW_MIPI_NEEDS_GRF_CLK,
16532 		.max_data_lanes = 4,
16533+		.max_bit_rate_per_lane = 1500000000UL,
16534+		.soc_type = RK3399,
16535+	},
16536+	{ /* sentinel */ }
16537+};
16538+
16539+static const struct rockchip_dw_dsi_chip_data rk3568_chip_data[] = {
16540+	{
16541+		.reg = 0xfe060000,
16542+
16543+		.lanecfg1_grf_reg = RK3568_GRF_VO_CON2,
16544+		.lanecfg1 = HIWORD_UPDATE(0, RK3568_DSI_TURNDISABLE |
16545+					     RK3568_DSI_FORCERXMODE |
16546+					     RK3568_DSI_FORCETXSTOPMODE),
16547+
16548+		.flags = DW_MIPI_NEEDS_HCLK,
16549+		.max_data_lanes = 4,
16550+		.max_bit_rate_per_lane = 1200000000UL,
16551+		.soc_type = RK3568,
16552+	},
16553+	{
16554+		.reg = 0xfe070000,
16555+
16556+		.lanecfg1_grf_reg = RK3568_GRF_VO_CON3,
16557+		.lanecfg1 = HIWORD_UPDATE(0, RK3568_DSI_TURNDISABLE |
16558+					     RK3568_DSI_FORCERXMODE |
16559+					     RK3568_DSI_FORCETXSTOPMODE),
16560+
16561+		.flags = DW_MIPI_NEEDS_HCLK,
16562+		.max_data_lanes = 4,
16563+		.max_bit_rate_per_lane = 1200000000UL,
16564+		.soc_type = RK3568,
16565 	},
16566 	{ /* sentinel */ }
16567 };
16568@@ -1239,6 +1389,9 @@ static const struct of_device_id dw_mipi_dsi_rockchip_dt_ids[] = {
16569 	}, {
16570 	 .compatible = "rockchip,rk3399-mipi-dsi",
16571 	 .data = &rk3399_chip_data,
16572+	}, {
16573+	 .compatible = "rockchip,rk3568-mipi-dsi",
16574+	 .data = &rk3568_chip_data,
16575 	},
16576 	{ /* sentinel */ }
16577 };
16578@@ -1249,6 +1402,7 @@ struct platform_driver dw_mipi_dsi_rockchip_driver = {
16579 	.remove		= dw_mipi_dsi_rockchip_remove,
16580 	.driver		= {
16581 		.of_match_table = dw_mipi_dsi_rockchip_dt_ids,
16582+		.pm = &dw_mipi_dsi_rockchip_pm_ops,
16583 		.name	= "dw-mipi-dsi-rockchip",
16584 		/*
16585 		 * For dual-DSI display, one DSI pokes at the other DSI's
16586diff --git a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
16587index 23de359a1..430a35995 100644
16588--- a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
16589+++ b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
16590@@ -4,21 +4,31 @@
16591  */
16592 
16593 #include <linux/clk.h>
16594+#include <linux/gpio/consumer.h>
16595 #include <linux/mfd/syscon.h>
16596 #include <linux/module.h>
16597 #include <linux/platform_device.h>
16598 #include <linux/phy/phy.h>
16599 #include <linux/regmap.h>
16600+#include <linux/pm_runtime.h>
16601 
16602+#include <drm/drm_of.h>
16603+#include <drm/drm_crtc_helper.h>
16604+#include <drm/drm_dsc.h>
16605+#include <drm/drm_edid.h>
16606 #include <drm/bridge/dw_hdmi.h>
16607 #include <drm/drm_edid.h>
16608 #include <drm/drm_of.h>
16609 #include <drm/drm_probe_helper.h>
16610 #include <drm/drm_simple_kms_helper.h>
16611 
16612+#include <uapi/linux/videodev2.h>
16613+
16614 #include "rockchip_drm_drv.h"
16615 #include "rockchip_drm_vop.h"
16616 
16617+#define HIWORD_UPDATE(val, mask)	(val | (mask) << 16)
16618+
16619 #define RK3228_GRF_SOC_CON2		0x0408
16620 #define RK3228_HDMI_SDAIN_MSK		BIT(14)
16621 #define RK3228_HDMI_SCLIN_MSK		BIT(13)
16622@@ -29,8 +39,11 @@
16623 
16624 #define RK3288_GRF_SOC_CON6		0x025C
16625 #define RK3288_HDMI_LCDC_SEL		BIT(4)
16626-#define RK3328_GRF_SOC_CON2		0x0408
16627+#define RK3288_GRF_SOC_CON16		0x03a8
16628+#define RK3288_HDMI_LCDC0_YUV420	BIT(2)
16629+#define RK3288_HDMI_LCDC1_YUV420	BIT(3)
16630 
16631+#define RK3328_GRF_SOC_CON2		0x0408
16632 #define RK3328_HDMI_SDAIN_MSK		BIT(11)
16633 #define RK3328_HDMI_SCLIN_MSK		BIT(10)
16634 #define RK3328_HDMI_HPD_IOE		BIT(2)
16635@@ -50,281 +63,2486 @@
16636 #define RK3399_GRF_SOC_CON20		0x6250
16637 #define RK3399_HDMI_LCDC_SEL		BIT(6)
16638 
16639-#define HIWORD_UPDATE(val, mask)	(val | (mask) << 16)
16640+#define RK3568_GRF_VO_CON1		0x0364
16641+#define RK3568_HDMI_SDAIN_MSK		BIT(15)
16642+#define RK3568_HDMI_SCLIN_MSK		BIT(14)
16643+
16644+#define RK3588_GRF_SOC_CON2		0x0308
16645+#define RK3588_HDMI1_HPD_INT_MSK	BIT(15)
16646+#define RK3588_HDMI1_HPD_INT_CLR	BIT(14)
16647+#define RK3588_HDMI0_HPD_INT_MSK	BIT(13)
16648+#define RK3588_HDMI0_HPD_INT_CLR	BIT(12)
16649+#define RK3588_GRF_SOC_CON7		0x031c
16650+#define RK3588_SET_HPD_PATH_MASK	(0x3 << 12)
16651+#define RK3588_GRF_SOC_STATUS1		0x0384
16652+#define RK3588_HDMI0_LOW_MORETHAN100MS	BIT(20)
16653+#define RK3588_HDMI0_HPD_PORT_LEVEL	BIT(19)
16654+#define RK3588_HDMI0_IHPD_PORT		BIT(18)
16655+#define RK3588_HDMI0_OHPD_INT		BIT(17)
16656+#define RK3588_HDMI0_LEVEL_INT		BIT(16)
16657+#define RK3588_HDMI0_INTR_CHANGE_CNT	(0x7 << 13)
16658+#define RK3588_HDMI1_LOW_MORETHAN100MS	BIT(28)
16659+#define RK3588_HDMI1_HPD_PORT_LEVEL	BIT(27)
16660+#define RK3588_HDMI1_IHPD_PORT		BIT(26)
16661+#define RK3588_HDMI1_OHPD_INT		BIT(25)
16662+#define RK3588_HDMI1_LEVEL_INT		BIT(24)
16663+#define RK3588_HDMI1_INTR_CHANGE_CNT	(0x7 << 21)
16664+
16665+#define RK3588_GRF_VO1_CON3		0x000c
16666+#define RK3588_COLOR_FORMAT_MASK	0xf
16667+#define RK3588_YUV444			0x2
16668+#define RK3588_YUV420			0x3
16669+#define RK3588_COMPRESSED_DATA		0xb
16670+#define RK3588_COLOR_DEPTH_MASK		(0xf << 4)
16671+#define RK3588_8BPC			(0x5 << 4)
16672+#define RK3588_10BPC			(0x6 << 4)
16673+#define RK3588_CECIN_MASK		BIT(8)
16674+#define RK3588_SCLIN_MASK		BIT(9)
16675+#define RK3588_SDAIN_MASK		BIT(10)
16676+#define RK3588_MODE_MASK		BIT(11)
16677+#define RK3588_COMPRESS_MODE_MASK	BIT(12)
16678+#define RK3588_I2S_SEL_MASK		BIT(13)
16679+#define RK3588_SPDIF_SEL_MASK		BIT(14)
16680+#define RK3588_GRF_VO1_CON4		0x0010
16681+#define RK3588_HDMI21_MASK		BIT(0)
16682+#define RK3588_GRF_VO1_CON9		0x0024
16683+#define RK3588_HDMI0_GRANT_SEL		BIT(10)
16684+#define RK3588_HDMI0_GRANT_SW		BIT(11)
16685+#define RK3588_HDMI1_GRANT_SEL		BIT(12)
16686+#define RK3588_HDMI1_GRANT_SW		BIT(13)
16687+#define RK3588_GRF_VO1_CON6		0x0018
16688+#define RK3588_GRF_VO1_CON7		0x001c
16689+
16690+#define RK_HDMI_COLORIMETRY_BT2020	(HDMI_COLORIMETRY_EXTENDED + \
16691+					 HDMI_EXTENDED_COLORIMETRY_BT2020)
16692+
16693+#define COLOR_DEPTH_10BIT		BIT(31)
16694+#define HDMI_FRL_MODE			BIT(30)
16695+#define HDMI_EARC_MODE			BIT(29)
16696+
16697+#define HDMI20_MAX_RATE			600000
16698+#define HDMI_8K60_RATE			2376000
16699 
16700 /**
16701  * struct rockchip_hdmi_chip_data - splite the grf setting of kind of chips
16702  * @lcdsel_grf_reg: grf register offset of lcdc select
16703+ * @ddc_en_reg: grf register offset of hdmi ddc enable
16704  * @lcdsel_big: reg value of selecting vop big for HDMI
16705  * @lcdsel_lit: reg value of selecting vop little for HDMI
16706  */
16707 struct rockchip_hdmi_chip_data {
16708 	int	lcdsel_grf_reg;
16709+	int	ddc_en_reg;
16710 	u32	lcdsel_big;
16711 	u32	lcdsel_lit;
16712 };
16713 
16714+/* HDMI output pixel format */
16715+enum drm_hdmi_output_type {
16716+	DRM_HDMI_OUTPUT_DEFAULT_RGB, /* default RGB */
16717+	DRM_HDMI_OUTPUT_YCBCR444, /* YCBCR 444 */
16718+	DRM_HDMI_OUTPUT_YCBCR422, /* YCBCR 422 */
16719+	DRM_HDMI_OUTPUT_YCBCR420, /* YCBCR 420 */
16720+	DRM_HDMI_OUTPUT_YCBCR_HQ, /* Highest subsampled YUV */
16721+	DRM_HDMI_OUTPUT_YCBCR_LQ, /* Lowest subsampled YUV */
16722+	DRM_HDMI_OUTPUT_INVALID, /* invalid output format */
16723+};
16724+
16725+enum dw_hdmi_rockchip_color_depth {
16726+	ROCKCHIP_HDMI_DEPTH_8,
16727+	ROCKCHIP_HDMI_DEPTH_10,
16728+	ROCKCHIP_HDMI_DEPTH_12,
16729+	ROCKCHIP_HDMI_DEPTH_16,
16730+	ROCKCHIP_HDMI_DEPTH_420_10,
16731+	ROCKCHIP_HDMI_DEPTH_420_12,
16732+	ROCKCHIP_HDMI_DEPTH_420_16
16733+};
16734+
16735+enum hdmi_frl_rate_per_lane {
16736+	FRL_12G_PER_LANE = 12,
16737+	FRL_10G_PER_LANE = 10,
16738+	FRL_8G_PER_LANE = 8,
16739+	FRL_6G_PER_LANE = 6,
16740+	FRL_3G_PER_LANE = 3,
16741+};
16742+
16743 struct rockchip_hdmi {
16744 	struct device *dev;
16745 	struct regmap *regmap;
16746+	struct regmap *vo1_regmap;
16747 	struct drm_encoder encoder;
16748 	const struct rockchip_hdmi_chip_data *chip_data;
16749-	struct clk *vpll_clk;
16750+	struct clk *aud_clk;
16751+	struct clk *phyref_clk;
16752 	struct clk *grf_clk;
16753+	struct clk *hclk_vio;
16754+	struct clk *hclk_vo1;
16755+	struct clk *hclk_vop;
16756+	struct clk *hpd_clk;
16757+	struct clk *pclk;
16758+	struct clk *earc_clk;
16759+	struct clk *hdmitx_ref;
16760 	struct dw_hdmi *hdmi;
16761+	struct dw_hdmi_qp *hdmi_qp;
16762+
16763 	struct phy *phy;
16764+
16765+	u32 max_tmdsclk;
16766+	bool unsupported_yuv_input;
16767+	bool unsupported_deep_color;
16768+	bool skip_check_420_mode;
16769+	bool mode_changed;
16770+	u8 force_output;
16771+	u8 id;
16772+	bool hpd_stat;
16773+	bool is_hdmi_qp;
16774+
16775+	unsigned long bus_format;
16776+	unsigned long output_bus_format;
16777+	unsigned long enc_out_encoding;
16778+	int color_changed;
16779+	int hpd_irq;
16780+
16781+	struct drm_property *color_depth_property;
16782+	struct drm_property *hdmi_output_property;
16783+	struct drm_property *colordepth_capacity;
16784+	struct drm_property *outputmode_capacity;
16785+	struct drm_property *colorimetry_property;
16786+	struct drm_property *quant_range;
16787+	struct drm_property *hdr_panel_metadata_property;
16788+	struct drm_property *next_hdr_sink_data_property;
16789+	struct drm_property *output_hdmi_dvi;
16790+	struct drm_property *output_type_capacity;
16791+
16792+	struct drm_property_blob *hdr_panel_blob_ptr;
16793+	struct drm_property_blob *next_hdr_data_ptr;
16794+
16795+	unsigned int colordepth;
16796+	unsigned int colorimetry;
16797+	unsigned int hdmi_quant_range;
16798+	unsigned int phy_bus_width;
16799+	enum drm_hdmi_output_type hdmi_output;
16800+	struct rockchip_drm_sub_dev sub_dev;
16801+
16802+	u8 max_frl_rate_per_lane;
16803+	u8 max_lanes;
16804+	struct rockchip_drm_dsc_cap dsc_cap;
16805+	struct next_hdr_sink_data next_hdr_data;
16806+	struct dw_hdmi_link_config link_cfg;
16807+	struct gpio_desc *enable_gpio;
16808+
16809+	struct delayed_work work;
16810+	struct workqueue_struct *workqueue;
16811 };
16812 
16813 #define to_rockchip_hdmi(x)	container_of(x, struct rockchip_hdmi, x)
16814 
16815-static const struct dw_hdmi_mpll_config rockchip_mpll_cfg[] = {
16816-	{
16817-		27000000, {
16818-			{ 0x00b3, 0x0000},
16819-			{ 0x2153, 0x0000},
16820-			{ 0x40f3, 0x0000}
16821-		},
16822-	}, {
16823-		36000000, {
16824-			{ 0x00b3, 0x0000},
16825-			{ 0x2153, 0x0000},
16826-			{ 0x40f3, 0x0000}
16827-		},
16828-	}, {
16829-		40000000, {
16830-			{ 0x00b3, 0x0000},
16831-			{ 0x2153, 0x0000},
16832-			{ 0x40f3, 0x0000}
16833-		},
16834-	}, {
16835-		54000000, {
16836-			{ 0x0072, 0x0001},
16837-			{ 0x2142, 0x0001},
16838-			{ 0x40a2, 0x0001},
16839-		},
16840-	}, {
16841-		65000000, {
16842-			{ 0x0072, 0x0001},
16843-			{ 0x2142, 0x0001},
16844-			{ 0x40a2, 0x0001},
16845-		},
16846-	}, {
16847-		66000000, {
16848-			{ 0x013e, 0x0003},
16849-			{ 0x217e, 0x0002},
16850-			{ 0x4061, 0x0002}
16851-		},
16852-	}, {
16853-		74250000, {
16854-			{ 0x0072, 0x0001},
16855-			{ 0x2145, 0x0002},
16856-			{ 0x4061, 0x0002}
16857-		},
16858-	}, {
16859-		83500000, {
16860-			{ 0x0072, 0x0001},
16861-		},
16862-	}, {
16863-		108000000, {
16864-			{ 0x0051, 0x0002},
16865-			{ 0x2145, 0x0002},
16866-			{ 0x4061, 0x0002}
16867-		},
16868-	}, {
16869-		106500000, {
16870-			{ 0x0051, 0x0002},
16871-			{ 0x2145, 0x0002},
16872-			{ 0x4061, 0x0002}
16873-		},
16874-	}, {
16875-		146250000, {
16876-			{ 0x0051, 0x0002},
16877-			{ 0x2145, 0x0002},
16878-			{ 0x4061, 0x0002}
16879-		},
16880-	}, {
16881-		148500000, {
16882-			{ 0x0051, 0x0003},
16883-			{ 0x214c, 0x0003},
16884-			{ 0x4064, 0x0003}
16885-		},
16886-	}, {
16887-		~0UL, {
16888-			{ 0x00a0, 0x000a },
16889-			{ 0x2001, 0x000f },
16890-			{ 0x4002, 0x000f },
16891-		},
16892+/*
16893+ * Some rates are adjusted in the Chrome OS tree for better clock
16894+ * jitter, e.g. 25.175 MHz is tuned to 25.170732 MHz. But because the
16895+ * clock in struct drm_display_mode is aligned to kHz, running the
16896+ * compute_n math on these rates would introduce some inaccuracy, so
16897+ * just use a const table for them until we can actually get the
16898+ * right clock rate.
16899+ */
16900+static const struct dw_hdmi_audio_tmds_n rockchip_werid_tmds_n_table[] = {
16901+	/* 25176471 for 25.175 MHz = 428000000 / 17. */
16902+	{ .tmds = 25177000, .n_32k = 4352, .n_44k1 = 14994, .n_48k = 6528, },
16903+	/* 57290323 for 57.284 MHz */
16904+	{ .tmds = 57291000, .n_32k = 3968, .n_44k1 = 4557, .n_48k = 5952, },
16905+	/* 74437500 for 74.44 MHz = 297750000 / 4 */
16906+	{ .tmds = 74438000, .n_32k = 8192, .n_44k1 = 18816, .n_48k = 4096, },
16907+	/* 118666667 for 118.68 MHz */
16908+	{ .tmds = 118667000, .n_32k = 4224, .n_44k1 = 5292, .n_48k = 6336, },
16909+	/* 121714286 for 121.75 MHz */
16910+	{ .tmds = 121715000, .n_32k = 4480, .n_44k1 = 6174, .n_48k = 6272, },
16911+	/* 136800000 for 136.75 MHz */
16912+	{ .tmds = 136800000, .n_32k = 4096, .n_44k1 = 5684, .n_48k = 6144, },
16913+	/* End of table */
16914+	{ .tmds = 0,         .n_32k = 0,    .n_44k1 = 0,    .n_48k = 0, },
16915+};
16916+
16917+static const struct dw_hdmi_mpll_config rockchip_mpll_cfg[] = {
16918+	{
16919+		30666000, {
16920+			{ 0x00b3, 0x0000 },
16921+			{ 0x2153, 0x0000 },
16922+			{ 0x40f3, 0x0000 },
16923+		},
16924+	},  {
16925+		36800000, {
16926+			{ 0x00b3, 0x0000 },
16927+			{ 0x2153, 0x0000 },
16928+			{ 0x40a2, 0x0001 },
16929+		},
16930+	},  {
16931+		46000000, {
16932+			{ 0x00b3, 0x0000 },
16933+			{ 0x2142, 0x0001 },
16934+			{ 0x40a2, 0x0001 },
16935+		},
16936+	},  {
16937+		61333000, {
16938+			{ 0x0072, 0x0001 },
16939+			{ 0x2142, 0x0001 },
16940+			{ 0x40a2, 0x0001 },
16941+		},
16942+	},  {
16943+		73600000, {
16944+			{ 0x0072, 0x0001 },
16945+			{ 0x2142, 0x0001 },
16946+			{ 0x4061, 0x0002 },
16947+		},
16948+	},  {
16949+		92000000, {
16950+			{ 0x0072, 0x0001 },
16951+			{ 0x2145, 0x0002 },
16952+			{ 0x4061, 0x0002 },
16953+		},
16954+	},  {
16955+		122666000, {
16956+			{ 0x0051, 0x0002 },
16957+			{ 0x2145, 0x0002 },
16958+			{ 0x4061, 0x0002 },
16959+		},
16960+	},  {
16961+		147200000, {
16962+			{ 0x0051, 0x0002 },
16963+			{ 0x2145, 0x0002 },
16964+			{ 0x4064, 0x0003 },
16965+		},
16966+	},  {
16967+		184000000, {
16968+			{ 0x0051, 0x0002 },
16969+			{ 0x214c, 0x0003 },
16970+			{ 0x4064, 0x0003 },
16971+		},
16972+	},  {
16973+		226666000, {
16974+			{ 0x0040, 0x0003 },
16975+			{ 0x214c, 0x0003 },
16976+			{ 0x4064, 0x0003 },
16977+		},
16978+	},  {
16979+		272000000, {
16980+			{ 0x0040, 0x0003 },
16981+			{ 0x214c, 0x0003 },
16982+			{ 0x5a64, 0x0003 },
16983+		},
16984+	},  {
16985+		340000000, {
16986+			{ 0x0040, 0x0003 },
16987+			{ 0x3b4c, 0x0003 },
16988+			{ 0x5a64, 0x0003 },
16989+		},
16990+	},  {
16991+		600000000, {
16992+			{ 0x1a40, 0x0003 },
16993+			{ 0x3b4c, 0x0003 },
16994+			{ 0x5a64, 0x0003 },
16995+		},
16996+	},  {
16997+		~0UL, {
16998+			{ 0x0000, 0x0000 },
16999+			{ 0x0000, 0x0000 },
17000+			{ 0x0000, 0x0000 },
17001+		},
17002+	}
17003+};
17004+
17005+static const struct dw_hdmi_mpll_config rockchip_mpll_cfg_420[] = {
17006+	{
17007+		30666000, {
17008+			{ 0x00b7, 0x0000 },
17009+			{ 0x2157, 0x0000 },
17010+			{ 0x40f7, 0x0000 },
17011+		},
17012+	},  {
17013+		92000000, {
17014+			{ 0x00b7, 0x0000 },
17015+			{ 0x2143, 0x0001 },
17016+			{ 0x40a3, 0x0001 },
17017+		},
17018+	},  {
17019+		184000000, {
17020+			{ 0x0073, 0x0001 },
17021+			{ 0x2146, 0x0002 },
17022+			{ 0x4062, 0x0002 },
17023+		},
17024+	},  {
17025+		340000000, {
17026+			{ 0x0052, 0x0003 },
17027+			{ 0x214d, 0x0003 },
17028+			{ 0x4065, 0x0003 },
17029+		},
17030+	},  {
17031+		600000000, {
17032+			{ 0x0041, 0x0003 },
17033+			{ 0x3b4d, 0x0003 },
17034+			{ 0x5a65, 0x0003 },
17035+		},
17036+	},  {
17037+		~0UL, {
17038+			{ 0x0000, 0x0000 },
17039+			{ 0x0000, 0x0000 },
17040+			{ 0x0000, 0x0000 },
17041+		},
17042+	}
17043+};
17044+
17045+static const struct dw_hdmi_mpll_config rockchip_rk3288w_mpll_cfg_420[] = {
17046+	{
17047+		30666000, {
17048+			{ 0x00b7, 0x0000 },
17049+			{ 0x2157, 0x0000 },
17050+			{ 0x40f7, 0x0000 },
17051+		},
17052+	},  {
17053+		92000000, {
17054+			{ 0x00b7, 0x0000 },
17055+			{ 0x2143, 0x0001 },
17056+			{ 0x40a3, 0x0001 },
17057+		},
17058+	},  {
17059+		184000000, {
17060+			{ 0x0073, 0x0001 },
17061+			{ 0x2146, 0x0002 },
17062+			{ 0x4062, 0x0002 },
17063+		},
17064+	},  {
17065+		340000000, {
17066+			{ 0x0052, 0x0003 },
17067+			{ 0x214d, 0x0003 },
17068+			{ 0x4065, 0x0003 },
17069+		},
17070+	},  {
17071+		600000000, {
17072+			{ 0x0040, 0x0003 },
17073+			{ 0x3b4c, 0x0003 },
17074+			{ 0x5a65, 0x0003 },
17075+		},
17076+	},  {
17077+		~0UL, {
17078+			{ 0x0000, 0x0000 },
17079+			{ 0x0000, 0x0000 },
17080+			{ 0x0000, 0x0000 },
17081+		},
17082+	}
17083+};
17084+
17085+static const struct dw_hdmi_curr_ctrl rockchip_cur_ctr[] = {
17086+	/*      pixelclk    bpp8    bpp10   bpp12 */
17087+	{
17088+		600000000, { 0x0000, 0x0000, 0x0000 },
17089+	},  {
17090+		~0UL,      { 0x0000, 0x0000, 0x0000},
17091+	}
17092+};
17093+
17094+static struct dw_hdmi_phy_config rockchip_phy_config[] = {
17095+	/*pixelclk   symbol   term   vlev*/
17096+	{ 74250000,  0x8009, 0x0004, 0x0272},
17097+	{ 165000000, 0x802b, 0x0004, 0x0209},
17098+	{ 297000000, 0x8039, 0x0005, 0x028d},
17099+	{ 594000000, 0x8039, 0x0000, 0x019d},
17100+	{ ~0UL,	     0x0000, 0x0000, 0x0000},
17101+	{ ~0UL,      0x0000, 0x0000, 0x0000},
17102+};
17103+
17104+enum ROW_INDEX_BPP {
17105+	ROW_INDEX_6BPP = 0,
17106+	ROW_INDEX_8BPP,
17107+	ROW_INDEX_10BPP,
17108+	ROW_INDEX_12BPP,
17109+	ROW_INDEX_23BPP,
17110+	MAX_ROW_INDEX
17111+};
17112+
17113+enum COLUMN_INDEX_BPC {
17114+	COLUMN_INDEX_8BPC = 0,
17115+	COLUMN_INDEX_10BPC,
17116+	COLUMN_INDEX_12BPC,
17117+	COLUMN_INDEX_14BPC,
17118+	COLUMN_INDEX_16BPC,
17119+	MAX_COLUMN_INDEX
17120+};
17121+
17122+#define PPS_TABLE_LEN 8
17123+#define PPS_BPP_LEN 4
17124+#define PPS_BPC_LEN 2
17125+
17126+/* From the DSC v1.11 spec: rc_parameter_set syntax elements that are typically constant */
17127+static const u16 rc_buf_thresh[] = {
17128+	0x0e, 0x1c, 0x2a, 0x38, 0x46, 0x54, 0x62,
17129+	0x69, 0x70, 0x77, 0x79, 0x7b, 0x7d, 0x7e,
17130+};
17131+
17132+struct rc_parameters {
17133+	u16 initial_xmit_delay;
17134+	u16 initial_dec_delay;
17135+	u8 initial_scale_value;
17136+	u16 scale_increment_interval;
17137+	u16 scale_decrement_interval;
17138+	u8 first_line_bpg_offset;
17139+	u16 nfl_bpg_offset;
17140+	u16 slice_bpg_offset;
17141+	u16 initial_offset;
17142+	u16 final_offset;
17143+	u8 flatness_min_qp;
17144+	u8 flatness_max_qp;
17145+	u16 rc_model_size;
17146+	u8 rc_edge_factor;
17147+	u8 rc_quant_incr_limit0;
17148+	u8 rc_quant_incr_limit1;
17149+	u8 rc_tgt_offset_hi;
17150+	u8 rc_tgt_offset_lo;
17151+	struct drm_dsc_rc_range_parameters rc_range_params[DSC_NUM_BUF_RANGES];
17152+};
17153+
17154+struct pps_data {
17155+	u32 pic_width;
17156+	u32 pic_height;
17157+	u32 slice_width;
17158+	u32 slice_height;
17159+	bool convert_rgb;
17160+	u8 bpc;
17161+	u8 bpp;
17162+	u8 raw_pps[128];
17163+};
17164+
17165+/*
17166+ * Selected Rate Control Related Parameter Recommended Values
17167+ * from DSC_v1.11 spec & C Model release: DSC_model_20161212
17168+ */
17169+static struct pps_data pps_datas[PPS_TABLE_LEN] = {
17170+	{
17171+		/* 7680x4320/960X96 rgb 8bpc 12bpp */
17172+		7680, 4320, 960, 96, 1, 8, 192,
17173+		{
17174+			0x12, 0x00, 0x00, 0x8d, 0x30, 0xc0, 0x10, 0xe0,
17175+			0x1e, 0x00, 0x00, 0x60, 0x03, 0xc0, 0x05, 0xa0,
17176+			0x01, 0x55, 0x03, 0x90, 0x00, 0x0a, 0x05, 0xc9,
17177+			0x00, 0xa0, 0x00, 0x0f, 0x01, 0x44, 0x01, 0xaa,
17178+			0x08, 0x00, 0x10, 0xf4, 0x03, 0x0c, 0x20, 0x00,
17179+			0x06, 0x0b, 0x0b, 0x33, 0x0e, 0x1c, 0x2a, 0x38,
17180+			0x46, 0x54, 0x62, 0x69, 0x70, 0x77, 0x79, 0x7b,
17181+			0x7d, 0x7e, 0x00, 0x82, 0x00, 0xc0, 0x09, 0x00,
17182+			0x09, 0x7e, 0x19, 0xbc, 0x19, 0xba, 0x19, 0xf8,
17183+			0x1a, 0x38, 0x1a, 0x38, 0x1a, 0x76, 0x2a, 0x76,
17184+			0x2a, 0x76, 0x2a, 0x74, 0x3a, 0xb4, 0x52, 0xf4,
17185+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
17186+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
17187+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
17188+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
17189+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
17190+		},
17191+	},
17192+	{
17193+		/* 7680x4320/960X96 rgb 8bpc 11bpp */
17194+		7680, 4320, 960, 96, 1, 8, 176,
17195+		{
17196+			0x12, 0x00, 0x00, 0x8d, 0x30, 0xb0, 0x10, 0xe0,
17197+			0x1e, 0x00, 0x00, 0x60, 0x03, 0xc0, 0x05, 0x28,
17198+			0x01, 0x74, 0x03, 0x40, 0x00, 0x0f, 0x06, 0xe0,
17199+			0x00, 0x2d, 0x00, 0x0f, 0x01, 0x44, 0x01, 0x33,
17200+			0x0f, 0x00, 0x10, 0xf4, 0x03, 0x0c, 0x20, 0x00,
17201+			0x06, 0x0b, 0x0b, 0x33, 0x0e, 0x1c, 0x2a, 0x38,
17202+			0x46, 0x54, 0x62, 0x69, 0x70, 0x77, 0x79, 0x7b,
17203+			0x7d, 0x7e, 0x00, 0x82, 0x01, 0x00, 0x09, 0x40,
17204+			0x09, 0xbe, 0x19, 0xfc, 0x19, 0xfa, 0x19, 0xf8,
17205+			0x1a, 0x38, 0x1a, 0x38, 0x1a, 0x76, 0x2a, 0x76,
17206+			0x2a, 0x76, 0x2a, 0xb4, 0x3a, 0xb4, 0x52, 0xf4,
17207+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
17208+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
17209+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
17210+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
17211+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
17212+		},
17213+	},
17214+	{
17215+		/* 7680x4320/960X96 rgb 8bpc 10bpp */
17216+		7680, 4320, 960, 96, 1, 8, 160,
17217+		{
17218+			0x12, 0x00, 0x00, 0x8d, 0x30, 0xa0, 0x10, 0xe0,
17219+			0x1e, 0x00, 0x00, 0x60, 0x03, 0xc0, 0x04, 0xb0,
17220+			0x01, 0x9a, 0x02, 0xe0, 0x00, 0x19, 0x09, 0xb0,
17221+			0x00, 0x12, 0x00, 0x0f, 0x01, 0x44, 0x00, 0xbb,
17222+			0x16, 0x00, 0x10, 0xec, 0x03, 0x0c, 0x20, 0x00,
17223+			0x06, 0x0b, 0x0b, 0x33, 0x0e, 0x1c, 0x2a, 0x38,
17224+			0x46, 0x54, 0x62, 0x69, 0x70, 0x77, 0x79, 0x7b,
17225+			0x7d, 0x7e, 0x00, 0xc2, 0x01, 0x00, 0x09, 0x40,
17226+			0x09, 0xbe, 0x19, 0xfc, 0x19, 0xfa, 0x19, 0xf8,
17227+			0x1a, 0x38, 0x1a, 0x78, 0x1a, 0x76, 0x2a, 0xb6,
17228+			0x2a, 0xb6, 0x2a, 0xf4, 0x3a, 0xf4, 0x5b, 0x34,
17229+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
17230+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
17231+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
17232+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
17233+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
17234+		},
17235+	},
17236+	{
17237+		/* 7680x4320/960X96 rgb 8bpc 9bpp */
17238+		7680, 4320, 960, 96, 1, 8, 144,
17239+		{
17240+			0x12, 0x00, 0x00, 0x8d, 0x30, 0x90, 0x10, 0xe0,
17241+			0x1e, 0x00, 0x00, 0x60, 0x03, 0xc0, 0x04, 0x38,
17242+			0x01, 0xc7, 0x03, 0x16, 0x00, 0x1c, 0x08, 0xc7,
17243+			0x00, 0x10, 0x00, 0x0f, 0x01, 0x44, 0x00, 0xaa,
17244+			0x17, 0x00, 0x10, 0xf1, 0x03, 0x0c, 0x20, 0x00,
17245+			0x06, 0x0b, 0x0b, 0x33, 0x0e, 0x1c, 0x2a, 0x38,
17246+			0x46, 0x54, 0x62, 0x69, 0x70, 0x77, 0x79, 0x7b,
17247+			0x7d, 0x7e, 0x00, 0xc2, 0x01, 0x00, 0x09, 0x40,
17248+			0x09, 0xbe, 0x19, 0xfc, 0x19, 0xfa, 0x19, 0xf8,
17249+			0x1a, 0x38, 0x1a, 0x78, 0x1a, 0x76, 0x2a, 0xb6,
17250+			0x2a, 0xb6, 0x2a, 0xf4, 0x3a, 0xf4, 0x63, 0x74,
17251+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
17252+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
17253+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
17254+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
17255+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
17256+		},
17257+	},
17258+	{
17259+		/* 7680x4320/960X96 rgb 10bpc 12bpp */
17260+		7680, 4320, 960, 96, 1, 10, 192,
17261+		{
17262+			0x12, 0x00, 0x00, 0xad, 0x30, 0xc0, 0x10, 0xe0,
17263+			0x1e, 0x00, 0x00, 0x60, 0x03, 0xc0, 0x05, 0xa0,
17264+			0x01, 0x55, 0x03, 0x90, 0x00, 0x0a, 0x05, 0xc9,
17265+			0x00, 0xa0, 0x00, 0x0f, 0x01, 0x44, 0x01, 0xaa,
17266+			0x08, 0x00, 0x10, 0xf4, 0x07, 0x10, 0x20, 0x00,
17267+			0x06, 0x0f, 0x0f, 0x33, 0x0e, 0x1c, 0x2a, 0x38,
17268+			0x46, 0x54, 0x62, 0x69, 0x70, 0x77, 0x79, 0x7b,
17269+			0x7d, 0x7e, 0x01, 0x02, 0x11, 0x80, 0x22, 0x00,
17270+			0x22, 0x7e, 0x32, 0xbc, 0x32, 0xba, 0x3a, 0xf8,
17271+			0x3b, 0x38, 0x3b, 0x38, 0x3b, 0x76, 0x4b, 0x76,
17272+			0x4b, 0x76, 0x4b, 0x74, 0x5b, 0xb4, 0x73, 0xf4,
17273+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
17274+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
17275+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
17276+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
17277+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
17278+		},
17279+	},
17280+	{
17281+		/* 7680x4320/960X96 rgb 10bpc 11bpp */
17282+		7680, 4320, 960, 96, 1, 10, 176,
17283+		{
17284+			0x12, 0x00, 0x00, 0xad, 0x30, 0xb0, 0x10, 0xe0,
17285+			0x1e, 0x00, 0x00, 0x60, 0x03, 0xc0, 0x05, 0x28,
17286+			0x01, 0x74, 0x03, 0x40, 0x00, 0x0f, 0x06, 0xe0,
17287+			0x00, 0x2d, 0x00, 0x0f, 0x01, 0x44, 0x01, 0x33,
17288+			0x0f, 0x00, 0x10, 0xf4, 0x07, 0x10, 0x20, 0x00,
17289+			0x06, 0x0f, 0x0f, 0x33, 0x0e, 0x1c, 0x2a, 0x38,
17290+			0x46, 0x54, 0x62, 0x69, 0x70, 0x77, 0x79, 0x7b,
17291+			0x7d, 0x7e, 0x01, 0x42, 0x19, 0xc0, 0x2a, 0x40,
17292+			0x2a, 0xbe, 0x3a, 0xfc, 0x3a, 0xfa, 0x3a, 0xf8,
17293+			0x3b, 0x38, 0x3b, 0x38, 0x3b, 0x76, 0x4b, 0x76,
17294+			0x4b, 0x76, 0x4b, 0xb4, 0x5b, 0xb4, 0x73, 0xf4,
17295+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
17296+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
17297+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
17298+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
17299+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
17300+		},
17301+	},
17302+	{
17303+		/* 7680x4320/960X96 rgb 10bpc 10bpp */
17304+		7680, 4320, 960, 96, 1, 10, 160,
17305+		{
17306+			0x12, 0x00, 0x00, 0xad, 0x30, 0xa0, 0x10, 0xe0,
17307+			0x1e, 0x00, 0x00, 0x60, 0x03, 0xc0, 0x04, 0xb0,
17308+			0x01, 0x9a, 0x02, 0xe0, 0x00, 0x19, 0x09, 0xb0,
17309+			0x00, 0x12, 0x00, 0x0f, 0x01, 0x44, 0x00, 0xbb,
17310+			0x16, 0x00, 0x10, 0xec, 0x07, 0x10, 0x20, 0x00,
17311+			0x06, 0x0f, 0x0f, 0x33, 0x0e, 0x1c, 0x2a, 0x38,
17312+			0x46, 0x54, 0x62, 0x69, 0x70, 0x77, 0x79, 0x7b,
17313+			0x7d, 0x7e, 0x01, 0xc2, 0x22, 0x00, 0x2a, 0x40,
17314+			0x2a, 0xbe, 0x3a, 0xfc, 0x3a, 0xfa, 0x3a, 0xf8,
17315+			0x3b, 0x38, 0x3b, 0x78, 0x3b, 0x76, 0x4b, 0xb6,
17316+			0x4b, 0xb6, 0x4b, 0xf4, 0x63, 0xf4, 0x7c, 0x34,
17317+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
17318+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
17319+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
17320+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
17321+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
17322+		},
17323+	},
17324+	{
17325+		/* 7680x4320/960X96 rgb 10bpc 9bpp */
17326+		7680, 4320, 960, 96, 1, 10, 144,
17327+		{
17328+			0x12, 0x00, 0x00, 0xad, 0x30, 0x90, 0x10, 0xe0,
17329+			0x1e, 0x00, 0x00, 0x60, 0x03, 0xc0, 0x04, 0x38,
17330+			0x01, 0xc7, 0x03, 0x16, 0x00, 0x1c, 0x08, 0xc7,
17331+			0x00, 0x10, 0x00, 0x0f, 0x01, 0x44, 0x00, 0xaa,
17332+			0x17, 0x00, 0x10, 0xf1, 0x07, 0x10, 0x20, 0x00,
17333+			0x06, 0x0f, 0x0f, 0x33, 0x0e, 0x1c, 0x2a, 0x38,
17334+			0x46, 0x54, 0x62, 0x69, 0x70, 0x77, 0x79, 0x7b,
17335+			0x7d, 0x7e, 0x01, 0xc2, 0x22, 0x00, 0x2a, 0x40,
17336+			0x2a, 0xbe, 0x3a, 0xfc, 0x3a, 0xfa, 0x3a, 0xf8,
17337+			0x3b, 0x38, 0x3b, 0x78, 0x3b, 0x76, 0x4b, 0xb6,
17338+			0x4b, 0xb6, 0x4b, 0xf4, 0x63, 0xf4, 0x84, 0x74,
17339+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
17340+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
17341+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
17342+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
17343+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
17344+		},
17345+	},
17346+};
17347+
17348+static bool hdmi_bus_fmt_is_rgb(unsigned int bus_format)
17349+{
17350+	switch (bus_format) {
17351+	case MEDIA_BUS_FMT_RGB888_1X24:
17352+	case MEDIA_BUS_FMT_RGB101010_1X30:
17353+	case MEDIA_BUS_FMT_RGB121212_1X36:
17354+	case MEDIA_BUS_FMT_RGB161616_1X48:
17355+		return true;
17356+
17357+	default:
17358+		return false;
17359+	}
17360+}
17361+
17362+static bool hdmi_bus_fmt_is_yuv444(unsigned int bus_format)
17363+{
17364+	switch (bus_format) {
17365+	case MEDIA_BUS_FMT_YUV8_1X24:
17366+	case MEDIA_BUS_FMT_YUV10_1X30:
17367+	case MEDIA_BUS_FMT_YUV12_1X36:
17368+	case MEDIA_BUS_FMT_YUV16_1X48:
17369+		return true;
17370+
17371+	default:
17372+		return false;
17373+	}
17374+}
17375+
17376+static bool hdmi_bus_fmt_is_yuv422(unsigned int bus_format)
17377+{
17378+	switch (bus_format) {
17379+	case MEDIA_BUS_FMT_UYVY8_1X16:
17380+	case MEDIA_BUS_FMT_UYVY10_1X20:
17381+	case MEDIA_BUS_FMT_UYVY12_1X24:
17382+		return true;
17383+
17384+	default:
17385+		return false;
17386+	}
17387+}
17388+
17389+static bool hdmi_bus_fmt_is_yuv420(unsigned int bus_format)
17390+{
17391+	switch (bus_format) {
17392+	case MEDIA_BUS_FMT_UYYVYY8_0_5X24:
17393+	case MEDIA_BUS_FMT_UYYVYY10_0_5X30:
17394+	case MEDIA_BUS_FMT_UYYVYY12_0_5X36:
17395+	case MEDIA_BUS_FMT_UYYVYY16_0_5X48:
17396+		return true;
17397+
17398+	default:
17399+		return false;
17400+	}
17401+}
17402+
17403+static int hdmi_bus_fmt_color_depth(unsigned int bus_format)
17404+{
17405+	switch (bus_format) {
17406+	case MEDIA_BUS_FMT_RGB888_1X24:
17407+	case MEDIA_BUS_FMT_YUV8_1X24:
17408+	case MEDIA_BUS_FMT_UYVY8_1X16:
17409+	case MEDIA_BUS_FMT_UYYVYY8_0_5X24:
17410+		return 8;
17411+
17412+	case MEDIA_BUS_FMT_RGB101010_1X30:
17413+	case MEDIA_BUS_FMT_YUV10_1X30:
17414+	case MEDIA_BUS_FMT_UYVY10_1X20:
17415+	case MEDIA_BUS_FMT_UYYVYY10_0_5X30:
17416+		return 10;
17417+
17418+	case MEDIA_BUS_FMT_RGB121212_1X36:
17419+	case MEDIA_BUS_FMT_YUV12_1X36:
17420+	case MEDIA_BUS_FMT_UYVY12_1X24:
17421+	case MEDIA_BUS_FMT_UYYVYY12_0_5X36:
17422+		return 12;
17423+
17424+	case MEDIA_BUS_FMT_RGB161616_1X48:
17425+	case MEDIA_BUS_FMT_YUV16_1X48:
17426+	case MEDIA_BUS_FMT_UYYVYY16_0_5X48:
17427+		return 16;
17428+
17429+	default:
17430+		return 0;
17431+	}
17432+}
17433+
17434+static unsigned int
17435+hdmi_get_tmdsclock(struct rockchip_hdmi *hdmi, unsigned long pixelclock)
17436+{
17437+	unsigned int tmdsclock = pixelclock;
17438+	unsigned int depth =
17439+		hdmi_bus_fmt_color_depth(hdmi->output_bus_format);
17440+
17441+	if (!hdmi_bus_fmt_is_yuv422(hdmi->output_bus_format)) {
17442+		switch (depth) {
17443+		case 16:
17444+			tmdsclock = pixelclock * 2;
17445+			break;
17446+		case 12:
17447+			tmdsclock = pixelclock * 3 / 2;
17448+			break;
17449+		case 10:
17450+			tmdsclock = pixelclock * 5 / 4;
17451+			break;
17452+		default:
17453+			break;
17454+		}
17455+	}
17456+
17457+	return tmdsclock;
17458+}
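For reference, a minimal standalone sketch of the same deep-colour scaling rule (the helper name tmds_clock_for_depth and the main() harness below are illustrative, not part of the driver): except for YUV422 output, the TMDS clock scales with the colour depth relative to 8 bpc.

#include <stdio.h>

/* Sketch of the rule used by hdmi_get_tmdsclock(): TMDS clock equals the
 * pixel clock scaled by depth/8, except for YUV422 output where the link
 * rate stays equal to the pixel clock regardless of depth.
 */
static unsigned long tmds_clock_for_depth(unsigned long pixelclock,
					  unsigned int depth, int is_yuv422)
{
	if (is_yuv422)
		return pixelclock;

	switch (depth) {
	case 16:
		return pixelclock * 2;
	case 12:
		return pixelclock * 3 / 2;
	case 10:
		return pixelclock * 5 / 4;
	default:
		return pixelclock; /* 8 bpc */
	}
}

int main(void)
{
	/* A 594 MHz pixel clock at 10 bpc RGB needs a 742.5 MHz TMDS clock. */
	printf("%lu\n", tmds_clock_for_depth(594000000UL, 10, 0));
	return 0;
}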
17459+
17460+static void hdmi_select_link_config(struct rockchip_hdmi *hdmi,
17461+				    struct drm_crtc_state *crtc_state,
17462+				    unsigned int tmdsclk)
17463+{
17464+	struct drm_display_mode *mode = &crtc_state->mode;
17465+	int max_lanes, max_rate_per_lane;
17466+	int max_dsc_lanes, max_dsc_rate_per_lane;
17467+	unsigned long max_frl_rate;
17468+
17469+	max_lanes = hdmi->max_lanes;
17470+	max_rate_per_lane = hdmi->max_frl_rate_per_lane;
17471+	max_frl_rate = max_lanes * max_rate_per_lane * 1000000;
17472+
17473+	hdmi->link_cfg.dsc_mode = false;
17474+	hdmi->link_cfg.frl_lanes = max_lanes;
17475+	hdmi->link_cfg.rate_per_lane = max_rate_per_lane;
17476+
17477+	if (!max_frl_rate || tmdsclk < HDMI20_MAX_RATE) {
17478+		dev_info(hdmi->dev, "use tmds mode\n");
17479+		hdmi->link_cfg.frl_mode = false;
17480+		return;
17481+	}
17482+
17483+	hdmi->link_cfg.frl_mode = true;
17484+
17485+	if (!hdmi->dsc_cap.v_1p2)
17486+		return;
17487+
17488+	max_dsc_lanes = hdmi->dsc_cap.max_lanes;
17489+	max_dsc_rate_per_lane =
17490+		hdmi->dsc_cap.max_frl_rate_per_lane;
17491+
17492+	if (mode->clock >= HDMI_8K60_RATE &&
17493+	    !hdmi_bus_fmt_is_yuv420(hdmi->bus_format) &&
17494+	    !hdmi_bus_fmt_is_yuv422(hdmi->bus_format)) {
17495+		hdmi->link_cfg.dsc_mode = true;
17496+		hdmi->link_cfg.frl_lanes = max_dsc_lanes;
17497+		hdmi->link_cfg.rate_per_lane = max_dsc_rate_per_lane;
17498+	} else {
17499+		hdmi->link_cfg.dsc_mode = false;
17500+		hdmi->link_cfg.frl_lanes = max_lanes;
17501+		hdmi->link_cfg.rate_per_lane = max_rate_per_lane;
17502+	}
17503+}
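A compilable sketch of the branch structure above, with the driver's HDMI20_MAX_RATE and HDMI_8K60_RATE constants inlined; the helper and struct names are illustrative and the sink capabilities are assumed values, not read from a real EDID.

#include <stdbool.h>

struct link_choice {
	bool frl_mode;
	bool dsc_mode;
};

/* Mirrors the decision in hdmi_select_link_config(): no FRL capability or
 * a TMDS clock below 600000 kHz keeps legacy TMDS; otherwise FRL is used,
 * and DSC is enabled only for 8K60-class modes on a DSC 1.2 capable sink
 * (the driver additionally skips DSC for YUV420/YUV422 bus formats).
 */
static struct link_choice choose_link(unsigned long max_frl_rate,
				      unsigned int tmdsclk_khz,
				      int mode_clock_khz, bool sink_dsc_1p2)
{
	struct link_choice c = { .frl_mode = false, .dsc_mode = false };

	if (!max_frl_rate || tmdsclk_khz < 600000)	/* HDMI20_MAX_RATE */
		return c;

	c.frl_mode = true;
	if (sink_dsc_1p2 && mode_clock_khz >= 2376000)	/* HDMI_8K60_RATE */
		c.dsc_mode = true;

	return c;
}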
17504+
17507+static int hdmi_dsc_get_slice_height(int vactive)
17508+{
17509+	int slice_height;
17510+
17511+	/*
17512+	 * Slice Height determination : HDMI2.1 Section 7.7.5.2
17513+	 * Select the smallest slice height >= 96 that results in a valid PPS
17514+	 * and requires the minimum number of padding lines for the final slice.
17515+	 *
17516+	 * Assumption : Vactive is even.
17517+	 */
17518+	for (slice_height = 96; slice_height <= vactive; slice_height += 2)
17519+		if (vactive % slice_height == 0)
17520+			return slice_height;
17521+
17522+	return 0;
17523+}
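A self-contained restatement of the search above with two worked values (the function name and the main() harness are illustrative):

#include <assert.h>

/* Pick the smallest even slice height >= 96 that divides vactive exactly,
 * as hdmi_dsc_get_slice_height() does.
 */
static int dsc_slice_height(int vactive)
{
	int h;

	for (h = 96; h <= vactive; h += 2)
		if (vactive % h == 0)
			return h;
	return 0;
}

int main(void)
{
	assert(dsc_slice_height(4320) == 96);	/* 4320 % 96 == 0 */
	assert(dsc_slice_height(2160) == 108);	/* 96..106 do not divide 2160 */
	return 0;
}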
17524+
17525+static int hdmi_dsc_get_num_slices(struct rockchip_hdmi *hdmi,
17526+				   struct drm_crtc_state *crtc_state,
17527+				   int src_max_slices, int src_max_slice_width,
17528+				   int hdmi_max_slices, int hdmi_throughput)
17529+{
17530+/* Pixel rates in KPixels/sec */
17531+#define HDMI_DSC_PEAK_PIXEL_RATE		2720000
17532+/*
17533+ * Rates at which the source and sink are required to process pixels in each
17534+ * slice can be at one of two levels: at least 340000KHz or at least 400000KHz.
17535+ */
17536+#define HDMI_DSC_MAX_ENC_THROUGHPUT_0		340000
17537+#define HDMI_DSC_MAX_ENC_THROUGHPUT_1		400000
17538+
17539+/* Spec limits the slice width to 2720 pixels */
17540+#define MAX_HDMI_SLICE_WIDTH			2720
17541+	int kslice_adjust;
17542+	int adjusted_clk_khz;
17543+	int min_slices;
17544+	int target_slices;
17545+	int max_throughput; /* max clock freq. in khz per slice */
17546+	int max_slice_width;
17547+	int slice_width;
17548+	int pixel_clock = crtc_state->mode.clock;
17549+
17550+	if (!hdmi_throughput)
17551+		return 0;
17552+
17553+	/*
17554+	 * Slice Width determination : HDMI2.1 Section 7.7.5.1
17555+	 * The kslice_adjust factor for 4:2:0 and 4:2:2 formats is 0.5, whereas
17556+	 * for 4:4:4 it is 1.0. The factors are multiplied by 10 here and the
17557+	 * adjusted clock value is divided by 10 afterwards.
17558+	 */
17559+	if (hdmi_bus_fmt_is_yuv444(hdmi->output_bus_format) ||
17560+	    hdmi_bus_fmt_is_rgb(hdmi->output_bus_format))
17561+		kslice_adjust = 10;
17562+	else
17563+		kslice_adjust = 5;
17564+
17565+	/*
17566+	 * As per the spec, the rate at which the source and the sink process
17567+	 * the pixels in each slice is at one of two levels: up to 340MHz or
17568+	 * up to 400MHz. This depends on the pixel clock rate and the output
17569+	 * format (kslice adjust).
17570+	 * If pixel clock * kslice adjust <= 2720MHz, slices can be processed
17571+	 * at up to 340MHz, otherwise at up to 400MHz.
17572+	 */
17573+
17574+	adjusted_clk_khz = DIV_ROUND_UP(kslice_adjust * pixel_clock, 10);
17575+
17576+	if (adjusted_clk_khz <= HDMI_DSC_PEAK_PIXEL_RATE)
17577+		max_throughput = HDMI_DSC_MAX_ENC_THROUGHPUT_0;
17578+	else
17579+		max_throughput = HDMI_DSC_MAX_ENC_THROUGHPUT_1;
17580+
17581+	/*
17582+	 * Taking into account the sink's capability for maximum
17583+	 * clock per slice (in MHz) as read from HF-VSDB.
17584+	 */
17585+	max_throughput = min(max_throughput, hdmi_throughput * 1000);
17586+
17587+	min_slices = DIV_ROUND_UP(adjusted_clk_khz, max_throughput);
17588+	max_slice_width = min(MAX_HDMI_SLICE_WIDTH, src_max_slice_width);
17589+
17590+	/*
17591+	 * Keep on increasing the num of slices/line, starting from min_slices
17592+	 * per line till we get such a number, for which the slice_width is
17593+	 * just less than max_slice_width. The slices/line selected should be
17594+	 * less than or equal to the max horizontal slices that the combination
17595+	 * of PCON encoder and HDMI decoder can support.
17596+	 */
17597+	do {
17598+		if (min_slices <= 1 && src_max_slices >= 1 && hdmi_max_slices >= 1)
17599+			target_slices = 1;
17600+		else if (min_slices <= 2 && src_max_slices >= 2 && hdmi_max_slices >= 2)
17601+			target_slices = 2;
17602+		else if (min_slices <= 4 && src_max_slices >= 4 && hdmi_max_slices >= 4)
17603+			target_slices = 4;
17604+		else if (min_slices <= 8 && src_max_slices >= 8 && hdmi_max_slices >= 8)
17605+			target_slices = 8;
17606+		else if (min_slices <= 12 && src_max_slices >= 12 && hdmi_max_slices >= 12)
17607+			target_slices = 12;
17608+		else if (min_slices <= 16 && src_max_slices >= 16 && hdmi_max_slices >= 16)
17609+			target_slices = 16;
17610+		else
17611+			return 0;
17612+
17613+		slice_width = DIV_ROUND_UP(crtc_state->mode.hdisplay, target_slices);
17614+		if (slice_width > max_slice_width)
17615+			min_slices = target_slices + 1;
17616+	} while (slice_width > max_slice_width);
17617+
17618+	return target_slices;
17619+}
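To make the slice-count selection concrete, here is one worked pass through the code above, using assumed sink capabilities and the rk_max_slices/rk_max_slice_width limits passed in by hdmi_dsc_slices() below:

/*
 * Worked example (8K60 RGB, hdisplay = 7680, pixel clock = 2376000 kHz,
 * rk_max_slices = 8, rk_max_slice_width = 2048, assumed sink: >= 8 slices
 * and 340 MHz per slice):
 *   kslice_adjust    = 10 (RGB/4:4:4)
 *   adjusted_clk_khz = 2376000, which is <= HDMI_DSC_PEAK_PIXEL_RATE
 *   max_throughput   = 340000 kHz
 *   min_slices       = DIV_ROUND_UP(2376000, 340000) = 7 -> next step is 8
 *   slice_width      = DIV_ROUND_UP(7680, 8) = 960 <= max_slice_width (2048)
 * so 8 slices of 960 pixels per line are chosen, matching the 960x96 PPS
 * tables above.
 */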
17620+
17621+static int hdmi_dsc_slices(struct rockchip_hdmi *hdmi,
17622+			   struct drm_crtc_state *crtc_state)
17623+{
17624+	int hdmi_throughput = hdmi->dsc_cap.clk_per_slice;
17625+	int hdmi_max_slices = hdmi->dsc_cap.max_slices;
17626+	int rk_max_slices = 8;
17627+	int rk_max_slice_width = 2048;
17628+
17629+	return hdmi_dsc_get_num_slices(hdmi, crtc_state, rk_max_slices,
17630+				       rk_max_slice_width,
17631+				       hdmi_max_slices, hdmi_throughput);
17632+}
17633+
17634+static int
17635+hdmi_dsc_get_bpp(struct rockchip_hdmi *hdmi, int src_fractional_bpp,
17636+		 int slice_width, int num_slices, bool hdmi_all_bpp,
17637+		 int hdmi_max_chunk_bytes)
17638+{
17639+	int max_dsc_bpp, min_dsc_bpp;
17640+	int target_bytes;
17641+	bool bpp_found = false;
17642+	int bpp_decrement_x16;
17643+	int bpp_target;
17644+	int bpp_target_x16;
17645+
17646+	/*
17647+	 * Get the min and max bpp as per Table 7.23 in the HDMI2.1 spec.
17648+	 * Start with the max bpp and keep decrementing by the fractional
17649+	 * bpp step, if supported by the PCON DSC encoder.
17650+	 *
17651+	 * For each bpp, check whether the number of bytes can be supported by the HDMI sink.
17652+	 */
17653+
17654+	/* only 9, 10 and 12 bpp have been tested */
17655+	min_dsc_bpp = 9;
17656+	max_dsc_bpp = 12;
17657+
17658+	/*
17659+	 * Taking into account if all dsc_all_bpp supported by HDMI2.1 sink
17660+	 * Section 7.7.34 : Source shall not enable compressed Video
17661+	 * Transport with bpp_target settings above 12 bpp unless
17662+	 * DSC_all_bpp is set to 1.
17663+	 */
17664+	if (!hdmi_all_bpp)
17665+		max_dsc_bpp = min(max_dsc_bpp, 12);
17666+
17667+	/*
17668+	 * The Sink has a limit of compressed data in bytes for a scanline,
17669+	 * as described in max_chunk_bytes field in HFVSDB block of edid.
17670+	 * The no. of bytes depend on the target bits per pixel that the
17671+	 * source configures. So we start with the max_bpp and calculate
17672+	 * the target_chunk_bytes. We keep on decrementing the target_bpp,
17673+	 * till we get the target_chunk_bytes just less than what the sink's
17674+	 * max_chunk_bytes, or else till we reach the min_dsc_bpp.
17675+	 *
17676+	 * The decrement is according to the fractional support from PCON DSC
17677+	 * encoder. For fractional BPP we use bpp_target as a multiple of 16.
17678+	 *
17679+	 * bpp_target_x16 = bpp_target * 16
17680+	 * So we need to decrement by {1, 2, 4, 8, 16} for fractional bpps
17681+	 * {1/16, 1/8, 1/4, 1/2, 1} respectively.
17682+	 */
17683+
17684+	bpp_target = max_dsc_bpp;
17685+
17686+	/* src does not support fractional bpp implies decrement by 16 for bppx16 */
17687+	if (!src_fractional_bpp)
17688+		src_fractional_bpp = 1;
17689+	bpp_decrement_x16 = DIV_ROUND_UP(16, src_fractional_bpp);
17690+	bpp_target_x16 = bpp_target * 16;
17691+
17692+	while (bpp_target_x16 > (min_dsc_bpp * 16)) {
17693+		int bpp;
17694+
17695+		bpp = DIV_ROUND_UP(bpp_target_x16, 16);
17696+		target_bytes = DIV_ROUND_UP((num_slices * slice_width * bpp), 8);
17697+		if (target_bytes <= hdmi_max_chunk_bytes) {
17698+			bpp_found = true;
17699+			break;
17700+		}
17701+		bpp_target_x16 -= bpp_decrement_x16;
17702+	}
17703+	if (bpp_found)
17704+		return bpp_target_x16;
17705+
17706+	return 0;
17707+}
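And one worked pass through the bpp search above, continuing the 8-slice/960-pixel example; the sink's max_chunk_bytes value is assumed for illustration only:

/*
 * Worked example (num_slices = 8, slice_width = 960, no fractional bpp
 * support, so bpp_decrement_x16 = 16; assume hdmi_max_chunk_bytes = 11000):
 *   bpp_target_x16 = 192 (12 bpp): target_bytes = 8 * 960 * 12 / 8 = 11520,
 *                    too large, so decrement by 16
 *   bpp_target_x16 = 176 (11 bpp): target_bytes = 8 * 960 * 11 / 8 = 10560,
 *                    fits, so 176 (11 bpp in units of 1/16 bpp) is returned
 */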
17708+
17709+static int
17710+dw_hdmi_dsc_bpp(struct rockchip_hdmi *hdmi,
17711+		int num_slices, int slice_width)
17712+{
17713+	bool hdmi_all_bpp = hdmi->dsc_cap.all_bpp;
17714+	int fractional_bpp = 0;
17715+	int hdmi_max_chunk_bytes = hdmi->dsc_cap.total_chunk_kbytes * 1024;
17716+
17717+	return hdmi_dsc_get_bpp(hdmi, fractional_bpp, slice_width,
17718+				num_slices, hdmi_all_bpp,
17719+				hdmi_max_chunk_bytes);
17720+}
17721+
17722+static int dw_hdmi_qp_set_link_cfg(struct rockchip_hdmi *hdmi,
17723+				   u16 pic_width, u16 pic_height,
17724+				   u16 slice_width, u16 slice_height,
17725+				   u16 bits_per_pixel, u8 bits_per_component)
17726+{
17727+	int i;
17728+
17729+	for (i = 0; i < PPS_TABLE_LEN; i++)
17730+		if (pic_width == pps_datas[i].pic_width &&
17731+		    pic_height == pps_datas[i].pic_height &&
17732+		    slice_width == pps_datas[i].slice_width &&
17733+		    slice_height == pps_datas[i].slice_height &&
17734+		    bits_per_component == pps_datas[i].bpc &&
17735+		    bits_per_pixel == pps_datas[i].bpp &&
17736+		    hdmi_bus_fmt_is_rgb(hdmi->output_bus_format) == pps_datas[i].convert_rgb)
17737+			break;
17738+
17739+	if (i == PPS_TABLE_LEN) {
17740+		dev_err(hdmi->dev, "can't find pps cfg!\n");
17741+		return -EINVAL;
17742+	}
17743+
17744+	memcpy(hdmi->link_cfg.pps_payload, pps_datas[i].raw_pps, 128);
17745+	hdmi->link_cfg.hcactive = DIV_ROUND_UP(slice_width * (bits_per_pixel / 16), 8) *
17746+		(pic_width / slice_width);
17747+
17748+	return 0;
17749+}
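The hcactive value computed above is the compressed horizontal active size in bytes per line; for the first PPS table entry it works out as follows (this only traces the arithmetic already in the code, no new data):

/*
 * Example with the 7680x4320, 960x96, 8 bpc, 12 bpp (bits_per_pixel = 192)
 * entry from pps_datas[]:
 *   bytes per slice chunk = DIV_ROUND_UP(960 * (192 / 16), 8) = 1440
 *   slices per line       = 7680 / 960 = 8
 *   hcactive              = 1440 * 8 = 11520 bytes of compressed data per line
 */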
17750+
17751+static void dw_hdmi_qp_dsc_configure(struct rockchip_hdmi *hdmi,
17752+				     struct rockchip_crtc_state *s,
17753+				     struct drm_crtc_state *crtc_state)
17754+{
17755+	int ret;
17756+	int slice_height;
17757+	int slice_width;
17758+	int bits_per_pixel;
17759+	int slice_count;
17760+	bool hdmi_is_dsc_1_2;
17761+	unsigned int depth = hdmi_bus_fmt_color_depth(hdmi->output_bus_format);
17762+
17763+	if (!crtc_state)
17764+		return;
17765+
17766+	hdmi_is_dsc_1_2 = hdmi->dsc_cap.v_1p2;
17767+
17768+	if (!hdmi_is_dsc_1_2)
17769+		return;
17770+
17771+	slice_height = hdmi_dsc_get_slice_height(crtc_state->mode.vdisplay);
17772+	if (!slice_height)
17773+		return;
17774+
17775+	slice_count = hdmi_dsc_slices(hdmi, crtc_state);
17776+	if (!slice_count)
17777+		return;
17778+
17779+	slice_width = DIV_ROUND_UP(crtc_state->mode.hdisplay, slice_count);
17780+
17781+	bits_per_pixel = dw_hdmi_dsc_bpp(hdmi, slice_count, slice_width);
17782+	if (!bits_per_pixel)
17783+		return;
17784+
17785+	ret = dw_hdmi_qp_set_link_cfg(hdmi, crtc_state->mode.hdisplay,
17786+				      crtc_state->mode.vdisplay, slice_width,
17787+				      slice_height, bits_per_pixel, depth);
17788+
17789+	if (ret) {
17790+		dev_err(hdmi->dev, "set vdsc cfg failed\n");
17791+		return;
17792+	}
17793+	dev_info(hdmi->dev, "dsc_enable\n");
17794+	s->dsc_enable = 1;
17795+	s->dsc_sink_cap.version_major = 1;
17796+	s->dsc_sink_cap.version_minor = 2;
17797+	s->dsc_sink_cap.slice_width = slice_width;
17798+	s->dsc_sink_cap.slice_height = slice_height;
17799+	s->dsc_sink_cap.target_bits_per_pixel_x16 = bits_per_pixel;
17800+	s->dsc_sink_cap.block_pred = 1;
17801+	s->dsc_sink_cap.native_420 = 0;
17802+
17803+	memcpy(&s->pps, hdmi->link_cfg.pps_payload, 128);
17804+}
17806+
17807+static int rockchip_hdmi_update_phy_table(struct rockchip_hdmi *hdmi,
17808+					  u32 *config,
17809+					  int phy_table_size)
17810+{
17811+	int i;
17812+
17813+	if (phy_table_size > ARRAY_SIZE(rockchip_phy_config)) {
17814+		dev_err(hdmi->dev, "phy table array number is out of range\n");
17815+		return -E2BIG;
17816+	}
17817+
17818+	for (i = 0; i < phy_table_size; i++) {
17819+		if (config[i * 4] != 0)
17820+			rockchip_phy_config[i].mpixelclock = (u64)config[i * 4];
17821+		else
17822+			rockchip_phy_config[i].mpixelclock = ~0UL;
17823+		rockchip_phy_config[i].sym_ctr = (u16)config[i * 4 + 1];
17824+		rockchip_phy_config[i].term = (u16)config[i * 4 + 2];
17825+		rockchip_phy_config[i].vlev_ctr = (u16)config[i * 4 + 3];
17826+	}
17827+
17828+	return 0;
17829+}
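The parser above assumes "rockchip,phy-table" is a flat array of u32 quadruplets <pixelclock sym_ctr term vlev_ctr>; a hypothetical three-entry table would land in memory as sketched below (values are the built-in defaults, shown for illustration only), where a pixel clock of 0 becomes the ~0UL catch-all entry:

/* Equivalent in-memory layout of a three-entry "rockchip,phy-table"
 * property (illustrative values only, kernel-style u32):
 */
static const u32 example_phy_table[] = {
	 74250000, 0x8009, 0x0004, 0x0272,
	165000000, 0x802b, 0x0004, 0x0209,
	        0, 0x8039, 0x0000, 0x019d,	/* 0 -> matches any pixel clock */
};
/* byte length = 3 * 16, so phy_table_size = 3 */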
17830+
17831+static void repo_hpd_event(struct work_struct *p_work)
17832+{
17833+	struct rockchip_hdmi *hdmi = container_of(p_work, struct rockchip_hdmi, work.work);
17834+	bool change;
17835+
17836+	change = drm_helper_hpd_irq_event(hdmi->encoder.dev);
17837+	if (change) {
17838+		dev_dbg(hdmi->dev, "hpd stat changed:%d\n", hdmi->hpd_stat);
17839+		dw_hdmi_qp_cec_set_hpd(hdmi->hdmi_qp, hdmi->hpd_stat, change);
17840+	}
17841+}
17842+
17843+static irqreturn_t rockchip_hdmi_hardirq(int irq, void *dev_id)
17844+{
17845+	struct rockchip_hdmi *hdmi = dev_id;
17846+	u32 intr_stat, val;
17847+
17848+	regmap_read(hdmi->regmap, RK3588_GRF_SOC_STATUS1, &intr_stat);
17849+
17850+	if (intr_stat) {
17851+		dev_dbg(hdmi->dev, "hpd irq %#x\n", intr_stat);
17852+
17853+		if (!hdmi->id)
17854+			val = HIWORD_UPDATE(RK3588_HDMI0_HPD_INT_MSK,
17855+					    RK3588_HDMI0_HPD_INT_MSK);
17856+		else
17857+			val = HIWORD_UPDATE(RK3588_HDMI1_HPD_INT_MSK,
17858+					    RK3588_HDMI1_HPD_INT_MSK);
17859+		regmap_write(hdmi->regmap, RK3588_GRF_SOC_CON2, val);
17860+		return IRQ_WAKE_THREAD;
17861+	}
17862+
17863+	return IRQ_NONE;
17864+}
17865+
17866+static irqreturn_t rockchip_hdmi_irq(int irq, void *dev_id)
17867+{
17868+	struct rockchip_hdmi *hdmi = dev_id;
17869+	u32 intr_stat, val;
17870+	int msecs;
17871+	bool stat;
17872+
17873+	regmap_read(hdmi->regmap, RK3588_GRF_SOC_STATUS1, &intr_stat);
17874+
17875+	if (!intr_stat)
17876+		return IRQ_NONE;
17877+
17878+	if (!hdmi->id) {
17879+		val = HIWORD_UPDATE(RK3588_HDMI0_HPD_INT_CLR,
17880+				    RK3588_HDMI0_HPD_INT_CLR);
17881+		if (intr_stat & RK3588_HDMI0_LEVEL_INT)
17882+			stat = true;
17883+		else
17884+			stat = false;
17885+	} else {
17886+		val = HIWORD_UPDATE(RK3588_HDMI1_HPD_INT_CLR,
17887+				    RK3588_HDMI1_HPD_INT_CLR);
17888+		if (intr_stat & RK3588_HDMI1_LEVEL_INT)
17889+			stat = true;
17890+		else
17891+			stat = false;
17892+	}
17893+
17894+	regmap_write(hdmi->regmap, RK3588_GRF_SOC_CON2, val);
17895+
17896+	if (stat) {
17897+		hdmi->hpd_stat = true;
17898+		msecs = 150;
17899+	} else {
17900+		hdmi->hpd_stat = false;
17901+		msecs = 20;
17902+	}
17903+	mod_delayed_work(hdmi->workqueue, &hdmi->work, msecs_to_jiffies(msecs));
17904+
17905+	if (!hdmi->id) {
17906+		val = HIWORD_UPDATE(RK3588_HDMI0_HPD_INT_CLR,
17907+				    RK3588_HDMI0_HPD_INT_CLR) |
17908+		      HIWORD_UPDATE(0, RK3588_HDMI0_HPD_INT_MSK);
17909+	} else {
17910+		val = HIWORD_UPDATE(RK3588_HDMI1_HPD_INT_CLR,
17911+				    RK3588_HDMI1_HPD_INT_CLR) |
17912+		      HIWORD_UPDATE(0, RK3588_HDMI1_HPD_INT_MSK);
17913+	}
17914+
17915+	regmap_write(hdmi->regmap, RK3588_GRF_SOC_CON2, val);
17916+
17917+	return IRQ_HANDLED;
17918+}
17919+
17920+static void init_hpd_work(struct rockchip_hdmi *hdmi)
17921+{
17922+	hdmi->workqueue = create_workqueue("hpd_queue");
17923+	INIT_DELAYED_WORK(&hdmi->work, repo_hpd_event);
17924+}
17925+
17926+static int rockchip_hdmi_parse_dt(struct rockchip_hdmi *hdmi)
17927+{
17928+	int ret, val, phy_table_size;
17929+	u32 *phy_config;
17930+	struct device_node *np = hdmi->dev->of_node;
17931+
17932+	hdmi->regmap = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
17933+	if (IS_ERR(hdmi->regmap)) {
17934+		DRM_DEV_ERROR(hdmi->dev, "Unable to get rockchip,grf\n");
17935+		return PTR_ERR(hdmi->regmap);
17936+	}
17937+
17938+	if (hdmi->is_hdmi_qp) {
17939+		hdmi->vo1_regmap = syscon_regmap_lookup_by_phandle(np, "rockchip,vo1_grf");
17940+		if (IS_ERR(hdmi->vo1_regmap)) {
17941+			DRM_DEV_ERROR(hdmi->dev, "Unable to get rockchip,vo1_grf\n");
17942+			return PTR_ERR(hdmi->vo1_regmap);
17943+		}
17944+	}
17945+
17946+	hdmi->phyref_clk = devm_clk_get(hdmi->dev, "vpll");
17947+	if (PTR_ERR(hdmi->phyref_clk) == -ENOENT)
17948+		hdmi->phyref_clk = devm_clk_get(hdmi->dev, "ref");
17949+
17950+	if (PTR_ERR(hdmi->phyref_clk) == -ENOENT) {
17951+		hdmi->phyref_clk = NULL;
17952+	} else if (PTR_ERR(hdmi->phyref_clk) == -EPROBE_DEFER) {
17953+		return -EPROBE_DEFER;
17954+	} else if (IS_ERR(hdmi->phyref_clk)) {
17955+		DRM_DEV_ERROR(hdmi->dev, "failed to get phy reference clock\n");
17956+		return PTR_ERR(hdmi->phyref_clk);
17957+	}
17958+
17959+	hdmi->grf_clk = devm_clk_get(hdmi->dev, "grf");
17960+	if (PTR_ERR(hdmi->grf_clk) == -ENOENT) {
17961+		hdmi->grf_clk = NULL;
17962+	} else if (PTR_ERR(hdmi->grf_clk) == -EPROBE_DEFER) {
17963+		return -EPROBE_DEFER;
17964+	} else if (IS_ERR(hdmi->grf_clk)) {
17965+		DRM_DEV_ERROR(hdmi->dev, "failed to get grf clock\n");
17966+		return PTR_ERR(hdmi->grf_clk);
17967+	}
17968+
17969+	hdmi->hclk_vio = devm_clk_get(hdmi->dev, "hclk_vio");
17970+	if (PTR_ERR(hdmi->hclk_vio) == -ENOENT) {
17971+		hdmi->hclk_vio = NULL;
17972+	} else if (PTR_ERR(hdmi->hclk_vio) == -EPROBE_DEFER) {
17973+		return -EPROBE_DEFER;
17974+	} else if (IS_ERR(hdmi->hclk_vio)) {
17975+		dev_err(hdmi->dev, "failed to get hclk_vio clock\n");
17976+		return PTR_ERR(hdmi->hclk_vio);
17977+	}
17978+
17979+	hdmi->hclk_vop = devm_clk_get(hdmi->dev, "hclk");
17980+	if (PTR_ERR(hdmi->hclk_vop) == -ENOENT) {
17981+		hdmi->hclk_vop = NULL;
17982+	} else if (PTR_ERR(hdmi->hclk_vop) == -EPROBE_DEFER) {
17983+		return -EPROBE_DEFER;
17984+	} else if (IS_ERR(hdmi->hclk_vop)) {
17985+		dev_err(hdmi->dev, "failed to get hclk_vop clock\n");
17986+		return PTR_ERR(hdmi->hclk_vop);
17987+	}
17988+
17989+	hdmi->aud_clk = devm_clk_get_optional(hdmi->dev, "aud");
17990+	if (IS_ERR(hdmi->aud_clk)) {
17991+		dev_err_probe(hdmi->dev, PTR_ERR(hdmi->aud_clk),
17992+			      "failed to get aud_clk clock\n");
17993+		return PTR_ERR(hdmi->aud_clk);
17994+	}
17995+
17996+	hdmi->hpd_clk = devm_clk_get_optional(hdmi->dev, "hpd");
17997+	if (IS_ERR(hdmi->hpd_clk)) {
17998+		dev_err_probe(hdmi->dev, PTR_ERR(hdmi->hpd_clk),
17999+			      "failed to get hpd_clk clock\n");
18000+		return PTR_ERR(hdmi->hpd_clk);
18001+	}
18002+
18003+	hdmi->hclk_vo1 = devm_clk_get_optional(hdmi->dev, "hclk_vo1");
18004+	if (IS_ERR(hdmi->hclk_vo1)) {
18005+		dev_err_probe(hdmi->dev, PTR_ERR(hdmi->hclk_vo1),
18006+			      "failed to get hclk_vo1 clock\n");
18007+		return PTR_ERR(hdmi->hclk_vo1);
18008+	}
18009+
18010+	hdmi->earc_clk = devm_clk_get_optional(hdmi->dev, "earc");
18011+	if (IS_ERR(hdmi->earc_clk)) {
18012+		dev_err_probe(hdmi->dev, PTR_ERR(hdmi->earc_clk),
18013+			      "failed to get earc_clk clock\n");
18014+		return PTR_ERR(hdmi->earc_clk);
18015+	}
18016+
18017+	hdmi->hdmitx_ref = devm_clk_get_optional(hdmi->dev, "hdmitx_ref");
18018+	if (IS_ERR(hdmi->hdmitx_ref)) {
18019+		dev_err_probe(hdmi->dev, PTR_ERR(hdmi->hdmitx_ref),
18020+			      "failed to get hdmitx_ref clock\n");
18021+		return PTR_ERR(hdmi->hdmitx_ref);
18022+	}
18023+
18024+	hdmi->pclk = devm_clk_get_optional(hdmi->dev, "pclk");
18025+	if (IS_ERR(hdmi->pclk)) {
18026+		dev_err_probe(hdmi->dev, PTR_ERR(hdmi->pclk),
18027+			      "failed to get pclk clock\n");
18028+		return PTR_ERR(hdmi->pclk);
18029+	}
18030+
18031+	hdmi->enable_gpio = devm_gpiod_get_optional(hdmi->dev, "enable",
18032+						    GPIOD_OUT_HIGH);
18033+	if (IS_ERR(hdmi->enable_gpio)) {
18034+		ret = PTR_ERR(hdmi->enable_gpio);
18035+		dev_err(hdmi->dev, "failed to request enable GPIO: %d\n", ret);
18036+		return ret;
18037+	}
18038+
18039+	hdmi->skip_check_420_mode =
18040+		of_property_read_bool(np, "skip-check-420-mode");
18041+
18042+	if (of_get_property(np, "rockchip,phy-table", &val)) {
18043+		phy_config = kmalloc(val, GFP_KERNEL);
18044+		if (!phy_config) {
18045+			/* use default table when kmalloc failed. */
18046+			dev_err(hdmi->dev, "kmalloc phy table failed\n");
18047+
18048+			return -ENOMEM;
18049+		}
18050+		phy_table_size = val / 16;
18051+		of_property_read_u32_array(np, "rockchip,phy-table",
18052+					   phy_config, val / sizeof(u32));
18053+		ret = rockchip_hdmi_update_phy_table(hdmi, phy_config,
18054+						     phy_table_size);
18055+		if (ret) {
18056+			kfree(phy_config);
18057+			return ret;
18058+		}
18059+		kfree(phy_config);
18060+	} else {
18061+		dev_dbg(hdmi->dev, "use default hdmi phy table\n");
18062+	}
18063+
18064+	return 0;
18065+}
18066+
18067+static enum drm_mode_status
18068+dw_hdmi_rockchip_mode_valid(struct drm_connector *connector, void *data,
18069+			    const struct drm_display_info *info,
18070+			    const struct drm_display_mode *mode)
18071+{
18072+	struct drm_encoder *encoder = connector->encoder;
18073+	enum drm_mode_status status = MODE_OK;
18074+	struct drm_device *dev = connector->dev;
18075+	struct rockchip_drm_private *priv = dev->dev_private;
18076+	struct drm_crtc *crtc;
18077+	struct rockchip_hdmi *hdmi;
18078+
18079+	/*
18080+	 * Pixel clocks we support are always < 2GHz and so fit in an
18081+	 * int.  We should make sure source rate does too so we don't get
18082+	 * overflow when we multiply by 1000.
18083+	 */
18084+	if (mode->clock > INT_MAX / 1000)
18085+		return MODE_BAD;
18086+
18087+	if (!encoder) {
18088+		const struct drm_connector_helper_funcs *funcs;
18089+
18090+		funcs = connector->helper_private;
18091+		if (funcs->atomic_best_encoder)
18092+			encoder = funcs->atomic_best_encoder(connector,
18093+							     connector->state);
18094+		else
18095+			encoder = funcs->best_encoder(connector);
18096+	}
18097+
18098+	if (!encoder || !encoder->possible_crtcs)
18099+		return MODE_BAD;
18100+
18101+	hdmi = to_rockchip_hdmi(encoder);
18102+
18103+	/*
18104+	 * If the sink's max TMDS clock is below 340MHz, a mode whose pixel
18105+	 * clock exceeds 340MHz can only be driven as YCbCr420, and only when
18106+	 * both the sink and the platform support YCbCr420.
18107+	 */
18108+	if (!hdmi->skip_check_420_mode) {
18109+		if (mode->clock > 340000 &&
18110+		    connector->display_info.max_tmds_clock < 340000 &&
18111+		    (!drm_mode_is_420(&connector->display_info, mode) ||
18112+		     !connector->ycbcr_420_allowed))
18113+			return MODE_BAD;
18114+
18115+		if (hdmi->max_tmdsclk <= 340000 && mode->clock > 340000 &&
18116+		    !drm_mode_is_420(&connector->display_info, mode))
18117+			return MODE_BAD;
18118+	}
18119+
18120+	if (hdmi->phy) {
18121+		if (hdmi->is_hdmi_qp)
18122+			phy_set_bus_width(hdmi->phy, mode->clock * 10);
18123+		else
18124+			phy_set_bus_width(hdmi->phy, 8);
18125+	}
18126+
18127+	/*
18128+	 * Make sure every DRM display mode can actually be driven. To support
18129+	 * more resolutions, limit possible_crtcs so the encoder is only
18130+	 * connected to CRTCs that can handle them.
18131+	 */
18132+	drm_for_each_crtc(crtc, connector->dev) {
18133+		int pipe = drm_crtc_index(crtc);
18134+		const struct rockchip_crtc_funcs *funcs =
18135+						priv->crtc_funcs[pipe];
18136+
18137+		if (!(encoder->possible_crtcs & drm_crtc_mask(crtc)))
18138+			continue;
18139+		if (!funcs || !funcs->mode_valid)
18140+			continue;
18141+
18142+		status = funcs->mode_valid(crtc, mode,
18143+					   DRM_MODE_CONNECTOR_HDMIA);
18144+		if (status != MODE_OK)
18145+			return status;
18146+	}
18147+
18148+	return status;
18149+}
18150+
18151+static void dw_hdmi_rockchip_encoder_disable(struct drm_encoder *encoder)
18152+{
18153+	struct rockchip_hdmi *hdmi = to_rockchip_hdmi(encoder);
18154+	struct drm_crtc *crtc = encoder->crtc;
18155+	struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc->state);
18156+
18157+	if (!hdmi->mode_changed) {
18158+		if (!hdmi->id)
18159+			s->output_if &= ~VOP_OUTPUT_IF_HDMI0;
18160+		else
18161+			s->output_if &= ~VOP_OUTPUT_IF_HDMI1;
18162+	}
18163+	/*
18164+	 * When HDMI is unplugged the output may be switched to CVBS, so the
18165+	 * PHY bus width must be reset to 8.
18166+	 */
18167+	if (hdmi->phy)
18168+		phy_set_bus_width(hdmi->phy, 8);
18169+}
18170+
18171+static void dw_hdmi_rockchip_encoder_enable(struct drm_encoder *encoder)
18172+{
18173+	struct rockchip_hdmi *hdmi = to_rockchip_hdmi(encoder);
18174+	struct drm_crtc *crtc = encoder->crtc;
18175+	u32 val;
18176+	int mux;
18177+	int ret;
18178+
18179+	if (WARN_ON(!crtc || !crtc->state))
18180+		return;
18181+
18182+	if (hdmi->phy)
18183+		phy_set_bus_width(hdmi->phy, hdmi->phy_bus_width);
18184+
18185+	clk_set_rate(hdmi->phyref_clk,
18186+		     crtc->state->adjusted_mode.crtc_clock * 1000);
18187+
18188+	if (hdmi->chip_data->lcdsel_grf_reg < 0)
18189+		return;
18190+
18191+	mux = drm_of_encoder_active_endpoint_id(hdmi->dev->of_node, encoder);
18192+	if (mux)
18193+		val = hdmi->chip_data->lcdsel_lit;
18194+	else
18195+		val = hdmi->chip_data->lcdsel_big;
18196+
18197+	ret = clk_prepare_enable(hdmi->grf_clk);
18198+	if (ret < 0) {
18199+		DRM_DEV_ERROR(hdmi->dev, "failed to enable grfclk %d\n", ret);
18200+		return;
18201+	}
18202+
18203+	ret = regmap_write(hdmi->regmap, hdmi->chip_data->lcdsel_grf_reg, val);
18204+	if (ret != 0)
18205+		DRM_DEV_ERROR(hdmi->dev, "Could not write to GRF: %d\n", ret);
18206+
18207+	if (hdmi->chip_data->lcdsel_grf_reg == RK3288_GRF_SOC_CON6) {
18208+		struct rockchip_crtc_state *s =
18209+				to_rockchip_crtc_state(crtc->state);
18210+		u32 mode_mask = mux ? RK3288_HDMI_LCDC1_YUV420 :
18211+					RK3288_HDMI_LCDC0_YUV420;
18212+
18213+		if (s->output_mode == ROCKCHIP_OUT_MODE_YUV420)
18214+			val = HIWORD_UPDATE(mode_mask, mode_mask);
18215+		else
18216+			val = HIWORD_UPDATE(0, mode_mask);
18217+
18218+		regmap_write(hdmi->regmap, RK3288_GRF_SOC_CON16, val);
18219+	}
18220+
18221+	clk_disable_unprepare(hdmi->grf_clk);
18222+	DRM_DEV_DEBUG(hdmi->dev, "vop %s output to hdmi\n",
18223+		      mux ? "LIT" : "BIG");
18224+}
18225+
18226+static void rk3588_set_link_mode(struct rockchip_hdmi *hdmi)
18227+{
18228+	int val;
18229+	bool is_hdmi0;
18230+
18231+	if (!hdmi->id)
18232+		is_hdmi0 = true;
18233+	else
18234+		is_hdmi0 = false;
18235+
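+	/* TMDS mode: clear the HDMI 2.1 (FRL) select bit and the compression/format bits */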
18236+	if (!hdmi->link_cfg.frl_mode) {
18237+		val = HIWORD_UPDATE(0, RK3588_HDMI21_MASK);
18238+		if (is_hdmi0)
18239+			regmap_write(hdmi->vo1_regmap, RK3588_GRF_VO1_CON4, val);
18240+		else
18241+			regmap_write(hdmi->vo1_regmap, RK3588_GRF_VO1_CON7, val);
18242+
18243+		val = HIWORD_UPDATE(0, RK3588_COMPRESS_MODE_MASK | RK3588_COLOR_FORMAT_MASK);
18244+		if (is_hdmi0)
18245+			regmap_write(hdmi->vo1_regmap, RK3588_GRF_VO1_CON3, val);
18246+		else
18247+			regmap_write(hdmi->vo1_regmap, RK3588_GRF_VO1_CON6, val);
18248+
18249+		return;
18250+	}
18251+
18252+	val = HIWORD_UPDATE(RK3588_HDMI21_MASK, RK3588_HDMI21_MASK);
18253+	if (is_hdmi0)
18254+		regmap_write(hdmi->vo1_regmap, RK3588_GRF_VO1_CON4, val);
18255+	else
18256+		regmap_write(hdmi->vo1_regmap, RK3588_GRF_VO1_CON7, val);
18257+
18258+	if (hdmi->link_cfg.dsc_mode) {
18259+		val = HIWORD_UPDATE(RK3588_COMPRESS_MODE_MASK | RK3588_COMPRESSED_DATA,
18260+				    RK3588_COMPRESS_MODE_MASK | RK3588_COLOR_FORMAT_MASK);
18261+		if (is_hdmi0)
18262+			regmap_write(hdmi->vo1_regmap, RK3588_GRF_VO1_CON3, val);
18263+		else
18264+			regmap_write(hdmi->vo1_regmap, RK3588_GRF_VO1_CON6, val);
18265+	} else {
18266+		val = HIWORD_UPDATE(0, RK3588_COMPRESS_MODE_MASK | RK3588_COLOR_FORMAT_MASK);
18267+		if (is_hdmi0)
18268+			regmap_write(hdmi->vo1_regmap, RK3588_GRF_VO1_CON3, val);
18269+		else
18270+			regmap_write(hdmi->vo1_regmap, RK3588_GRF_VO1_CON6, val);
18271+	}
18272+}
18273+
18274+static void rk3588_set_color_format(struct rockchip_hdmi *hdmi, u64 bus_format,
18275+				    u32 depth)
18276+{
18277+	u32 val = 0;
18278+
18279+	switch (bus_format) {
18280+	case MEDIA_BUS_FMT_RGB888_1X24:
18281+	case MEDIA_BUS_FMT_RGB101010_1X30:
18282+		val = HIWORD_UPDATE(0, RK3588_COLOR_FORMAT_MASK);
18283+		break;
18284+	case MEDIA_BUS_FMT_UYYVYY8_0_5X24:
18285+	case MEDIA_BUS_FMT_UYYVYY10_0_5X30:
18286+		val = HIWORD_UPDATE(RK3588_YUV420, RK3588_COLOR_FORMAT_MASK);
18287+		break;
18288+	case MEDIA_BUS_FMT_YUV8_1X24:
18289+	case MEDIA_BUS_FMT_YUV10_1X30:
18290+		val = HIWORD_UPDATE(RK3588_YUV444, RK3588_COLOR_FORMAT_MASK);
18291+		break;
18292+	default:
18293+		dev_err(hdmi->dev, "can't set correct color format\n");
18294+		return;
18295+	}
18296+
18297+	if (hdmi->link_cfg.dsc_mode)
18298+		val = HIWORD_UPDATE(RK3588_COMPRESSED_DATA, RK3588_COLOR_FORMAT_MASK);
18299+
18300+	if (depth == 8)
18301+		val |= HIWORD_UPDATE(RK3588_8BPC, RK3588_COLOR_DEPTH_MASK);
18302+	else
18303+		val |= HIWORD_UPDATE(RK3588_10BPC, RK3588_COLOR_DEPTH_MASK);
18304+
18305+	if (!hdmi->id)
18306+		regmap_write(hdmi->vo1_regmap, RK3588_GRF_VO1_CON3, val);
18307+	else
18308+		regmap_write(hdmi->vo1_regmap, RK3588_GRF_VO1_CON6, val);
18309+}
18310+
18311+static void rk3588_set_grf_cfg(void *data)
18312+{
18313+	struct rockchip_hdmi *hdmi = (struct rockchip_hdmi *)data;
18314+	int color_depth;
18315+
18316+	rk3588_set_link_mode(hdmi);
18317+	color_depth = hdmi_bus_fmt_color_depth(hdmi->bus_format);
18318+	rk3588_set_color_format(hdmi, hdmi->bus_format, color_depth);
18319+}
18320+
18321+static void
18322+dw_hdmi_rockchip_select_output(struct drm_connector_state *conn_state,
18323+			       struct drm_crtc_state *crtc_state,
18324+			       struct rockchip_hdmi *hdmi,
18325+			       unsigned int *color_format,
18326+			       unsigned int *output_mode,
18327+			       unsigned long *bus_format,
18328+			       unsigned int *bus_width,
18329+			       unsigned long *enc_out_encoding,
18330+			       unsigned int *eotf)
18331+{
18332+	struct drm_display_info *info = &conn_state->connector->display_info;
18333+	struct drm_display_mode *mode = &crtc_state->mode;
18334+	struct hdr_output_metadata *hdr_metadata;
18335+	u32 vic = drm_match_cea_mode(mode);
18336+	unsigned long tmdsclock, pixclock = mode->crtc_clock;
18337+	unsigned int color_depth;
18338+	bool support_dc = false;
18339+	bool sink_is_hdmi = true;
18340+	u32 max_tmds_clock = info->max_tmds_clock;
18341+	int output_eotf;
18342+
18343+	if (!hdmi->is_hdmi_qp)
18344+		sink_is_hdmi = dw_hdmi_get_output_whether_hdmi(hdmi->hdmi);
18345+
18346+	*color_format = DRM_HDMI_OUTPUT_DEFAULT_RGB;
18347+
18348+	switch (hdmi->hdmi_output) {
18349+	case DRM_HDMI_OUTPUT_YCBCR_HQ:
18350+		if (info->color_formats & DRM_COLOR_FORMAT_YCRCB444)
18351+			*color_format = DRM_HDMI_OUTPUT_YCBCR444;
18352+		else if (info->color_formats & DRM_COLOR_FORMAT_YCRCB422)
18353+			*color_format = DRM_HDMI_OUTPUT_YCBCR422;
18354+		else if (conn_state->connector->ycbcr_420_allowed &&
18355+			 drm_mode_is_420(info, mode) &&
18356+			 (pixclock >= 594000 && !hdmi->is_hdmi_qp))
18357+			*color_format = DRM_HDMI_OUTPUT_YCBCR420;
18358+		break;
18359+	case DRM_HDMI_OUTPUT_YCBCR_LQ:
18360+		if (conn_state->connector->ycbcr_420_allowed &&
18361+		    drm_mode_is_420(info, mode) && pixclock >= 594000)
18362+			*color_format = DRM_HDMI_OUTPUT_YCBCR420;
18363+		else if (info->color_formats & DRM_COLOR_FORMAT_YCRCB422)
18364+			*color_format = DRM_HDMI_OUTPUT_YCBCR422;
18365+		else if (info->color_formats & DRM_COLOR_FORMAT_YCRCB444)
18366+			*color_format = DRM_HDMI_OUTPUT_YCBCR444;
18367+		break;
18368+	case DRM_HDMI_OUTPUT_YCBCR420:
18369+		if (conn_state->connector->ycbcr_420_allowed &&
18370+		    drm_mode_is_420(info, mode) && pixclock >= 594000)
18371+			*color_format = DRM_HDMI_OUTPUT_YCBCR420;
18372+		break;
18373+	case DRM_HDMI_OUTPUT_YCBCR422:
18374+		if (info->color_formats & DRM_COLOR_FORMAT_YCRCB422)
18375+			*color_format = DRM_HDMI_OUTPUT_YCBCR422;
18376+		break;
18377+	case DRM_HDMI_OUTPUT_YCBCR444:
18378+		if (info->color_formats & DRM_COLOR_FORMAT_YCRCB444)
18379+			*color_format = DRM_HDMI_OUTPUT_YCBCR444;
18380+		break;
18381+	case DRM_HDMI_OUTPUT_DEFAULT_RGB:
18382+	default:
18383+		break;
18384+	}
18385+
18386+	if (*color_format == DRM_HDMI_OUTPUT_DEFAULT_RGB &&
18387+	    info->edid_hdmi_dc_modes & DRM_EDID_HDMI_DC_30)
18388+		support_dc = true;
18389+	if (*color_format == DRM_HDMI_OUTPUT_YCBCR444 &&
18390+	    info->edid_hdmi_dc_modes &
18391+	    (DRM_EDID_HDMI_DC_Y444 | DRM_EDID_HDMI_DC_30))
18392+		support_dc = true;
18393+	if (*color_format == DRM_HDMI_OUTPUT_YCBCR422)
18394+		support_dc = true;
18395+	if (*color_format == DRM_HDMI_OUTPUT_YCBCR420 &&
18396+	    info->hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
18397+		support_dc = true;
18398+
18399+	if (hdmi->colordepth > 8 && support_dc)
18400+		color_depth = 10;
18401+	else
18402+		color_depth = 8;
18403+
18404+	if (!sink_is_hdmi) {
18405+		*color_format = DRM_HDMI_OUTPUT_DEFAULT_RGB;
18406+		color_depth = 8;
18407+	}
18408+
18409+	*eotf = HDMI_EOTF_TRADITIONAL_GAMMA_SDR;
18410+	if (conn_state->hdr_output_metadata) {
18411+		hdr_metadata = (struct hdr_output_metadata *)
18412+			conn_state->hdr_output_metadata->data;
18413+		output_eotf = hdr_metadata->hdmi_metadata_type1.eotf;
18414+		if (output_eotf > HDMI_EOTF_TRADITIONAL_GAMMA_SDR &&
18415+		    output_eotf <= HDMI_EOTF_BT_2100_HLG)
18416+			*eotf = output_eotf;
18417+	}
18418+
18419+	if ((*eotf > HDMI_EOTF_TRADITIONAL_GAMMA_SDR &&
18420+	     conn_state->connector->hdr_sink_metadata.hdmi_type1.eotf &
18421+	     BIT(*eotf)) || (hdmi->colorimetry ==
18422+	     RK_HDMI_COLORIMETRY_BT2020))
18423+		*enc_out_encoding = V4L2_YCBCR_ENC_BT2020;
18424+	else if ((vic == 6) || (vic == 7) || (vic == 21) || (vic == 22) ||
18425+		 (vic == 2) || (vic == 3) || (vic == 17) || (vic == 18))
18426+		*enc_out_encoding = V4L2_YCBCR_ENC_601;
18427+	else
18428+		*enc_out_encoding = V4L2_YCBCR_ENC_709;
18429+
18430+	if (*enc_out_encoding == V4L2_YCBCR_ENC_BT2020) {
18431+		/* BT2020 requires a color depth of at least 10 bits */
18432+		color_depth = 10;
18433+		/* prefer YCbCr422 when sending 10-bit */
18434+		if (info->color_formats & DRM_COLOR_FORMAT_YCRCB422)
18435+			*color_format = DRM_HDMI_OUTPUT_YCBCR422;
18436+		if (hdmi->is_hdmi_qp) {
18437+			if (info->color_formats & DRM_COLOR_FORMAT_YCRCB420) {
18438+				if (mode->clock >= 340000)
18439+					*color_format = DRM_HDMI_OUTPUT_YCBCR420;
18440+				else
18441+					*color_format = DRM_HDMI_OUTPUT_DEFAULT_RGB;
18442+			} else {
18443+				*color_format = DRM_HDMI_OUTPUT_DEFAULT_RGB;
18444+			}
18445+		}
18446+	}
18447+
18448+	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
18449+		pixclock *= 2;
18450+	if ((mode->flags & DRM_MODE_FLAG_3D_MASK) ==
18451+		DRM_MODE_FLAG_3D_FRAME_PACKING)
18452+		pixclock *= 2;
18453+
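+	/* TMDS clock scales with color depth except for YCbCr422; YCbCr420 halves it */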
18454+	if (*color_format == DRM_HDMI_OUTPUT_YCBCR422 || color_depth == 8)
18455+		tmdsclock = pixclock;
18456+	else
18457+		tmdsclock = pixclock * (color_depth) / 8;
18458+
18459+	if (*color_format == DRM_HDMI_OUTPUT_YCBCR420)
18460+		tmdsclock /= 2;
18461+
18462+	/* XXX: some sinks report a max_tmds_clock of 0; treat it as 340MHz. */
18463+	if (!max_tmds_clock)
18464+		max_tmds_clock = 340000;
18465+
18466+	max_tmds_clock = min(max_tmds_clock, hdmi->max_tmdsclk);
18467+
18468+	if ((tmdsclock > max_tmds_clock) && !hdmi->is_hdmi_qp) {
18469+		if (max_tmds_clock >= 594000) {
18470+			color_depth = 8;
18471+		} else if (max_tmds_clock > 340000) {
18472+			if (drm_mode_is_420(info, mode) || tmdsclock >= 594000)
18473+				*color_format = DRM_HDMI_OUTPUT_YCBCR420;
18474+		} else {
18475+			color_depth = 8;
18476+			if (drm_mode_is_420(info, mode) || tmdsclock >= 594000)
18477+				*color_format = DRM_HDMI_OUTPUT_YCBCR420;
18478+		}
18479+	}
18480+
18481+	if (mode->clock >= 340000 && hdmi->is_hdmi_qp)
18482+		*color_format = DRM_HDMI_OUTPUT_YCBCR420;
18483+
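+	/* translate the selected format and depth into a VOP output mode and media bus format */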
18484+	if (*color_format == DRM_HDMI_OUTPUT_YCBCR420) {
18485+		*output_mode = ROCKCHIP_OUT_MODE_YUV420;
18486+		if (color_depth > 8)
18487+			*bus_format = MEDIA_BUS_FMT_UYYVYY10_0_5X30;
18488+		else
18489+			*bus_format = MEDIA_BUS_FMT_UYYVYY8_0_5X24;
18490+		*bus_width = color_depth / 2;
18491+	} else {
18492+		*output_mode = ROCKCHIP_OUT_MODE_AAAA;
18493+		if (color_depth > 8) {
18494+			if (*color_format != DRM_HDMI_OUTPUT_DEFAULT_RGB &&
18495+			    !hdmi->unsupported_yuv_input)
18496+				*bus_format = MEDIA_BUS_FMT_YUV10_1X30;
18497+			else
18498+				*bus_format = MEDIA_BUS_FMT_RGB101010_1X30;
18499+		} else {
18500+			if (*color_format != DRM_HDMI_OUTPUT_DEFAULT_RGB &&
18501+			    !hdmi->unsupported_yuv_input)
18502+				*bus_format = MEDIA_BUS_FMT_YUV8_1X24;
18503+			else
18504+				*bus_format = MEDIA_BUS_FMT_RGB888_1X24;
18505+		}
18506+		if (*color_format == DRM_HDMI_OUTPUT_YCBCR422)
18507+			*bus_width = 8;
18508+		else
18509+			*bus_width = color_depth;
18510+	}
18511+
18512+	hdmi->bus_format = *bus_format;
18513+
18514+	if (*color_format == DRM_HDMI_OUTPUT_YCBCR422) {
18515+		if (color_depth == 12)
18516+			hdmi->output_bus_format = MEDIA_BUS_FMT_UYVY12_1X24;
18517+		else if (color_depth == 10)
18518+			hdmi->output_bus_format = MEDIA_BUS_FMT_UYVY10_1X20;
18519+		else
18520+			hdmi->output_bus_format = MEDIA_BUS_FMT_UYVY8_1X16;
18521+	} else {
18522+		hdmi->output_bus_format = *bus_format;
18523+	}
18524+}
18525+
18526+static bool
18527+dw_hdmi_rockchip_check_color(struct drm_connector_state *conn_state,
18528+			     struct rockchip_hdmi *hdmi)
18529+{
18530+	struct drm_crtc_state *crtc_state = conn_state->crtc->state;
18531+	unsigned int colorformat;
18532+	unsigned long bus_format;
18533+	unsigned long output_bus_format = hdmi->output_bus_format;
18534+	unsigned long enc_out_encoding = hdmi->enc_out_encoding;
18535+	unsigned int eotf, bus_width;
18536+	unsigned int output_mode;
18537+
18538+	dw_hdmi_rockchip_select_output(conn_state, crtc_state, hdmi,
18539+				       &colorformat,
18540+				       &output_mode, &bus_format, &bus_width,
18541+				       &hdmi->enc_out_encoding, &eotf);
18542+
18543+	if (output_bus_format != hdmi->output_bus_format ||
18544+	    enc_out_encoding != hdmi->enc_out_encoding)
18545+		return true;
18546+	else
18547+		return false;
18548+}
18549+
18550+static int
18551+dw_hdmi_rockchip_encoder_atomic_check(struct drm_encoder *encoder,
18552+				      struct drm_crtc_state *crtc_state,
18553+				      struct drm_connector_state *conn_state)
18554+{
18555+	struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state);
18556+	struct rockchip_hdmi *hdmi = to_rockchip_hdmi(encoder);
18557+	unsigned int colorformat, bus_width, tmdsclk;
18558+	unsigned int output_mode;
18559+	unsigned long bus_format;
18560+	int color_depth;
18561+
18562+	dw_hdmi_rockchip_select_output(conn_state, crtc_state, hdmi,
18563+				       &colorformat,
18564+				       &output_mode, &bus_format, &bus_width,
18565+				       &hdmi->enc_out_encoding, &s->eotf);
18566+
18567+	s->bus_format = bus_format;
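+	/* HDMI QP (HDMI 2.1): choose FRL vs TMDS link config and fold it into the PHY bus width */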
18568+	if (hdmi->is_hdmi_qp) {
18569+		color_depth = hdmi_bus_fmt_color_depth(bus_format);
18570+		tmdsclk = hdmi_get_tmdsclock(hdmi, crtc_state->mode.clock);
18571+		if (hdmi_bus_fmt_is_yuv420(hdmi->output_bus_format))
18572+			tmdsclk /= 2;
18573+		hdmi_select_link_config(hdmi, crtc_state, tmdsclk);
18574+
18575+		if (hdmi->link_cfg.frl_mode) {
18576+			gpiod_set_value(hdmi->enable_gpio, 0);
18577+			/* the current version supports at most 40G FRL (4 lanes at 10Gbps) */
18578+			if (hdmi->link_cfg.rate_per_lane >= 10) {
18579+				hdmi->link_cfg.frl_lanes = 4;
18580+				hdmi->link_cfg.rate_per_lane = 10;
18581+			}
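+			/* encode the FRL link rate plus mode/depth flags for phy_set_bus_width() */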
18582+			bus_width = hdmi->link_cfg.frl_lanes *
18583+				hdmi->link_cfg.rate_per_lane * 1000000;
18584+			/* 10 bit color depth and frl mode */
18585+			if (color_depth == 10)
18586+				bus_width |=
18587+					COLOR_DEPTH_10BIT | HDMI_FRL_MODE;
18588+			else
18589+				bus_width |= HDMI_FRL_MODE;
18590+		} else {
18591+			gpiod_set_value(hdmi->enable_gpio, 1);
18592+			bus_width = hdmi_get_tmdsclock(hdmi,
18593+						       crtc_state->mode.clock * 10);
18594+			if (hdmi_bus_fmt_is_yuv420(hdmi->output_bus_format))
18595+				bus_width /= 2;
18596+
18597+			if (color_depth == 10)
18598+				bus_width |= COLOR_DEPTH_10BIT;
18599+		}
18600+	}
18601+
18602+	hdmi->phy_bus_width = bus_width;
18603+
18604+	if (hdmi->phy)
18605+		phy_set_bus_width(hdmi->phy, bus_width);
18606+
18607+	s->output_type = DRM_MODE_CONNECTOR_HDMIA;
18608+	s->tv_state = &conn_state->tv;
18609+
18610+	if (!hdmi->id)
18611+		s->output_if |= VOP_OUTPUT_IF_HDMI0;
18612+	else
18613+		s->output_if |= VOP_OUTPUT_IF_HDMI1;
18614+
18615+	s->output_mode = output_mode;
18616+	hdmi->bus_format = s->bus_format;
18617+
18618+	hdmi->mode_changed = crtc_state->mode_changed;
18619+
18620+	if (hdmi->enc_out_encoding == V4L2_YCBCR_ENC_BT2020)
18621+		s->color_space = V4L2_COLORSPACE_BT2020;
18622+	else if (colorformat == DRM_HDMI_OUTPUT_DEFAULT_RGB)
18623+		s->color_space = V4L2_COLORSPACE_DEFAULT;
18624+	else if (hdmi->enc_out_encoding == V4L2_YCBCR_ENC_709)
18625+		s->color_space = V4L2_COLORSPACE_REC709;
18626+	else
18627+		s->color_space = V4L2_COLORSPACE_SMPTE170M;
18628+
18629+	return 0;
18630+}
18631+
18632+
18633+static unsigned long
18634+dw_hdmi_rockchip_get_input_bus_format(void *data)
18635+{
18636+	struct rockchip_hdmi *hdmi = (struct rockchip_hdmi *)data;
18637+
18638+	return hdmi->bus_format;
18639+}
18640+
18641+static unsigned long
18642+dw_hdmi_rockchip_get_output_bus_format(void *data)
18643+{
18644+	struct rockchip_hdmi *hdmi = (struct rockchip_hdmi *)data;
18645+
18646+	return hdmi->output_bus_format;
18647+}
18648+
18649+static unsigned long
18650+dw_hdmi_rockchip_get_enc_in_encoding(void *data)
18651+{
18652+	struct rockchip_hdmi *hdmi = (struct rockchip_hdmi *)data;
18653+
18654+	return hdmi->enc_out_encoding;
18655+}
18656+
18657+static unsigned long
18658+dw_hdmi_rockchip_get_enc_out_encoding(void *data)
18659+{
18660+	struct rockchip_hdmi *hdmi = (struct rockchip_hdmi *)data;
18661+
18662+	return hdmi->enc_out_encoding;
18663+}
18664+
18665+static unsigned long
18666+dw_hdmi_rockchip_get_quant_range(void *data)
18667+{
18668+	struct rockchip_hdmi *hdmi = (struct rockchip_hdmi *)data;
18669+
18670+	return hdmi->hdmi_quant_range;
18671+}
18672+
18673+static struct drm_property *
18674+dw_hdmi_rockchip_get_hdr_property(void *data)
18675+{
18676+	struct rockchip_hdmi *hdmi = (struct rockchip_hdmi *)data;
18677+
18678+	return hdmi->hdr_panel_metadata_property;
18679+}
18680+
18681+static struct drm_property_blob *
18682+dw_hdmi_rockchip_get_hdr_blob(void *data)
18683+{
18684+	struct rockchip_hdmi *hdmi = (struct rockchip_hdmi *)data;
18685+
18686+	return hdmi->hdr_panel_blob_ptr;
18687+}
18688+
18689+static bool
18690+dw_hdmi_rockchip_get_color_changed(void *data)
18691+{
18692+	struct rockchip_hdmi *hdmi = (struct rockchip_hdmi *)data;
18693+	bool ret = false;
18694+
18695+	if (hdmi->color_changed)
18696+		ret = true;
18697+	hdmi->color_changed = 0;
18698+
18699+	return ret;
18700+}
18701+
18702+static int
18703+dw_hdmi_rockchip_get_yuv422_format(struct drm_connector *connector,
18704+				   struct edid *edid)
18705+{
18706+	if (!connector || !edid)
18707+		return -EINVAL;
18708+
18709+	return rockchip_drm_get_yuv422_format(connector, edid);
18710+}
18711+
18712+static int
18713+dw_hdmi_rockchip_get_edid_dsc_info(void *data, struct edid *edid)
18714+{
18715+	struct rockchip_hdmi *hdmi = (struct rockchip_hdmi *)data;
18716+
18717+	if (!edid)
18718+		return -EINVAL;
18719+
18720+	return rockchip_drm_parse_cea_ext(&hdmi->dsc_cap,
18721+					  &hdmi->max_frl_rate_per_lane,
18722+					  &hdmi->max_lanes, edid);
18723+}
18724+
18725+static int
18726+dw_hdmi_rockchip_get_next_hdr_data(void *data, struct edid *edid,
18727+				   struct drm_connector *connector)
18728+{
18729+	int ret;
18730+	struct rockchip_hdmi *hdmi = (struct rockchip_hdmi *)data;
18731+	struct next_hdr_sink_data *sink_data = &hdmi->next_hdr_data;
18732+	size_t size = sizeof(*sink_data);
18733+	struct drm_property *property = hdmi->next_hdr_sink_data_property;
18734+	struct drm_property_blob *blob = hdmi->hdr_panel_blob_ptr;
18735+
18736+	if (!edid)
18737+		return -EINVAL;
18738+
18739+	rockchip_drm_parse_next_hdr(sink_data, edid);
18740+
18741+	ret = drm_property_replace_global_blob(connector->dev, &blob, size, sink_data,
18742+					       &connector->base, property);
18743+
18744+	return ret;
18745+}
18746+
18747+static
18748+struct dw_hdmi_link_config *dw_hdmi_rockchip_get_link_cfg(void *data)
18749+{
18750+	struct rockchip_hdmi *hdmi = (struct rockchip_hdmi *)data;
18751+
18752+	return &hdmi->link_cfg;
18753+}
18754+
18755+static const struct drm_prop_enum_list color_depth_enum_list[] = {
18756+	{ 0, "Automatic" }, /* Prefer highest color depth */
18757+	{ 8, "24bit" },
18758+	{ 10, "30bit" },
18759+};
18760+
18761+static const struct drm_prop_enum_list drm_hdmi_output_enum_list[] = {
18762+	{ DRM_HDMI_OUTPUT_DEFAULT_RGB, "output_rgb" },
18763+	{ DRM_HDMI_OUTPUT_YCBCR444, "output_ycbcr444" },
18764+	{ DRM_HDMI_OUTPUT_YCBCR422, "output_ycbcr422" },
18765+	{ DRM_HDMI_OUTPUT_YCBCR420, "output_ycbcr420" },
18766+	{ DRM_HDMI_OUTPUT_YCBCR_HQ, "output_ycbcr_high_subsampling" },
18767+	{ DRM_HDMI_OUTPUT_YCBCR_LQ, "output_ycbcr_low_subsampling" },
18768+	{ DRM_HDMI_OUTPUT_INVALID, "invalid_output" },
18769+};
18770+
18771+static const struct drm_prop_enum_list quant_range_enum_list[] = {
18772+	{ HDMI_QUANTIZATION_RANGE_DEFAULT, "default" },
18773+	{ HDMI_QUANTIZATION_RANGE_LIMITED, "limit" },
18774+	{ HDMI_QUANTIZATION_RANGE_FULL, "full" },
18775+};
18776+
18777+static const struct drm_prop_enum_list colorimetry_enum_list[] = {
18778+	{ HDMI_COLORIMETRY_NONE, "None" },
18779+	{ RK_HDMI_COLORIMETRY_BT2020, "ITU_2020" },
18780+};
18781+
18782+static const struct drm_prop_enum_list output_hdmi_dvi_enum_list[] = {
18783+	{ 0, "auto" },
18784+	{ 1, "force_hdmi" },
18785+	{ 2, "force_dvi" },
18786+};
18787+
18788+static const struct drm_prop_enum_list output_type_cap_list[] = {
18789+	{ 0, "DVI" },
18790+	{ 1, "HDMI" },
18791+};
18792+
18793+static void
18794+dw_hdmi_rockchip_attach_properties(struct drm_connector *connector,
18795+				   unsigned int color, int version,
18796+				   void *data)
18797+{
18798+	struct rockchip_hdmi *hdmi = (struct rockchip_hdmi *)data;
18799+	struct drm_property *prop;
18800+	struct rockchip_drm_private *private = connector->dev->dev_private;
18801+
18802+	switch (color) {
18803+	case MEDIA_BUS_FMT_RGB101010_1X30:
18804+		hdmi->hdmi_output = DRM_HDMI_OUTPUT_DEFAULT_RGB;
18805+		hdmi->colordepth = 10;
18806+		break;
18807+	case MEDIA_BUS_FMT_YUV8_1X24:
18808+		hdmi->hdmi_output = DRM_HDMI_OUTPUT_YCBCR444;
18809+		hdmi->colordepth = 8;
18810+		break;
18811+	case MEDIA_BUS_FMT_YUV10_1X30:
18812+		hdmi->hdmi_output = DRM_HDMI_OUTPUT_YCBCR444;
18813+		hdmi->colordepth = 10;
18814+		break;
18815+	case MEDIA_BUS_FMT_UYVY10_1X20:
18816+		hdmi->hdmi_output = DRM_HDMI_OUTPUT_YCBCR422;
18817+		hdmi->colordepth = 10;
18818+		break;
18819+	case MEDIA_BUS_FMT_UYVY8_1X16:
18820+		hdmi->hdmi_output = DRM_HDMI_OUTPUT_YCBCR422;
18821+		hdmi->colordepth = 8;
18822+		break;
18823+	case MEDIA_BUS_FMT_UYYVYY8_0_5X24:
18824+		hdmi->hdmi_output = DRM_HDMI_OUTPUT_YCBCR420;
18825+		hdmi->colordepth = 8;
18826+		break;
18827+	case MEDIA_BUS_FMT_UYYVYY10_0_5X30:
18828+		hdmi->hdmi_output = DRM_HDMI_OUTPUT_YCBCR420;
18829+		hdmi->colordepth = 10;
18830+		break;
18831+	default:
18832+		hdmi->hdmi_output = DRM_HDMI_OUTPUT_DEFAULT_RGB;
18833+		hdmi->colordepth = 8;
18834+	}
18835+
18836+	hdmi->bus_format = color;
18837+
18838+	if (hdmi->hdmi_output == DRM_HDMI_OUTPUT_YCBCR422) {
18839+		if (hdmi->colordepth == 12)
18840+			hdmi->output_bus_format = MEDIA_BUS_FMT_UYVY12_1X24;
18841+		else if (hdmi->colordepth == 10)
18842+			hdmi->output_bus_format = MEDIA_BUS_FMT_UYVY10_1X20;
18843+		else
18844+			hdmi->output_bus_format = MEDIA_BUS_FMT_UYVY8_1X16;
18845+	} else {
18846+		hdmi->output_bus_format = hdmi->bus_format;
18847+	}
18848+
18849+	/* RK3368 does not support deep color mode */
18850+	if (!hdmi->color_depth_property && !hdmi->unsupported_deep_color) {
18851+		prop = drm_property_create_enum(connector->dev, 0,
18852+						"hdmi_output_depth",
18853+						color_depth_enum_list,
18854+						ARRAY_SIZE(color_depth_enum_list));
18855+		if (prop) {
18856+			hdmi->color_depth_property = prop;
18857+			drm_object_attach_property(&connector->base, prop, 0);
18858+		}
18859+	}
18860+
18861+	prop = drm_property_create_enum(connector->dev, 0, "hdmi_output_format",
18862+					drm_hdmi_output_enum_list,
18863+					ARRAY_SIZE(drm_hdmi_output_enum_list));
18864+	if (prop) {
18865+		hdmi->hdmi_output_property = prop;
18866+		drm_object_attach_property(&connector->base, prop, 0);
18867+	}
18868+
18869+	prop = drm_property_create_enum(connector->dev, 0,
18870+					"hdmi_output_colorimetry",
18871+					colorimetry_enum_list,
18872+					ARRAY_SIZE(colorimetry_enum_list));
18873+	if (prop) {
18874+		hdmi->colorimetry_property = prop;
18875+		drm_object_attach_property(&connector->base, prop, 0);
18876+	}
18877+
18878+	prop = drm_property_create_range(connector->dev, DRM_MODE_PROP_IMMUTABLE,
18879+					 "hdmi_color_depth_capacity",
18880+					 0, 0xff);
18881+	if (prop) {
18882+		hdmi->colordepth_capacity = prop;
18883+		drm_object_attach_property(&connector->base, prop, 0);
18884+	}
18885+
18886+	prop = drm_property_create_range(connector->dev, DRM_MODE_PROP_IMMUTABLE,
18887+					 "hdmi_output_mode_capacity",
18888+					 0, 0xf);
18889+	if (prop) {
18890+		hdmi->outputmode_capacity = prop;
18891+		drm_object_attach_property(&connector->base, prop, 0);
18892+	}
18893+
18894+	prop = drm_property_create(connector->dev,
18895+				   DRM_MODE_PROP_BLOB |
18896+				   DRM_MODE_PROP_IMMUTABLE,
18897+				   "HDR_PANEL_METADATA", 0);
18898+	if (prop) {
18899+		hdmi->hdr_panel_metadata_property = prop;
18900+		drm_object_attach_property(&connector->base, prop, 0);
18901+	}
18902+
18903+	prop = drm_property_create(connector->dev,
18904+				   DRM_MODE_PROP_BLOB |
18905+				   DRM_MODE_PROP_IMMUTABLE,
18906+				   "NEXT_HDR_SINK_DATA", 0);
18907+	if (prop) {
18908+		hdmi->next_hdr_sink_data_property = prop;
18909+		drm_object_attach_property(&connector->base, prop, 0);
18910+	}
18911+
18912+	if (!hdmi->is_hdmi_qp) {
18913+		prop = drm_property_create_enum(connector->dev, 0,
18914+						"output_hdmi_dvi",
18915+						output_hdmi_dvi_enum_list,
18916+						ARRAY_SIZE(output_hdmi_dvi_enum_list));
18917+		if (prop) {
18918+			hdmi->output_hdmi_dvi = prop;
18919+			drm_object_attach_property(&connector->base, prop, 0);
18920+		}
18921+
18922+		prop = drm_property_create_enum(connector->dev, DRM_MODE_PROP_IMMUTABLE,
18923+						 "output_type_capacity",
18924+						 output_type_cap_list,
18925+						 ARRAY_SIZE(output_type_cap_list));
18926+		if (prop) {
18927+			hdmi->output_type_capacity = prop;
18928+			drm_object_attach_property(&connector->base, prop, 0);
18929+		}
18930+
18931+		prop = drm_property_create_enum(connector->dev, 0,
18932+						"hdmi_quant_range",
18933+						quant_range_enum_list,
18934+						ARRAY_SIZE(quant_range_enum_list));
18935+		if (prop) {
18936+			hdmi->quant_range = prop;
18937+			drm_object_attach_property(&connector->base, prop, 0);
18938+		}
18939+	}
18940+
18941+	prop = connector->dev->mode_config.hdr_output_metadata_property;
18942+	if (version >= 0x211a || hdmi->is_hdmi_qp)
18943+		drm_object_attach_property(&connector->base, prop, 0);
18944+	drm_object_attach_property(&connector->base, private->connector_id_prop, 0);
18945+}
18946+
18947+static void
18948+dw_hdmi_rockchip_destroy_properties(struct drm_connector *connector,
18949+				    void *data)
18950+{
18951+	struct rockchip_hdmi *hdmi = (struct rockchip_hdmi *)data;
18952+
18953+	if (hdmi->color_depth_property) {
18954+		drm_property_destroy(connector->dev,
18955+				     hdmi->color_depth_property);
18956+		hdmi->color_depth_property = NULL;
18957+	}
18958+
18959+	if (hdmi->hdmi_output_property) {
18960+		drm_property_destroy(connector->dev,
18961+				     hdmi->hdmi_output_property);
18962+		hdmi->hdmi_output_property = NULL;
18963+	}
18964+
18965+	if (hdmi->colordepth_capacity) {
18966+		drm_property_destroy(connector->dev,
18967+				     hdmi->colordepth_capacity);
18968+		hdmi->colordepth_capacity = NULL;
18969 	}
18970-};
18971 
18972-static const struct dw_hdmi_curr_ctrl rockchip_cur_ctr[] = {
18973-	/*      pixelclk    bpp8    bpp10   bpp12 */
18974-	{
18975-		40000000,  { 0x0018, 0x0018, 0x0018 },
18976-	}, {
18977-		65000000,  { 0x0028, 0x0028, 0x0028 },
18978-	}, {
18979-		66000000,  { 0x0038, 0x0038, 0x0038 },
18980-	}, {
18981-		74250000,  { 0x0028, 0x0038, 0x0038 },
18982-	}, {
18983-		83500000,  { 0x0028, 0x0038, 0x0038 },
18984-	}, {
18985-		146250000, { 0x0038, 0x0038, 0x0038 },
18986-	}, {
18987-		148500000, { 0x0000, 0x0038, 0x0038 },
18988-	}, {
18989-		~0UL,      { 0x0000, 0x0000, 0x0000},
18990+	if (hdmi->outputmode_capacity) {
18991+		drm_property_destroy(connector->dev,
18992+				     hdmi->outputmode_capacity);
18993+		hdmi->outputmode_capacity = NULL;
18994 	}
18995-};
18996 
18997-static const struct dw_hdmi_phy_config rockchip_phy_config[] = {
18998-	/*pixelclk   symbol   term   vlev*/
18999-	{ 74250000,  0x8009, 0x0004, 0x0272},
19000-	{ 148500000, 0x802b, 0x0004, 0x028d},
19001-	{ 297000000, 0x8039, 0x0005, 0x028d},
19002-	{ ~0UL,	     0x0000, 0x0000, 0x0000}
19003-};
19004+	if (hdmi->quant_range) {
19005+		drm_property_destroy(connector->dev,
19006+				     hdmi->quant_range);
19007+		hdmi->quant_range = NULL;
19008+	}
19009 
19010-static int rockchip_hdmi_parse_dt(struct rockchip_hdmi *hdmi)
19011-{
19012-	struct device_node *np = hdmi->dev->of_node;
19013+	if (hdmi->colorimetry_property) {
19014+		drm_property_destroy(connector->dev,
19015+				     hdmi->colorimetry_property);
19016+		hdmi->colorimetry_property = NULL;
19017+	}
19018 
19019-	hdmi->regmap = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
19020-	if (IS_ERR(hdmi->regmap)) {
19021-		DRM_DEV_ERROR(hdmi->dev, "Unable to get rockchip,grf\n");
19022-		return PTR_ERR(hdmi->regmap);
19023+	if (hdmi->hdr_panel_metadata_property) {
19024+		drm_property_destroy(connector->dev,
19025+				     hdmi->hdr_panel_metadata_property);
19026+		hdmi->hdr_panel_metadata_property = NULL;
19027 	}
19028 
19029-	hdmi->vpll_clk = devm_clk_get(hdmi->dev, "vpll");
19030-	if (PTR_ERR(hdmi->vpll_clk) == -ENOENT) {
19031-		hdmi->vpll_clk = NULL;
19032-	} else if (PTR_ERR(hdmi->vpll_clk) == -EPROBE_DEFER) {
19033-		return -EPROBE_DEFER;
19034-	} else if (IS_ERR(hdmi->vpll_clk)) {
19035-		DRM_DEV_ERROR(hdmi->dev, "failed to get grf clock\n");
19036-		return PTR_ERR(hdmi->vpll_clk);
19037+	if (hdmi->next_hdr_sink_data_property) {
19038+		drm_property_destroy(connector->dev,
19039+				     hdmi->next_hdr_sink_data_property);
19040+		hdmi->next_hdr_sink_data_property = NULL;
19041 	}
19042 
19043-	hdmi->grf_clk = devm_clk_get(hdmi->dev, "grf");
19044-	if (PTR_ERR(hdmi->grf_clk) == -ENOENT) {
19045-		hdmi->grf_clk = NULL;
19046-	} else if (PTR_ERR(hdmi->grf_clk) == -EPROBE_DEFER) {
19047-		return -EPROBE_DEFER;
19048-	} else if (IS_ERR(hdmi->grf_clk)) {
19049-		DRM_DEV_ERROR(hdmi->dev, "failed to get grf clock\n");
19050-		return PTR_ERR(hdmi->grf_clk);
19051+	if (hdmi->output_hdmi_dvi) {
19052+		drm_property_destroy(connector->dev,
19053+				     hdmi->output_hdmi_dvi);
19054+		hdmi->output_hdmi_dvi = NULL;
19055 	}
19056 
19057-	return 0;
19058+	if (hdmi->output_type_capacity) {
19059+		drm_property_destroy(connector->dev,
19060+				     hdmi->output_type_capacity);
19061+		hdmi->output_type_capacity = NULL;
19062+	}
19063 }
19064 
19065-static enum drm_mode_status
19066-dw_hdmi_rockchip_mode_valid(struct dw_hdmi *hdmi, void *data,
19067-			    const struct drm_display_info *info,
19068-			    const struct drm_display_mode *mode)
19069+static int
19070+dw_hdmi_rockchip_set_property(struct drm_connector *connector,
19071+			      struct drm_connector_state *state,
19072+			      struct drm_property *property,
19073+			      u64 val,
19074+			      void *data)
19075 {
19076-	const struct dw_hdmi_mpll_config *mpll_cfg = rockchip_mpll_cfg;
19077-	int pclk = mode->clock * 1000;
19078-	bool valid = false;
19079-	int i;
19080-
19081-	for (i = 0; mpll_cfg[i].mpixelclock != (~0UL); i++) {
19082-		if (pclk == mpll_cfg[i].mpixelclock) {
19083-			valid = true;
19084-			break;
19085-		}
19086+	struct rockchip_hdmi *hdmi = (struct rockchip_hdmi *)data;
19087+	struct drm_mode_config *config = &connector->dev->mode_config;
19088+
19089+	if (property == hdmi->color_depth_property) {
19090+		hdmi->colordepth = val;
19091+		/* If hdmi is disconnected, state->crtc is null */
19092+		if (!state->crtc)
19093+			return 0;
19094+		if (dw_hdmi_rockchip_check_color(state, hdmi))
19095+			hdmi->color_changed++;
19096+		return 0;
19097+	} else if (property == hdmi->hdmi_output_property) {
19098+		hdmi->hdmi_output = val;
19099+		if (!state->crtc)
19100+			return 0;
19101+		if (dw_hdmi_rockchip_check_color(state, hdmi))
19102+			hdmi->color_changed++;
19103+		return 0;
19104+	} else if (property == hdmi->quant_range) {
19105+		u64 quant_range = hdmi->hdmi_quant_range;
19106+
19107+		hdmi->hdmi_quant_range = val;
19108+		if (quant_range != hdmi->hdmi_quant_range)
19109+			dw_hdmi_set_quant_range(hdmi->hdmi);
19110+		return 0;
19111+	} else if (property == config->hdr_output_metadata_property) {
19112+		return 0;
19113+	} else if (property == hdmi->colorimetry_property) {
19114+		hdmi->colorimetry = val;
19115+		return 0;
19116+	} else if (property == hdmi->output_hdmi_dvi) {
19117+		if (hdmi->force_output != val)
19118+			hdmi->color_changed++;
19119+		hdmi->force_output = val;
19120+		dw_hdmi_set_output_type(hdmi->hdmi, val);
19121+		return 0;
19122 	}
19123 
19124-	return (valid) ? MODE_OK : MODE_BAD;
19125-}
19126+	DRM_ERROR("Unknown property [PROP:%d:%s]\n",
19127+		  property->base.id, property->name);
19128 
19129-static void dw_hdmi_rockchip_encoder_disable(struct drm_encoder *encoder)
19130-{
19131+	return -EINVAL;
19132 }
19133 
19134-static bool
19135-dw_hdmi_rockchip_encoder_mode_fixup(struct drm_encoder *encoder,
19136-				    const struct drm_display_mode *mode,
19137-				    struct drm_display_mode *adj_mode)
19138+static int
19139+dw_hdmi_rockchip_get_property(struct drm_connector *connector,
19140+			      const struct drm_connector_state *state,
19141+			      struct drm_property *property,
19142+			      u64 *val,
19143+			      void *data)
19144 {
19145-	return true;
19146-}
19147+	struct rockchip_hdmi *hdmi = (struct rockchip_hdmi *)data;
19148+	struct drm_display_info *info = &connector->display_info;
19149+	struct drm_mode_config *config = &connector->dev->mode_config;
19150+	struct rockchip_drm_private *private = connector->dev->dev_private;
19151+
19152+	if (property == hdmi->color_depth_property) {
19153+		*val = hdmi->colordepth;
19154+		return 0;
19155+	} else if (property == hdmi->hdmi_output_property) {
19156+		*val = hdmi->hdmi_output;
19157+		return 0;
19158+	} else if (property == hdmi->colordepth_capacity) {
19159+		*val = BIT(ROCKCHIP_HDMI_DEPTH_8);
19160+		/* RK3368 only supports 8-bit */
19161+		if (hdmi->unsupported_deep_color)
19162+			return 0;
19163+		if (info->edid_hdmi_dc_modes & DRM_EDID_HDMI_DC_30)
19164+			*val |= BIT(ROCKCHIP_HDMI_DEPTH_10);
19165+		if (info->edid_hdmi_dc_modes & DRM_EDID_HDMI_DC_36)
19166+			*val |= BIT(ROCKCHIP_HDMI_DEPTH_12);
19167+		if (info->edid_hdmi_dc_modes & DRM_EDID_HDMI_DC_48)
19168+			*val |= BIT(ROCKCHIP_HDMI_DEPTH_16);
19169+		if (info->hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
19170+			*val |= BIT(ROCKCHIP_HDMI_DEPTH_420_10);
19171+		if (info->hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
19172+			*val |= BIT(ROCKCHIP_HDMI_DEPTH_420_12);
19173+		if (info->hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
19174+			*val |= BIT(ROCKCHIP_HDMI_DEPTH_420_16);
19175+		return 0;
19176+	} else if (property == hdmi->outputmode_capacity) {
19177+		*val = BIT(DRM_HDMI_OUTPUT_DEFAULT_RGB);
19178+		if (info->color_formats & DRM_COLOR_FORMAT_YCRCB444)
19179+			*val |= BIT(DRM_HDMI_OUTPUT_YCBCR444);
19180+		if (info->color_formats & DRM_COLOR_FORMAT_YCRCB422)
19181+			*val |= BIT(DRM_HDMI_OUTPUT_YCBCR422);
19182+		if (connector->ycbcr_420_allowed &&
19183+		    info->color_formats & DRM_COLOR_FORMAT_YCRCB420)
19184+			*val |= BIT(DRM_HDMI_OUTPUT_YCBCR420);
19185+		return 0;
19186+	} else if (property == hdmi->quant_range) {
19187+		*val = hdmi->hdmi_quant_range;
19188+		return 0;
19189+	} else if (property == config->hdr_output_metadata_property) {
19190+		*val = state->hdr_output_metadata ?
19191+			state->hdr_output_metadata->base.id : 0;
19192+		return 0;
19193+	} else if (property == hdmi->colorimetry_property) {
19194+		*val = hdmi->colorimetry;
19195+		return 0;
19196+	} else if (property == private->connector_id_prop) {
19197+		*val = hdmi->id;
19198+		return 0;
19199+	} else if (property == hdmi->output_hdmi_dvi) {
19200+		*val = hdmi->force_output;
19201+		return 0;
19202+	} else if (property == hdmi->output_type_capacity) {
19203+		*val = dw_hdmi_get_output_type_cap(hdmi->hdmi);
19204+		return 0;
19205+	}
19206 
19207-static void dw_hdmi_rockchip_encoder_mode_set(struct drm_encoder *encoder,
19208-					      struct drm_display_mode *mode,
19209-					      struct drm_display_mode *adj_mode)
19210-{
19211-	struct rockchip_hdmi *hdmi = to_rockchip_hdmi(encoder);
19212+	DRM_ERROR("Unknown property [PROP:%d:%s]\n",
19213+		  property->base.id, property->name);
19214 
19215-	clk_set_rate(hdmi->vpll_clk, adj_mode->clock * 1000);
19216+	return -EINVAL;
19217 }
19218 
19219-static void dw_hdmi_rockchip_encoder_enable(struct drm_encoder *encoder)
19220+static const struct dw_hdmi_property_ops dw_hdmi_rockchip_property_ops = {
19221+	.attach_properties	= dw_hdmi_rockchip_attach_properties,
19222+	.destroy_properties	= dw_hdmi_rockchip_destroy_properties,
19223+	.set_property		= dw_hdmi_rockchip_set_property,
19224+	.get_property		= dw_hdmi_rockchip_get_property,
19225+};
19226+
19227+static void dw_hdmi_rockchip_encoder_mode_set(struct drm_encoder *encoder,
19228+					      struct drm_display_mode *mode,
19229+					      struct drm_display_mode *adj)
19230 {
19231 	struct rockchip_hdmi *hdmi = to_rockchip_hdmi(encoder);
19232-	u32 val;
19233-	int ret;
19234-
19235-	if (hdmi->chip_data->lcdsel_grf_reg < 0)
19236+	struct drm_crtc *crtc;
19237+	struct rockchip_crtc_state *s;
19238+	if (!encoder->crtc)
19239 		return;
19240-
19241-	ret = drm_of_encoder_active_endpoint_id(hdmi->dev->of_node, encoder);
19242-	if (ret)
19243-		val = hdmi->chip_data->lcdsel_lit;
19244-	else
19245-		val = hdmi->chip_data->lcdsel_big;
19246-
19247-	ret = clk_prepare_enable(hdmi->grf_clk);
19248-	if (ret < 0) {
19249-		DRM_DEV_ERROR(hdmi->dev, "failed to enable grfclk %d\n", ret);
19250+	crtc = encoder->crtc;
19251+	if (!crtc->state)
19252 		return;
19253-	}
19254-
19255-	ret = regmap_write(hdmi->regmap, hdmi->chip_data->lcdsel_grf_reg, val);
19256-	if (ret != 0)
19257-		DRM_DEV_ERROR(hdmi->dev, "Could not write to GRF: %d\n", ret);
19258-
19259-	clk_disable_unprepare(hdmi->grf_clk);
19260-	DRM_DEV_DEBUG(hdmi->dev, "vop %s output to hdmi\n",
19261-		      ret ? "LIT" : "BIG");
19262-}
19263+	s = to_rockchip_crtc_state(crtc->state);
19264 
19265-static int
19266-dw_hdmi_rockchip_encoder_atomic_check(struct drm_encoder *encoder,
19267-				      struct drm_crtc_state *crtc_state,
19268-				      struct drm_connector_state *conn_state)
19269-{
19270-	struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state);
19271+	if (!s)
19272+		return;
19273+	if (hdmi->is_hdmi_qp) {
19274+		s->dsc_enable = 0;
19275+		if (hdmi->link_cfg.dsc_mode)
19276+			dw_hdmi_qp_dsc_configure(hdmi, s, crtc->state);
19277 
19278-	s->output_mode = ROCKCHIP_OUT_MODE_AAAA;
19279-	s->output_type = DRM_MODE_CONNECTOR_HDMIA;
19280+		phy_set_bus_width(hdmi->phy, hdmi->phy_bus_width);
19281+	}
19282 
19283-	return 0;
19284+	clk_set_rate(hdmi->phyref_clk, adj->crtc_clock * 1000);
19285 }
19286 
19287 static const struct drm_encoder_helper_funcs dw_hdmi_rockchip_encoder_helper_funcs = {
19288-	.mode_fixup = dw_hdmi_rockchip_encoder_mode_fixup,
19289-	.mode_set   = dw_hdmi_rockchip_encoder_mode_set,
19290 	.enable     = dw_hdmi_rockchip_encoder_enable,
19291 	.disable    = dw_hdmi_rockchip_encoder_disable,
19292 	.atomic_check = dw_hdmi_rockchip_encoder_atomic_check,
19293+	.mode_set = dw_hdmi_rockchip_encoder_mode_set,
19294 };
19295 
19296-static int dw_hdmi_rockchip_genphy_init(struct dw_hdmi *dw_hdmi, void *data,
19297-					const struct drm_display_info *display,
19298-					const struct drm_display_mode *mode)
19299+static void
19300+dw_hdmi_rockchip_genphy_disable(struct dw_hdmi *dw_hdmi, void *data)
19301 {
19302 	struct rockchip_hdmi *hdmi = (struct rockchip_hdmi *)data;
19303 
19304-	return phy_power_on(hdmi->phy);
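+	/* drop all outstanding power-on references so the PHY is fully powered down */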
19305+	while (hdmi->phy->power_count > 0)
19306+		phy_power_off(hdmi->phy);
19307 }
19308 
19309-static void dw_hdmi_rockchip_genphy_disable(struct dw_hdmi *dw_hdmi, void *data)
19310+static int
19311+dw_hdmi_rockchip_genphy_init(struct dw_hdmi *dw_hdmi, void *data,
19312+			     const struct drm_display_info *display,
19313+			     const struct drm_display_mode *mode)
19314 {
19315 	struct rockchip_hdmi *hdmi = (struct rockchip_hdmi *)data;
19316 
19317-	phy_power_off(hdmi->phy);
19318+	dw_hdmi_rockchip_genphy_disable(dw_hdmi, data);
19319+	dw_hdmi_set_high_tmds_clock_ratio(dw_hdmi, display);
19320+	return phy_power_on(hdmi->phy);
19321 }
19322 
19323 static void dw_hdmi_rk3228_setup_hpd(struct dw_hdmi *dw_hdmi, void *data)
19324@@ -391,6 +2609,90 @@ static void dw_hdmi_rk3328_setup_hpd(struct dw_hdmi *dw_hdmi, void *data)
19325 			      RK3328_HDMI_HPD_IOE));
19326 }
19327 
19328+static void dw_hdmi_qp_rockchip_phy_disable(struct dw_hdmi_qp *dw_hdmi,
19329+					    void *data)
19330+{
19331+	struct rockchip_hdmi *hdmi = (struct rockchip_hdmi *)data;
19332+
19333+	while (hdmi->phy->power_count > 0)
19334+		phy_power_off(hdmi->phy);
19335+}
19336+
19337+static int dw_hdmi_qp_rockchip_genphy_init(struct dw_hdmi_qp *dw_hdmi, void *data,
19338+					   struct drm_display_mode *mode)
19339+{
19340+	struct rockchip_hdmi *hdmi = (struct rockchip_hdmi *)data;
19341+
19342+	dw_hdmi_qp_rockchip_phy_disable(dw_hdmi, data);
19343+
19344+	return phy_power_on(hdmi->phy);
19345+}
19346+
19347+static enum drm_connector_status
19348+dw_hdmi_rk3588_read_hpd(struct dw_hdmi_qp *dw_hdmi, void *data)
19349+{
19350+	u32 val;
19351+	int ret;
19352+	struct rockchip_hdmi *hdmi = (struct rockchip_hdmi *)data;
19353+
19354+	regmap_read(hdmi->regmap, RK3588_GRF_SOC_STATUS1, &val);
19355+
19356+	if (!hdmi->id) {
19357+		if (val & RK3588_HDMI0_LEVEL_INT) {
19358+			hdmi->hpd_stat = true;
19359+			ret = connector_status_connected;
19360+		} else {
19361+			hdmi->hpd_stat = false;
19362+			ret = connector_status_disconnected;
19363+		}
19364+	} else {
19365+		if (val & RK3588_HDMI1_LEVEL_INT) {
19366+			hdmi->hpd_stat = true;
19367+			ret = connector_status_connected;
19368+		} else {
19369+			hdmi->hpd_stat = false;
19370+			ret = connector_status_disconnected;
19371+		}
19372+	}
19373+
19374+	return ret;
19375+}
19376+
19377+static void dw_hdmi_rk3588_setup_hpd(struct dw_hdmi_qp *dw_hdmi, void *data)
19378+{
19379+	struct rockchip_hdmi *hdmi = (struct rockchip_hdmi *)data;
19380+	u32 val;
19381+
19382+	if (!hdmi->id) {
19383+		val = HIWORD_UPDATE(RK3588_HDMI0_HPD_INT_CLR,
19384+				    RK3588_HDMI0_HPD_INT_CLR) |
19385+		      HIWORD_UPDATE(0, RK3588_HDMI0_HPD_INT_MSK);
19386+	} else {
19387+		val = HIWORD_UPDATE(RK3588_HDMI1_HPD_INT_CLR,
19388+				    RK3588_HDMI1_HPD_INT_CLR) |
19389+		      HIWORD_UPDATE(0, RK3588_HDMI1_HPD_INT_MSK);
19390+	}
19391+
19392+	regmap_write(hdmi->regmap, RK3588_GRF_SOC_CON2, val);
19393+}
19394+
19395+static void dw_hdmi_rk3588_phy_set_mode(struct dw_hdmi_qp *dw_hdmi, void *data,
19396+					u32 mode_mask, bool enable)
19397+{
19398+	struct rockchip_hdmi *hdmi = (struct rockchip_hdmi *)data;
19399+
19400+	if (!hdmi->phy)
19401+		return;
19402+
19403+	/* set phy earc/frl mode */
19404+	if (enable)
19405+		hdmi->phy_bus_width |= mode_mask;
19406+	else
19407+		hdmi->phy_bus_width &= ~mode_mask;
19408+
19409+	phy_set_bus_width(hdmi->phy, hdmi->phy_bus_width);
19410+}
19411+
19412 static const struct dw_hdmi_phy_ops rk3228_hdmi_phy_ops = {
19413 	.init		= dw_hdmi_rockchip_genphy_init,
19414 	.disable	= dw_hdmi_rockchip_genphy_disable,
19415@@ -412,6 +2714,8 @@ static const struct dw_hdmi_plat_data rk3228_hdmi_drv_data = {
19416 	.phy_ops = &rk3228_hdmi_phy_ops,
19417 	.phy_name = "inno_dw_hdmi_phy2",
19418 	.phy_force_vendor = true,
19419+	.max_tmdsclk = 371250,
19420+	.ycbcr_420_allowed = true,
19421 };
19422 
19423 static struct rockchip_hdmi_chip_data rk3288_chip_data = {
19424@@ -423,9 +2727,13 @@ static struct rockchip_hdmi_chip_data rk3288_chip_data = {
19425 static const struct dw_hdmi_plat_data rk3288_hdmi_drv_data = {
19426 	.mode_valid = dw_hdmi_rockchip_mode_valid,
19427 	.mpll_cfg   = rockchip_mpll_cfg,
19428+	.mpll_cfg_420 = rockchip_rk3288w_mpll_cfg_420,
19429 	.cur_ctr    = rockchip_cur_ctr,
19430 	.phy_config = rockchip_phy_config,
19431 	.phy_data = &rk3288_chip_data,
19432+	.tmds_n_table = rockchip_werid_tmds_n_table,
19433+	.unsupported_yuv_input = true,
19434+	.ycbcr_420_allowed = true,
19435 };
19436 
19437 static const struct dw_hdmi_phy_ops rk3328_hdmi_phy_ops = {
19438@@ -450,6 +2758,24 @@ static const struct dw_hdmi_plat_data rk3328_hdmi_drv_data = {
19439 	.phy_name = "inno_dw_hdmi_phy2",
19440 	.phy_force_vendor = true,
19441 	.use_drm_infoframe = true,
19442+	.max_tmdsclk = 371250,
19443+	.ycbcr_420_allowed = true,
19444+};
19445+
19446+static struct rockchip_hdmi_chip_data rk3368_chip_data = {
19447+	.lcdsel_grf_reg = -1,
19448+};
19449+
19450+static const struct dw_hdmi_plat_data rk3368_hdmi_drv_data = {
19451+	.mode_valid = dw_hdmi_rockchip_mode_valid,
19452+	.mpll_cfg   = rockchip_mpll_cfg,
19453+	.mpll_cfg_420 = rockchip_mpll_cfg_420,
19454+	.cur_ctr    = rockchip_cur_ctr,
19455+	.phy_config = rockchip_phy_config,
19456+	.phy_data = &rk3368_chip_data,
19457+	.unsupported_deep_color = true,
19458+	.max_tmdsclk = 340000,
19459+	.ycbcr_420_allowed = true,
19460 };
19461 
19462 static struct rockchip_hdmi_chip_data rk3399_chip_data = {
19463@@ -461,10 +2787,51 @@ static struct rockchip_hdmi_chip_data rk3399_chip_data = {
19464 static const struct dw_hdmi_plat_data rk3399_hdmi_drv_data = {
19465 	.mode_valid = dw_hdmi_rockchip_mode_valid,
19466 	.mpll_cfg   = rockchip_mpll_cfg,
19467+	.mpll_cfg_420 = rockchip_mpll_cfg_420,
19468 	.cur_ctr    = rockchip_cur_ctr,
19469 	.phy_config = rockchip_phy_config,
19470 	.phy_data = &rk3399_chip_data,
19471 	.use_drm_infoframe = true,
19472+	.ycbcr_420_allowed = true,
19473+};
19474+
19475+static struct rockchip_hdmi_chip_data rk3568_chip_data = {
19476+	.lcdsel_grf_reg = -1,
19477+	.ddc_en_reg = RK3568_GRF_VO_CON1,
19478+};
19479+
19480+static const struct dw_hdmi_plat_data rk3568_hdmi_drv_data = {
19481+	.mode_valid = dw_hdmi_rockchip_mode_valid,
19482+	.mpll_cfg   = rockchip_mpll_cfg,
19483+	.mpll_cfg_420 = rockchip_mpll_cfg_420,
19484+	.cur_ctr    = rockchip_cur_ctr,
19485+	.phy_config = rockchip_phy_config,
19486+	.phy_data = &rk3568_chip_data,
19487+	.ycbcr_420_allowed = true,
19488+	.use_drm_infoframe = true,
19489+};
19490+
19491+static const struct dw_hdmi_qp_phy_ops rk3588_hdmi_phy_ops = {
19492+	.init		= dw_hdmi_qp_rockchip_genphy_init,
19493+	.disable	= dw_hdmi_qp_rockchip_phy_disable,
19494+	.read_hpd	= dw_hdmi_rk3588_read_hpd,
19495+	.setup_hpd	= dw_hdmi_rk3588_setup_hpd,
19496+	.set_mode       = dw_hdmi_rk3588_phy_set_mode,
19497+};
19498+
19499+static struct rockchip_hdmi_chip_data rk3588_hdmi_chip_data = {
19500+	.lcdsel_grf_reg = -1,
19501+	.ddc_en_reg = RK3588_GRF_VO1_CON3,
19502+};
19503+
19504+static const struct dw_hdmi_plat_data rk3588_hdmi_drv_data = {
19505+	.phy_data = &rk3588_hdmi_chip_data,
19506+	.qp_phy_ops = &rk3588_hdmi_phy_ops,
19507+	.phy_name = "samsung_hdptx_phy",
19508+	.phy_force_vendor = true,
19509+	.ycbcr_420_allowed = true,
19510+	.is_hdmi_qp = true,
19511+	.use_drm_infoframe = true,
19512 };
19513 
19514 static const struct of_device_id dw_hdmi_rockchip_dt_ids[] = {
19515@@ -477,9 +2844,19 @@ static const struct of_device_id dw_hdmi_rockchip_dt_ids[] = {
19516 	{ .compatible = "rockchip,rk3328-dw-hdmi",
19517 	  .data = &rk3328_hdmi_drv_data
19518 	},
19519+	{
19520+	 .compatible = "rockchip,rk3368-dw-hdmi",
19521+	 .data = &rk3368_hdmi_drv_data
19522+	},
19523 	{ .compatible = "rockchip,rk3399-dw-hdmi",
19524 	  .data = &rk3399_hdmi_drv_data
19525 	},
19526+	{ .compatible = "rockchip,rk3568-dw-hdmi",
19527+	  .data = &rk3568_hdmi_drv_data
19528+	},
19529+	{ .compatible = "rockchip,rk3588-dw-hdmi",
19530+	  .data = &rk3588_hdmi_drv_data
19531+	},
19532 	{},
19533 };
19534 MODULE_DEVICE_TABLE(of, dw_hdmi_rockchip_dt_ids);
19535@@ -493,7 +2870,8 @@ static int dw_hdmi_rockchip_bind(struct device *dev, struct device *master,
19536 	struct drm_device *drm = data;
19537 	struct drm_encoder *encoder;
19538 	struct rockchip_hdmi *hdmi;
19539-	int ret;
19540+	int ret, id;
19541+	u32 val;
19542 
19543 	if (!pdev->dev.of_node)
19544 		return -ENODEV;
19545@@ -508,12 +2886,44 @@ static int dw_hdmi_rockchip_bind(struct device *dev, struct device *master,
19546 	if (!plat_data)
19547 		return -ENOMEM;
19548 
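+	/* distinguish multiple HDMI controllers by the "hdmi" DT alias; default to id 0 */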
19549+	id = of_alias_get_id(dev->of_node, "hdmi");
19550+	if (id < 0)
19551+		id = 0;
19552+	hdmi->id = id;
19553 	hdmi->dev = &pdev->dev;
19554 	hdmi->chip_data = plat_data->phy_data;
19555+
19556 	plat_data->phy_data = hdmi;
19557+	plat_data->get_input_bus_format =
19558+		dw_hdmi_rockchip_get_input_bus_format;
19559+	plat_data->get_output_bus_format =
19560+		dw_hdmi_rockchip_get_output_bus_format;
19561+	plat_data->get_enc_in_encoding =
19562+		dw_hdmi_rockchip_get_enc_in_encoding;
19563+	plat_data->get_enc_out_encoding =
19564+		dw_hdmi_rockchip_get_enc_out_encoding;
19565+	plat_data->get_quant_range =
19566+		dw_hdmi_rockchip_get_quant_range;
19567+	plat_data->get_hdr_property =
19568+		dw_hdmi_rockchip_get_hdr_property;
19569+	plat_data->get_hdr_blob =
19570+		dw_hdmi_rockchip_get_hdr_blob;
19571+	plat_data->get_color_changed =
19572+		dw_hdmi_rockchip_get_color_changed;
19573+	plat_data->get_yuv422_format =
19574+		dw_hdmi_rockchip_get_yuv422_format;
19575+	plat_data->get_edid_dsc_info =
19576+		dw_hdmi_rockchip_get_edid_dsc_info;
19577+	plat_data->get_next_hdr_data =
19578+		dw_hdmi_rockchip_get_next_hdr_data;
19579+	plat_data->get_link_cfg = dw_hdmi_rockchip_get_link_cfg;
19580+	plat_data->set_grf_cfg = rk3588_set_grf_cfg;
19581+
19582+	plat_data->property_ops = &dw_hdmi_rockchip_property_ops;
19583+
19584 	encoder = &hdmi->encoder;
19585 
19586-	encoder->possible_crtcs = drm_of_find_possible_crtcs(drm, dev->of_node);
19587+	encoder->possible_crtcs = rockchip_drm_of_find_possible_crtcs(drm, dev->of_node);
19588 	/*
19589 	 * If we failed to find the CRTC(s) which this encoder is
19590 	 * supposed to be connected to, it's because the CRTC has
19591@@ -523,25 +2933,150 @@ static int dw_hdmi_rockchip_bind(struct device *dev, struct device *master,
19592 	if (encoder->possible_crtcs == 0)
19593 		return -EPROBE_DEFER;
19594 
19595+	if (!plat_data->max_tmdsclk)
19596+		hdmi->max_tmdsclk = 594000;
19597+	else
19598+		hdmi->max_tmdsclk = plat_data->max_tmdsclk;
19599+
19600+	hdmi->is_hdmi_qp = plat_data->is_hdmi_qp;
19601+
19602+	hdmi->unsupported_yuv_input = plat_data->unsupported_yuv_input;
19603+	hdmi->unsupported_deep_color = plat_data->unsupported_deep_color;
19604+
19605 	ret = rockchip_hdmi_parse_dt(hdmi);
19606 	if (ret) {
19607 		DRM_DEV_ERROR(hdmi->dev, "Unable to parse OF data\n");
19608 		return ret;
19609 	}
19610 
19611-	ret = clk_prepare_enable(hdmi->vpll_clk);
19612+	ret = clk_prepare_enable(hdmi->aud_clk);
19613+	if (ret) {
19614+		dev_err(hdmi->dev, "Failed to enable HDMI aud_clk: %d\n", ret);
19615+		return ret;
19616+	}
19617+
19618+	ret = clk_prepare_enable(hdmi->hpd_clk);
19619+	if (ret) {
19620+		dev_err(hdmi->dev, "Failed to enable HDMI hpd_clk: %d\n", ret);
19621+		return ret;
19622+	}
19623+
19624+	ret = clk_prepare_enable(hdmi->hclk_vo1);
19625+	if (ret) {
19626+		dev_err(hdmi->dev, "Failed to enable HDMI hclk_vo1: %d\n", ret);
19627+		return ret;
19628+	}
19629+
19630+	ret = clk_prepare_enable(hdmi->earc_clk);
19631+	if (ret) {
19632+		dev_err(hdmi->dev, "Failed to enable HDMI earc_clk: %d\n", ret);
19633+		return ret;
19634+	}
19635+
19636+	ret = clk_prepare_enable(hdmi->hdmitx_ref);
19637+	if (ret) {
19638+		dev_err(hdmi->dev, "Failed to enable HDMI hdmitx_ref: %d\n",
19639+			ret);
19640+		return ret;
19641+	}
19642+
19643+	ret = clk_prepare_enable(hdmi->pclk);
19644+	if (ret) {
19645+		dev_err(hdmi->dev, "Failed to enable HDMI pclk: %d\n", ret);
19646+		return ret;
19647+	}
19648+
19649+	if (hdmi->chip_data->ddc_en_reg == RK3568_GRF_VO_CON1) {
19650+		regmap_write(hdmi->regmap, RK3568_GRF_VO_CON1,
19651+			     HIWORD_UPDATE(RK3568_HDMI_SDAIN_MSK |
19652+					   RK3568_HDMI_SCLIN_MSK,
19653+					   RK3568_HDMI_SDAIN_MSK |
19654+					   RK3568_HDMI_SCLIN_MSK));
19655+	}
19656+
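+	/* RK3588: set up GRF I2C/HPD routing and the grant select for this controller */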
19657+	if (hdmi->is_hdmi_qp) {
19658+		if (!hdmi->id) {
19659+			val = HIWORD_UPDATE(RK3588_SCLIN_MASK, RK3588_SCLIN_MASK) |
19660+			      HIWORD_UPDATE(RK3588_SDAIN_MASK, RK3588_SDAIN_MASK) |
19661+			      HIWORD_UPDATE(RK3588_MODE_MASK, RK3588_MODE_MASK) |
19662+			      HIWORD_UPDATE(RK3588_I2S_SEL_MASK, RK3588_I2S_SEL_MASK);
19663+			regmap_write(hdmi->vo1_regmap, RK3588_GRF_VO1_CON3, val);
19664+
19665+			val = HIWORD_UPDATE(RK3588_SET_HPD_PATH_MASK,
19666+					    RK3588_SET_HPD_PATH_MASK);
19667+			regmap_write(hdmi->regmap, RK3588_GRF_SOC_CON7, val);
19668+
19669+			val = HIWORD_UPDATE(RK3588_HDMI0_GRANT_SEL,
19670+					    RK3588_HDMI0_GRANT_SEL);
19671+			regmap_write(hdmi->vo1_regmap, RK3588_GRF_VO1_CON9, val);
19672+		} else {
19673+			val = HIWORD_UPDATE(RK3588_SCLIN_MASK, RK3588_SCLIN_MASK) |
19674+			      HIWORD_UPDATE(RK3588_SDAIN_MASK, RK3588_SDAIN_MASK) |
19675+			      HIWORD_UPDATE(RK3588_MODE_MASK, RK3588_MODE_MASK) |
19676+			      HIWORD_UPDATE(RK3588_I2S_SEL_MASK, RK3588_I2S_SEL_MASK);
19677+			regmap_write(hdmi->vo1_regmap, RK3588_GRF_VO1_CON6, val);
19678+
19679+			val = HIWORD_UPDATE(RK3588_SET_HPD_PATH_MASK,
19680+					    RK3588_SET_HPD_PATH_MASK);
19681+			regmap_write(hdmi->regmap, RK3588_GRF_SOC_CON7, val);
19682+
19683+			val = HIWORD_UPDATE(RK3588_HDMI1_GRANT_SEL,
19684+					    RK3588_HDMI1_GRANT_SEL);
19685+			regmap_write(hdmi->vo1_regmap, RK3588_GRF_VO1_CON9, val);
19686+		}
19687+		init_hpd_work(hdmi);
19688+	}
19689+
19690+	ret = clk_prepare_enable(hdmi->phyref_clk);
19691 	if (ret) {
19692 		DRM_DEV_ERROR(hdmi->dev, "Failed to enable HDMI vpll: %d\n",
19693 			      ret);
19694 		return ret;
19695 	}
19696 
19697+	ret = clk_prepare_enable(hdmi->hclk_vio);
19698+	if (ret) {
19699+		dev_err(hdmi->dev, "Failed to enable HDMI hclk_vio: %d\n",
19700+			ret);
19701+		return ret;
19702+	}
19703+
19704+	ret = clk_prepare_enable(hdmi->hclk_vop);
19705+	if (ret) {
19706+		dev_err(hdmi->dev, "Failed to enable HDMI hclk_vop: %d\n",
19707+			ret);
19708+		return ret;
19709+	}
19710+
19711+	if (!hdmi->id)
19712+		val = HIWORD_UPDATE(RK3588_HDMI0_HPD_INT_MSK, RK3588_HDMI0_HPD_INT_MSK);
19713+	else
19714+		val = HIWORD_UPDATE(RK3588_HDMI1_HPD_INT_MSK, RK3588_HDMI1_HPD_INT_MSK);
19715+	regmap_write(hdmi->regmap, RK3588_GRF_SOC_CON2, val);
19716+
19717+	if (hdmi->is_hdmi_qp) {
19718+		hdmi->hpd_irq = platform_get_irq(pdev, 4);
19719+		if (hdmi->hpd_irq < 0)
19720+			return hdmi->hpd_irq;
19721+
19722+		ret = devm_request_threaded_irq(hdmi->dev, hdmi->hpd_irq,
19723+						rockchip_hdmi_hardirq,
19724+						rockchip_hdmi_irq,
19725+						IRQF_SHARED, "dw-hdmi-qp-hpd",
19726+						hdmi);
19727+		if (ret)
19728+			return ret;
19729+	}
19730+
19731 	hdmi->phy = devm_phy_optional_get(dev, "hdmi");
19732 	if (IS_ERR(hdmi->phy)) {
19733-		ret = PTR_ERR(hdmi->phy);
19734-		if (ret != -EPROBE_DEFER)
19735-			DRM_DEV_ERROR(hdmi->dev, "failed to get phy\n");
19736-		return ret;
19737+		hdmi->phy = devm_phy_optional_get(dev, "hdmi_phy");
19738+		if (IS_ERR(hdmi->phy)) {
19739+			ret = PTR_ERR(hdmi->phy);
19740+			if (ret != -EPROBE_DEFER)
19741+				DRM_DEV_ERROR(hdmi->dev, "failed to get phy\n");
19742+			return ret;
19743+		}
19744 	}
19745 
19746 	drm_encoder_helper_add(encoder, &dw_hdmi_rockchip_encoder_helper_funcs);
19747@@ -549,6 +3084,23 @@ static int dw_hdmi_rockchip_bind(struct device *dev, struct device *master,
19748 
19749 	platform_set_drvdata(pdev, hdmi);
19750 
19751+	if (hdmi->is_hdmi_qp) {
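+		/*
+		 * The HDMI QP core has its own bind path: register the
+		 * sub-device here and return without calling dw_hdmi_bind().
+		 */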
19752+		hdmi->hdmi_qp = dw_hdmi_qp_bind(pdev, encoder, plat_data);
19753+
19754+		if (IS_ERR(hdmi->hdmi_qp)) {
19755+			ret = PTR_ERR(hdmi->hdmi_qp);
19756+			drm_encoder_cleanup(encoder);
19757+		}
19758+
19759+		if (plat_data->connector) {
19760+			hdmi->sub_dev.connector = plat_data->connector;
19761+			hdmi->sub_dev.of_node = dev->of_node;
19762+			rockchip_drm_register_sub_dev(&hdmi->sub_dev);
19763+		}
19764+
19765+		return ret;
19766+	}
19767+
19768 	hdmi->hdmi = dw_hdmi_bind(pdev, encoder, plat_data);
19769 
19770 	/*
19771@@ -558,7 +3110,20 @@ static int dw_hdmi_rockchip_bind(struct device *dev, struct device *master,
19772 	if (IS_ERR(hdmi->hdmi)) {
19773 		ret = PTR_ERR(hdmi->hdmi);
19774 		drm_encoder_cleanup(encoder);
19775-		clk_disable_unprepare(hdmi->vpll_clk);
19776+		clk_disable_unprepare(hdmi->aud_clk);
19777+		clk_disable_unprepare(hdmi->phyref_clk);
19778+		clk_disable_unprepare(hdmi->hclk_vop);
19779+		clk_disable_unprepare(hdmi->hpd_clk);
19780+		clk_disable_unprepare(hdmi->hclk_vo1);
19781+		clk_disable_unprepare(hdmi->earc_clk);
19782+		clk_disable_unprepare(hdmi->hdmitx_ref);
19783+		clk_disable_unprepare(hdmi->pclk);
19784+	}
19785+
19786+	if (plat_data->connector) {
19787+		hdmi->sub_dev.connector = plat_data->connector;
19788+		hdmi->sub_dev.of_node = dev->of_node;
19789+		rockchip_drm_register_sub_dev(&hdmi->sub_dev);
19790 	}
19791 
19792 	return ret;
19793@@ -569,8 +3134,27 @@ static void dw_hdmi_rockchip_unbind(struct device *dev, struct device *master,
19794 {
19795 	struct rockchip_hdmi *hdmi = dev_get_drvdata(dev);
19796 
19797-	dw_hdmi_unbind(hdmi->hdmi);
19798-	clk_disable_unprepare(hdmi->vpll_clk);
19799+	if (hdmi->is_hdmi_qp) {
19800+		cancel_delayed_work(&hdmi->work);
19801+		flush_workqueue(hdmi->workqueue);
19802+		destroy_workqueue(hdmi->workqueue);
19803+	}
19804+
19805+	if (hdmi->sub_dev.connector)
19806+		rockchip_drm_unregister_sub_dev(&hdmi->sub_dev);
19807+
19808+	if (hdmi->is_hdmi_qp)
19809+		dw_hdmi_qp_unbind(hdmi->hdmi_qp);
19810+	else
19811+		dw_hdmi_unbind(hdmi->hdmi);
19812+	clk_disable_unprepare(hdmi->aud_clk);
19813+	clk_disable_unprepare(hdmi->phyref_clk);
19814+	clk_disable_unprepare(hdmi->hclk_vop);
19815+	clk_disable_unprepare(hdmi->hpd_clk);
19816+	clk_disable_unprepare(hdmi->hclk_vo1);
19817+	clk_disable_unprepare(hdmi->earc_clk);
19818+	clk_disable_unprepare(hdmi->hdmitx_ref);
19819+	clk_disable_unprepare(hdmi->pclk);
19820 }
19821 
19822 static const struct component_ops dw_hdmi_rockchip_ops = {
19823@@ -580,32 +3164,105 @@ static const struct component_ops dw_hdmi_rockchip_ops = {
19824 
19825 static int dw_hdmi_rockchip_probe(struct platform_device *pdev)
19826 {
19827+	pm_runtime_enable(&pdev->dev);
19828+	pm_runtime_get_sync(&pdev->dev);
19829+
19830 	return component_add(&pdev->dev, &dw_hdmi_rockchip_ops);
19831 }
19832 
19833+static void dw_hdmi_rockchip_shutdown(struct platform_device *pdev)
19834+{
19835+	struct rockchip_hdmi *hdmi = dev_get_drvdata(&pdev->dev);
19836+
19837+	if (!hdmi)
19838+		return;
19839+
19840+	if (hdmi->is_hdmi_qp) {
19841+		cancel_delayed_work(&hdmi->work);
19842+		flush_workqueue(hdmi->workqueue);
19843+		dw_hdmi_qp_suspend(hdmi->dev, hdmi->hdmi_qp);
19844+	} else {
19845+		dw_hdmi_suspend(hdmi->hdmi);
19846+	}
19847+	pm_runtime_put_sync(&pdev->dev);
19848+}
19849+
19850 static int dw_hdmi_rockchip_remove(struct platform_device *pdev)
19851 {
19852 	component_del(&pdev->dev, &dw_hdmi_rockchip_ops);
19853+	pm_runtime_disable(&pdev->dev);
19854+
19855+	return 0;
19856+}
19857+
19858+static int dw_hdmi_rockchip_suspend(struct device *dev)
19859+{
19860+	struct rockchip_hdmi *hdmi = dev_get_drvdata(dev);
19861+
19862+	if (hdmi->is_hdmi_qp)
19863+		dw_hdmi_qp_suspend(dev, hdmi->hdmi_qp);
19864+	else
19865+		dw_hdmi_suspend(hdmi->hdmi);
19866+	pm_runtime_put_sync(dev);
19867 
19868 	return 0;
19869 }
19870 
19871-static int __maybe_unused dw_hdmi_rockchip_resume(struct device *dev)
19872+static int dw_hdmi_rockchip_resume(struct device *dev)
19873 {
19874 	struct rockchip_hdmi *hdmi = dev_get_drvdata(dev);
19875+	u32 val;
19876+
19877+	if (hdmi->is_hdmi_qp) {
19878+		if (!hdmi->id) {
19879+			val = HIWORD_UPDATE(RK3588_SCLIN_MASK, RK3588_SCLIN_MASK) |
19880+			      HIWORD_UPDATE(RK3588_SDAIN_MASK, RK3588_SDAIN_MASK) |
19881+			      HIWORD_UPDATE(RK3588_MODE_MASK, RK3588_MODE_MASK) |
19882+			      HIWORD_UPDATE(RK3588_I2S_SEL_MASK, RK3588_I2S_SEL_MASK);
19883+			regmap_write(hdmi->vo1_regmap, RK3588_GRF_VO1_CON3, val);
19884+
19885+			val = HIWORD_UPDATE(RK3588_SET_HPD_PATH_MASK,
19886+					    RK3588_SET_HPD_PATH_MASK);
19887+			regmap_write(hdmi->regmap, RK3588_GRF_SOC_CON7, val);
19888+
19889+			val = HIWORD_UPDATE(RK3588_HDMI0_GRANT_SEL,
19890+					    RK3588_HDMI0_GRANT_SEL);
19891+			regmap_write(hdmi->vo1_regmap, RK3588_GRF_VO1_CON9, val);
19892+		} else {
19893+			val = HIWORD_UPDATE(RK3588_SCLIN_MASK, RK3588_SCLIN_MASK) |
19894+			      HIWORD_UPDATE(RK3588_SDAIN_MASK, RK3588_SDAIN_MASK) |
19895+			      HIWORD_UPDATE(RK3588_MODE_MASK, RK3588_MODE_MASK) |
19896+			      HIWORD_UPDATE(RK3588_I2S_SEL_MASK, RK3588_I2S_SEL_MASK);
19897+			regmap_write(hdmi->vo1_regmap, RK3588_GRF_VO1_CON6, val);
19898+
19899+			val = HIWORD_UPDATE(RK3588_SET_HPD_PATH_MASK,
19900+					    RK3588_SET_HPD_PATH_MASK);
19901+			regmap_write(hdmi->regmap, RK3588_GRF_SOC_CON7, val);
19902+
19903+			val = HIWORD_UPDATE(RK3588_HDMI1_GRANT_SEL,
19904+					    RK3588_HDMI1_GRANT_SEL);
19905+			regmap_write(hdmi->vo1_regmap, RK3588_GRF_VO1_CON9, val);
19906+		}
19907 
19908-	dw_hdmi_resume(hdmi->hdmi);
19909+		dw_hdmi_qp_resume(dev, hdmi->hdmi_qp);
19910+		drm_helper_hpd_irq_event(hdmi->encoder.dev);
19911+	} else {
19912+		dw_hdmi_resume(hdmi->hdmi);
19913+	}
19914+	pm_runtime_get_sync(dev);
19915 
19916 	return 0;
19917 }
19918 
19919 static const struct dev_pm_ops dw_hdmi_rockchip_pm = {
19920-	SET_SYSTEM_SLEEP_PM_OPS(NULL, dw_hdmi_rockchip_resume)
19921+	SET_SYSTEM_SLEEP_PM_OPS(dw_hdmi_rockchip_suspend,
19922+				dw_hdmi_rockchip_resume)
19923 };
19924 
19925 struct platform_driver dw_hdmi_rockchip_pltfm_driver = {
19926 	.probe  = dw_hdmi_rockchip_probe,
19927 	.remove = dw_hdmi_rockchip_remove,
19928+	.shutdown = dw_hdmi_rockchip_shutdown,
19929 	.driver = {
19930 		.name = "dwhdmi-rockchip",
19931 		.pm = &dw_hdmi_rockchip_pm,
19932diff --git a/drivers/gpu/drm/rockchip/inno_hdmi.c b/drivers/gpu/drm/rockchip/inno_hdmi.c
19933index 78120da5e..898a99fe1 100644
19934--- a/drivers/gpu/drm/rockchip/inno_hdmi.c
19935+++ b/drivers/gpu/drm/rockchip/inno_hdmi.c
19936@@ -602,7 +602,7 @@ static int inno_hdmi_register(struct drm_device *drm, struct inno_hdmi *hdmi)
19937 	struct drm_encoder *encoder = &hdmi->encoder;
19938 	struct device *dev = hdmi->dev;
19939 
19940-	encoder->possible_crtcs = drm_of_find_possible_crtcs(drm, dev->of_node);
19941+	encoder->possible_crtcs = rockchip_drm_of_find_possible_crtcs(drm, dev->of_node);
19942 
19943 	/*
19944 	 * If we failed to find the CRTC(s) which this encoder is
19945diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
19946index 0f3eb392f..35944088d 100644
19947--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
19948+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
19949@@ -6,17 +6,24 @@
19950  * based on exynos_drm_drv.c
19951  */
19952 
19953+#include <linux/dma-buf-cache.h>
19954 #include <linux/dma-mapping.h>
19955 #include <linux/dma-iommu.h>
19956+#include <linux/genalloc.h>
19957 #include <linux/pm_runtime.h>
19958 #include <linux/module.h>
19959+#include <linux/of_address.h>
19960 #include <linux/of_graph.h>
19961 #include <linux/of_platform.h>
19962+#include <linux/clk.h>
19963 #include <linux/component.h>
19964 #include <linux/console.h>
19965 #include <linux/iommu.h>
19966+#include <linux/of_reserved_mem.h>
19967 
19968+#include <drm/drm_debugfs.h>
19969 #include <drm/drm_drv.h>
19970+#include <drm/drm_displayid.h>
19971 #include <drm/drm_fb_helper.h>
19972 #include <drm/drm_gem_cma_helper.h>
19973 #include <drm/drm_of.h>
19974@@ -27,16 +34,820 @@
19975 #include "rockchip_drm_fb.h"
19976 #include "rockchip_drm_fbdev.h"
19977 #include "rockchip_drm_gem.h"
19978+#include <drm/rockchip_drm_logo.h>
19979+
19980+#include "../drm_crtc_internal.h"
19981 
19982 #define DRIVER_NAME	"rockchip"
19983 #define DRIVER_DESC	"RockChip Soc DRM"
19984 #define DRIVER_DATE	"20140818"
19985-#define DRIVER_MAJOR	1
19986+#define DRIVER_MAJOR	3
19987 #define DRIVER_MINOR	0
19988 
19989 static bool is_support_iommu = true;
19990 static struct drm_driver rockchip_drm_driver;
19991 
19992+void drm_mode_convert_to_split_mode(struct drm_display_mode *mode)
19993+{
19994+	u16 hactive, hfp, hsync, hbp;
19995+
19996+	hactive = mode->hdisplay;
19997+	hfp = mode->hsync_start - mode->hdisplay;
19998+	hsync = mode->hsync_end - mode->hsync_start;
19999+	hbp = mode->htotal - mode->hsync_end;
20000+
20001+	mode->clock *= 2;
20002+	mode->hdisplay = hactive * 2;
20003+	mode->hsync_start = mode->hdisplay + hfp * 2;
20004+	mode->hsync_end = mode->hsync_start + hsync * 2;
20005+	mode->htotal = mode->hsync_end + hbp * 2;
20006+	drm_mode_set_name(mode);
20007+}
20008+EXPORT_SYMBOL(drm_mode_convert_to_split_mode);
20009+
20010+void drm_mode_convert_to_origin_mode(struct drm_display_mode *mode)
20011+{
20012+	u16 hactive, hfp, hsync, hbp;
20013+
20014+	hactive = mode->hdisplay;
20015+	hfp = mode->hsync_start - mode->hdisplay;
20016+	hsync = mode->hsync_end - mode->hsync_start;
20017+	hbp = mode->htotal - mode->hsync_end;
20018+
20019+	mode->clock /= 2;
20020+	mode->hdisplay = hactive / 2;
20021+	mode->hsync_start = mode->hdisplay + hfp / 2;
20022+	mode->hsync_end = mode->hsync_start + hsync / 2;
20023+	mode->htotal = mode->hsync_end + hbp / 2;
20024+}
20025+EXPORT_SYMBOL(drm_mode_convert_to_origin_mode);
20026+
20027+/**
20028+ * drm_connector_oob_hotplug_event - Report out-of-band hotplug event to connector
+ * @connector_fwnode: fwnode handle of the connector to report the event on
20030+ *
20031+ * On some hardware a hotplug event notification may come from outside the display
20032+ * driver / device. An example of this is some USB Type-C setups where the hardware
20033+ * muxes the DisplayPort data and aux-lines but does not pass the altmode HPD
20034+ * status bit to the GPU's DP HPD pin.
20035+ *
+ * This function can be used to report such out-of-band events: the matching
+ * connector is looked up via the rockchip sub-device registered for the
+ * fwnode's OF node and its oob_hotplug_event() hook is invoked.
20038+ */
20039+void drm_connector_oob_hotplug_event(struct fwnode_handle *connector_fwnode)
20040+{
20041+	struct rockchip_drm_sub_dev *sub_dev;
20042+
20043+	if (!connector_fwnode || !connector_fwnode->dev)
20044+		return;
20045+
20046+	sub_dev = rockchip_drm_get_sub_dev(dev_of_node(connector_fwnode->dev));
20047+
20048+	if (sub_dev && sub_dev->connector && sub_dev->oob_hotplug_event)
20049+		sub_dev->oob_hotplug_event(sub_dev->connector);
20050+}
20051+EXPORT_SYMBOL(drm_connector_oob_hotplug_event);
20052+
20053+uint32_t rockchip_drm_get_bpp(const struct drm_format_info *info)
20054+{
20055+	/* use whatever a driver has set */
20056+	if (info->cpp[0])
20057+		return info->cpp[0] * 8;
20058+
20059+	switch (info->format) {
20060+	case DRM_FORMAT_YUV420_8BIT:
20061+		return 12;
20062+	case DRM_FORMAT_YUV420_10BIT:
20063+		return 15;
20064+	case DRM_FORMAT_VUY101010:
20065+		return 30;
20066+	default:
20067+		break;
20068+	}
20069+
20070+	/* all attempts failed */
20071+	return 0;
20072+}
20073+EXPORT_SYMBOL(rockchip_drm_get_bpp);
20074+
20075+/**
20076+ * rockchip_drm_of_find_possible_crtcs - find the possible CRTCs for an active
20077+ * encoder port
20078+ * @dev: DRM device
20079+ * @port: encoder port to scan for endpoints
20080+ *
20081+ * Scan all active endpoints attached to a port, locate their attached CRTCs,
20082+ * and generate the DRM mask of CRTCs which may be attached to this
20083+ * encoder.
20084+ *
20085+ * See Documentation/devicetree/bindings/graph.txt for the bindings.
20086+ */
20087+uint32_t rockchip_drm_of_find_possible_crtcs(struct drm_device *dev,
20088+					     struct device_node *port)
20089+{
20090+	struct device_node *remote_port, *ep;
20091+	uint32_t possible_crtcs = 0;
20092+
20093+	for_each_endpoint_of_node(port, ep) {
20094+		if (!of_device_is_available(ep))
20095+			continue;
20096+
20097+		remote_port = of_graph_get_remote_port(ep);
20098+		if (!remote_port) {
20099+			of_node_put(ep);
20100+			return 0;
20101+		}
20102+
20103+		possible_crtcs |= drm_of_crtc_port_mask(dev, remote_port);
20104+
20105+		of_node_put(remote_port);
20106+	}
20107+
20108+	return possible_crtcs;
20109+}
20110+EXPORT_SYMBOL(rockchip_drm_of_find_possible_crtcs);
20111+
20112+static DEFINE_MUTEX(rockchip_drm_sub_dev_lock);
20113+static LIST_HEAD(rockchip_drm_sub_dev_list);
20114+
20115+void rockchip_drm_register_sub_dev(struct rockchip_drm_sub_dev *sub_dev)
20116+{
20117+	mutex_lock(&rockchip_drm_sub_dev_lock);
20118+	list_add_tail(&sub_dev->list, &rockchip_drm_sub_dev_list);
20119+	mutex_unlock(&rockchip_drm_sub_dev_lock);
20120+}
20121+EXPORT_SYMBOL(rockchip_drm_register_sub_dev);
20122+
20123+void rockchip_drm_unregister_sub_dev(struct rockchip_drm_sub_dev *sub_dev)
20124+{
20125+	mutex_lock(&rockchip_drm_sub_dev_lock);
20126+	list_del(&sub_dev->list);
20127+	mutex_unlock(&rockchip_drm_sub_dev_lock);
20128+}
20129+EXPORT_SYMBOL(rockchip_drm_unregister_sub_dev);
20130+
20131+struct rockchip_drm_sub_dev *rockchip_drm_get_sub_dev(struct device_node *node)
20132+{
20133+	struct rockchip_drm_sub_dev *sub_dev = NULL;
20134+	bool found = false;
20135+
20136+	mutex_lock(&rockchip_drm_sub_dev_lock);
20137+	list_for_each_entry(sub_dev, &rockchip_drm_sub_dev_list, list) {
20138+		if (sub_dev->of_node == node) {
20139+			found = true;
20140+			break;
20141+		}
20142+	}
20143+	mutex_unlock(&rockchip_drm_sub_dev_lock);
20144+
20145+	return found ? sub_dev : NULL;
20146+}
20147+EXPORT_SYMBOL(rockchip_drm_get_sub_dev);
20148+
20149+int rockchip_drm_get_sub_dev_type(void)
20150+{
20151+	int connector_type = DRM_MODE_CONNECTOR_Unknown;
20152+	struct rockchip_drm_sub_dev *sub_dev = NULL;
20153+
20154+	mutex_lock(&rockchip_drm_sub_dev_lock);
20155+	list_for_each_entry(sub_dev, &rockchip_drm_sub_dev_list, list) {
20156+		if (sub_dev->connector->encoder) {
20157+			connector_type = sub_dev->connector->connector_type;
20158+			break;
20159+		}
20160+	}
20161+	mutex_unlock(&rockchip_drm_sub_dev_lock);
20162+
20163+	return connector_type;
20164+}
20165+EXPORT_SYMBOL(rockchip_drm_get_sub_dev_type);
20166+
20167+void rockchip_drm_te_handle(struct drm_crtc *crtc)
20168+{
20169+	struct rockchip_drm_private *priv = crtc->dev->dev_private;
20170+	int pipe = drm_crtc_index(crtc);
20171+
20172+	if (priv->crtc_funcs[pipe] && priv->crtc_funcs[pipe]->te_handler)
20173+		priv->crtc_funcs[pipe]->te_handler(crtc);
20174+}
20175+EXPORT_SYMBOL(rockchip_drm_te_handle);
20176+
20177+static const struct drm_display_mode rockchip_drm_default_modes[] = {
20178+	/* 4 - 1280x720@60Hz 16:9 */
20179+	{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1390,
20180+		   1430, 1650, 0, 720, 725, 730, 750, 0,
20181+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
20182+	  .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
20183+	/* 16 - 1920x1080@60Hz 16:9 */
20184+	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
20185+		   2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
20186+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
20187+	  .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
20188+	/* 31 - 1920x1080@50Hz 16:9 */
20189+	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
20190+		   2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
20191+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
20192+	  .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
20193+	/* 19 - 1280x720@50Hz 16:9 */
20194+	{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1720,
20195+		   1760, 1980, 0, 720, 725, 730, 750, 0,
20196+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
20197+	  .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
20198+	/* 0x10 - 1024x768@60Hz */
20199+	{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
20200+		   1184, 1344, 0,  768, 771, 777, 806, 0,
20201+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
20202+	/* 17 - 720x576@50Hz 4:3 */
20203+	{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
20204+		   796, 864, 0, 576, 581, 586, 625, 0,
20205+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
20206+	  .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
20207+	/* 2 - 720x480@60Hz 4:3 */
20208+	{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736,
20209+		   798, 858, 0, 480, 489, 495, 525, 0,
20210+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
20211+	  .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
20212+};
20213+
20214+int rockchip_drm_add_modes_noedid(struct drm_connector *connector)
20215+{
20216+	struct drm_device *dev = connector->dev;
20217+	struct drm_display_mode *mode;
20218+	int i, count, num_modes = 0;
20219+
20220+	mutex_lock(&rockchip_drm_sub_dev_lock);
20221+	count = ARRAY_SIZE(rockchip_drm_default_modes);
20222+
20223+	for (i = 0; i < count; i++) {
20224+		const struct drm_display_mode *ptr = &rockchip_drm_default_modes[i];
20225+
20226+		mode = drm_mode_duplicate(dev, ptr);
20227+		if (mode) {
20228+			if (!i)
20229+				mode->type = DRM_MODE_TYPE_PREFERRED;
20230+			drm_mode_probed_add(connector, mode);
20231+			num_modes++;
20232+		}
20233+	}
20234+	mutex_unlock(&rockchip_drm_sub_dev_lock);
20235+
20236+	return num_modes;
20237+}
20238+EXPORT_SYMBOL(rockchip_drm_add_modes_noedid);
20239+
20240+static int
20241+cea_db_tag(const u8 *db)
20242+{
20243+	return db[0] >> 5;
20244+}
20245+
20246+static int
20247+cea_db_payload_len(const u8 *db)
20248+{
20249+	return db[0] & 0x1f;
20250+}
20251+
20252+#define for_each_cea_db(cea, i, start, end) \
20253+	for ((i) = (start); \
20254+	     (i) < (end) && (i) + cea_db_payload_len(&(cea)[(i)]) < (end); \
20255+	     (i) += cea_db_payload_len(&(cea)[(i)]) + 1)
20256+
20257+#define HDMI_NEXT_HDR_VSDB_OUI 0xd04601
20258+
20259+static bool cea_db_is_hdmi_next_hdr_block(const u8 *db)
20260+{
20261+	unsigned int oui;
20262+
20263+	if (cea_db_tag(db) != 0x07)
20264+		return false;
20265+
20266+	if (cea_db_payload_len(db) < 11)
20267+		return false;
20268+
20269+	oui = db[3] << 16 | db[2] << 8 | db[1];
20270+
20271+	return oui == HDMI_NEXT_HDR_VSDB_OUI;
20272+}
20273+
20274+static bool cea_db_is_hdmi_forum_vsdb(const u8 *db)
20275+{
20276+	unsigned int oui;
20277+
20278+	if (cea_db_tag(db) != 0x03)
20279+		return false;
20280+
20281+	if (cea_db_payload_len(db) < 7)
20282+		return false;
20283+
20284+	oui = db[3] << 16 | db[2] << 8 | db[1];
20285+
20286+	return oui == HDMI_FORUM_IEEE_OUI;
20287+}
20288+
20289+static int
20290+cea_db_offsets(const u8 *cea, int *start, int *end)
20291+{
20292+	/* DisplayID CTA extension blocks and top-level CEA EDID
20293+	 * block header definitions differ in the following bytes:
20294+	 *   1) Byte 2 of the header specifies length differently,
20295+	 *   2) Byte 3 is only present in the CEA top level block.
20296+	 *
20297+	 * The different definitions for byte 2 follow.
20298+	 *
20299+	 * DisplayID CTA extension block defines byte 2 as:
20300+	 *   Number of payload bytes
20301+	 *
20302+	 * CEA EDID block defines byte 2 as:
20303+	 *   Byte number (decimal) within this block where the 18-byte
20304+	 *   DTDs begin. If no non-DTD data is present in this extension
20305+	 *   block, the value should be set to 04h (the byte after next).
20306+	 *   If set to 00h, there are no DTDs present in this block and
20307+	 *   no non-DTD data.
20308+	 */
20309+	if (cea[0] == 0x81) {
20310+		/*
20311+		 * for_each_displayid_db() has already verified
20312+		 * that these stay within expected bounds.
20313+		 */
20314+		*start = 3;
20315+		*end = *start + cea[2];
20316+	} else if (cea[0] == 0x02) {
20317+		/* Data block offset in CEA extension block */
20318+		*start = 4;
20319+		*end = cea[2];
20320+		if (*end == 0)
20321+			*end = 127;
20322+		if (*end < 4 || *end > 127)
20323+			return -ERANGE;
20324+	} else {
20325+		return -EOPNOTSUPP;
20326+	}
20327+
20328+	return 0;
20329+}
20330+
20331+static u8 *find_edid_extension(const struct edid *edid,
20332+			       int ext_id, int *ext_index)
20333+{
20334+	u8 *edid_ext = NULL;
20335+	int i;
20336+
20337+	/* No EDID or EDID extensions */
20338+	if (edid == NULL || edid->extensions == 0)
20339+		return NULL;
20340+
20341+	/* Find CEA extension */
20342+	for (i = *ext_index; i < edid->extensions; i++) {
20343+		edid_ext = (u8 *)edid + EDID_LENGTH * (i + 1);
20344+		if (edid_ext[0] == ext_id)
20345+			break;
20346+	}
20347+
20348+	if (i >= edid->extensions)
20349+		return NULL;
20350+
20351+	*ext_index = i + 1;
20352+
20353+	return edid_ext;
20354+}
20355+
20356+static int validate_displayid(u8 *displayid, int length, int idx)
20357+{
20358+	int i, dispid_length;
20359+	u8 csum = 0;
20360+	struct displayid_hdr *base;
20361+
20362+	base = (struct displayid_hdr *)&displayid[idx];
20363+
20364+	DRM_DEBUG_KMS("base revision 0x%x, length %d, %d %d\n",
20365+		      base->rev, base->bytes, base->prod_id, base->ext_count);
20366+
20367+	/* +1 for DispID checksum */
20368+	dispid_length = sizeof(*base) + base->bytes + 1;
20369+	if (dispid_length > length - idx)
20370+		return -EINVAL;
20371+
20372+	for (i = 0; i < dispid_length; i++)
20373+		csum += displayid[idx + i];
20374+	if (csum) {
20375+		DRM_NOTE("DisplayID checksum invalid, remainder is %d\n", csum);
20376+		return -EINVAL;
20377+	}
20378+
20379+	return 0;
20380+}
20381+
20382+static u8 *find_displayid_extension(const struct edid *edid,
20383+				    int *length, int *idx,
20384+				    int *ext_index)
20385+{
20386+	u8 *displayid = find_edid_extension(edid, 0x70, ext_index);
20387+	struct displayid_hdr *base;
20388+	int ret;
20389+
20390+	if (!displayid)
20391+		return NULL;
20392+
20393+	/* EDID extensions block checksum isn't for us */
20394+	*length = EDID_LENGTH - 1;
20395+	*idx = 1;
20396+
20397+	ret = validate_displayid(displayid, *length, *idx);
20398+	if (ret)
20399+		return NULL;
20400+
20401+	base = (struct displayid_hdr *)&displayid[*idx];
20402+	*length = *idx + sizeof(*base) + base->bytes;
20403+
20404+	return displayid;
20405+}
20406+
20407+static u8 *find_cea_extension(const struct edid *edid)
20408+{
20409+	int length, idx;
20410+	struct displayid_block *block;
20411+	u8 *cea;
20412+	u8 *displayid;
20413+	int ext_index;
20414+
20415+	/* Look for a top level CEA extension block */
20416+	/* FIXME: make callers iterate through multiple CEA ext blocks? */
20417+	ext_index = 0;
20418+	cea = find_edid_extension(edid, 0x02, &ext_index);
20419+	if (cea)
20420+		return cea;
20421+
20422+	/* CEA blocks can also be found embedded in a DisplayID block */
20423+	ext_index = 0;
20424+	for (;;) {
20425+		displayid = find_displayid_extension(edid, &length, &idx,
20426+						     &ext_index);
20427+		if (!displayid)
20428+			return NULL;
20429+
20430+		idx += sizeof(struct displayid_hdr);
20431+		for_each_displayid_db(displayid, block, idx, length) {
20432+			if (block->tag == 0x81)
20433+				return (u8 *)block;
20434+		}
20435+	}
20436+
20437+	return NULL;
20438+}
20439+
20440+#define EDID_CEA_YCRCB422	(1 << 4)
20441+
20442+int rockchip_drm_get_yuv422_format(struct drm_connector *connector,
20443+				   struct edid *edid)
20444+{
20445+	struct drm_display_info *info;
20446+	const u8 *edid_ext;
20447+
20448+	if (!connector || !edid)
20449+		return -EINVAL;
20450+
20451+	info = &connector->display_info;
20452+
20453+	edid_ext = find_cea_extension(edid);
20454+	if (!edid_ext)
20455+		return -EINVAL;
20456+
20457+	if (edid_ext[3] & EDID_CEA_YCRCB422)
20458+		info->color_formats |= DRM_COLOR_FORMAT_YCRCB422;
20459+
20460+	return 0;
20461+}
20462+EXPORT_SYMBOL(rockchip_drm_get_yuv422_format);
20463+
20464+static
20465+void get_max_frl_rate(int max_frl_rate, u8 *max_lanes, u8 *max_rate_per_lane)
20466+{
20467+	switch (max_frl_rate) {
20468+	case 1:
20469+		*max_lanes = 3;
20470+		*max_rate_per_lane = 3;
20471+		break;
20472+	case 2:
20473+		*max_lanes = 3;
20474+		*max_rate_per_lane = 6;
20475+		break;
20476+	case 3:
20477+		*max_lanes = 4;
20478+		*max_rate_per_lane = 6;
20479+		break;
20480+	case 4:
20481+		*max_lanes = 4;
20482+		*max_rate_per_lane = 8;
20483+		break;
20484+	case 5:
20485+		*max_lanes = 4;
20486+		*max_rate_per_lane = 10;
20487+		break;
20488+	case 6:
20489+		*max_lanes = 4;
20490+		*max_rate_per_lane = 12;
20491+		break;
20492+	case 0:
20493+	default:
20494+		*max_lanes = 0;
20495+		*max_rate_per_lane = 0;
20496+	}
20497+}
20498+
20499+#define EDID_DSC_10BPC			(1 << 0)
20500+#define EDID_DSC_12BPC			(1 << 1)
20501+#define EDID_DSC_16BPC			(1 << 2)
20502+#define EDID_DSC_ALL_BPP		(1 << 3)
20503+#define EDID_DSC_NATIVE_420		(1 << 6)
20504+#define EDID_DSC_1P2			(1 << 7)
20505+#define EDID_DSC_MAX_FRL_RATE_MASK	0xf0
20506+#define EDID_DSC_MAX_SLICES		0xf
20507+#define EDID_DSC_TOTAL_CHUNK_KBYTES	0x3f
20508+#define EDID_MAX_FRL_RATE_MASK		0xf0
20509+
20510+static
20511+void parse_edid_forum_vsdb(struct rockchip_drm_dsc_cap *dsc_cap,
20512+			   u8 *max_frl_rate_per_lane, u8 *max_lanes,
20513+			   const u8 *hf_vsdb)
20514+{
20515+	u8 max_frl_rate;
20516+	u8 dsc_max_frl_rate;
20517+	u8 dsc_max_slices;
20518+
20519+	if (!hf_vsdb[7])
20520+		return;
20521+
+	DRM_DEBUG_KMS("HDMI 2.1 sink detected, parsing EDID\n");
20523+	max_frl_rate = (hf_vsdb[7] & EDID_MAX_FRL_RATE_MASK) >> 4;
20524+	get_max_frl_rate(max_frl_rate, max_lanes,
20525+			 max_frl_rate_per_lane);
20526+
20527+	if (cea_db_payload_len(hf_vsdb) < 13)
20528+		return;
20529+
20530+	dsc_cap->v_1p2 = hf_vsdb[11] & EDID_DSC_1P2;
20531+
20532+	if (!dsc_cap->v_1p2)
20533+		return;
20534+
20535+	dsc_cap->native_420 = hf_vsdb[11] & EDID_DSC_NATIVE_420;
20536+	dsc_cap->all_bpp = hf_vsdb[11] & EDID_DSC_ALL_BPP;
20537+
20538+	if (hf_vsdb[11] & EDID_DSC_16BPC)
20539+		dsc_cap->bpc_supported = 16;
20540+	else if (hf_vsdb[11] & EDID_DSC_12BPC)
20541+		dsc_cap->bpc_supported = 12;
20542+	else if (hf_vsdb[11] & EDID_DSC_10BPC)
20543+		dsc_cap->bpc_supported = 10;
20544+	else
20545+		dsc_cap->bpc_supported = 0;
20546+
20547+	dsc_max_frl_rate = (hf_vsdb[12] & EDID_DSC_MAX_FRL_RATE_MASK) >> 4;
20548+	get_max_frl_rate(dsc_max_frl_rate, &dsc_cap->max_lanes,
20549+			 &dsc_cap->max_frl_rate_per_lane);
20550+	dsc_cap->total_chunk_kbytes = hf_vsdb[13] & EDID_DSC_TOTAL_CHUNK_KBYTES;
20551+
20552+	dsc_max_slices = hf_vsdb[12] & EDID_DSC_MAX_SLICES;
20553+	switch (dsc_max_slices) {
20554+	case 1:
20555+		dsc_cap->max_slices = 1;
20556+		dsc_cap->clk_per_slice = 340;
20557+		break;
20558+	case 2:
20559+		dsc_cap->max_slices = 2;
20560+		dsc_cap->clk_per_slice = 340;
20561+		break;
20562+	case 3:
20563+		dsc_cap->max_slices = 4;
20564+		dsc_cap->clk_per_slice = 340;
20565+		break;
20566+	case 4:
20567+		dsc_cap->max_slices = 8;
20568+		dsc_cap->clk_per_slice = 340;
20569+		break;
20570+	case 5:
20571+		dsc_cap->max_slices = 8;
20572+		dsc_cap->clk_per_slice = 400;
20573+		break;
20574+	case 6:
20575+		dsc_cap->max_slices = 12;
20576+		dsc_cap->clk_per_slice = 400;
20577+		break;
20578+	case 7:
20579+		dsc_cap->max_slices = 16;
20580+		dsc_cap->clk_per_slice = 400;
20581+		break;
20582+	case 0:
20583+	default:
20584+		dsc_cap->max_slices = 0;
20585+		dsc_cap->clk_per_slice = 0;
20586+	}
20587+}
20588+
20589+enum {
20590+	VER_26_BYTE_V0,
20591+	VER_15_BYTE_V1,
20592+	VER_12_BYTE_V1,
20593+	VER_12_BYTE_V2,
20594+};
20595+
20596+static int check_next_hdr_version(const u8 *next_hdr_db)
20597+{
20598+	u16 ver;
20599+
20600+	ver = (next_hdr_db[5] & 0xf0) << 8 | next_hdr_db[0];
20601+
20602+	switch (ver) {
20603+	case 0x00f9:
20604+		return VER_26_BYTE_V0;
20605+	case 0x20ee:
20606+		return VER_15_BYTE_V1;
20607+	case 0x20eb:
20608+		return VER_12_BYTE_V1;
20609+	case 0x40eb:
20610+		return VER_12_BYTE_V2;
20611+	default:
20612+		return -ENOENT;
20613+	}
20614+}
20615+
20616+static void parse_ver_26_v0_data(struct ver_26_v0 *hdr, const u8 *data)
20617+{
20618+	hdr->yuv422_12bit = data[5] & BIT(0);
20619+	hdr->support_2160p_60 = (data[5] & BIT(1)) >> 1;
20620+	hdr->global_dimming = (data[5] & BIT(2)) >> 2;
20621+
20622+	hdr->dm_major_ver = (data[21] & 0xf0) >> 4;
20623+	hdr->dm_minor_ver = data[21] & 0xf;
20624+
20625+	hdr->t_min_pq = (data[19] << 4) | ((data[18] & 0xf0) >> 4);
20626+	hdr->t_max_pq = (data[20] << 4) | (data[18] & 0xf);
20627+
20628+	hdr->rx = (data[7] << 4) | ((data[6] & 0xf0) >> 4);
20629+	hdr->ry = (data[8] << 4) | (data[6] & 0xf);
20630+	hdr->gx = (data[10] << 4) | ((data[9] & 0xf0) >> 4);
20631+	hdr->gy = (data[11] << 4) | (data[9] & 0xf);
20632+	hdr->bx = (data[13] << 4) | ((data[12] & 0xf0) >> 4);
20633+	hdr->by = (data[14] << 4) | (data[12] & 0xf);
20634+	hdr->wx = (data[16] << 4) | ((data[15] & 0xf0) >> 4);
20635+	hdr->wy = (data[17] << 4) | (data[15] & 0xf);
20636+}
20637+
20638+static void parse_ver_15_v1_data(struct ver_15_v1 *hdr, const u8 *data)
20639+{
20640+	hdr->yuv422_12bit = data[5] & BIT(0);
20641+	hdr->support_2160p_60 = (data[5] & BIT(1)) >> 1;
20642+	hdr->global_dimming = data[6] & BIT(0);
20643+
20644+	hdr->dm_version = (data[5] & 0x1c) >> 2;
20645+
20646+	hdr->colorimetry = data[7] & BIT(0);
20647+
20648+	hdr->t_max_lum = (data[6] & 0xfe) >> 1;
20649+	hdr->t_min_lum = (data[7] & 0xfe) >> 1;
20650+
20651+	hdr->rx = data[9];
20652+	hdr->ry = data[10];
20653+	hdr->gx = data[11];
20654+	hdr->gy = data[12];
20655+	hdr->bx = data[13];
20656+	hdr->by = data[14];
20657+}
20658+
20659+static void parse_ver_12_v1_data(struct ver_12_v1 *hdr, const u8 *data)
20660+{
20661+	hdr->yuv422_12bit = data[5] & BIT(0);
20662+	hdr->support_2160p_60 = (data[5] & BIT(1)) >> 1;
20663+	hdr->global_dimming = data[6] & BIT(0);
20664+
20665+	hdr->dm_version = (data[5] & 0x1c) >> 2;
20666+
20667+	hdr->colorimetry = data[7] & BIT(0);
20668+
20669+	hdr->t_max_lum = (data[6] & 0xfe) >> 1;
20670+	hdr->t_min_lum = (data[7] & 0xfe) >> 1;
20671+
20672+	hdr->low_latency = data[8] & 0x3;
20673+
20674+	hdr->unique_rx = (data[11] & 0xf8) >> 3;
20675+	hdr->unique_ry = (data[11] & 0x7) << 2 | (data[10] & BIT(0)) << 1 |
20676+		(data[9] & BIT(0));
20677+	hdr->unique_gx = (data[9] & 0xfe) >> 1;
20678+	hdr->unique_gy = (data[10] & 0xfe) >> 1;
20679+	hdr->unique_bx = (data[8] & 0xe0) >> 5;
20680+	hdr->unique_by = (data[8] & 0x1c) >> 2;
20681+}
20682+
20683+static void parse_ver_12_v2_data(struct ver_12_v2 *hdr, const u8 *data)
20684+{
20685+	hdr->yuv422_12bit = data[5] & BIT(0);
20686+	hdr->backlt_ctrl = (data[5] & BIT(1)) >> 1;
20687+	hdr->global_dimming = (data[6] & BIT(2)) >> 2;
20688+
20689+	hdr->dm_version = (data[5] & 0x1c) >> 2;
20690+	hdr->backlt_min_luma = data[6] & 0x3;
20691+	hdr->interface = data[7] & 0x3;
20692+	hdr->yuv444_10b_12b = (data[8] & BIT(0)) << 1 | (data[9] & BIT(0));
20693+
20694+	hdr->t_min_pq_v2 = (data[6] & 0xf8) >> 3;
20695+	hdr->t_max_pq_v2 = (data[7] & 0xf8) >> 3;
20696+
20697+	hdr->unique_rx = (data[10] & 0xf8) >> 3;
20698+	hdr->unique_ry = (data[11] & 0xf8) >> 3;
20699+	hdr->unique_gx = (data[8] & 0xfe) >> 1;
20700+	hdr->unique_gy = (data[9] & 0xfe) >> 1;
20701+	hdr->unique_bx = data[10] & 0x7;
20702+	hdr->unique_by = data[11] & 0x7;
20703+}
20704+
20705+static
20706+void parse_next_hdr_block(struct next_hdr_sink_data *sink_data,
20707+			  const u8 *next_hdr_db)
20708+{
20709+	int version;
20710+
20711+	version = check_next_hdr_version(next_hdr_db);
20712+	if (version < 0)
20713+		return;
20714+
20715+	sink_data->version = version;
20716+
20717+	switch (version) {
20718+	case VER_26_BYTE_V0:
20719+		parse_ver_26_v0_data(&sink_data->ver_26_v0, next_hdr_db);
20720+		break;
20721+	case VER_15_BYTE_V1:
20722+		parse_ver_15_v1_data(&sink_data->ver_15_v1, next_hdr_db);
20723+		break;
20724+	case VER_12_BYTE_V1:
20725+		parse_ver_12_v1_data(&sink_data->ver_12_v1, next_hdr_db);
20726+		break;
20727+	case VER_12_BYTE_V2:
20728+		parse_ver_12_v2_data(&sink_data->ver_12_v2, next_hdr_db);
20729+		break;
20730+	default:
20731+		break;
20732+	}
20733+}
20734+
20735+int rockchip_drm_parse_cea_ext(struct rockchip_drm_dsc_cap *dsc_cap,
20736+			       u8 *max_frl_rate_per_lane, u8 *max_lanes,
20737+			       const struct edid *edid)
20738+{
20739+	const u8 *edid_ext;
20740+	int i, start, end;
20741+
20742+	if (!dsc_cap || !max_frl_rate_per_lane || !max_lanes || !edid)
20743+		return -EINVAL;
20744+
20745+	edid_ext = find_cea_extension(edid);
20746+	if (!edid_ext)
20747+		return -EINVAL;
20748+
20749+	if (cea_db_offsets(edid_ext, &start, &end))
20750+		return -EINVAL;
20751+
20752+	for_each_cea_db(edid_ext, i, start, end) {
20753+		const u8 *db = &edid_ext[i];
20754+
20755+		if (cea_db_is_hdmi_forum_vsdb(db))
20756+			parse_edid_forum_vsdb(dsc_cap, max_frl_rate_per_lane,
20757+					      max_lanes, db);
20758+	}
20759+
20760+	return 0;
20761+}
20762+EXPORT_SYMBOL(rockchip_drm_parse_cea_ext);
20763+
20764+int rockchip_drm_parse_next_hdr(struct next_hdr_sink_data *sink_data,
20765+				const struct edid *edid)
20766+{
20767+	const u8 *edid_ext;
20768+	int i, start, end;
20769+
20770+	if (!sink_data || !edid)
20771+		return -EINVAL;
20772+
20773+	memset(sink_data, 0, sizeof(struct next_hdr_sink_data));
20774+
20775+	edid_ext = find_cea_extension(edid);
20776+	if (!edid_ext)
20777+		return -EINVAL;
20778+
20779+	if (cea_db_offsets(edid_ext, &start, &end))
20780+		return -EINVAL;
20781+
20782+	for_each_cea_db(edid_ext, i, start, end) {
20783+		const u8 *db = &edid_ext[i];
20784+
20785+		if (cea_db_is_hdmi_next_hdr_block(db))
20786+			parse_next_hdr_block(sink_data, db);
20787+	}
20788+
20789+	return 0;
20790+}
20791+EXPORT_SYMBOL(rockchip_drm_parse_next_hdr);
20792+
20793 /*
20794  * Attach a (component) device to the shared drm dma mapping from master drm
20795  * device.  This is used by the VOPs to map GEM buffers to a common DMA
20796@@ -72,6 +883,55 @@ void rockchip_drm_dma_detach_device(struct drm_device *drm_dev,
20797 	iommu_detach_device(domain, dev);
20798 }
20799 
20800+int rockchip_register_crtc_funcs(struct drm_crtc *crtc,
20801+				 const struct rockchip_crtc_funcs *crtc_funcs)
20802+{
20803+	int pipe = drm_crtc_index(crtc);
20804+	struct rockchip_drm_private *priv = crtc->dev->dev_private;
20805+
20806+	if (pipe >= ROCKCHIP_MAX_CRTC)
20807+		return -EINVAL;
20808+
20809+	priv->crtc_funcs[pipe] = crtc_funcs;
20810+
20811+	return 0;
20812+}
20813+
20814+void rockchip_unregister_crtc_funcs(struct drm_crtc *crtc)
20815+{
20816+	int pipe = drm_crtc_index(crtc);
20817+	struct rockchip_drm_private *priv = crtc->dev->dev_private;
20818+
20819+	if (pipe >= ROCKCHIP_MAX_CRTC)
20820+		return;
20821+
20822+	priv->crtc_funcs[pipe] = NULL;
20823+}
20824+
20825+static int rockchip_drm_fault_handler(struct iommu_domain *iommu,
20826+				      struct device *dev,
20827+				      unsigned long iova, int flags, void *arg)
20828+{
20829+	struct drm_device *drm_dev = arg;
20830+	struct rockchip_drm_private *priv = drm_dev->dev_private;
20831+	struct drm_crtc *crtc;
20832+
20833+	DRM_ERROR("iommu fault handler flags: 0x%x\n", flags);
20834+	drm_for_each_crtc(crtc, drm_dev) {
20835+		int pipe = drm_crtc_index(crtc);
20836+
20837+		if (priv->crtc_funcs[pipe] &&
20838+		    priv->crtc_funcs[pipe]->regs_dump)
20839+			priv->crtc_funcs[pipe]->regs_dump(crtc, NULL);
20840+
20841+		if (priv->crtc_funcs[pipe] &&
20842+		    priv->crtc_funcs[pipe]->debugfs_dump)
20843+			priv->crtc_funcs[pipe]->debugfs_dump(crtc, NULL);
20844+	}
20845+
20846+	return 0;
20847+}
20848+
20849 static int rockchip_drm_init_iommu(struct drm_device *drm_dev)
20850 {
20851 	struct rockchip_drm_private *private = drm_dev->dev_private;
20852@@ -94,6 +954,9 @@ static int rockchip_drm_init_iommu(struct drm_device *drm_dev)
20853 	drm_mm_init(&private->mm, start, end - start + 1);
20854 	mutex_init(&private->mm_lock);
20855 
20856+	iommu_set_fault_handler(private->domain, rockchip_drm_fault_handler,
20857+				drm_dev);
20858+
20859 	return 0;
20860 }
20861 
20862@@ -108,6 +971,229 @@ static void rockchip_iommu_cleanup(struct drm_device *drm_dev)
20863 	iommu_domain_free(private->domain);
20864 }
20865 
20866+#ifdef CONFIG_DEBUG_FS
20867+static int rockchip_drm_mm_dump(struct seq_file *s, void *data)
20868+{
20869+	struct drm_info_node *node = s->private;
20870+	struct drm_minor *minor = node->minor;
20871+	struct drm_device *drm_dev = minor->dev;
20872+	struct rockchip_drm_private *priv = drm_dev->dev_private;
20873+	struct drm_printer p = drm_seq_file_printer(s);
20874+
20875+	if (!priv->domain)
20876+		return 0;
20877+	mutex_lock(&priv->mm_lock);
20878+	drm_mm_print(&priv->mm, &p);
20879+	mutex_unlock(&priv->mm_lock);
20880+
20881+	return 0;
20882+}
20883+
20884+static int rockchip_drm_summary_show(struct seq_file *s, void *data)
20885+{
20886+	struct drm_info_node *node = s->private;
20887+	struct drm_minor *minor = node->minor;
20888+	struct drm_device *drm_dev = minor->dev;
20889+	struct rockchip_drm_private *priv = drm_dev->dev_private;
20890+	struct drm_crtc *crtc;
20891+
20892+	drm_for_each_crtc(crtc, drm_dev) {
20893+		int pipe = drm_crtc_index(crtc);
20894+
20895+		if (priv->crtc_funcs[pipe] &&
20896+		    priv->crtc_funcs[pipe]->debugfs_dump)
20897+			priv->crtc_funcs[pipe]->debugfs_dump(crtc, s);
20898+	}
20899+
20900+	return 0;
20901+}
20902+
20903+static struct drm_info_list rockchip_debugfs_files[] = {
20904+	{ "summary", rockchip_drm_summary_show, 0, NULL },
20905+	{ "mm_dump", rockchip_drm_mm_dump, 0, NULL },
20906+};
20907+
20908+static void rockchip_drm_debugfs_init(struct drm_minor *minor)
20909+{
20910+	struct drm_device *dev = minor->dev;
20911+	struct rockchip_drm_private *priv = dev->dev_private;
20912+	struct drm_crtc *crtc;
20913+
20914+	drm_debugfs_create_files(rockchip_debugfs_files,
20915+				 ARRAY_SIZE(rockchip_debugfs_files),
20916+				 minor->debugfs_root, minor);
20917+
20918+	drm_for_each_crtc(crtc, dev) {
20919+		int pipe = drm_crtc_index(crtc);
20920+
20921+		if (priv->crtc_funcs[pipe] &&
20922+		    priv->crtc_funcs[pipe]->debugfs_init)
20923+			priv->crtc_funcs[pipe]->debugfs_init(minor, crtc);
20924+	}
20925+}
20926+#endif
20927+
20928+static int rockchip_drm_create_properties(struct drm_device *dev)
20929+{
20930+	struct drm_property *prop;
20931+	struct rockchip_drm_private *private = dev->dev_private;
20932+
20933+	prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
20934+					 "EOTF", 0, 5);
20935+	if (!prop)
20936+		return -ENOMEM;
20937+	private->eotf_prop = prop;
20938+
20939+	prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
20940+					 "COLOR_SPACE", 0, 12);
20941+	if (!prop)
20942+		return -ENOMEM;
20943+	private->color_space_prop = prop;
20944+
20945+	prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
20946+					 "ASYNC_COMMIT", 0, 1);
20947+	if (!prop)
20948+		return -ENOMEM;
20949+	private->async_commit_prop = prop;
20950+
20951+	prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
20952+					 "SHARE_ID", 0, UINT_MAX);
20953+	if (!prop)
20954+		return -ENOMEM;
20955+	private->share_id_prop = prop;
20956+
20957+	prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC | DRM_MODE_PROP_IMMUTABLE,
20958+					 "CONNECTOR_ID", 0, 0xf);
20959+	if (!prop)
20960+		return -ENOMEM;
20961+	private->connector_id_prop = prop;
20962+
20963+	prop = drm_property_create_object(dev,
20964+					  DRM_MODE_PROP_ATOMIC | DRM_MODE_PROP_IMMUTABLE,
20965+					  "SOC_ID", DRM_MODE_OBJECT_CRTC);
20966+	private->soc_id_prop = prop;
20967+
20968+	prop = drm_property_create_object(dev,
20969+					  DRM_MODE_PROP_ATOMIC | DRM_MODE_PROP_IMMUTABLE,
20970+					  "PORT_ID", DRM_MODE_OBJECT_CRTC);
20971+	private->port_id_prop = prop;
20972+
20973+	private->aclk_prop = drm_property_create_range(dev, 0, "ACLK", 0, UINT_MAX);
20974+	private->bg_prop = drm_property_create_range(dev, 0, "BACKGROUND", 0, UINT_MAX);
20975+	private->line_flag_prop = drm_property_create_range(dev, 0, "LINE_FLAG1", 0, UINT_MAX);
20976+
20977+	return drm_mode_create_tv_properties(dev, 0, NULL);
20978+}
20979+
20980+static void rockchip_attach_connector_property(struct drm_device *drm)
20981+{
20982+	struct drm_connector *connector;
20983+	struct drm_mode_config *conf = &drm->mode_config;
20984+	struct drm_connector_list_iter conn_iter;
20985+
20986+	mutex_lock(&drm->mode_config.mutex);
20987+
20988+#define ROCKCHIP_PROP_ATTACH(prop, v) \
20989+		drm_object_attach_property(&connector->base, prop, v)
20990+
20991+	drm_connector_list_iter_begin(drm, &conn_iter);
20992+	drm_for_each_connector_iter(connector, &conn_iter) {
20993+		ROCKCHIP_PROP_ATTACH(conf->tv_brightness_property, 50);
20994+		ROCKCHIP_PROP_ATTACH(conf->tv_contrast_property, 50);
20995+		ROCKCHIP_PROP_ATTACH(conf->tv_saturation_property, 50);
20996+		ROCKCHIP_PROP_ATTACH(conf->tv_hue_property, 50);
20997+	}
20998+	drm_connector_list_iter_end(&conn_iter);
20999+#undef ROCKCHIP_PROP_ATTACH
21000+
21001+	mutex_unlock(&drm->mode_config.mutex);
21002+}
21003+
21004+static void rockchip_drm_set_property_default(struct drm_device *drm)
21005+{
21006+	struct drm_connector *connector;
21007+	struct drm_mode_config *conf = &drm->mode_config;
21008+	struct drm_atomic_state *state;
21009+	int ret;
21010+	struct drm_connector_list_iter conn_iter;
21011+
21012+	drm_modeset_lock_all(drm);
21013+
21014+	state = drm_atomic_helper_duplicate_state(drm, conf->acquire_ctx);
21015+	if (!state) {
21016+		DRM_ERROR("failed to alloc atomic state\n");
21017+		goto err_unlock;
21018+	}
21019+	state->acquire_ctx = conf->acquire_ctx;
21020+
21021+	drm_connector_list_iter_begin(drm, &conn_iter);
21022+	drm_for_each_connector_iter(connector, &conn_iter) {
21023+		struct drm_connector_state *connector_state;
21024+
21025+		connector_state = drm_atomic_get_connector_state(state,
21026+								 connector);
21027+		if (IS_ERR(connector_state)) {
21028+			DRM_ERROR("Connector[%d]: Failed to get state\n", connector->base.id);
21029+			continue;
21030+		}
21031+
21032+		connector_state->tv.brightness = 50;
21033+		connector_state->tv.contrast = 50;
21034+		connector_state->tv.saturation = 50;
21035+		connector_state->tv.hue = 50;
21036+	}
21037+	drm_connector_list_iter_end(&conn_iter);
21038+
21039+	ret = drm_atomic_commit(state);
21040+	WARN_ON(ret == -EDEADLK);
21041+	if (ret)
21042+		DRM_ERROR("Failed to update properties\n");
21043+	drm_atomic_state_put(state);
21044+
21045+err_unlock:
21046+	drm_modeset_unlock_all(drm);
21047+}
21048+
21049+static int rockchip_gem_pool_init(struct drm_device *drm)
21050+{
21051+	struct rockchip_drm_private *private = drm->dev_private;
21052+	struct device_node *np = drm->dev->of_node;
21053+	struct device_node *node;
21054+	phys_addr_t start, size;
21055+	struct resource res;
21056+	int ret;
21057+
21058+	node = of_parse_phandle(np, "secure-memory-region", 0);
21059+	if (!node)
21060+		return -ENXIO;
21061+
21062+	ret = of_address_to_resource(node, 0, &res);
21063+	if (ret)
21064+		return ret;
21065+	start = res.start;
21066+	size = resource_size(&res);
21067+	if (!size)
21068+		return -ENOMEM;
21069+
21070+	private->secure_buffer_pool = gen_pool_create(PAGE_SHIFT, -1);
21071+	if (!private->secure_buffer_pool)
21072+		return -ENOMEM;
21073+
21074+	gen_pool_add(private->secure_buffer_pool, start, size, -1);
21075+
21076+	return 0;
21077+}
21078+
21079+static void rockchip_gem_pool_destroy(struct drm_device *drm)
21080+{
21081+	struct rockchip_drm_private *private = drm->dev_private;
21082+
21083+	if (!private->secure_buffer_pool)
21084+		return;
21085+
21086+	gen_pool_destroy(private->secure_buffer_pool);
21087+}
21088+
21089 static int rockchip_drm_bind(struct device *dev)
21090 {
21091 	struct drm_device *drm_dev;
21092@@ -126,10 +1212,32 @@ static int rockchip_drm_bind(struct device *dev)
21093 		goto err_free;
21094 	}
21095 
21096+	mutex_init(&private->ovl_lock);
21097+
21098 	drm_dev->dev_private = private;
21099 
21100 	INIT_LIST_HEAD(&private->psr_list);
21101 	mutex_init(&private->psr_list_lock);
21102+	mutex_init(&private->commit_lock);
21103+
21104+	private->hdmi_pll.pll = devm_clk_get_optional(dev, "hdmi-tmds-pll");
21105+	if (PTR_ERR(private->hdmi_pll.pll) == -EPROBE_DEFER) {
21106+		ret = -EPROBE_DEFER;
21107+		goto err_free;
21108+	} else if (IS_ERR(private->hdmi_pll.pll)) {
21109+		dev_err(dev, "failed to get hdmi-tmds-pll\n");
21110+		ret = PTR_ERR(private->hdmi_pll.pll);
21111+		goto err_free;
21112+	}
21113+	private->default_pll.pll = devm_clk_get_optional(dev, "default-vop-pll");
21114+	if (PTR_ERR(private->default_pll.pll) == -EPROBE_DEFER) {
21115+		ret = -EPROBE_DEFER;
21116+		goto err_free;
21117+	} else if (IS_ERR(private->default_pll.pll)) {
21118+		dev_err(dev, "failed to get default vop pll\n");
21119+		ret = PTR_ERR(private->default_pll.pll);
21120+		goto err_free;
21121+	}
21122 
21123 	ret = rockchip_drm_init_iommu(drm_dev);
21124 	if (ret)
21125@@ -140,17 +1248,19 @@ static int rockchip_drm_bind(struct device *dev)
21126 		goto err_iommu_cleanup;
21127 
21128 	rockchip_drm_mode_config_init(drm_dev);
21129-
21130+	rockchip_drm_create_properties(drm_dev);
21131 	/* Try to bind all sub drivers. */
21132 	ret = component_bind_all(dev, drm_dev);
21133 	if (ret)
21134-		goto err_iommu_cleanup;
21135+		goto err_mode_config_cleanup;
21136 
21137+	rockchip_attach_connector_property(drm_dev);
21138 	ret = drm_vblank_init(drm_dev, drm_dev->mode_config.num_crtc);
21139 	if (ret)
21140 		goto err_unbind_all;
21141 
21142 	drm_mode_config_reset(drm_dev);
21143+	rockchip_drm_set_property_default(drm_dev);
21144 
21145 	/*
21146 	 * enable drm irq mode.
21147@@ -158,12 +1268,21 @@ static int rockchip_drm_bind(struct device *dev)
21148 	 */
21149 	drm_dev->irq_enabled = true;
21150 
21151+	/* init kms poll for handling hpd */
21152+	drm_kms_helper_poll_init(drm_dev);
21153+
21154+	rockchip_gem_pool_init(drm_dev);
21155+	ret = of_reserved_mem_device_init(drm_dev->dev);
21156+	if (ret)
+		DRM_DEBUG_KMS("No reserved memory region assigned to drm\n");
21158+
21159+	rockchip_drm_show_logo(drm_dev);
21160+
21161 	ret = rockchip_drm_fbdev_init(drm_dev);
21162 	if (ret)
21163 		goto err_unbind_all;
21164 
21165-	/* init kms poll for handling hpd */
21166-	drm_kms_helper_poll_init(drm_dev);
21167+	drm_dev->mode_config.allow_fb_modifiers = true;
21168 
21169 	ret = drm_dev_register(drm_dev, 0);
21170 	if (ret)
21171@@ -171,13 +1290,18 @@ static int rockchip_drm_bind(struct device *dev)
21172 
21173 	return 0;
21174 err_kms_helper_poll_fini:
21175+	rockchip_gem_pool_destroy(drm_dev);
21176 	drm_kms_helper_poll_fini(drm_dev);
21177 	rockchip_drm_fbdev_fini(drm_dev);
21178 err_unbind_all:
21179 	component_unbind_all(dev, drm_dev);
21180+err_mode_config_cleanup:
21181+	drm_mode_config_cleanup(drm_dev);
21182 err_iommu_cleanup:
21183 	rockchip_iommu_cleanup(drm_dev);
21184 err_free:
21185+	drm_dev->dev_private = NULL;
21186+	dev_set_drvdata(dev, NULL);
21187 	drm_dev_put(drm_dev);
21188 	return ret;
21189 }
21190@@ -189,15 +1313,121 @@ static void rockchip_drm_unbind(struct device *dev)
21191 	drm_dev_unregister(drm_dev);
21192 
21193 	rockchip_drm_fbdev_fini(drm_dev);
21194+	rockchip_gem_pool_destroy(drm_dev);
21195 	drm_kms_helper_poll_fini(drm_dev);
21196 
21197 	drm_atomic_helper_shutdown(drm_dev);
21198 	component_unbind_all(dev, drm_dev);
21199+	drm_mode_config_cleanup(drm_dev);
21200 	rockchip_iommu_cleanup(drm_dev);
21201 
21202+	drm_dev->dev_private = NULL;
21203+	dev_set_drvdata(dev, NULL);
21204 	drm_dev_put(drm_dev);
21205 }
21206 
21207+static void rockchip_drm_crtc_cancel_pending_vblank(struct drm_crtc *crtc,
21208+						    struct drm_file *file_priv)
21209+{
21210+	struct rockchip_drm_private *priv = crtc->dev->dev_private;
21211+	int pipe = drm_crtc_index(crtc);
21212+
21213+	if (pipe < ROCKCHIP_MAX_CRTC &&
21214+	    priv->crtc_funcs[pipe] &&
21215+	    priv->crtc_funcs[pipe]->cancel_pending_vblank)
21216+		priv->crtc_funcs[pipe]->cancel_pending_vblank(crtc, file_priv);
21217+}
21218+
21219+static int rockchip_drm_open(struct drm_device *dev, struct drm_file *file)
21220+{
21221+	struct drm_crtc *crtc;
21222+
21223+	drm_for_each_crtc(crtc, dev)
21224+		crtc->primary->fb = NULL;
21225+
21226+	return 0;
21227+}
21228+
21229+static void rockchip_drm_postclose(struct drm_device *dev,
21230+				   struct drm_file *file_priv)
21231+{
21232+	struct drm_crtc *crtc;
21233+
21234+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
21235+		rockchip_drm_crtc_cancel_pending_vblank(crtc, file_priv);
21236+}
21237+
21238+static void rockchip_drm_lastclose(struct drm_device *dev)
21239+{
21240+	struct rockchip_drm_private *priv = dev->dev_private;
21241+
21242+	if (!priv->logo)
21243+		drm_fb_helper_restore_fbdev_mode_unlocked(priv->fbdev_helper);
21244+}
21245+
21246+static struct drm_pending_vblank_event *
21247+rockchip_drm_add_vcnt_event(struct drm_crtc *crtc, struct drm_file *file_priv)
21248+{
21249+	struct drm_pending_vblank_event *e;
21250+	struct drm_device *dev = crtc->dev;
21251+	unsigned long flags;
21252+
21253+	e = kzalloc(sizeof(*e), GFP_KERNEL);
21254+	if (!e)
21255+		return NULL;
21256+
21257+	e->pipe = drm_crtc_index(crtc);
21258+	e->event.base.type = DRM_EVENT_ROCKCHIP_CRTC_VCNT;
21259+	e->event.base.length = sizeof(e->event.vbl);
21260+	e->event.vbl.crtc_id = crtc->base.id;
21261+	/* store crtc pipe id */
21262+	e->event.vbl.user_data = e->pipe;
21263+
21264+	spin_lock_irqsave(&dev->event_lock, flags);
21265+	drm_event_reserve_init_locked(dev, file_priv, &e->base, &e->event.base);
21266+	spin_unlock_irqrestore(&dev->event_lock, flags);
21267+
21268+	return e;
21269+}
21270+
21271+static int rockchip_drm_get_vcnt_event_ioctl(struct drm_device *dev, void *data,
21272+					     struct drm_file *file_priv)
21273+{
21274+	struct rockchip_drm_private *priv = dev->dev_private;
21275+	union drm_wait_vblank *vblwait = data;
21276+	struct drm_pending_vblank_event *e;
21277+	struct drm_crtc *crtc;
21278+	unsigned int flags, pipe;
21279+
21280+	flags = vblwait->request.type & (_DRM_VBLANK_FLAGS_MASK | _DRM_ROCKCHIP_VCNT_EVENT);
21281+	pipe = (vblwait->request.type & _DRM_VBLANK_HIGH_CRTC_MASK);
21282+	if (pipe)
21283+		pipe = pipe >> _DRM_VBLANK_HIGH_CRTC_SHIFT;
21284+	else
21285+		pipe = flags & _DRM_VBLANK_SECONDARY ? 1 : 0;
21286+
21287+	crtc = drm_crtc_from_index(dev, pipe);
21288+
21289+	if (flags & _DRM_ROCKCHIP_VCNT_EVENT) {
21290+		e = rockchip_drm_add_vcnt_event(crtc, file_priv);
21291+		priv->vcnt[pipe].event = e;
21292+	}
21293+
21294+	return 0;
21295+}
21296+
21297+static const struct drm_ioctl_desc rockchip_ioctls[] = {
21298+	DRM_IOCTL_DEF_DRV(ROCKCHIP_GEM_CREATE, rockchip_gem_create_ioctl,
21299+			  DRM_UNLOCKED | DRM_AUTH | DRM_RENDER_ALLOW),
21300+	DRM_IOCTL_DEF_DRV(ROCKCHIP_GEM_MAP_OFFSET,
21301+			  rockchip_gem_map_offset_ioctl,
21302+			  DRM_UNLOCKED | DRM_AUTH | DRM_RENDER_ALLOW),
21303+	DRM_IOCTL_DEF_DRV(ROCKCHIP_GEM_GET_PHYS, rockchip_gem_get_phys_ioctl,
21304+			  DRM_UNLOCKED | DRM_AUTH | DRM_RENDER_ALLOW),
21305+	DRM_IOCTL_DEF_DRV(ROCKCHIP_GET_VCNT_EVENT, rockchip_drm_get_vcnt_event_ioctl,
21306+			  DRM_UNLOCKED),
21307+};
21308+
21309 static const struct file_operations rockchip_drm_driver_fops = {
21310 	.owner = THIS_MODULE,
21311 	.open = drm_open,
21312@@ -209,19 +1439,160 @@ static const struct file_operations rockchip_drm_driver_fops = {
21313 	.release = drm_release,
21314 };
21315 
21316+static int rockchip_drm_gem_dmabuf_begin_cpu_access(struct dma_buf *dma_buf,
21317+						    enum dma_data_direction dir)
21318+{
21319+	struct drm_gem_object *obj = dma_buf->priv;
21320+
21321+	return rockchip_gem_prime_begin_cpu_access(obj, dir);
21322+}
21323+
21324+static int rockchip_drm_gem_dmabuf_end_cpu_access(struct dma_buf *dma_buf,
21325+						  enum dma_data_direction dir)
21326+{
21327+	struct drm_gem_object *obj = dma_buf->priv;
21328+
21329+	return rockchip_gem_prime_end_cpu_access(obj, dir);
21330+}
21331+
21332+static int rockchip_drm_gem_begin_cpu_access_partial(
21333+	struct dma_buf *dma_buf,
21334+	enum dma_data_direction dir,
21335+	unsigned int offset, unsigned int len)
21336+{
21337+	struct drm_gem_object *obj = dma_buf->priv;
21338+
21339+	return rockchip_gem_prime_begin_cpu_access_partial(obj, dir, offset, len);
21340+}
21341+
21342+static int rockchip_drm_gem_end_cpu_access_partial(
21343+	struct dma_buf *dma_buf,
21344+	enum dma_data_direction dir,
21345+	unsigned int offset, unsigned int len)
21346+{
21347+	struct drm_gem_object *obj = dma_buf->priv;
21348+
21349+	return rockchip_gem_prime_end_cpu_access_partial(obj, dir, offset, len);
21350+}
21351+
21352+static const struct dma_buf_ops rockchip_drm_gem_prime_dmabuf_ops = {
21353+	.cache_sgt_mapping = true,
21354+	.attach = drm_gem_map_attach,
21355+	.detach = drm_gem_map_detach,
21356+	.map_dma_buf = drm_gem_map_dma_buf,
21357+	.unmap_dma_buf = drm_gem_unmap_dma_buf,
21358+	.release = drm_gem_dmabuf_release,
21359+	.mmap = drm_gem_dmabuf_mmap,
21360+	.vmap = drm_gem_dmabuf_vmap,
21361+	.vunmap = drm_gem_dmabuf_vunmap,
21362+	.get_uuid = drm_gem_dmabuf_get_uuid,
21363+	.begin_cpu_access = rockchip_drm_gem_dmabuf_begin_cpu_access,
21364+	.end_cpu_access = rockchip_drm_gem_dmabuf_end_cpu_access,
21365+	.begin_cpu_access_partial = rockchip_drm_gem_begin_cpu_access_partial,
21366+	.end_cpu_access_partial = rockchip_drm_gem_end_cpu_access_partial,
21367+};
21368+
21369+static struct drm_gem_object *rockchip_drm_gem_prime_import_dev(struct drm_device *dev,
21370+								struct dma_buf *dma_buf,
21371+								struct device *attach_dev)
21372+{
21373+	struct dma_buf_attachment *attach;
21374+	struct sg_table *sgt;
21375+	struct drm_gem_object *obj;
21376+	int ret;
21377+
21378+	if (dma_buf->ops == &rockchip_drm_gem_prime_dmabuf_ops) {
21379+		obj = dma_buf->priv;
21380+		if (obj->dev == dev) {
21381+			/*
+			 * Importing a dmabuf exported from our own gem increases
21383+			 * refcount on gem itself instead of f_count of dmabuf.
21384+			 */
21385+			drm_gem_object_get(obj);
21386+			return obj;
21387+		}
21388+	}
21389+
21390+	if (!dev->driver->gem_prime_import_sg_table)
21391+		return ERR_PTR(-EINVAL);
21392+
21393+	attach = dma_buf_attach(dma_buf, attach_dev);
21394+	if (IS_ERR(attach))
21395+		return ERR_CAST(attach);
21396+
21397+	get_dma_buf(dma_buf);
21398+
21399+	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
21400+	if (IS_ERR(sgt)) {
21401+		ret = PTR_ERR(sgt);
21402+		goto fail_detach;
21403+	}
21404+
21405+	obj = dev->driver->gem_prime_import_sg_table(dev, attach, sgt);
21406+	if (IS_ERR(obj)) {
21407+		ret = PTR_ERR(obj);
21408+		goto fail_unmap;
21409+	}
21410+
21411+	obj->import_attach = attach;
21412+	obj->resv = dma_buf->resv;
21413+
21414+	return obj;
21415+
21416+fail_unmap:
21417+	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
21418+fail_detach:
21419+	dma_buf_detach(dma_buf, attach);
21420+	dma_buf_put(dma_buf);
21421+
21422+	return ERR_PTR(ret);
21423+}
21424+
21425+static struct drm_gem_object *rockchip_drm_gem_prime_import(struct drm_device *dev,
21426+							    struct dma_buf *dma_buf)
21427+{
21428+	return rockchip_drm_gem_prime_import_dev(dev, dma_buf, dev->dev);
21429+}
21430+
21431+static struct dma_buf *rockchip_drm_gem_prime_export(struct drm_gem_object *obj,
21432+						     int flags)
21433+{
21434+	struct drm_device *dev = obj->dev;
21435+	struct dma_buf_export_info exp_info = {
21436+		.exp_name = KBUILD_MODNAME, /* white lie for debug */
21437+		.owner = dev->driver->fops->owner,
21438+		.ops = &rockchip_drm_gem_prime_dmabuf_ops,
21439+		.size = obj->size,
21440+		.flags = flags,
21441+		.priv = obj,
21442+		.resv = obj->resv,
21443+	};
21444+
21445+	return drm_gem_dmabuf_export(dev, &exp_info);
21446+}
21447+
21448 static struct drm_driver rockchip_drm_driver = {
21449-	.driver_features	= DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
21450-	.lastclose		= drm_fb_helper_lastclose,
21451+	.driver_features	= DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC | DRIVER_RENDER,
21452+	.postclose		= rockchip_drm_postclose,
21453+	.lastclose		= rockchip_drm_lastclose,
21454+	.open			= rockchip_drm_open,
21455 	.gem_vm_ops		= &drm_gem_cma_vm_ops,
21456 	.gem_free_object_unlocked = rockchip_gem_free_object,
21457 	.dumb_create		= rockchip_gem_dumb_create,
21458 	.prime_handle_to_fd	= drm_gem_prime_handle_to_fd,
21459 	.prime_fd_to_handle	= drm_gem_prime_fd_to_handle,
21460+	.gem_prime_import	= rockchip_drm_gem_prime_import,
21461+	.gem_prime_export	= rockchip_drm_gem_prime_export,
21462 	.gem_prime_get_sg_table	= rockchip_gem_prime_get_sg_table,
21463 	.gem_prime_import_sg_table	= rockchip_gem_prime_import_sg_table,
21464 	.gem_prime_vmap		= rockchip_gem_prime_vmap,
21465 	.gem_prime_vunmap	= rockchip_gem_prime_vunmap,
21466 	.gem_prime_mmap		= rockchip_gem_mmap_buf,
21467+#ifdef CONFIG_DEBUG_FS
21468+	.debugfs_init		= rockchip_drm_debugfs_init,
21469+#endif
21470+	.ioctls			= rockchip_ioctls,
21471+	.num_ioctls		= ARRAY_SIZE(rockchip_ioctls),
21472 	.fops			= &rockchip_drm_driver_fops,
21473 	.name	= DRIVER_NAME,
21474 	.desc	= DRIVER_DESC,
21475@@ -371,7 +1742,7 @@ static int rockchip_drm_platform_of_probe(struct device *dev)
21476 		}
21477 
21478 		iommu = of_parse_phandle(port->parent, "iommus", 0);
21479-		if (!iommu || !of_device_is_available(iommu->parent)) {
21480+		if (!iommu || !of_device_is_available(iommu)) {
21481 			DRM_DEV_DEBUG(dev,
21482 				      "no iommu attached for %pOF, using non-iommu buffers\n",
21483 				      port->parent);
21484@@ -422,6 +1793,10 @@ static int rockchip_drm_platform_probe(struct platform_device *pdev)
21485 		return ret;
21486 	}
21487 
21488+	ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
21489+	if (ret)
21490+		return ret;
21491+
21492 	return 0;
21493 }
21494 
21495@@ -471,6 +1846,8 @@ static int __init rockchip_drm_init(void)
21496 
21497 	num_rockchip_sub_drivers = 0;
21498 	ADD_ROCKCHIP_SUB_DRIVER(vop_platform_driver, CONFIG_DRM_ROCKCHIP);
21499+	ADD_ROCKCHIP_SUB_DRIVER(vop2_platform_driver, CONFIG_DRM_ROCKCHIP);
21500+	ADD_ROCKCHIP_SUB_DRIVER(vconn_platform_driver, CONFIG_ROCKCHIP_VCONN);
21501 	ADD_ROCKCHIP_SUB_DRIVER(rockchip_lvds_driver,
21502 				CONFIG_ROCKCHIP_LVDS);
21503 	ADD_ROCKCHIP_SUB_DRIVER(rockchip_dp_driver,
21504@@ -480,9 +1857,13 @@ static int __init rockchip_drm_init(void)
21505 				CONFIG_ROCKCHIP_DW_HDMI);
21506 	ADD_ROCKCHIP_SUB_DRIVER(dw_mipi_dsi_rockchip_driver,
21507 				CONFIG_ROCKCHIP_DW_MIPI_DSI);
21508+	ADD_ROCKCHIP_SUB_DRIVER(dw_mipi_dsi2_rockchip_driver,
21509+				CONFIG_ROCKCHIP_DW_MIPI_DSI);
21510 	ADD_ROCKCHIP_SUB_DRIVER(inno_hdmi_driver, CONFIG_ROCKCHIP_INNO_HDMI);
21511 	ADD_ROCKCHIP_SUB_DRIVER(rk3066_hdmi_driver,
21512 				CONFIG_ROCKCHIP_RK3066_HDMI);
21513+	ADD_ROCKCHIP_SUB_DRIVER(rockchip_rgb_driver, CONFIG_ROCKCHIP_RGB);
21514+	ADD_ROCKCHIP_SUB_DRIVER(dw_dp_driver, CONFIG_ROCKCHIP_DW_DP);
21515 
21516 	ret = platform_register_drivers(rockchip_sub_drivers,
21517 					num_rockchip_sub_drivers);
21518@@ -493,6 +1874,8 @@ static int __init rockchip_drm_init(void)
21519 	if (ret)
21520 		goto err_unreg_drivers;
21521 
21522+	rockchip_gem_get_ddr_info();
21523+
21524 	return 0;
21525 
21526 err_unreg_drivers:
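The dma_buf_ops installed above route exporter-side CPU access bracketing into rockchip_gem_prime_begin/end_cpu_access(). Below is a minimal userspace sketch of how those hooks are normally exercised, via the standard DMA_BUF_IOCTL_SYNC ioctl; the dma-buf fd and buffer size are assumed to come from a PRIME export elsewhere, and the *_partial hooks are only reached through a vendor sync ioctl that is not part of this hunk.

/* Sketch only: dmabuf_fd/size are assumed inputs, not provided by this patch. */
#include <linux/dma-buf.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

static int cpu_fill_dmabuf(int dmabuf_fd, size_t size, uint8_t val)
{
	struct dma_buf_sync sync = { 0 };
	void *map;
	int ret;

	map = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, dmabuf_fd, 0);
	if (map == MAP_FAILED)
		return -1;

	/* lands in .begin_cpu_access above */
	sync.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_WRITE;
	ret = ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);
	if (ret)
		goto out;

	memset(map, val, size);

	/* lands in .end_cpu_access above */
	sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_WRITE;
	ret = ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);
out:
	munmap(map, size);
	return ret;
}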
21527diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
21528index e33c2dcd0..591969768 100644
21529--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
21530+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
21531@@ -9,21 +9,128 @@
21532 #ifndef _ROCKCHIP_DRM_DRV_H
21533 #define _ROCKCHIP_DRM_DRV_H
21534 
21535-#include <drm/drm_fb_helper.h>
21536 #include <drm/drm_atomic_helper.h>
21537+#include <drm/drm_dsc.h>
21538+#include <drm/drm_fb_helper.h>
21539+#include <drm/drm_fourcc.h>
21540 #include <drm/drm_gem.h>
21541-
21542+#include <uapi/drm/rockchip_drm.h>
21543 #include <linux/module.h>
21544 #include <linux/component.h>
21545 
21546+#include <soc/rockchip/rockchip_dmc.h>
21547+
21548+#include <drm/panel-simple.h>
21549+
21550+#include <drm/rockchip_drm_debugfs.h>
21551+
21552 #define ROCKCHIP_MAX_FB_BUFFER	3
21553 #define ROCKCHIP_MAX_CONNECTOR	2
21554-#define ROCKCHIP_MAX_CRTC	2
21555+#define ROCKCHIP_MAX_CRTC	4
21556+#define ROCKCHIP_MAX_LAYER	16
21557+
21558 
21559 struct drm_device;
21560 struct drm_connector;
21561 struct iommu_domain;
21562 
21563+#define VOP_OUTPUT_IF_RGB	BIT(0)
21564+#define VOP_OUTPUT_IF_BT1120	BIT(1)
21565+#define VOP_OUTPUT_IF_BT656	BIT(2)
21566+#define VOP_OUTPUT_IF_LVDS0	BIT(3)
21567+#define VOP_OUTPUT_IF_LVDS1	BIT(4)
21568+#define VOP_OUTPUT_IF_MIPI0	BIT(5)
21569+#define VOP_OUTPUT_IF_MIPI1	BIT(6)
21570+#define VOP_OUTPUT_IF_eDP0	BIT(7)
21571+#define VOP_OUTPUT_IF_eDP1	BIT(8)
21572+#define VOP_OUTPUT_IF_DP0	BIT(9)
21573+#define VOP_OUTPUT_IF_DP1	BIT(10)
21574+#define VOP_OUTPUT_IF_HDMI0	BIT(11)
21575+#define VOP_OUTPUT_IF_HDMI1	BIT(12)
21576+
21577+#ifndef DRM_FORMAT_NV20
21578+#define DRM_FORMAT_NV20		fourcc_code('N', 'V', '2', '0') /* 2x1 subsampled Cr:Cb plane */
21579+#endif
21580+
21581+#ifndef DRM_FORMAT_NV30
21582+#define DRM_FORMAT_NV30		fourcc_code('N', 'V', '3', '0') /* non-subsampled Cr:Cb plane */
21583+#endif
21584+
21585+struct rockchip_drm_sub_dev {
21586+	struct list_head list;
21587+	struct drm_connector *connector;
21588+	struct device_node *of_node;
21589+	void (*loader_protect)(struct drm_encoder *encoder, bool on);
21590+	void (*oob_hotplug_event)(struct drm_connector *connector);
21591+};
21592+
21593+struct rockchip_sdr2hdr_state {
21594+	int sdr2hdr_func;
21595+
21596+	bool bt1886eotf_pre_conv_en;
21597+	bool rgb2rgb_pre_conv_en;
21598+	bool rgb2rgb_pre_conv_mode;
21599+	bool st2084oetf_pre_conv_en;
21600+
21601+	bool bt1886eotf_post_conv_en;
21602+	bool rgb2rgb_post_conv_en;
21603+	bool rgb2rgb_post_conv_mode;
21604+	bool st2084oetf_post_conv_en;
21605+};
21606+
21607+struct rockchip_hdr_state {
21608+	bool pre_overlay;
21609+	bool hdr2sdr_en;
21610+	struct rockchip_sdr2hdr_state sdr2hdr_state;
21611+};
21612+
21613+struct rockchip_bcsh_state {
21614+	int brightness;
21615+	int contrast;
21616+	int saturation;
21617+	int sin_hue;
21618+	int cos_hue;
21619+};
21620+
21621+struct rockchip_crtc {
21622+	struct drm_crtc crtc;
21623+#if defined(CONFIG_ROCKCHIP_DRM_DEBUG)
21624+	/**
21625+	 * @vop_dump_status: the status of vop dump control
21626+	 * @vop_dump_list_head: the head of the vop dump list
21627+	 * @vop_dump_list_init_flag: set once the list has been initialized
21628+	 * @vop_dump_times: controls how many frames are dumped
21629+	 * @frame_count: frame counter for the dump buffer
21630+	 */
21631+	enum vop_dump_status vop_dump_status;
21632+	struct list_head vop_dump_list_head;
21633+	bool vop_dump_list_init_flag;
21634+	int vop_dump_times;
21635+	int frame_count;
21636+#endif
21637+};
21638+
21639+struct rockchip_dsc_sink_cap {
21640+	/**
21641+	 * @slice_width: the number of pixel columns that comprise the slice width
21642+	 * @slice_height: the number of pixel rows that comprise the slice height
21643+	 * @block_pred: whether the sink supports block prediction
21644+	 * @native_420: whether the sink supports DSC with native 4:2:0 compression
21645+	 * @bpc_supported: compressed bpc supported by the sink: 10, 12 or 16 bpc
21646+	 * @version_major: DSC major version
21647+	 * @version_minor: DSC minor version
21648+	 * @target_bits_per_pixel_x16: compressed bits per pixel, multiplied by 16
21649+	 */
21650+	u16 slice_width;
21651+	u16 slice_height;
21652+	bool block_pred;
21653+	bool native_420;
21654+	u8 bpc_supported;
21655+	u8 version_major;
21656+	u8 version_minor;
21657+	u16 target_bits_per_pixel_x16;
21658+};
21659+
21660 struct rockchip_crtc_state {
21661 	struct drm_crtc_state base;
21662 	int output_type;
21663@@ -31,10 +138,206 @@ struct rockchip_crtc_state {
21664 	int output_bpc;
21665 	int output_flags;
21666 	bool enable_afbc;
21667+	/**
21668+	 * @splice_mode: enabled when displaying a mode with hdisplay > 4096 on rk3588
21669+	 */
21670+	bool splice_mode;
21671+
21672+	/**
21673+	 * @hold_mode: enabled in any of the following cases:
21674+	 * (1) mcu hold mode
21675+	 * (2) mipi dsi cmd mode
21676+	 * (3) edp psr mode
21677+	 */
21678+	bool hold_mode;
21679+
21680+	struct drm_tv_connector_state *tv_state;
21681+	int left_margin;
21682+	int right_margin;
21683+	int top_margin;
21684+	int bottom_margin;
21685+	int vdisplay;
21686+	int afbdc_win_format;
21687+	int afbdc_win_width;
21688+	int afbdc_win_height;
21689+	int afbdc_win_ptr;
21690+	int afbdc_win_id;
21691+	int afbdc_en;
21692+	int afbdc_win_vir_width;
21693+	int afbdc_win_xoffset;
21694+	int afbdc_win_yoffset;
21695+	int dsp_layer_sel;
21696+	u32 output_if;
21697+	u32 bus_format;
21698+	u32 bus_flags;
21699+	int yuv_overlay;
21700+	int post_r2y_en;
21701+	int post_y2r_en;
21702+	int post_csc_mode;
21703+	int bcsh_en;
21704+	int color_space;
21705+	int eotf;
21706+	u32 background;
21707+	u32 line_flag;
21708+	u8 mode_update;
21709+	u8 dsc_id;
21710+	u8 dsc_enable;
21711+
21712+	u8 dsc_slice_num;
21713+	u8 dsc_pixel_num;
21714+
21715+	u64 dsc_txp_clk_rate;
21716+	u64 dsc_pxl_clk_rate;
21717+	u64 dsc_cds_clk_rate;
21718+
21719+	struct drm_dsc_picture_parameter_set pps;
21720+	struct rockchip_dsc_sink_cap dsc_sink_cap;
21721+	struct rockchip_hdr_state hdr;
21722 };
21723+
21724 #define to_rockchip_crtc_state(s) \
21725 		container_of(s, struct rockchip_crtc_state, base)
21726 
21727+struct rockchip_drm_vcnt {
21728+	struct drm_pending_vblank_event *event;
21729+	__u32 sequence;
21730+	int pipe;
21731+};
21732+
21733+struct rockchip_logo {
21734+	dma_addr_t dma_addr;
21735+	void *kvaddr;
21736+	phys_addr_t start;
21737+	phys_addr_t size;
21738+	int count;
21739+};
21740+
21741+struct loader_cubic_lut {
21742+	bool enable;
21743+	u32 offset;
21744+};
21745+
21746+struct rockchip_drm_dsc_cap {
21747+	bool v_1p2;
21748+	bool native_420;
21749+	bool all_bpp;
21750+	u8 bpc_supported;
21751+	u8 max_slices;
21752+	u8 max_lanes;
21753+	u8 max_frl_rate_per_lane;
21754+	u8 total_chunk_kbytes;
21755+	int clk_per_slice;
21756+};
21757+
21758+struct ver_26_v0 {
21759+	u8 yuv422_12bit;
21760+	u8 support_2160p_60;
21761+	u8 global_dimming;
21762+	u8 dm_major_ver;
21763+	u8 dm_minor_ver;
21764+	u16 t_min_pq;
21765+	u16 t_max_pq;
21766+	u16 rx;
21767+	u16 ry;
21768+	u16 gx;
21769+	u16 gy;
21770+	u16 bx;
21771+	u16 by;
21772+	u16 wx;
21773+	u16 wy;
21774+} __packed;
21775+
21776+struct ver_15_v1 {
21777+	u8 yuv422_12bit;
21778+	u8 support_2160p_60;
21779+	u8 global_dimming;
21780+	u8 dm_version;
21781+	u8 colorimetry;
21782+	u8 t_max_lum;
21783+	u8 t_min_lum;
21784+	u8 rx;
21785+	u8 ry;
21786+	u8 gx;
21787+	u8 gy;
21788+	u8 bx;
21789+	u8 by;
21790+} __packed;
21791+
21792+struct ver_12_v1 {
21793+	u8 yuv422_12bit;
21794+	u8 support_2160p_60;
21795+	u8 global_dimming;
21796+	u8 dm_version;
21797+	u8 colorimetry;
21798+	u8 low_latency;
21799+	u8 t_max_lum;
21800+	u8 t_min_lum;
21801+	u8 unique_rx;
21802+	u8 unique_ry;
21803+	u8 unique_gx;
21804+	u8 unique_gy;
21805+	u8 unique_bx;
21806+	u8 unique_by;
21807+} __packed;
21808+
21809+struct ver_12_v2 {
21810+	u8 yuv422_12bit;
21811+	u8 backlt_ctrl;
21812+	u8 global_dimming;
21813+	u8 dm_version;
21814+	u8 backlt_min_luma;
21815+	u8 interface;
21816+	u8 yuv444_10b_12b;
21817+	u8 t_min_pq_v2;
21818+	u8 t_max_pq_v2;
21819+	u8 unique_rx;
21820+	u8 unique_ry;
21821+	u8 unique_gx;
21822+	u8 unique_gy;
21823+	u8 unique_bx;
21824+	u8 unique_by;
21825+} __packed;
21826+
21827+struct next_hdr_sink_data {
21828+	u8 version;
21829+	struct ver_26_v0 ver_26_v0;
21830+	struct ver_15_v1 ver_15_v1;
21831+	struct ver_12_v1 ver_12_v1;
21832+	struct ver_12_v2 ver_12_v2;
21833+} __packed;
21834+
21835+/*
21836+ * Rockchip drm private crtc funcs.
21837+ * @loader_protect: keep power on for the crtc that shows the loader logo
21838+ * @enable_vblank: enable crtc vblank irq.
21839+ * @disable_vblank: disable crtc vblank irq.
21840+ * @bandwidth: report the current crtc bandwidth consumption.
21841+ */
21842+struct rockchip_crtc_funcs {
21843+	int (*loader_protect)(struct drm_crtc *crtc, bool on);
21844+	int (*enable_vblank)(struct drm_crtc *crtc);
21845+	void (*disable_vblank)(struct drm_crtc *crtc);
21846+	size_t (*bandwidth)(struct drm_crtc *crtc,
21847+			    struct drm_crtc_state *crtc_state,
21848+			    struct dmcfreq_vop_info *vop_bw_info);
21849+	void (*cancel_pending_vblank)(struct drm_crtc *crtc,
21850+				      struct drm_file *file_priv);
21851+	int (*debugfs_init)(struct drm_minor *minor, struct drm_crtc *crtc);
21852+	int (*debugfs_dump)(struct drm_crtc *crtc, struct seq_file *s);
21853+	void (*regs_dump)(struct drm_crtc *crtc, struct seq_file *s);
21854+	enum drm_mode_status (*mode_valid)(struct drm_crtc *crtc,
21855+					   const struct drm_display_mode *mode,
21856+					   int output_type);
21857+	void (*crtc_close)(struct drm_crtc *crtc);
21858+	void (*crtc_send_mcu_cmd)(struct drm_crtc *crtc, u32 type, u32 value);
21859+	void (*te_handler)(struct drm_crtc *crtc);
21860+};
21861+
21862+struct rockchip_dclk_pll {
21863+	struct clk *pll;
21864+	unsigned int use_count;
21865+};
21866+
21867 /*
21868  * Rockchip drm private structure.
21869  *
21870@@ -43,13 +346,54 @@ struct rockchip_crtc_state {
21871  * @mm_lock: protect drm_mm on multi-threads.
21872  */
21873 struct rockchip_drm_private {
21874-	struct drm_fb_helper fbdev_helper;
21875+	struct rockchip_logo *logo;
21876+	struct drm_fb_helper *fbdev_helper;
21877 	struct drm_gem_object *fbdev_bo;
21878 	struct iommu_domain *domain;
21879+	struct gen_pool *secure_buffer_pool;
21880 	struct mutex mm_lock;
21881 	struct drm_mm mm;
21882 	struct list_head psr_list;
21883 	struct mutex psr_list_lock;
21884+	struct mutex commit_lock;
21885+
21886+	/* private crtc prop */
21887+	struct drm_property *soc_id_prop;
21888+	struct drm_property *port_id_prop;
21889+	struct drm_property *aclk_prop;
21890+	struct drm_property *bg_prop;
21891+	struct drm_property *line_flag_prop;
21892+
21893+	/* private plane prop */
21894+	struct drm_property *eotf_prop;
21895+	struct drm_property *color_space_prop;
21896+	struct drm_property *async_commit_prop;
21897+	struct drm_property *share_id_prop;
21898+
21899+	/* private connector prop */
21900+	struct drm_property *connector_id_prop;
21901+
21902+	const struct rockchip_crtc_funcs *crtc_funcs[ROCKCHIP_MAX_CRTC];
21903+
21904+	struct rockchip_dclk_pll default_pll;
21905+	struct rockchip_dclk_pll hdmi_pll;
21906+
21907+	/*
21908+	 * protect shared overlay resources such as
21909+	 * OVL_LAYER_SEL/OVL_PORT_SEL
21910+	 */
21911+	struct mutex ovl_lock;
21912+
21913+	struct rockchip_drm_vcnt vcnt[ROCKCHIP_MAX_CRTC];
21914+	/**
21915+	 * @loader_protect:
21916+	 * ignore restore_fbdev_mode_atomic while the loader logo is still on screen
21917+	 */
21918+	bool loader_protect;
21919+
21920+	dma_addr_t cubic_lut_dma_addr;
21921+	void *cubic_lut_kvaddr;
21922+	struct loader_cubic_lut cubic_lut[ROCKCHIP_MAX_CRTC];
21923 };
21924 
21925 int rockchip_drm_dma_attach_device(struct drm_device *drm_dev,
21926@@ -57,14 +401,49 @@ int rockchip_drm_dma_attach_device(struct drm_device *drm_dev,
21927 void rockchip_drm_dma_detach_device(struct drm_device *drm_dev,
21928 				    struct device *dev);
21929 int rockchip_drm_wait_vact_end(struct drm_crtc *crtc, unsigned int mstimeout);
21930+int rockchip_register_crtc_funcs(struct drm_crtc *crtc,
21931+				 const struct rockchip_crtc_funcs *crtc_funcs);
21932+void rockchip_unregister_crtc_funcs(struct drm_crtc *crtc);
21933+
21934+void rockchip_drm_register_sub_dev(struct rockchip_drm_sub_dev *sub_dev);
21935+void rockchip_drm_unregister_sub_dev(struct rockchip_drm_sub_dev *sub_dev);
21936+struct rockchip_drm_sub_dev *rockchip_drm_get_sub_dev(struct device_node *node);
21937+int rockchip_drm_add_modes_noedid(struct drm_connector *connector);
21938+void rockchip_drm_te_handle(struct drm_crtc *crtc);
21939+void drm_mode_convert_to_split_mode(struct drm_display_mode *mode);
21940+void drm_mode_convert_to_origin_mode(struct drm_display_mode *mode);
21941+#if IS_ENABLED(CONFIG_DRM_ROCKCHIP)
21942+int rockchip_drm_get_sub_dev_type(void);
21943+#else
21944+static inline int rockchip_drm_get_sub_dev_type(void)
21945+{
21946+	return DRM_MODE_CONNECTOR_Unknown;
21947+}
21948+#endif
21949 
21950 int rockchip_drm_endpoint_is_subdriver(struct device_node *ep);
21951+uint32_t rockchip_drm_of_find_possible_crtcs(struct drm_device *dev,
21952+					     struct device_node *port);
21953+uint32_t rockchip_drm_get_bpp(const struct drm_format_info *info);
21954+int rockchip_drm_get_yuv422_format(struct drm_connector *connector,
21955+				   struct edid *edid);
21956+int rockchip_drm_parse_cea_ext(struct rockchip_drm_dsc_cap *dsc_cap,
21957+			       u8 *max_frl_rate_per_lane, u8 *max_lanes,
21958+			       const struct edid *edid);
21959+int rockchip_drm_parse_next_hdr(struct next_hdr_sink_data *sink_data,
21960+				const struct edid *edid);
21961+
21962 extern struct platform_driver cdn_dp_driver;
21963 extern struct platform_driver dw_hdmi_rockchip_pltfm_driver;
21964 extern struct platform_driver dw_mipi_dsi_rockchip_driver;
21965+extern struct platform_driver dw_mipi_dsi2_rockchip_driver;
21966 extern struct platform_driver inno_hdmi_driver;
21967 extern struct platform_driver rockchip_dp_driver;
21968 extern struct platform_driver rockchip_lvds_driver;
21969 extern struct platform_driver vop_platform_driver;
21970+extern struct platform_driver vop2_platform_driver;
21971 extern struct platform_driver rk3066_hdmi_driver;
21972+extern struct platform_driver rockchip_rgb_driver;
21973+extern struct platform_driver dw_dp_driver;
21974+extern struct platform_driver vconn_platform_driver;
21975 #endif /* _ROCKCHIP_DRM_DRV_H_ */
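struct rockchip_drm_sub_dev and the register/unregister helpers declared above let each connector driver expose loader-logo protection and out-of-band hotplug handling to the core. A hedged sketch of how a connector driver might use them follows; the my_* identifiers are hypothetical, only the struct fields and helper names come from this header.

/* Sketch under assumptions: the "my_*" names are illustrative, not part of the patch. */
#include "rockchip_drm_drv.h"

static struct rockchip_drm_sub_dev my_sub_dev;

static void my_loader_protect(struct drm_encoder *encoder, bool on)
{
	/* keep clocks/power up while the loader logo is still being scanned out */
}

static int my_connector_bind(struct device *dev, struct drm_connector *connector)
{
	my_sub_dev.connector = connector;
	my_sub_dev.of_node = dev->of_node;
	my_sub_dev.loader_protect = my_loader_protect;
	rockchip_drm_register_sub_dev(&my_sub_dev);
	return 0;
}

static void my_connector_unbind(void)
{
	rockchip_drm_unregister_sub_dev(&my_sub_dev);
}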
21976diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
21977index 3aa37e177..5658fe507 100644
21978--- a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
21979+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
21980@@ -5,6 +5,7 @@
21981  */
21982 
21983 #include <linux/kernel.h>
21984+#include <linux/devfreq.h>
21985 
21986 #include <drm/drm.h>
21987 #include <drm/drm_atomic.h>
21988@@ -13,13 +14,43 @@
21989 #include <drm/drm_fourcc.h>
21990 #include <drm/drm_gem_framebuffer_helper.h>
21991 #include <drm/drm_probe_helper.h>
21992+#include <soc/rockchip/rockchip_dmc.h>
21993 
21994 #include "rockchip_drm_drv.h"
21995 #include "rockchip_drm_fb.h"
21996 #include "rockchip_drm_gem.h"
21997+#include <drm/rockchip_drm_logo.h>
21998+
21999+static bool is_rockchip_logo_fb(struct drm_framebuffer *fb)
22000+{
22001+	return fb->flags & ROCKCHIP_DRM_MODE_LOGO_FB ? true : false;
22002+}
22003+
22004+static void rockchip_drm_fb_destroy(struct drm_framebuffer *fb)
22005+{
22006+	int i = 0;
22007+
22008+	drm_framebuffer_cleanup(fb);
22009+
22010+	if (is_rockchip_logo_fb(fb)) {
22011+		struct rockchip_drm_logo_fb *rockchip_logo_fb = to_rockchip_logo_fb(fb);
22012+
22013+#ifndef MODULE
22014+		rockchip_free_loader_memory(fb->dev);
22015+#endif
22016+		kfree(rockchip_logo_fb);
22017+	} else {
22018+		for (i = 0; i < 4; i++) {
22019+			if (fb->obj[i])
22020+				drm_gem_object_put(fb->obj[i]);
22021+		}
22022+
22023+		kfree(fb);
22024+	}
22025+}
22026 
22027 static const struct drm_framebuffer_funcs rockchip_drm_fb_funcs = {
22028-	.destroy       = drm_gem_fb_destroy,
22029+	.destroy       = rockchip_drm_fb_destroy,
22030 	.create_handle = drm_gem_fb_create_handle,
22031 	.dirty	       = drm_atomic_helper_dirtyfb,
22032 };
22033@@ -53,8 +84,103 @@ rockchip_fb_alloc(struct drm_device *dev, const struct drm_mode_fb_cmd2 *mode_cm
22034 	return fb;
22035 }
22036 
22037+struct drm_framebuffer *
22038+rockchip_drm_logo_fb_alloc(struct drm_device *dev, const struct drm_mode_fb_cmd2 *mode_cmd,
22039+			   struct rockchip_logo *logo)
22040+{
22041+	int ret = 0;
22042+	struct rockchip_drm_logo_fb *rockchip_logo_fb;
22043+	struct drm_framebuffer *fb;
22044+
22045+	rockchip_logo_fb = kzalloc(sizeof(*rockchip_logo_fb), GFP_KERNEL);
22046+	if (!rockchip_logo_fb)
22047+		return ERR_PTR(-ENOMEM);
22048+	fb = &rockchip_logo_fb->fb;
22049+
22050+	drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd);
22051+
22052+	ret = drm_framebuffer_init(dev, fb, &rockchip_drm_fb_funcs);
22053+	if (ret) {
22054+		DRM_DEV_ERROR(dev->dev,
22055+			      "Failed to initialize rockchip logo fb: %d\n",
22056+			      ret);
22057+		kfree(rockchip_logo_fb);
22058+		return ERR_PTR(ret);
22059+	}
22060+
22061+	fb->flags |= ROCKCHIP_DRM_MODE_LOGO_FB;
22062+	rockchip_logo_fb->logo = logo;
22063+	rockchip_logo_fb->fb.obj[0] = &rockchip_logo_fb->rk_obj.base;
22064+	rockchip_logo_fb->rk_obj.dma_addr = logo->dma_addr;
22065+	rockchip_logo_fb->rk_obj.kvaddr = logo->kvaddr;
22066+	logo->count++;
22067+
22068+	return &rockchip_logo_fb->fb;
22069+}
22070+
22071+static int rockchip_drm_bandwidth_atomic_check(struct drm_device *dev,
22072+					       struct drm_atomic_state *state,
22073+					       struct dmcfreq_vop_info *vop_bw_info)
22074+{
22075+	struct rockchip_drm_private *priv = dev->dev_private;
22076+	struct drm_crtc_state *old_crtc_state;
22077+	const struct rockchip_crtc_funcs *funcs;
22078+	struct drm_crtc *crtc;
22079+	int i;
22080+
22081+	vop_bw_info->line_bw_mbyte = 0;
22082+	vop_bw_info->frame_bw_mbyte = 0;
22083+	vop_bw_info->plane_num = 0;
22084+
22085+	for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) {
22086+		funcs = priv->crtc_funcs[drm_crtc_index(crtc)];
22087+
22088+		if (funcs && funcs->bandwidth)
22089+			funcs->bandwidth(crtc, old_crtc_state, vop_bw_info);
22090+	}
22091+
22092+	return 0;
22093+}
22094+
22095+/**
22096+ * rockchip_drm_atomic_helper_commit_tail_rpm - commit atomic update to hardware
22097+ * @old_state: atomic state to be committed (holds the new state after the swap)
22098+ *
22099+ * This is an alternative implementation for the
22100+ * &drm_mode_config_helper_funcs.atomic_commit_tail hook, for drivers
22101+ * that support runtime_pm or need the CRTC to be enabled to perform a
22102+ * commit. Otherwise, one should use the default implementation
22103+ * drm_atomic_helper_commit_tail().
22104+ */
22105+static void rockchip_drm_atomic_helper_commit_tail_rpm(struct drm_atomic_state *old_state)
22106+{
22107+	struct drm_device *dev = old_state->dev;
22108+	struct rockchip_drm_private *prv = dev->dev_private;
22109+	struct dmcfreq_vop_info vop_bw_info;
22110+
22111+	drm_atomic_helper_commit_modeset_disables(dev, old_state);
22112+
22113+	drm_atomic_helper_commit_modeset_enables(dev, old_state);
22114+
22115+	rockchip_drm_bandwidth_atomic_check(dev, old_state, &vop_bw_info);
22116+
22117+	rockchip_dmcfreq_vop_bandwidth_update(&vop_bw_info);
22118+
22119+	mutex_lock(&prv->ovl_lock);
22120+	drm_atomic_helper_commit_planes(dev, old_state, DRM_PLANE_COMMIT_ACTIVE_ONLY);
22121+	mutex_unlock(&prv->ovl_lock);
22122+
22123+	drm_atomic_helper_fake_vblank(old_state);
22124+
22125+	drm_atomic_helper_commit_hw_done(old_state);
22126+
22127+	drm_atomic_helper_wait_for_vblanks(dev, old_state);
22128+
22129+	drm_atomic_helper_cleanup_planes(dev, old_state);
22130+}
22131+
22132 static const struct drm_mode_config_helper_funcs rockchip_mode_config_helpers = {
22133-	.atomic_commit_tail = drm_atomic_helper_commit_tail_rpm,
22134+	.atomic_commit_tail = rockchip_drm_atomic_helper_commit_tail_rpm,
22135 };
22136 
22137 static struct drm_framebuffer *
22138@@ -81,7 +207,7 @@ rockchip_fb_create(struct drm_device *dev, struct drm_file *file,
22139 	}
22140 
22141 	if (drm_is_afbc(mode_cmd->modifier[0])) {
22142-		int ret, i;
22143+		int i;
22144 
22145 		ret = drm_gem_fb_afbc_init(dev, mode_cmd, afbc_fb);
22146 		if (ret) {
22147@@ -98,9 +224,18 @@ rockchip_fb_create(struct drm_device *dev, struct drm_file *file,
22148 	return &afbc_fb->base;
22149 }
22150 
22151+static void rockchip_drm_output_poll_changed(struct drm_device *dev)
22152+{
22153+	struct rockchip_drm_private *private = dev->dev_private;
22154+	struct drm_fb_helper *fb_helper = private->fbdev_helper;
22155+
22156+	if (fb_helper && dev->mode_config.poll_enabled && !private->loader_protect)
22157+		drm_fb_helper_hotplug_event(fb_helper);
22158+}
22159+
22160 static const struct drm_mode_config_funcs rockchip_drm_mode_config_funcs = {
22161 	.fb_create = rockchip_fb_create,
22162-	.output_poll_changed = drm_fb_helper_output_poll_changed,
22163+	.output_poll_changed = rockchip_drm_output_poll_changed,
22164 	.atomic_check = drm_atomic_helper_check,
22165 	.atomic_commit = drm_atomic_helper_commit,
22166 };
22167@@ -125,12 +260,13 @@ void rockchip_drm_mode_config_init(struct drm_device *dev)
22168 	dev->mode_config.min_height = 0;
22169 
22170 	/*
22171-	 * set max width and height as default value(4096x4096).
22172+	 * set max width and height to the default value (16384x16384).
22173 	 * this value would be used to check framebuffer size limitation
22174 	 * at drm_mode_addfb().
22175 	 */
22176-	dev->mode_config.max_width = 4096;
22177-	dev->mode_config.max_height = 4096;
22178+	dev->mode_config.max_width = 16384;
22179+	dev->mode_config.max_height = 16384;
22180+	dev->mode_config.async_page_flip = true;
22181 
22182 	dev->mode_config.funcs = &rockchip_drm_mode_config_funcs;
22183 	dev->mode_config.helper_private = &rockchip_mode_config_helpers;
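rockchip_drm_logo_fb_alloc() above wraps the reserved loader-logo memory in a drm_framebuffer that carries its own rockchip_gem_object instead of GEM handles, and the ROCKCHIP_DRM_MODE_LOGO_FB flag lets rockchip_drm_fb_destroy() release the loader memory later. A hedged sketch of how the (not shown) logo code might call it; the resolution, format and stride below are illustrative values only.

/* Sketch only: 1920x1080 XRGB8888 is an assumed example, not taken from this patch. */
#include <drm/drm_fourcc.h>

#include "rockchip_drm_drv.h"
#include "rockchip_drm_fb.h"

static struct drm_framebuffer *
wrap_loader_logo(struct drm_device *dev, struct rockchip_logo *logo)
{
	struct drm_mode_fb_cmd2 mode_cmd = {
		.width = 1920,
		.height = 1080,
		.pixel_format = DRM_FORMAT_XRGB8888,
		.pitches[0] = 1920 * 4,
	};

	return rockchip_drm_logo_fb_alloc(dev, &mode_cmd, logo);
}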
22184diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fb.h b/drivers/gpu/drm/rockchip/rockchip_drm_fb.h
22185index 1a6965210..3b8755e9f 100644
22186--- a/drivers/gpu/drm/rockchip/rockchip_drm_fb.h
22187+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fb.h
22188@@ -7,6 +7,10 @@
22189 #ifndef _ROCKCHIP_DRM_FB_H
22190 #define _ROCKCHIP_DRM_FB_H
22191 
22192+#include "rockchip_drm_gem.h"
22193+
22194+#define ROCKCHIP_DRM_MODE_LOGO_FB	(1<<31) /* used for the kernel logo; kept clear of the fb flags (e.g. DRM_MODE_FB_MODIFIERS) in drm_mode.h */
22195+
22196 struct drm_framebuffer *
22197 rockchip_drm_framebuffer_init(struct drm_device *dev,
22198 			      const struct drm_mode_fb_cmd2 *mode_cmd,
22199@@ -14,4 +18,16 @@ rockchip_drm_framebuffer_init(struct drm_device *dev,
22200 void rockchip_drm_framebuffer_fini(struct drm_framebuffer *fb);
22201 
22202 void rockchip_drm_mode_config_init(struct drm_device *dev);
22203+struct drm_framebuffer *
22204+rockchip_drm_logo_fb_alloc(struct drm_device *dev, const struct drm_mode_fb_cmd2 *mode_cmd,
22205+			   struct rockchip_logo *logo);
22206+
22207+#define to_rockchip_logo_fb(x) container_of(x, struct rockchip_drm_logo_fb, fb)
22208+
22209+struct rockchip_drm_logo_fb {
22210+	struct drm_framebuffer fb;
22211+	struct rockchip_logo *logo;
22212+	struct rockchip_gem_object rk_obj;
22213+};
22214+
22215 #endif /* _ROCKCHIP_DRM_FB_H */
22216diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
22217index 2fdc455c4..065fa7fde 100644
22218--- a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
22219+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
22220@@ -15,14 +15,12 @@
22221 #include "rockchip_drm_fbdev.h"
22222 
22223 #define PREFERRED_BPP		32
22224-#define to_drm_private(x) \
22225-		container_of(x, struct rockchip_drm_private, fbdev_helper)
22226 
22227 static int rockchip_fbdev_mmap(struct fb_info *info,
22228 			       struct vm_area_struct *vma)
22229 {
22230 	struct drm_fb_helper *helper = info->par;
22231-	struct rockchip_drm_private *private = to_drm_private(helper);
22232+	struct rockchip_drm_private *private = helper->dev->dev_private;
22233 
22234 	return rockchip_gem_mmap_buf(private->fbdev_bo, vma);
22235 }
22236@@ -39,7 +37,7 @@ static const struct fb_ops rockchip_drm_fbdev_ops = {
22237 static int rockchip_drm_fbdev_create(struct drm_fb_helper *helper,
22238 				     struct drm_fb_helper_surface_size *sizes)
22239 {
22240-	struct rockchip_drm_private *private = to_drm_private(helper);
22241+	struct rockchip_drm_private *private = helper->dev->dev_private;
22242 	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
22243 	struct drm_device *dev = helper->dev;
22244 	struct rockchip_gem_object *rk_obj;
22245@@ -60,7 +58,7 @@ static int rockchip_drm_fbdev_create(struct drm_fb_helper *helper,
22246 
22247 	size = mode_cmd.pitches[0] * mode_cmd.height;
22248 
22249-	rk_obj = rockchip_gem_create_object(dev, size, true);
22250+	rk_obj = rockchip_gem_create_object(dev, size, true, 0);
22251 	if (IS_ERR(rk_obj))
22252 		return -ENOMEM;
22253 
22254@@ -120,7 +118,10 @@ int rockchip_drm_fbdev_init(struct drm_device *dev)
22255 	if (!dev->mode_config.num_crtc || !dev->mode_config.num_connector)
22256 		return -EINVAL;
22257 
22258-	helper = &private->fbdev_helper;
22259+	helper = devm_kzalloc(dev->dev, sizeof(*helper), GFP_KERNEL);
22260+	if (!helper)
22261+		return -ENOMEM;
22262+	private->fbdev_helper = helper;
22263 
22264 	drm_fb_helper_prepare(dev, helper, &rockchip_drm_fb_helper_funcs);
22265 
22266@@ -150,9 +151,10 @@ int rockchip_drm_fbdev_init(struct drm_device *dev)
22267 void rockchip_drm_fbdev_fini(struct drm_device *dev)
22268 {
22269 	struct rockchip_drm_private *private = dev->dev_private;
22270-	struct drm_fb_helper *helper;
22271+	struct drm_fb_helper *helper = private->fbdev_helper;
22272 
22273-	helper = &private->fbdev_helper;
22274+	if (!helper)
22275+		return;
22276 
22277 	drm_fb_helper_unregister_fbi(helper);
22278 
22279diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
22280index 62e5d0970..51dead7f3 100644
22281--- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
22282+++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
22283@@ -4,7 +4,7 @@
22284  * Author:Mark Yao <mark.yao@rock-chips.com>
22285  */
22286 
22287-#include <linux/dma-buf.h>
22288+#include <linux/dma-buf-cache.h>
22289 #include <linux/iommu.h>
22290 #include <linux/vmalloc.h>
22291 
22292@@ -13,9 +13,25 @@
22293 #include <drm/drm_prime.h>
22294 #include <drm/drm_vma_manager.h>
22295 
22296+#include <linux/genalloc.h>
22297+#include <linux/iommu.h>
22298+#include <linux/pagemap.h>
22299+#include <linux/vmalloc.h>
22300+#include <linux/rockchip/rockchip_sip.h>
22301+
22302 #include "rockchip_drm_drv.h"
22303 #include "rockchip_drm_gem.h"
22304 
22305+static u32 bank_bit_first = 12;
22306+static u32 bank_bit_mask = 0x7;
22307+
22308+struct page_info {
22309+	struct page *page;
22310+	struct list_head list;
22311+};
22312+
22313+#define PG_ROUND       8
22314+
22315 static int rockchip_gem_iommu_map(struct rockchip_gem_object *rk_obj)
22316 {
22317 	struct drm_device *drm = rk_obj->base.dev;
22318@@ -45,6 +61,8 @@ static int rockchip_gem_iommu_map(struct rockchip_gem_object *rk_obj)
22319 		goto err_remove_node;
22320 	}
22321 
22322+	iommu_flush_iotlb_all(private->domain);
22323+
22324 	rk_obj->size = ret;
22325 
22326 	return 0;
22327@@ -73,25 +91,137 @@ static int rockchip_gem_iommu_unmap(struct rockchip_gem_object *rk_obj)
22328 	return 0;
22329 }
22330 
22331+static void rockchip_gem_free_list(struct list_head lists[])
22332+{
22333+	struct page_info *info, *tmp_info;
22334+	int i;
22335+
22336+	for (i = 0; i < PG_ROUND; i++) {
22337+		list_for_each_entry_safe(info, tmp_info, &lists[i], list) {
22338+			list_del(&info->list);
22339+			kfree(info);
22340+		}
22341+	}
22342+}
22343+
22344+void rockchip_gem_get_ddr_info(void)
22345+{
22346+	struct dram_addrmap_info *ddr_map_info;
22347+
22348+	ddr_map_info = sip_smc_get_dram_map();
22349+	if (ddr_map_info) {
22350+		bank_bit_first = ddr_map_info->bank_bit_first;
22351+		bank_bit_mask = ddr_map_info->bank_bit_mask;
22352+	}
22353+}
22354+
22355 static int rockchip_gem_get_pages(struct rockchip_gem_object *rk_obj)
22356 {
22357 	struct drm_device *drm = rk_obj->base.dev;
22358 	int ret, i;
22359 	struct scatterlist *s;
22360-
22361-	rk_obj->pages = drm_gem_get_pages(&rk_obj->base);
22362-	if (IS_ERR(rk_obj->pages))
22363-		return PTR_ERR(rk_obj->pages);
22364+	unsigned int cur_page;
22365+	struct page **pages, **dst_pages;
22366+	int j;
22367+	int n_pages;
22368+	unsigned long chunk_pages;
22369+	unsigned long remain;
22370+	struct list_head lists[PG_ROUND];
22371+	dma_addr_t phys;
22372+	int end = 0;
22373+	unsigned int bit_index;
22374+	unsigned int block_index[PG_ROUND] = {0};
22375+	struct page_info *info;
22376+	unsigned int maximum;
22377+
22378+	for (i = 0; i < PG_ROUND; i++)
22379+		INIT_LIST_HEAD(&lists[i]);
22380+
22381+	pages = drm_gem_get_pages(&rk_obj->base);
22382+	if (IS_ERR(pages))
22383+		return PTR_ERR(pages);
22384+
22385+	rk_obj->pages = pages;
22386 
22387 	rk_obj->num_pages = rk_obj->base.size >> PAGE_SHIFT;
22388 
22389+	n_pages = rk_obj->num_pages;
22390+
22391+	dst_pages = __vmalloc(sizeof(struct page *) * n_pages,
22392+			GFP_KERNEL | __GFP_HIGHMEM);
22393+	if (!dst_pages) {
22394+		ret = -ENOMEM;
22395+		goto err_put_pages;
22396+	}
22397+
22398+	DRM_DEBUG_KMS("bank_bit_first = 0x%x, bank_bit_mask = 0x%x\n",
22399+		      bank_bit_first, bank_bit_mask);
22400+
22401+	cur_page = 0;
22402+	remain = n_pages;
22403+	/* look for the end of the current chunk */
22404+	while (remain) {
22405+		for (j = cur_page + 1; j < n_pages; ++j) {
22406+			if (page_to_pfn(pages[j]) !=
22407+				page_to_pfn(pages[j - 1]) + 1)
22408+				break;
22409+		}
22410+
22411+		chunk_pages = j - cur_page;
22412+		if (chunk_pages >= PG_ROUND) {
22413+			for (i = 0; i < chunk_pages; i++)
22414+				dst_pages[end + i] = pages[cur_page + i];
22415+			end += chunk_pages;
22416+		} else {
22417+			for (i = 0; i < chunk_pages; i++) {
22418+				info = kmalloc(sizeof(*info), GFP_KERNEL);
22419+				if (!info) {
22420+					ret = -ENOMEM;
22421+					goto err_put_list;
22422+				}
22423+
22424+				INIT_LIST_HEAD(&info->list);
22425+				info->page = pages[cur_page + i];
22426+				phys = page_to_phys(info->page);
22427+				bit_index = ((phys >> bank_bit_first) & bank_bit_mask) % PG_ROUND;
22428+				list_add_tail(&info->list, &lists[bit_index]);
22429+				block_index[bit_index]++;
22430+			}
22431+		}
22432+
22433+		cur_page = j;
22434+		remain -= chunk_pages;
22435+	}
22436+
22437+	maximum = block_index[0];
22438+	for (i = 1; i < PG_ROUND; i++)
22439+		maximum = max(maximum, block_index[i]);
22440+
22441+	for (i = 0; i < maximum; i++) {
22442+		for (j = 0; j < PG_ROUND; j++) {
22443+			if (!list_empty(&lists[j])) {
22444+				struct page_info *info;
22445+
22446+				info = list_first_entry(&lists[j],
22447+							struct page_info, list);
22448+				dst_pages[end++] = info->page;
22449+				list_del(&info->list);
22450+				kfree(info);
22451+			}
22452+		}
22453+	}
22454+
22455+	DRM_DEBUG_KMS("%s, %d, end = %d, n_pages = %d\n", __func__, __LINE__,
22456+			end, n_pages);
22457 	rk_obj->sgt = drm_prime_pages_to_sg(rk_obj->base.dev,
22458-					    rk_obj->pages, rk_obj->num_pages);
22459+					    dst_pages, rk_obj->num_pages);
22460 	if (IS_ERR(rk_obj->sgt)) {
22461 		ret = PTR_ERR(rk_obj->sgt);
22462-		goto err_put_pages;
22463+		goto err_put_list;
22464 	}
22465 
22466+	rk_obj->pages = dst_pages;
22467+
22468 	/*
22469 	 * Fake up the SG table so that dma_sync_sg_for_device() can be used
22470 	 * to flush the pages associated with it.
22471@@ -104,8 +234,13 @@ static int rockchip_gem_get_pages(struct rockchip_gem_object *rk_obj)
22472 
22473 	dma_sync_sgtable_for_device(drm->dev, rk_obj->sgt, DMA_TO_DEVICE);
22474 
22475+	kvfree(pages);
22476+
22477 	return 0;
22478 
22479+err_put_list:
22480+	rockchip_gem_free_list(lists);
22481+	kvfree(dst_pages);
22482 err_put_pages:
22483 	drm_gem_put_pages(&rk_obj->base, rk_obj->pages, false, false);
22484 	return ret;
22485@@ -118,59 +253,164 @@ static void rockchip_gem_put_pages(struct rockchip_gem_object *rk_obj)
22486 	drm_gem_put_pages(&rk_obj->base, rk_obj->pages, true, true);
22487 }
22488 
22489-static int rockchip_gem_alloc_iommu(struct rockchip_gem_object *rk_obj,
22490-				    bool alloc_kmap)
22491+static inline void *drm_calloc_large(size_t nmemb, size_t size);
22492+static inline void drm_free_large(void *ptr);
22493+static void rockchip_gem_free_dma(struct rockchip_gem_object *rk_obj);
22494+static int rockchip_gem_alloc_dma(struct rockchip_gem_object *rk_obj,
22495+				  bool alloc_kmap)
22496 {
22497-	int ret;
22498+	struct drm_gem_object *obj = &rk_obj->base;
22499+	struct drm_device *drm = obj->dev;
22500+	struct sg_table *sgt;
22501+	int ret, i;
22502+	struct scatterlist *s;
22503 
22504-	ret = rockchip_gem_get_pages(rk_obj);
22505-	if (ret < 0)
22506-		return ret;
22507+	rk_obj->dma_attrs = DMA_ATTR_WRITE_COMBINE;
22508 
22509-	ret = rockchip_gem_iommu_map(rk_obj);
22510-	if (ret < 0)
22511-		goto err_free;
22512-
22513-	if (alloc_kmap) {
22514-		rk_obj->kvaddr = vmap(rk_obj->pages, rk_obj->num_pages, VM_MAP,
22515-				      pgprot_writecombine(PAGE_KERNEL));
22516-		if (!rk_obj->kvaddr) {
22517-			DRM_ERROR("failed to vmap() buffer\n");
22518-			ret = -ENOMEM;
22519-			goto err_unmap;
22520-		}
22521+	if (!alloc_kmap)
22522+		rk_obj->dma_attrs |= DMA_ATTR_NO_KERNEL_MAPPING;
22523+
22524+	rk_obj->kvaddr = dma_alloc_attrs(drm->dev, obj->size,
22525+					 &rk_obj->dma_handle, GFP_KERNEL,
22526+					 rk_obj->dma_attrs);
22527+	if (!rk_obj->kvaddr) {
22528+		DRM_ERROR("failed to allocate %zu byte dma buffer", obj->size);
22529+		return -ENOMEM;
22530+	}
22531+
22532+	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
22533+	if (!sgt) {
22534+		ret = -ENOMEM;
22535+		goto err_dma_free;
22536 	}
22537 
22538+	ret = dma_get_sgtable_attrs(drm->dev, sgt, rk_obj->kvaddr,
22539+				    rk_obj->dma_handle, obj->size,
22540+				    rk_obj->dma_attrs);
22541+	if (ret) {
22542+		DRM_ERROR("failed to allocate sgt, %d\n", ret);
22543+		goto err_sgt_free;
22544+	}
22545+
22546+	for_each_sg(sgt->sgl, s, sgt->nents, i)
22547+		sg_dma_address(s) = sg_phys(s);
22548+
22549+	rk_obj->num_pages = rk_obj->base.size >> PAGE_SHIFT;
22550+
22551+	rk_obj->pages = drm_calloc_large(rk_obj->num_pages,
22552+					 sizeof(*rk_obj->pages));
22553+	if (!rk_obj->pages) {
22554+		DRM_ERROR("failed to allocate pages.\n");
22555+		goto err_sg_table_free;
22556+	}
22557+
22558+	if (drm_prime_sg_to_page_addr_arrays(sgt, rk_obj->pages, NULL,
22559+					     rk_obj->num_pages)) {
22560+		DRM_ERROR("invalid sgtable.\n");
22561+		ret = -EINVAL;
22562+		goto err_page_free;
22563+	}
22564+
22565+	rk_obj->sgt = sgt;
22566+
22567 	return 0;
22568 
22569-err_unmap:
22570-	rockchip_gem_iommu_unmap(rk_obj);
22571-err_free:
22572-	rockchip_gem_put_pages(rk_obj);
22573+err_page_free:
22574+	drm_free_large(rk_obj->pages);
22575+err_sg_table_free:
22576+	sg_free_table(sgt);
22577+err_sgt_free:
22578+	kfree(sgt);
22579+err_dma_free:
22580+	dma_free_attrs(drm->dev, obj->size, rk_obj->kvaddr,
22581+		       rk_obj->dma_handle, rk_obj->dma_attrs);
22582 
22583 	return ret;
22584 }
22585 
22586-static int rockchip_gem_alloc_dma(struct rockchip_gem_object *rk_obj,
22587-				  bool alloc_kmap)
22588+static inline void *drm_calloc_large(size_t nmemb, size_t size)
22589+{
22590+	if (size != 0 && nmemb > SIZE_MAX / size)
22591+		return NULL;
22592+
22593+	if (size * nmemb <= PAGE_SIZE)
22594+		return kcalloc(nmemb, size, GFP_KERNEL);
22595+
22596+	return __vmalloc(size * nmemb,
22597+			 GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
22598+}
22599+
22600+static inline void drm_free_large(void *ptr)
22601+{
22602+	kvfree(ptr);
22603+}
22604+
22605+static int rockchip_gem_alloc_secure(struct rockchip_gem_object *rk_obj)
22606 {
22607 	struct drm_gem_object *obj = &rk_obj->base;
22608 	struct drm_device *drm = obj->dev;
22609+	struct rockchip_drm_private *private = drm->dev_private;
22610+	unsigned long paddr;
22611+	struct sg_table *sgt;
22612+	int ret = 0, i;
22613 
22614-	rk_obj->dma_attrs = DMA_ATTR_WRITE_COMBINE;
22615-
22616-	if (!alloc_kmap)
22617-		rk_obj->dma_attrs |= DMA_ATTR_NO_KERNEL_MAPPING;
22618+	if (!private->secure_buffer_pool) {
22619+		DRM_ERROR("No secure buffer pool found\n");
22620+		return -ENOMEM;
22621+	}
22622 
22623-	rk_obj->kvaddr = dma_alloc_attrs(drm->dev, obj->size,
22624-					 &rk_obj->dma_addr, GFP_KERNEL,
22625-					 rk_obj->dma_attrs);
22626-	if (!rk_obj->kvaddr) {
22627-		DRM_ERROR("failed to allocate %zu byte dma buffer", obj->size);
22628+	paddr = gen_pool_alloc(private->secure_buffer_pool, rk_obj->base.size);
22629+	if (!paddr) {
22630+		DRM_ERROR("failed to allocate secure buffer\n");
22631 		return -ENOMEM;
22632 	}
22633 
22634+	rk_obj->dma_handle = paddr;
22635+	rk_obj->num_pages = rk_obj->base.size >> PAGE_SHIFT;
22636+
22637+	rk_obj->pages = drm_calloc_large(rk_obj->num_pages,
22638+					 sizeof(*rk_obj->pages));
22639+	if (!rk_obj->pages) {
22640+		DRM_ERROR("failed to allocate pages.\n");
22641+		ret = -ENOMEM;
22642+		goto err_buf_free;
22643+	}
22644+
22645+	i = 0;
22646+	while (i < rk_obj->num_pages) {
22647+		rk_obj->pages[i] = phys_to_page(paddr);
22648+		paddr += PAGE_SIZE;
22649+		i++;
22650+	}
22651+	sgt = drm_prime_pages_to_sg(obj->dev, rk_obj->pages, rk_obj->num_pages);
22652+	if (IS_ERR(sgt)) {
22653+		ret = PTR_ERR(sgt);
22654+		goto err_free_pages;
22655+	}
22656+
22657+	rk_obj->sgt = sgt;
22658+
22659 	return 0;
22660+
22661+err_free_pages:
22662+	drm_free_large(rk_obj->pages);
22663+err_buf_free:
22664+	gen_pool_free(private->secure_buffer_pool, paddr, rk_obj->base.size);
22665+
22666+	return ret;
22667+}
22668+
22669+static void rockchip_gem_free_secure(struct rockchip_gem_object *rk_obj)
22670+{
22671+	struct drm_gem_object *obj = &rk_obj->base;
22672+	struct drm_device *drm = obj->dev;
22673+	struct rockchip_drm_private *private = drm->dev_private;
22674+
22675+	drm_free_large(rk_obj->pages);
22676+	sg_free_table(rk_obj->sgt);
22677+	kfree(rk_obj->sgt);
22678+	gen_pool_free(private->secure_buffer_pool, rk_obj->dma_handle,
22679+		      rk_obj->base.size);
22680 }
22681 
22682 static int rockchip_gem_alloc_buf(struct rockchip_gem_object *rk_obj,
22683@@ -179,18 +419,66 @@ static int rockchip_gem_alloc_buf(struct rockchip_gem_object *rk_obj,
22684 	struct drm_gem_object *obj = &rk_obj->base;
22685 	struct drm_device *drm = obj->dev;
22686 	struct rockchip_drm_private *private = drm->dev_private;
22687+	int ret = 0;
22688+
22689+	if (!private->domain)
22690+		rk_obj->flags |= ROCKCHIP_BO_CONTIG;
22691+
22692+	if (rk_obj->flags & ROCKCHIP_BO_SECURE) {
22693+		rk_obj->buf_type = ROCKCHIP_GEM_BUF_TYPE_SECURE;
22694+		rk_obj->flags |= ROCKCHIP_BO_CONTIG;
22695+		if (alloc_kmap) {
22696+			DRM_ERROR("Not allow alloc secure buffer with kmap\n");
22697+			return -EINVAL;
22698+		}
22699+		ret = rockchip_gem_alloc_secure(rk_obj);
22700+		if (ret)
22701+			return ret;
22702+	} else if (rk_obj->flags & ROCKCHIP_BO_CONTIG) {
22703+		rk_obj->buf_type = ROCKCHIP_GEM_BUF_TYPE_CMA;
22704+		ret = rockchip_gem_alloc_dma(rk_obj, alloc_kmap);
22705+		if (ret)
22706+			return ret;
22707+	} else {
22708+		rk_obj->buf_type = ROCKCHIP_GEM_BUF_TYPE_SHMEM;
22709+		ret = rockchip_gem_get_pages(rk_obj);
22710+		if (ret < 0)
22711+			return ret;
22712+
22713+		if (alloc_kmap) {
22714+			rk_obj->kvaddr = vmap(rk_obj->pages, rk_obj->num_pages,
22715+					      VM_MAP,
22716+					      pgprot_writecombine(PAGE_KERNEL));
22717+			if (!rk_obj->kvaddr) {
22718+				DRM_ERROR("failed to vmap() buffer\n");
22719+				ret = -ENOMEM;
22720+				goto err_iommu_free;
22721+			}
22722+		}
22723+	}
22724 
22725+	if (private->domain) {
22726+		ret = rockchip_gem_iommu_map(rk_obj);
22727+		if (ret < 0)
22728+			goto err_free;
22729+	} else {
22730+		WARN_ON(!rk_obj->dma_handle);
22731+		rk_obj->dma_addr = rk_obj->dma_handle;
22732+	}
22733+
22734+	return 0;
22735+
22736+err_iommu_free:
22737 	if (private->domain)
22738-		return rockchip_gem_alloc_iommu(rk_obj, alloc_kmap);
22739+		rockchip_gem_iommu_unmap(rk_obj);
22740+err_free:
22741+	if (rk_obj->buf_type == ROCKCHIP_GEM_BUF_TYPE_SECURE)
22742+		rockchip_gem_free_secure(rk_obj);
22743+	else if (rk_obj->buf_type == ROCKCHIP_GEM_BUF_TYPE_CMA)
22744+		rockchip_gem_free_dma(rk_obj);
22745 	else
22746-		return rockchip_gem_alloc_dma(rk_obj, alloc_kmap);
22747-}
22748-
22749-static void rockchip_gem_free_iommu(struct rockchip_gem_object *rk_obj)
22750-{
22751-	vunmap(rk_obj->kvaddr);
22752-	rockchip_gem_iommu_unmap(rk_obj);
22753-	rockchip_gem_put_pages(rk_obj);
22754+		rockchip_gem_put_pages(rk_obj);
22755+	return ret;
22756 }
22757 
22758 static void rockchip_gem_free_dma(struct rockchip_gem_object *rk_obj)
22759@@ -198,16 +486,29 @@ static void rockchip_gem_free_dma(struct rockchip_gem_object *rk_obj)
22760 	struct drm_gem_object *obj = &rk_obj->base;
22761 	struct drm_device *drm = obj->dev;
22762 
22763-	dma_free_attrs(drm->dev, obj->size, rk_obj->kvaddr, rk_obj->dma_addr,
22764-		       rk_obj->dma_attrs);
22765+	drm_free_large(rk_obj->pages);
22766+	sg_free_table(rk_obj->sgt);
22767+	kfree(rk_obj->sgt);
22768+	dma_free_attrs(drm->dev, obj->size, rk_obj->kvaddr,
22769+		       rk_obj->dma_handle, rk_obj->dma_attrs);
22770 }
22771 
22772 static void rockchip_gem_free_buf(struct rockchip_gem_object *rk_obj)
22773 {
22774-	if (rk_obj->pages)
22775-		rockchip_gem_free_iommu(rk_obj);
22776-	else
22777+	struct drm_device *drm = rk_obj->base.dev;
22778+	struct rockchip_drm_private *private = drm->dev_private;
22779+
22780+	if (private->domain)
22781+		rockchip_gem_iommu_unmap(rk_obj);
22782+
22783+	if (rk_obj->buf_type == ROCKCHIP_GEM_BUF_TYPE_SHMEM) {
22784+		vunmap(rk_obj->kvaddr);
22785+		rockchip_gem_put_pages(rk_obj);
22786+	} else if (rk_obj->buf_type == ROCKCHIP_GEM_BUF_TYPE_SECURE) {
22787+		rockchip_gem_free_secure(rk_obj);
22788+	} else {
22789 		rockchip_gem_free_dma(rk_obj);
22790+	}
22791 }
22792 
22793 static int rockchip_drm_gem_object_mmap_iommu(struct drm_gem_object *obj,
22794@@ -239,16 +540,24 @@ static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj,
22795 	int ret;
22796 	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
22797 
22798+	/* default is wc. */
22799+	if (rk_obj->flags & ROCKCHIP_BO_CACHABLE)
22800+		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
22801+
22802 	/*
22803 	 * We allocated a struct page table for rk_obj, so clear
22804 	 * VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap().
22805 	 */
22806 	vma->vm_flags &= ~VM_PFNMAP;
22807 
22808-	if (rk_obj->pages)
22809+	if (rk_obj->buf_type == ROCKCHIP_GEM_BUF_TYPE_SECURE) {
22810+		DRM_ERROR("Disallow mmap for secure buffer\n");
22811+		ret = -EINVAL;
22812+	} else if (rk_obj->pages) {
22813 		ret = rockchip_drm_gem_object_mmap_iommu(obj, vma);
22814-	else
22815+	} else {
22816 		ret = rockchip_drm_gem_object_mmap_dma(obj, vma);
22817+	}
22818 
22819 	if (ret)
22820 		drm_gem_vm_close(vma);
22821@@ -298,9 +607,13 @@ static void rockchip_gem_release_object(struct rockchip_gem_object *rk_obj)
22822 static struct rockchip_gem_object *
22823 	rockchip_gem_alloc_object(struct drm_device *drm, unsigned int size)
22824 {
22825+	struct address_space *mapping;
22826 	struct rockchip_gem_object *rk_obj;
22827 	struct drm_gem_object *obj;
22828 
22829+	/* Limit the object to 32bit mappings */
22830+	gfp_t gfp_mask = GFP_HIGHUSER | __GFP_RECLAIMABLE | __GFP_DMA32;
22831+
22832 	size = round_up(size, PAGE_SIZE);
22833 
22834 	rk_obj = kzalloc(sizeof(*rk_obj), GFP_KERNEL);
22835@@ -311,12 +624,15 @@ static struct rockchip_gem_object *
22836 
22837 	drm_gem_object_init(drm, obj, size);
22838 
22839+	mapping = file_inode(obj->filp)->i_mapping;
22840+	mapping_set_gfp_mask(mapping, gfp_mask);
22841+
22842 	return rk_obj;
22843 }
22844 
22845 struct rockchip_gem_object *
22846 rockchip_gem_create_object(struct drm_device *drm, unsigned int size,
22847-			   bool alloc_kmap)
22848+			   bool alloc_kmap, unsigned int flags)
22849 {
22850 	struct rockchip_gem_object *rk_obj;
22851 	int ret;
22852@@ -324,6 +640,7 @@ rockchip_gem_create_object(struct drm_device *drm, unsigned int size,
22853 	rk_obj = rockchip_gem_alloc_object(drm, size);
22854 	if (IS_ERR(rk_obj))
22855 		return rk_obj;
22856+	rk_obj->flags = flags;
22857 
22858 	ret = rockchip_gem_alloc_buf(rk_obj, alloc_kmap);
22859 	if (ret)
22860@@ -336,6 +653,28 @@ rockchip_gem_create_object(struct drm_device *drm, unsigned int size,
22861 	return ERR_PTR(ret);
22862 }
22863 
22864+/*
22865+ * rockchip_gem_destroy - destroy gem object
22866+ *
22867+ * dma_buf_unmap_attachment() and dma_buf_detach() are redefined when
22868+ * CONFIG_DMABUF_CACHE is enabled.
22869+ *
22870+ * Otherwise identical to drm_prime_gem_destroy().
22871+ */
22872+static void rockchip_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg)
22873+{
22874+	struct dma_buf_attachment *attach;
22875+	struct dma_buf *dma_buf;
22876+
22877+	attach = obj->import_attach;
22878+	if (sg)
22879+		dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
22880+	dma_buf = attach->dmabuf;
22881+	dma_buf_detach(attach->dmabuf, attach);
22882+	/* remove the reference */
22883+	dma_buf_put(dma_buf);
22884+}
22885+
22886 /*
22887  * rockchip_gem_free_object - (struct drm_driver)->gem_free_object_unlocked
22888  * callback function
22889@@ -353,7 +692,11 @@ void rockchip_gem_free_object(struct drm_gem_object *obj)
22890 			dma_unmap_sgtable(drm->dev, rk_obj->sgt,
22891 					  DMA_BIDIRECTIONAL, 0);
22892 		}
22893-		drm_prime_gem_destroy(obj, rk_obj->sgt);
22894+		drm_free_large(rk_obj->pages);
22895+		if (IS_ENABLED(CONFIG_DMABUF_CACHE))
22896+			rockchip_gem_destroy(obj, rk_obj->sgt);
22897+		else
22898+			drm_prime_gem_destroy(obj, rk_obj->sgt);
22899 	} else {
22900 		rockchip_gem_free_buf(rk_obj);
22901 	}
22902@@ -371,13 +714,14 @@ void rockchip_gem_free_object(struct drm_gem_object *obj)
22903 static struct rockchip_gem_object *
22904 rockchip_gem_create_with_handle(struct drm_file *file_priv,
22905 				struct drm_device *drm, unsigned int size,
22906-				unsigned int *handle)
22907+				unsigned int *handle, unsigned int flags)
22908 {
22909 	struct rockchip_gem_object *rk_obj;
22910 	struct drm_gem_object *obj;
22911 	int ret;
22912+	bool alloc_kmap = flags & ROCKCHIP_BO_ALLOC_KMAP ? true : false;
22913 
22914-	rk_obj = rockchip_gem_create_object(drm, size, false);
22915+	rk_obj = rockchip_gem_create_object(drm, size, alloc_kmap, flags);
22916 	if (IS_ERR(rk_obj))
22917 		return ERR_CAST(rk_obj);
22918 
22919@@ -414,7 +758,7 @@ int rockchip_gem_dumb_create(struct drm_file *file_priv,
22920 			     struct drm_mode_create_dumb *args)
22921 {
22922 	struct rockchip_gem_object *rk_obj;
22923-	int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
22924+	u32 min_pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
22925 
22926 	/*
22927 	 * align to 64 bytes since Mali requires it.
22928@@ -423,7 +767,7 @@ int rockchip_gem_dumb_create(struct drm_file *file_priv,
22929 	args->size = args->pitch * args->height;
22930 
22931 	rk_obj = rockchip_gem_create_with_handle(file_priv, dev, args->size,
22932-						 &args->handle);
22933+						 &args->handle, args->flags);
22934 
22935 	return PTR_ERR_OR_ZERO(rk_obj);
22936 }
22937@@ -514,6 +858,21 @@ rockchip_gem_prime_import_sg_table(struct drm_device *drm,
22938 		goto err_free_rk_obj;
22939 	}
22940 
22941+	rk_obj->num_pages = rk_obj->base.size >> PAGE_SHIFT;
22942+	rk_obj->pages = drm_calloc_large(rk_obj->num_pages, sizeof(*rk_obj->pages));
22943+	if (!rk_obj->pages) {
22944+		DRM_ERROR("failed to allocate pages.\n");
22945+		ret = -ENOMEM;
22946+		goto err_free_rk_obj;
22947+	}
22948+
22949+	ret = drm_prime_sg_to_page_addr_arrays(sg, rk_obj->pages, NULL, rk_obj->num_pages);
22950+	if (ret < 0) {
22951+		DRM_ERROR("invalid sgtable.\n");
22952+		drm_free_large(rk_obj->pages);
22953+		goto err_free_rk_obj;
22954+	}
22955+
22956 	return &rk_obj->base;
22957 
22958 err_free_rk_obj:
22959@@ -546,3 +905,155 @@ void rockchip_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
22960 
22961 	/* Nothing to do if allocated by DMA mapping API. */
22962 }
22963+
22964+int rockchip_gem_create_ioctl(struct drm_device *dev, void *data,
22965+			      struct drm_file *file_priv)
22966+{
22967+	struct drm_rockchip_gem_create *args = data;
22968+	struct rockchip_gem_object *rk_obj;
22969+
22970+	rk_obj = rockchip_gem_create_with_handle(file_priv, dev, args->size,
22971+						 &args->handle, args->flags);
22972+	return PTR_ERR_OR_ZERO(rk_obj);
22973+}
22974+
22975+int rockchip_gem_map_offset_ioctl(struct drm_device *drm, void *data,
22976+				  struct drm_file *file_priv)
22977+{
22978+	struct drm_rockchip_gem_map_off *args = data;
22979+
22980+	return drm_gem_dumb_map_offset(file_priv, drm, args->handle,
22981+				       &args->offset);
22982+}
22983+
22984+int rockchip_gem_get_phys_ioctl(struct drm_device *dev, void *data,
22985+				struct drm_file *file_priv)
22986+{
22987+	struct drm_rockchip_gem_phys *args = data;
22988+	struct rockchip_gem_object *rk_obj;
22989+	struct drm_gem_object *obj;
22990+	int ret = 0;
22991+
22992+	obj = drm_gem_object_lookup(file_priv, args->handle);
22993+	if (!obj) {
22994+		DRM_ERROR("failed to lookup gem object.\n");
22995+		return -EINVAL;
22996+	}
22997+	rk_obj = to_rockchip_obj(obj);
22998+
22999+	if (!(rk_obj->flags & ROCKCHIP_BO_CONTIG)) {
23000+		DRM_ERROR("Can't get phys address from non-continue buf.\n");
23001+		ret = -EINVAL;
23002+		goto out;
23003+	}
23004+
23005+	args->phy_addr = page_to_phys(rk_obj->pages[0]);
23006+
23007+out:
23008+	drm_gem_object_put(obj);
23009+
23010+	return ret;
23011+}
23012+
23013+int rockchip_gem_prime_begin_cpu_access(struct drm_gem_object *obj,
23014+					enum dma_data_direction dir)
23015+{
23016+	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
23017+	struct drm_device *drm = obj->dev;
23018+
23019+	if (!rk_obj->sgt)
23020+		return 0;
23021+
23022+	dma_sync_sg_for_cpu(drm->dev, rk_obj->sgt->sgl,
23023+			    rk_obj->sgt->nents, dir);
23024+	return 0;
23025+}
23026+
23027+int rockchip_gem_prime_end_cpu_access(struct drm_gem_object *obj,
23028+				      enum dma_data_direction dir)
23029+{
23030+	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
23031+	struct drm_device *drm = obj->dev;
23032+
23033+	if (!rk_obj->sgt)
23034+		return 0;
23035+
23036+	dma_sync_sg_for_device(drm->dev, rk_obj->sgt->sgl,
23037+			       rk_obj->sgt->nents, dir);
23038+	return 0;
23039+}
23040+
23041+static int rockchip_gem_prime_sgl_sync_range(struct device *dev,
23042+					struct scatterlist *sgl, unsigned int nents,
23043+					unsigned int offset, unsigned int length,
23044+					enum dma_data_direction dir, bool for_cpu)
23045+{
23046+	int i;
23047+	struct scatterlist *sg;
23048+	unsigned int len = 0;
23049+	dma_addr_t sg_dma_addr;
23050+
23051+	for_each_sg(sgl, sg, nents, i) {
23052+		unsigned int sg_offset, sg_left, size = 0;
23053+
23054+		len += sg->length;
23055+		if (len <= offset)
23056+			continue;
23057+
23058+		sg_dma_addr = sg_dma_address(sg);
23059+		sg_left = len - offset;
23060+		sg_offset = sg->length - sg_left;
23061+
23062+		size = (length < sg_left) ? length : sg_left;
23063+		if (for_cpu)
23064+			dma_sync_single_range_for_cpu(dev, sg_dma_addr,
23065+						      sg_offset, size, dir);
23066+		else
23067+			dma_sync_single_range_for_device(dev, sg_dma_addr,
23068+							 sg_offset, size, dir);
23069+
23070+		offset += size;
23071+		length -= size;
23072+
23073+		if (length == 0)
23074+			break;
23075+	}
23076+
23077+	return 0;
23078+}
23079+
23080+int rockchip_gem_prime_begin_cpu_access_partial(struct drm_gem_object *obj,
23081+						enum dma_data_direction dir,
23082+						unsigned int offset,
23083+						unsigned int len)
23084+{
23085+	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
23086+	struct drm_device *drm = obj->dev;
23087+
23088+	if (!rk_obj->sgt)
23089+		return 0;
23090+
23091+	rockchip_gem_prime_sgl_sync_range(drm->dev, rk_obj->sgt->sgl,
23092+					  rk_obj->sgt->nents,
23093+					  offset, len, dir, true);
23094+
23095+	return 0;
23096+}
23097+
23098+int rockchip_gem_prime_end_cpu_access_partial(struct drm_gem_object *obj,
23099+					      enum dma_data_direction dir,
23100+					      unsigned int offset,
23101+					      unsigned int len)
23102+{
23103+	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
23104+	struct drm_device *drm = obj->dev;
23105+
23106+	if (!rk_obj->sgt)
23107+		return 0;
23108+
23109+	rockchip_gem_prime_sgl_sync_range(drm->dev, rk_obj->sgt->sgl,
23110+					  rk_obj->sgt->nents,
23111+					  offset, len, dir, false);
23112+
23113+	return 0;
23114+}
23115diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.h b/drivers/gpu/drm/rockchip/rockchip_drm_gem.h
23116index 7ffc541be..6a1d8e55b 100644
23117--- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.h
23118+++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.h
23119@@ -7,14 +7,24 @@
23120 #ifndef _ROCKCHIP_DRM_GEM_H
23121 #define _ROCKCHIP_DRM_GEM_H
23122 
23123+#include <linux/dma-direction.h>
23124+
23125 #define to_rockchip_obj(x) container_of(x, struct rockchip_gem_object, base)
23126 
23127+enum rockchip_gem_buf_type {
23128+	ROCKCHIP_GEM_BUF_TYPE_CMA,
23129+	ROCKCHIP_GEM_BUF_TYPE_SHMEM,
23130+	ROCKCHIP_GEM_BUF_TYPE_SECURE,
23131+};
23132+
23133 struct rockchip_gem_object {
23134 	struct drm_gem_object base;
23135 	unsigned int flags;
23136+	enum rockchip_gem_buf_type buf_type;
23137 
23138 	void *kvaddr;
23139-	dma_addr_t dma_addr;
23140+	dma_addr_t dma_addr;	/* iova if the IOMMU is enabled, otherwise the physical address */
23141+	dma_addr_t dma_handle;	/* physical address */
23142 	/* Used when IOMMU is disabled */
23143 	unsigned long dma_attrs;
23144 
23145@@ -42,12 +52,43 @@ int rockchip_gem_mmap_buf(struct drm_gem_object *obj,
23146 			  struct vm_area_struct *vma);
23147 
23148 struct rockchip_gem_object *
23149-	rockchip_gem_create_object(struct drm_device *drm, unsigned int size,
23150-				   bool alloc_kmap);
23151+rockchip_gem_create_object(struct drm_device *drm, unsigned int size,
23152+			   bool alloc_kmap, unsigned int flags);
23153 
23154 void rockchip_gem_free_object(struct drm_gem_object *obj);
23155 
23156 int rockchip_gem_dumb_create(struct drm_file *file_priv,
23157 			     struct drm_device *dev,
23158 			     struct drm_mode_create_dumb *args);
23159+/*
23160+ * Request GEM object creation and buffer allocation with a size
23161+ * calculated from framebuffer information such as width, height
23162+ * and bpp.
23163+ */
23164+int rockchip_gem_create_ioctl(struct drm_device *dev, void *data,
23165+			      struct drm_file *file_priv);
23166+
23167+/* Get the buffer offset used to map the object into user space. */
23168+int rockchip_gem_map_offset_ioctl(struct drm_device *dev, void *data,
23169+				  struct drm_file *file_priv);
23170+
23171+int rockchip_gem_get_phys_ioctl(struct drm_device *dev, void *data,
23172+				struct drm_file *file_priv);
23173+
23174+int rockchip_gem_prime_begin_cpu_access(struct drm_gem_object *obj,
23175+					enum dma_data_direction dir);
23176+
23177+int rockchip_gem_prime_end_cpu_access(struct drm_gem_object *obj,
23178+				      enum dma_data_direction dir);
23179+
23180+int rockchip_gem_prime_begin_cpu_access_partial(struct drm_gem_object *obj,
23181+						enum dma_data_direction dir,
23182+						unsigned int offset,
23183+						unsigned int len);
23184+
23185+int rockchip_gem_prime_end_cpu_access_partial(struct drm_gem_object *obj,
23186+					      enum dma_data_direction dir,
23187+					      unsigned int offset,
23188+					      unsigned int len);
23189+void rockchip_gem_get_ddr_info(void);
23190 #endif /* _ROCKCHIP_DRM_GEM_H */
23191diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
23192index af98bfcde..ccf39dcf6 100644
23193--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
23194+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
23195@@ -6,21 +6,28 @@
23196 
23197 #include <linux/clk.h>
23198 #include <linux/component.h>
23199+#include <linux/debugfs.h>
23200 #include <linux/delay.h>
23201+#include <linux/fixp-arith.h>
23202 #include <linux/iopoll.h>
23203 #include <linux/kernel.h>
23204+#include <linux/mfd/syscon.h>
23205 #include <linux/module.h>
23206 #include <linux/of.h>
23207 #include <linux/of_device.h>
23208 #include <linux/overflow.h>
23209 #include <linux/platform_device.h>
23210 #include <linux/pm_runtime.h>
23211+#include <linux/regmap.h>
23212 #include <linux/reset.h>
23213+#include <linux/sort.h>
23214 
23215 #include <drm/drm.h>
23216 #include <drm/drm_atomic.h>
23217 #include <drm/drm_atomic_uapi.h>
23218 #include <drm/drm_crtc.h>
23219+#include <drm/drm_crtc_helper.h>
23220+#include <drm/drm_debugfs.h>
23221 #include <drm/drm_flip_work.h>
23222 #include <drm/drm_fourcc.h>
23223 #include <drm/drm_gem_framebuffer_helper.h>
23224@@ -32,6 +39,12 @@
23225 #ifdef CONFIG_DRM_ANALOGIX_DP
23226 #include <drm/bridge/analogix_dp.h>
23227 #endif
23228+#include <dt-bindings/soc/rockchip-system-status.h>
23229+
23230+#include <soc/rockchip/rockchip_dmc.h>
23231+#include <soc/rockchip/rockchip-system-status.h>
23232+#include <uapi/linux/videodev2.h>
23233+#include "../drm_crtc_internal.h"
23234 
23235 #include "rockchip_drm_drv.h"
23236 #include "rockchip_drm_gem.h"
23237@@ -39,28 +52,65 @@
23238 #include "rockchip_drm_vop.h"
23239 #include "rockchip_rgb.h"
23240 
23241-#define VOP_WIN_SET(vop, win, name, v) \
23242-		vop_reg_set(vop, &win->phy->name, win->base, ~0, v, #name)
23243-#define VOP_SCL_SET(vop, win, name, v) \
23244-		vop_reg_set(vop, &win->phy->scl->name, win->base, ~0, v, #name)
23245-#define VOP_SCL_SET_EXT(vop, win, name, v) \
23246-		vop_reg_set(vop, &win->phy->scl->ext->name, \
23247-			    win->base, ~0, v, #name)
23248+#define VOP_REG_SUPPORT(vop, reg) \
23249+		(reg.mask && \
23250+		 (!reg.major || \
23251+		  (reg.major == VOP_MAJOR(vop->version) && \
23252+		   reg.begin_minor <= VOP_MINOR(vop->version) && \
23253+		   reg.end_minor >= VOP_MINOR(vop->version))))
23254 
23255-#define VOP_WIN_YUV2YUV_SET(vop, win_yuv2yuv, name, v) \
23256-	do { \
23257-		if (win_yuv2yuv && win_yuv2yuv->name.mask) \
23258-			vop_reg_set(vop, &win_yuv2yuv->name, 0, ~0, v, #name); \
23259-	} while (0)
23260+#define VOP_WIN_SUPPORT(vop, win, name) \
23261+		VOP_REG_SUPPORT(vop, win->phy->name)
23262+
23263+#define VOP_WIN_SCL_EXT_SUPPORT(vop, win, name) \
23264+		(win->phy->scl->ext && \
23265+		VOP_REG_SUPPORT(vop, win->phy->scl->ext->name))
23266 
23267-#define VOP_WIN_YUV2YUV_COEFFICIENT_SET(vop, win_yuv2yuv, name, v) \
23268+#define VOP_CTRL_SUPPORT(vop, name) \
23269+		VOP_REG_SUPPORT(vop, vop->data->ctrl->name)
23270+
23271+#define VOP_INTR_SUPPORT(vop, name) \
23272+		VOP_REG_SUPPORT(vop, vop->data->intr->name)
23273+
23274+#define __REG_SET(x, off, mask, shift, v, write_mask, relaxed) \
23275+		vop_mask_write(x, off, mask, shift, v, write_mask, relaxed)
23276+
23277+#define _REG_SET(vop, name, off, reg, mask, v, relaxed) \
23278 	do { \
23279-		if (win_yuv2yuv && win_yuv2yuv->phy->name.mask) \
23280-			vop_reg_set(vop, &win_yuv2yuv->phy->name, win_yuv2yuv->base, ~0, v, #name); \
23281+		if (VOP_REG_SUPPORT(vop, reg)) \
23282+			__REG_SET(vop, off + reg.offset, mask, reg.shift, \
23283+				  v, reg.write_mask, relaxed); \
23284+		else \
23285+			dev_dbg(vop->dev, "Warning: "#name" is not supported\n"); \
23286 	} while (0)
23287 
23288+#define REG_SET(x, name, off, reg, v, relaxed) \
23289+		_REG_SET(x, name, off, reg, reg.mask, v, relaxed)
23290+#define REG_SET_MASK(x, name, off, reg, mask, v, relaxed) \
23291+		_REG_SET(x, name, off, reg, reg.mask & mask, v, relaxed)
23292+
23293+#define VOP_WIN_SET(x, win, name, v) \
23294+		REG_SET(x, name, win->offset, VOP_WIN_NAME(win, name), v, true)
23295+#define VOP_WIN_SET_EXT(x, win, ext, name, v) \
23296+		REG_SET(x, name, 0, win->ext->name, v, true)
23297+#define VOP_SCL_SET(x, win, name, v) \
23298+		REG_SET(x, name, win->offset, win->phy->scl->name, v, true)
23299+#define VOP_SCL_SET_EXT(x, win, name, v) \
23300+		REG_SET(x, name, win->offset, win->phy->scl->ext->name, v, true)
23301+
23302+#define VOP_CTRL_SET(x, name, v) \
23303+		REG_SET(x, name, 0, (x)->data->ctrl->name, v, false)
23304+
23305+#define VOP_INTR_GET(vop, name) \
23306+		vop_read_reg(vop, 0, &vop->data->ctrl->name)
23307+
23308+#define VOP_INTR_SET(vop, name, v) \
23309+		REG_SET(vop, name, 0, vop->data->intr->name, \
23310+			v, false)
23311 #define VOP_INTR_SET_MASK(vop, name, mask, v) \
23312-		vop_reg_set(vop, &vop->data->intr->name, 0, mask, v, #name)
23313+		REG_SET_MASK(vop, name, 0, vop->data->intr->name, \
23314+			     mask, v, false)
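The macros above gate every register write on a per-register version window (major, begin_minor..end_minor) checked against the running VOP's version. Assuming the usual encoding of major in the high byte and minor in the low byte (VOP_VERSION(3, 1) is used later in this patch for the rk3288w fixup), the check behaves like the following sketch:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed encoding: major in bits [15:8], minor in bits [7:0]. */
#define VOP_VERSION(major, minor)	((major) << 8 | (minor))
#define VOP_MAJOR(version)		((version) >> 8)
#define VOP_MINOR(version)		((version) & 0xff)

struct vop_reg_window {
	uint32_t mask;
	uint8_t major;
	uint8_t begin_minor;
	uint8_t end_minor;
};

/* Same condition as VOP_REG_SUPPORT(): major 0 means "any version". */
static bool reg_supported(uint32_t vop_version, struct vop_reg_window reg)
{
	return reg.mask &&
	       (!reg.major ||
		(reg.major == VOP_MAJOR(vop_version) &&
		 reg.begin_minor <= VOP_MINOR(vop_version) &&
		 reg.end_minor >= VOP_MINOR(vop_version)));
}

int main(void)
{
	struct vop_reg_window only_3_2_to_3_8 = { 0x1, 3, 2, 8 };

	printf("%d\n", reg_supported(VOP_VERSION(3, 1), only_3_2_to_3_8)); /* 0 */
	printf("%d\n", reg_supported(VOP_VERSION(3, 5), only_3_2_to_3_8)); /* 1 */
	return 0;
}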
23315+
23316 
23317 #define VOP_REG_SET(vop, group, name, v) \
23318 		    vop_reg_set(vop, &vop->data->group->name, 0, ~0, v, #name)
23319@@ -79,66 +129,129 @@
23320 #define VOP_INTR_GET_TYPE(vop, name, type) \
23321 		vop_get_intr_type(vop, &vop->data->intr->name, type)
23322 
23323-#define VOP_WIN_GET(vop, win, name) \
23324-		vop_read_reg(vop, win->base, &win->phy->name)
23325+#define VOP_CTRL_GET(x, name) \
23326+		vop_read_reg(x, 0, &vop->data->ctrl->name)
23327 
23328-#define VOP_WIN_HAS_REG(win, name) \
23329-	(!!(win->phy->name.mask))
23330+#define VOP_WIN_GET(vop, win, name) \
23331+		vop_read_reg(vop, win->offset, &VOP_WIN_NAME(win, name))
23332 
23333-#define VOP_WIN_GET_YRGBADDR(vop, win) \
23334-		vop_readl(vop, win->base + win->phy->yrgb_mst.offset)
23335+#define VOP_WIN_NAME(win, name) \
23336+		(vop_get_win_phy(win, &win->phy->name)->name)
23337 
23338 #define VOP_WIN_TO_INDEX(vop_win) \
23339 	((vop_win) - (vop_win)->vop->win)
23340 
23341-#define VOP_AFBC_SET(vop, name, v) \
23342+#define VOP_GRF_SET(vop, reg, v) \
23343 	do { \
23344-		if ((vop)->data->afbc) \
23345-			vop_reg_set((vop), &(vop)->data->afbc->name, \
23346-				    0, ~0, v, #name); \
23347+		if (vop->data->grf_ctrl) { \
23348+			vop_grf_writel(vop, vop->data->grf_ctrl->reg, v); \
23349+		} \
23350 	} while (0)
23351 
23352-#define to_vop(x) container_of(x, struct vop, crtc)
23353 #define to_vop_win(x) container_of(x, struct vop_win, base)
23354+#define to_vop_plane_state(x) container_of(x, struct vop_plane_state, base)
23355 
23356-#define AFBC_FMT_RGB565		0x0
23357-#define AFBC_FMT_U8U8U8U8	0x5
23358-#define AFBC_FMT_U8U8U8		0x4
23359+enum vop_pending {
23360+	VOP_PENDING_FB_UNREF,
23361+};
23362 
23363-#define AFBC_TILE_16x16		BIT(4)
23364+struct vop_zpos {
23365+	int win_id;
23366+	int zpos;
23367+};
23368 
23369-/*
23370- * The coefficients of the following matrix are all fixed points.
23371- * The format is S2.10 for the 3x3 part of the matrix, and S9.12 for the offsets.
23372- * They are all represented in two's complement.
23373- */
23374-static const uint32_t bt601_yuv2rgb[] = {
23375-	0x4A8, 0x0,    0x662,
23376-	0x4A8, 0x1E6F, 0x1CBF,
23377-	0x4A8, 0x812,  0x0,
23378-	0x321168, 0x0877CF, 0x2EB127
23379+struct vop_plane_state {
23380+	struct drm_plane_state base;
23381+	int format;
23382+	int zpos;
23383+	struct drm_rect src;
23384+	struct drm_rect dest;
23385+	dma_addr_t yrgb_mst;
23386+	dma_addr_t uv_mst;
23387+	const uint32_t *y2r_table;
23388+	const uint32_t *r2r_table;
23389+	const uint32_t *r2y_table;
23390+	int eotf;
23391+	bool y2r_en;
23392+	bool r2r_en;
23393+	bool r2y_en;
23394+	int color_space;
23395+	u32 color_key;
23396+	unsigned int csc_mode;
23397+	int global_alpha;
23398+	int blend_mode;
23399+	unsigned long offset;
23400+	int pdaf_data_type;
23401+	bool async_commit;
23402+	struct vop_dump_list *planlist;
23403 };
23404 
23405-enum vop_pending {
23406-	VOP_PENDING_FB_UNREF,
23407+struct rockchip_mcu_timing {
23408+	int mcu_pix_total;
23409+	int mcu_cs_pst;
23410+	int mcu_cs_pend;
23411+	int mcu_rw_pst;
23412+	int mcu_rw_pend;
23413+	int mcu_hold_mode;
23414 };
23415 
23416 struct vop_win {
23417+	struct vop_win *parent;
23418 	struct drm_plane base;
23419-	const struct vop_win_data *data;
23420-	const struct vop_win_yuv2yuv_data *yuv2yuv_data;
23421+
23422+	int win_id;
23423+	int area_id;
23424+	u8 plane_id; /* unique plane id */
23425+	const char *name;
23426+
23427+	int zpos;
23428+	uint32_t offset;
23429+	enum drm_plane_type type;
23430+	const struct vop_win_phy *phy;
23431+	const struct vop_csc *csc;
23432+	const uint32_t *data_formats;
23433+	uint32_t nformats;
23434+	const uint64_t *format_modifiers;
23435+	u64 feature;
23436 	struct vop *vop;
23437+	struct vop_plane_state state;
23438+
23439+	struct drm_property *input_width_prop;
23440+	struct drm_property *input_height_prop;
23441+	struct drm_property *output_width_prop;
23442+	struct drm_property *output_height_prop;
23443+	struct drm_property *color_key_prop;
23444+	struct drm_property *scale_prop;
23445+	struct drm_property *name_prop;
23446 };
23447 
23448-struct rockchip_rgb;
23449 struct vop {
23450-	struct drm_crtc crtc;
23451+	struct rockchip_crtc rockchip_crtc;
23452 	struct device *dev;
23453 	struct drm_device *drm_dev;
23454+	struct dentry *debugfs;
23455+	struct drm_info_list *debugfs_files;
23456+	struct drm_property *plane_feature_prop;
23457+	struct drm_property *plane_mask_prop;
23458+	struct drm_property *feature_prop;
23459+
23460+	bool is_iommu_enabled;
23461+	bool is_iommu_needed;
23462 	bool is_enabled;
23463-
23464+	bool support_multi_area;
23465+
23466+	u32 version;
23467+	u32 background;
23468+	u32 line_flag;
23469+	u8 id;
23470+	u8 plane_mask;
23471+	u64 soc_id;
23472+	struct drm_prop_enum_list *plane_name_list;
23473+
23474+	struct drm_tv_connector_state active_tv_state;
23475+	bool pre_overlay;
23476+	bool loader_protect;
23477 	struct completion dsp_hold_completion;
23478-	unsigned int win_enabled;
23479 
23480 	/* protected by dev->event_lock */
23481 	struct drm_pending_vblank_event *event;
23482@@ -149,14 +262,22 @@ struct vop {
23483 	struct completion line_flag_completion;
23484 
23485 	const struct vop_data *data;
23486+	int num_wins;
23487 
23488 	uint32_t *regsbak;
23489 	void __iomem *regs;
23490-	void __iomem *lut_regs;
23491+	struct regmap *grf;
23492 
23493 	/* physical map length of vop register */
23494 	uint32_t len;
23495 
23496+	void __iomem *lut_regs;
23497+	u32 *lut;
23498+	u32 lut_len;
23499+	bool lut_active;
23500+	/* gamma look up table */
23501+	struct drm_color_lut *gamma_lut;
23502+	bool dual_channel_swap;
23503 	/* one time only one process allowed to config the register */
23504 	spinlock_t reg_lock;
23505 	/* lock vop irq reg */
23506@@ -172,16 +293,83 @@ struct vop {
23507 	struct clk *dclk;
23508 	/* vop share memory frequency */
23509 	struct clk *aclk;
23510+	/* vop source handling, optional */
23511+	struct clk *dclk_source;
23512 
23513 	/* vop dclk reset */
23514 	struct reset_control *dclk_rst;
23515 
23516-	/* optional internal rgb encoder */
23517-	struct rockchip_rgb *rgb;
23518+	struct rockchip_dclk_pll *pll;
23519+
23520+	struct rockchip_mcu_timing mcu_timing;
23521 
23522 	struct vop_win win[];
23523 };
23524 
23525+/*
23526+ * bus-format types.
23527+ */
23528+struct drm_bus_format_enum_list {
23529+	int type;
23530+	const char *name;
23531+};
23532+
23533+static const struct drm_bus_format_enum_list drm_bus_format_enum_list[] = {
23534+	{ DRM_MODE_CONNECTOR_Unknown, "Unknown" },
23535+	{ MEDIA_BUS_FMT_RGB565_1X16, "RGB565_1X16" },
23536+	{ MEDIA_BUS_FMT_RGB666_1X18, "RGB666_1X18" },
23537+	{ MEDIA_BUS_FMT_RGB666_1X24_CPADHI, "RGB666_1X24_CPADHI" },
23538+	{ MEDIA_BUS_FMT_RGB666_1X7X3_SPWG, "RGB666_1X7X3_SPWG" },
23539+	{ MEDIA_BUS_FMT_YUV8_1X24, "YUV8_1X24" },
23540+	{ MEDIA_BUS_FMT_UYYVYY8_0_5X24, "UYYVYY8_0_5X24" },
23541+	{ MEDIA_BUS_FMT_YUV10_1X30, "YUV10_1X30" },
23542+	{ MEDIA_BUS_FMT_UYYVYY10_0_5X30, "UYYVYY10_0_5X30" },
23543+	{ MEDIA_BUS_FMT_RGB888_3X8, "RGB888_3X8" },
23544+	{ MEDIA_BUS_FMT_RGB888_DUMMY_4X8, "RGB888_DUMMY_4X8" },
23545+	{ MEDIA_BUS_FMT_RGB888_1X24, "RGB888_1X24" },
23546+	{ MEDIA_BUS_FMT_RGB888_1X7X4_SPWG, "RGB888_1X7X4_SPWG" },
23547+	{ MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA, "RGB888_1X7X4_JEIDA" },
23548+	{ MEDIA_BUS_FMT_UYVY8_2X8, "UYVY8_2X8" },
23549+	{ MEDIA_BUS_FMT_YUYV8_1X16, "YUYV8_1X16" },
23550+	{ MEDIA_BUS_FMT_UYVY8_1X16, "UYVY8_1X16" },
23551+};
23552+
23553+static DRM_ENUM_NAME_FN(drm_get_bus_format_name, drm_bus_format_enum_list)
23554+
23555+static inline struct vop *to_vop(struct drm_crtc *crtc)
23556+{
23557+	struct rockchip_crtc *rockchip_crtc;
23558+
23559+	rockchip_crtc = container_of(crtc, struct rockchip_crtc, crtc);
23560+
23561+	return container_of(rockchip_crtc, struct vop, rockchip_crtc);
23562+}
23563+
23564+static void vop_lock(struct vop *vop)
23565+{
23566+	mutex_lock(&vop->vop_lock);
23567+	rockchip_dmcfreq_lock();
23568+}
23569+
23570+static void vop_unlock(struct vop *vop)
23571+{
23572+	rockchip_dmcfreq_unlock();
23573+	mutex_unlock(&vop->vop_lock);
23574+}
23575+
23576+static inline void vop_grf_writel(struct vop *vop, struct vop_reg reg, u32 v)
23577+{
23578+	u32 val = 0;
23579+
23580+	if (IS_ERR_OR_NULL(vop->grf))
23581+		return;
23582+
23583+	if (VOP_REG_SUPPORT(vop, reg)) {
23584+		val = (v << reg.shift) | (reg.mask << (reg.shift + 16));
23585+		regmap_write(vop->grf, reg.offset, val);
23586+	}
23587+}
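vop_grf_writel() uses what looks like the usual Rockchip GRF hi-word write-enable scheme: bits [31:16] select which of bits [15:0] are actually written, so no read-modify-write of the GRF register is needed. A small sketch of the encoding it computes (field position and value are made up for illustration):

#include <stdint.h>
#include <stdio.h>

/* Same encoding as vop_grf_writel(): value in the low half-word,
 * write-enable mask in the high half-word. */
static uint32_t grf_field_val(uint32_t mask, uint32_t shift, uint32_t v)
{
	return (v << shift) | (mask << (shift + 16));
}

int main(void)
{
	/* A 2-bit field at bit 4 set to 2 encodes as 0x00300020. */
	printf("0x%08x\n", grf_field_val(0x3, 4, 0x2));
	return 0;
}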
23588+
23589 static inline void vop_writel(struct vop *vop, uint32_t offset, uint32_t v)
23590 {
23591 	writel(v, vop->regs + offset);
23592@@ -199,23 +387,15 @@ static inline uint32_t vop_read_reg(struct vop *vop, uint32_t base,
23593 	return (vop_readl(vop, base + reg->offset) >> reg->shift) & reg->mask;
23594 }
23595 
23596-static void vop_reg_set(struct vop *vop, const struct vop_reg *reg,
23597-			uint32_t _offset, uint32_t _mask, uint32_t v,
23598-			const char *reg_name)
23599+static inline void vop_mask_write(struct vop *vop, uint32_t offset,
23600+				  uint32_t mask, uint32_t shift, uint32_t v,
23601+				  bool write_mask, bool relaxed)
23602 {
23603-	int offset, mask, shift;
23604-
23605-	if (!reg || !reg->mask) {
23606-		DRM_DEV_DEBUG(vop->dev, "Warning: not support %s\n", reg_name);
23607+	if (!mask)
23608 		return;
23609-	}
23610-
23611-	offset = reg->offset + _offset;
23612-	mask = reg->mask & _mask;
23613-	shift = reg->shift;
23614 
23615-	if (reg->write_mask) {
23616-		v = ((v << shift) & 0xffff) | (mask << (shift + 16));
23617+	if (write_mask) {
23618+		v = ((v & mask) << shift) | (mask << (shift + 16));
23619 	} else {
23620 		uint32_t cached_val = vop->regsbak[offset >> 2];
23621 
23622@@ -223,12 +403,21 @@ static void vop_reg_set(struct vop *vop, const struct vop_reg *reg,
23623 		vop->regsbak[offset >> 2] = v;
23624 	}
23625 
23626-	if (reg->relaxed)
23627+	if (relaxed)
23628 		writel_relaxed(v, vop->regs + offset);
23629 	else
23630 		writel(v, vop->regs + offset);
23631 }
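For registers without a write mask, vop_mask_write() performs the read-modify-write against the regsbak shadow copy rather than reading the hardware back, which keeps the VOP registers effectively write-only from the driver's point of view (regsbak is refreshed from the hardware once at power-up, later in this patch). A sketch of that shadow update, assuming the cached word holds the last value written:

#include <stdint.h>
#include <stdio.h>

/* Shadow-copy read-modify-write, as done in vop_mask_write(). */
static uint32_t shadow_rmw(uint32_t cached, uint32_t mask, uint32_t shift,
			   uint32_t v)
{
	return (cached & ~(mask << shift)) | ((v & mask) << shift);
}

int main(void)
{
	/* Replacing the byte at bits [15:8] of 0xaabbccdd with 0x12. */
	printf("0x%08x\n", shadow_rmw(0xaabbccdd, 0xff, 8, 0x12)); /* 0xaabb12dd */
	return 0;
}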
23632 
23633+static inline const struct vop_win_phy *
23634+vop_get_win_phy(struct vop_win *win, const struct vop_reg *reg)
23635+{
23636+	if (!reg->mask && win->parent)
23637+		return win->parent->phy;
23638+
23639+	return win->phy;
23640+}
23641+
23642 static inline uint32_t vop_get_intr_type(struct vop *vop,
23643 					 const struct vop_reg *reg, int type)
23644 {
23645@@ -243,9 +432,147 @@ static inline uint32_t vop_get_intr_type(struct vop *vop,
23646 	return ret;
23647 }
23648 
23649+static void vop_load_hdr2sdr_table(struct vop *vop)
23650+{
23651+	int i;
23652+	const struct vop_hdr_table *table = vop->data->hdr_table;
23653+	uint32_t hdr2sdr_eetf_oetf_yn[33];
23654+
23655+	for (i = 0; i < 33; i++)
23656+		hdr2sdr_eetf_oetf_yn[i] = table->hdr2sdr_eetf_yn[i] +
23657+				(table->hdr2sdr_bt1886oetf_yn[i] << 16);
23658+
23659+	vop_writel(vop, table->hdr2sdr_eetf_oetf_y0_offset,
23660+		   hdr2sdr_eetf_oetf_yn[0]);
23661+	for (i = 1; i < 33; i++)
23662+		vop_writel(vop,
23663+			   table->hdr2sdr_eetf_oetf_y1_offset + (i - 1) * 4,
23664+			   hdr2sdr_eetf_oetf_yn[i]);
23665+
23666+	vop_writel(vop, table->hdr2sdr_sat_y0_offset,
23667+		   table->hdr2sdr_sat_yn[0]);
23668+	for (i = 1; i < 9; i++)
23669+		vop_writel(vop, table->hdr2sdr_sat_y1_offset + (i - 1) * 4,
23670+			   table->hdr2sdr_sat_yn[i]);
23671+
23672+	VOP_CTRL_SET(vop, hdr2sdr_src_min, table->hdr2sdr_src_range_min);
23673+	VOP_CTRL_SET(vop, hdr2sdr_src_max, table->hdr2sdr_src_range_max);
23674+	VOP_CTRL_SET(vop, hdr2sdr_normfaceetf, table->hdr2sdr_normfaceetf);
23675+	VOP_CTRL_SET(vop, hdr2sdr_dst_min, table->hdr2sdr_dst_range_min);
23676+	VOP_CTRL_SET(vop, hdr2sdr_dst_max, table->hdr2sdr_dst_range_max);
23677+	VOP_CTRL_SET(vop, hdr2sdr_normfacgamma, table->hdr2sdr_normfacgamma);
23678+}
23679+
23680+static void vop_load_sdr2hdr_table(struct vop *vop, uint32_t cmd)
23681+{
23682+	int i;
23683+	const struct vop_hdr_table *table = vop->data->hdr_table;
23684+	uint32_t sdr2hdr_eotf_oetf_yn[65];
23685+	uint32_t sdr2hdr_oetf_dx_dxpow[64];
23686+
23687+	for (i = 0; i < 65; i++) {
23688+		if (cmd == SDR2HDR_FOR_BT2020)
23689+			sdr2hdr_eotf_oetf_yn[i] =
23690+				table->sdr2hdr_bt1886eotf_yn_for_bt2020[i] +
23691+				(table->sdr2hdr_st2084oetf_yn_for_bt2020[i] << 18);
23692+		else if (cmd == SDR2HDR_FOR_HDR)
23693+			sdr2hdr_eotf_oetf_yn[i] =
23694+				table->sdr2hdr_bt1886eotf_yn_for_hdr[i] +
23695+				(table->sdr2hdr_st2084oetf_yn_for_hdr[i] << 18);
23696+		else if (cmd == SDR2HDR_FOR_HLG_HDR)
23697+			sdr2hdr_eotf_oetf_yn[i] =
23698+				table->sdr2hdr_bt1886eotf_yn_for_hlg_hdr[i] +
23699+				(table->sdr2hdr_st2084oetf_yn_for_hlg_hdr[i] << 18);
23700+	}
23701+	vop_writel(vop, table->sdr2hdr_eotf_oetf_y0_offset,
23702+		   sdr2hdr_eotf_oetf_yn[0]);
23703+	for (i = 1; i < 65; i++)
23704+		vop_writel(vop, table->sdr2hdr_eotf_oetf_y1_offset +
23705+			   (i - 1) * 4, sdr2hdr_eotf_oetf_yn[i]);
23706+
23707+	for (i = 0; i < 64; i++) {
23708+		sdr2hdr_oetf_dx_dxpow[i] = table->sdr2hdr_st2084oetf_dxn[i] +
23709+				(table->sdr2hdr_st2084oetf_dxn_pow2[i] << 16);
23710+		vop_writel(vop, table->sdr2hdr_oetf_dx_dxpow1_offset + i * 4,
23711+			   sdr2hdr_oetf_dx_dxpow[i]);
23712+	}
23713+
23714+	for (i = 0; i < 63; i++)
23715+		vop_writel(vop, table->sdr2hdr_oetf_xn1_offset + i * 4,
23716+			   table->sdr2hdr_st2084oetf_xn[i]);
23717+}
23718+
23719+static void vop_load_csc_table(struct vop *vop, u32 offset, const u32 *table)
23720+{
23721+	int i;
23722+
23723+	/*
23724+	 * So far the csc offset is never 0, and it will not become 0 in the
23725+	 * future either, so treat a zero offset as "no table" and return here.
23726+	 */
23727+	if (!table || offset == 0)
23728+		return;
23729+
23730+	for (i = 0; i < 8; i++)
23731+		vop_writel(vop, offset + i * 4, table[i]);
23732+}
23733+
23734 static inline void vop_cfg_done(struct vop *vop)
23735 {
23736-	VOP_REG_SET(vop, common, cfg_done, 1);
23737+	VOP_CTRL_SET(vop, cfg_done, 1);
23738+}
23739+
23740+static bool vop_is_allwin_disabled(struct vop *vop)
23741+{
23742+	int i;
23743+
23744+	for (i = 0; i < vop->num_wins; i++) {
23745+		struct vop_win *win = &vop->win[i];
23746+
23747+		if (VOP_WIN_GET(vop, win, enable) != 0)
23748+			return false;
23749+	}
23750+
23751+	return true;
23752+}
23753+
23754+static void vop_win_disable(struct vop *vop, struct vop_win *win)
23755+{
23756+	/*
23757+	 * FIXUP: scaling on some VOPs can misbehave after a window is powered
23758+	 * on/off, so reset the scaler to SCALE_NONE mode here.
23759+	 */
23760+	if (win->phy->scl && win->phy->scl->ext) {
23761+		VOP_SCL_SET_EXT(vop, win, yrgb_hor_scl_mode, SCALE_NONE);
23762+		VOP_SCL_SET_EXT(vop, win, yrgb_ver_scl_mode, SCALE_NONE);
23763+		VOP_SCL_SET_EXT(vop, win, cbcr_hor_scl_mode, SCALE_NONE);
23764+		VOP_SCL_SET_EXT(vop, win, cbcr_ver_scl_mode, SCALE_NONE);
23765+	}
23766+
23767+	VOP_WIN_SET(vop, win, enable, 0);
23768+	if (win->area_id == 0)
23769+		VOP_WIN_SET(vop, win, gate, 0);
23770+}
23771+
23772+static void vop_disable_allwin(struct vop *vop)
23773+{
23774+	int i;
23775+
23776+	for (i = 0; i < vop->num_wins; i++) {
23777+		struct vop_win *win = &vop->win[i];
23778+
23779+		vop_win_disable(vop, win);
23780+	}
23781+}
23782+
23783+static inline void vop_write_lut(struct vop *vop, uint32_t offset, uint32_t v)
23784+{
23785+	writel(v, vop->lut_regs + offset);
23786+}
23787+
23788+static inline uint32_t vop_read_lut(struct vop *vop, uint32_t offset)
23789+{
23790+	return readl(vop->lut_regs + offset);
23791 }
23792 
23793 static bool has_rb_swapped(uint32_t format)
23794@@ -276,38 +603,150 @@ static enum vop_data_format vop_convert_format(uint32_t format)
23795 	case DRM_FORMAT_BGR565:
23796 		return VOP_FMT_RGB565;
23797 	case DRM_FORMAT_NV12:
23798+	case DRM_FORMAT_NV15:
23799 		return VOP_FMT_YUV420SP;
23800 	case DRM_FORMAT_NV16:
23801+	case DRM_FORMAT_NV20:
23802 		return VOP_FMT_YUV422SP;
23803 	case DRM_FORMAT_NV24:
23804+	case DRM_FORMAT_NV30:
23805 		return VOP_FMT_YUV444SP;
23806+	case DRM_FORMAT_YVYU:
23807+	case DRM_FORMAT_VYUY:
23808+	case DRM_FORMAT_YUYV:
23809+	case DRM_FORMAT_UYVY:
23810+		return VOP_FMT_YUYV;
23811 	default:
23812 		DRM_ERROR("unsupported format[%08x]\n", format);
23813 		return -EINVAL;
23814 	}
23815 }
23816 
23817-static int vop_convert_afbc_format(uint32_t format)
23818+static bool is_uv_swap(uint32_t bus_format, uint32_t output_mode)
23819+{
23820+	/*
23821+	 * FIXME:
23822+	 *
23823+	 * There is no media bus format for YUV444 output, so when out_mode
23824+	 * is AAAA or P888 and the bus format is YUV, assume the output is
23825+	 * YUV444.
23826+	 *
23827+	 * From H/W testing, YUV444 mode needs an rb swap.
23828+	 */
23829+	if (bus_format == MEDIA_BUS_FMT_YVYU8_1X16 ||
23830+	    bus_format == MEDIA_BUS_FMT_VYUY8_1X16 ||
23831+	    bus_format == MEDIA_BUS_FMT_YVYU8_2X8 ||
23832+	    bus_format == MEDIA_BUS_FMT_VYUY8_2X8 ||
23833+	    ((bus_format == MEDIA_BUS_FMT_YUV8_1X24 ||
23834+	      bus_format == MEDIA_BUS_FMT_YUV10_1X30) &&
23835+	     (output_mode == ROCKCHIP_OUT_MODE_AAAA ||
23836+	      output_mode == ROCKCHIP_OUT_MODE_P888)))
23837+		return true;
23838+	else
23839+		return false;
23840+}
23841+
23842+static bool is_yc_swap(uint32_t bus_format)
23843+{
23844+	switch (bus_format) {
23845+	case MEDIA_BUS_FMT_YUYV8_1X16:
23846+	case MEDIA_BUS_FMT_YVYU8_1X16:
23847+	case MEDIA_BUS_FMT_YUYV8_2X8:
23848+	case MEDIA_BUS_FMT_YVYU8_2X8:
23849+		return true;
23850+	default:
23851+		return false;
23852+	}
23853+}
23854+
23855+static bool is_yuv_output(uint32_t bus_format)
23856+{
23857+	switch (bus_format) {
23858+	case MEDIA_BUS_FMT_YUV8_1X24:
23859+	case MEDIA_BUS_FMT_YUV10_1X30:
23860+	case MEDIA_BUS_FMT_UYYVYY8_0_5X24:
23861+	case MEDIA_BUS_FMT_UYYVYY10_0_5X30:
23862+	case MEDIA_BUS_FMT_YUYV8_2X8:
23863+	case MEDIA_BUS_FMT_YVYU8_2X8:
23864+	case MEDIA_BUS_FMT_UYVY8_2X8:
23865+	case MEDIA_BUS_FMT_VYUY8_2X8:
23866+	case MEDIA_BUS_FMT_YUYV8_1X16:
23867+	case MEDIA_BUS_FMT_YVYU8_1X16:
23868+	case MEDIA_BUS_FMT_UYVY8_1X16:
23869+	case MEDIA_BUS_FMT_VYUY8_1X16:
23870+		return true;
23871+	default:
23872+		return false;
23873+	}
23874+}
23875+
23876+static bool is_yuv_support(uint32_t format)
23877+{
23878+	switch (format) {
23879+	case DRM_FORMAT_NV12:
23880+	case DRM_FORMAT_NV15:
23881+	case DRM_FORMAT_NV16:
23882+	case DRM_FORMAT_NV20:
23883+	case DRM_FORMAT_NV24:
23884+	case DRM_FORMAT_NV30:
23885+	case DRM_FORMAT_YVYU:
23886+	case DRM_FORMAT_VYUY:
23887+	case DRM_FORMAT_YUYV:
23888+	case DRM_FORMAT_UYVY:
23889+		return true;
23890+	default:
23891+		return false;
23892+	}
23893+}
23894+
23895+static bool is_yuyv_format(uint32_t format)
23896+{
23897+	switch (format) {
23898+	case DRM_FORMAT_YVYU:
23899+	case DRM_FORMAT_VYUY:
23900+	case DRM_FORMAT_YUYV:
23901+	case DRM_FORMAT_UYVY:
23902+		return true;
23903+	default:
23904+		return false;
23905+	}
23906+}
23907+
23908+static bool is_yuv_10bit(uint32_t format)
23909+{
23910+	switch (format) {
23911+	case DRM_FORMAT_NV15:
23912+	case DRM_FORMAT_NV20:
23913+	case DRM_FORMAT_NV30:
23914+		return true;
23915+	default:
23916+		return false;
23917+	}
23918+}
23919+
23920+static bool is_alpha_support(uint32_t format)
23921 {
23922 	switch (format) {
23923-	case DRM_FORMAT_XRGB8888:
23924 	case DRM_FORMAT_ARGB8888:
23925-	case DRM_FORMAT_XBGR8888:
23926 	case DRM_FORMAT_ABGR8888:
23927-		return AFBC_FMT_U8U8U8U8;
23928-	case DRM_FORMAT_RGB888:
23929-	case DRM_FORMAT_BGR888:
23930-		return AFBC_FMT_U8U8U8;
23931-	case DRM_FORMAT_RGB565:
23932-	case DRM_FORMAT_BGR565:
23933-		return AFBC_FMT_RGB565;
23934-	/* either of the below should not be reachable */
23935+		return true;
23936 	default:
23937-		DRM_WARN_ONCE("unsupported AFBC format[%08x]\n", format);
23938-		return -EINVAL;
23939+		return false;
23940 	}
23941+}
23942 
23943-	return -EINVAL;
23944+static inline bool rockchip_afbc(struct drm_plane *plane, u64 modifier)
23945+{
23946+	int i;
23947+
23948+	if (modifier == DRM_FORMAT_MOD_LINEAR)
23949+		return false;
23950+
23951+	for (i = 0 ; i < plane->modifier_count; i++)
23952+		if (plane->modifiers[i] == modifier)
23953+			break;
23954+
23955+	return i < plane->modifier_count;
23956 }
23957 
23958 static uint16_t scl_vop_cal_scale(enum scale_mode mode, uint32_t src,
23959@@ -344,29 +783,37 @@ static uint16_t scl_vop_cal_scale(enum scale_mode mode, uint32_t src,
23960 	return val;
23961 }
23962 
23963-static void scl_vop_cal_scl_fac(struct vop *vop, const struct vop_win_data *win,
23964-			     uint32_t src_w, uint32_t src_h, uint32_t dst_w,
23965-			     uint32_t dst_h, const struct drm_format_info *info)
23966+static void scl_vop_cal_scl_fac(struct vop *vop, const struct vop_win *win,
23967+				uint32_t src_w, uint32_t src_h, uint32_t dst_w,
23968+				uint32_t dst_h, uint32_t pixel_format)
23969 {
23970 	uint16_t yrgb_hor_scl_mode, yrgb_ver_scl_mode;
23971 	uint16_t cbcr_hor_scl_mode = SCALE_NONE;
23972 	uint16_t cbcr_ver_scl_mode = SCALE_NONE;
23973+	const struct drm_format_info *info = drm_format_info(pixel_format);
23974+	uint8_t hsub = info->hsub;
23975+	uint8_t vsub = info->vsub;
23976 	bool is_yuv = false;
23977-	uint16_t cbcr_src_w = src_w / info->hsub;
23978-	uint16_t cbcr_src_h = src_h / info->vsub;
23979+	uint16_t cbcr_src_w = src_w / hsub;
23980+	uint16_t cbcr_src_h = src_h / vsub;
23981 	uint16_t vsu_mode;
23982 	uint16_t lb_mode;
23983 	uint32_t val;
23984+	const struct vop_data *vop_data = vop->data;
23985 	int vskiplines;
23986 
23987-	if (info->is_yuv)
23988-		is_yuv = true;
23989-
23990-	if (dst_w > 3840) {
23991-		DRM_DEV_ERROR(vop->dev, "Maximum dst width (3840) exceeded\n");
23992+	if (!win->phy->scl)
23993 		return;
23994+
23995+	if (!(vop_data->feature & VOP_FEATURE_ALPHA_SCALE)) {
23996+		if (is_alpha_support(pixel_format) &&
23997+		    (src_w != dst_w || src_h != dst_h))
23998+			DRM_ERROR("ERROR: unsupported per-pixel alpha & scale\n");
23999 	}
24000 
24001+	if (info->is_yuv)
24002+		is_yuv = true;
24003+
24004 	if (!win->phy->scl->ext) {
24005 		VOP_SCL_SET(vop, win, scale_yrgb_x,
24006 			    scl_cal_scale2(src_w, dst_w));
24007@@ -448,46 +895,412 @@ static void scl_vop_cal_scl_fac(struct vop *vop, const struct vop_win_data *win,
24008 	}
24009 }
24010 
24011-static void vop_dsp_hold_valid_irq_enable(struct vop *vop)
24012+/*
24013+ * rk3328 HDR/CSC path
24014+ *
24015+ * HDR/SDR --> win0  --> HDR2SDR ----\
24016+ *		  \		      MUX --\
24017+ *                 \ --> SDR2HDR/CSC--/      \
24018+ *                                            \
24019+ * SDR --> win1 --> pre_overlay -> SDR2HDR/CSC --> post_overlay --> post CSC --> output
24020+ * SDR --> win2 -/
24021+ *
24022+ */
24023+
24024+static int vop_hdr_atomic_check(struct drm_crtc *crtc,
24025+				struct drm_crtc_state *crtc_state)
24026 {
24027-	unsigned long flags;
24028+	struct drm_atomic_state *state = crtc_state->state;
24029+	struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state);
24030+	struct drm_plane_state *pstate;
24031+	struct drm_plane *plane;
24032+	struct vop *vop = to_vop(crtc);
24033+	int pre_sdr2hdr_state = 0, post_sdr2hdr_state = 0;
24034+	int pre_sdr2hdr_mode = 0, post_sdr2hdr_mode = 0, sdr2hdr_func = 0;
24035+	bool pre_overlay = false;
24036+	int hdr2sdr_en = 0, plane_id = 0;
24037 
24038-	if (WARN_ON(!vop->is_enabled))
24039-		return;
24040+	if (!vop->data->hdr_table)
24041+		return 0;
24042+	/* hdr cover */
24043+	drm_atomic_crtc_state_for_each_plane(plane, crtc_state) {
24044+		struct vop_plane_state *vop_plane_state;
24045+		struct vop_win *win = to_vop_win(plane);
24046+
24047+		pstate = drm_atomic_get_plane_state(state, plane);
24048+		if (IS_ERR(pstate))
24049+			return PTR_ERR(pstate);
24050+		vop_plane_state = to_vop_plane_state(pstate);
24051+		if (!pstate->fb)
24052+			continue;
24053 
24054-	spin_lock_irqsave(&vop->irq_lock, flags);
24055+		if (vop_plane_state->eotf > s->eotf)
24056+			if (win->feature & WIN_FEATURE_HDR2SDR)
24057+				hdr2sdr_en = 1;
24058+		if (vop_plane_state->eotf < s->eotf) {
24059+			if (win->feature & WIN_FEATURE_PRE_OVERLAY)
24060+				pre_sdr2hdr_state |= BIT(plane_id);
24061+			else
24062+				post_sdr2hdr_state |= BIT(plane_id);
24063+		}
24064+		plane_id++;
24065+	}
24066 
24067-	VOP_INTR_SET_TYPE(vop, clear, DSP_HOLD_VALID_INTR, 1);
24068-	VOP_INTR_SET_TYPE(vop, enable, DSP_HOLD_VALID_INTR, 1);
24069+	if (pre_sdr2hdr_state || post_sdr2hdr_state || hdr2sdr_en) {
24070+		pre_overlay = true;
24071+		pre_sdr2hdr_mode = BT709_TO_BT2020;
24072+		post_sdr2hdr_mode = BT709_TO_BT2020;
24073+		sdr2hdr_func = SDR2HDR_FOR_HDR;
24074+		goto exit_hdr_convert;
24075+	}
24076 
24077-	spin_unlock_irqrestore(&vop->irq_lock, flags);
24078-}
24079+	/* overlay mode */
24080+	plane_id = 0;
24081+	pre_overlay = false;
24082+	pre_sdr2hdr_mode = 0;
24083+	post_sdr2hdr_mode = 0;
24084+	pre_sdr2hdr_state = 0;
24085+	post_sdr2hdr_state = 0;
24086+	drm_atomic_crtc_state_for_each_plane(plane, crtc_state) {
24087+		struct vop_plane_state *vop_plane_state;
24088+		struct vop_win *win = to_vop_win(plane);
24089+
24090+		pstate = drm_atomic_get_plane_state(state, plane);
24091+		if (IS_ERR(pstate))
24092+			return PTR_ERR(pstate);
24093+		vop_plane_state = to_vop_plane_state(pstate);
24094+		if (!pstate->fb)
24095+			continue;
24096 
24097-static void vop_dsp_hold_valid_irq_disable(struct vop *vop)
24098-{
24099-	unsigned long flags;
24100+		if (vop_plane_state->color_space == V4L2_COLORSPACE_BT2020 &&
24101+		    vop_plane_state->color_space > s->color_space) {
24102+			if (win->feature & WIN_FEATURE_PRE_OVERLAY) {
24103+				pre_sdr2hdr_mode = BT2020_TO_BT709;
24104+				pre_sdr2hdr_state |= BIT(plane_id);
24105+			} else {
24106+				post_sdr2hdr_mode = BT2020_TO_BT709;
24107+				post_sdr2hdr_state |= BIT(plane_id);
24108+			}
24109+		}
24110+		if (s->color_space == V4L2_COLORSPACE_BT2020 &&
24111+		    vop_plane_state->color_space < s->color_space) {
24112+			if (win->feature & WIN_FEATURE_PRE_OVERLAY) {
24113+				pre_sdr2hdr_mode = BT709_TO_BT2020;
24114+				pre_sdr2hdr_state |= BIT(plane_id);
24115+			} else {
24116+				post_sdr2hdr_mode = BT709_TO_BT2020;
24117+				post_sdr2hdr_state |= BIT(plane_id);
24118+			}
24119+		}
24120+		plane_id++;
24121+	}
24122 
24123-	if (WARN_ON(!vop->is_enabled))
24124-		return;
24125+	if (pre_sdr2hdr_state || post_sdr2hdr_state) {
24126+		pre_overlay = true;
24127+		sdr2hdr_func = SDR2HDR_FOR_BT2020;
24128+	}
24129 
24130-	spin_lock_irqsave(&vop->irq_lock, flags);
24131+exit_hdr_convert:
24132+	s->hdr.pre_overlay = pre_overlay;
24133+	s->hdr.hdr2sdr_en = hdr2sdr_en;
24134+	if (s->hdr.pre_overlay)
24135+		s->yuv_overlay = 0;
24136 
24137-	VOP_INTR_SET_TYPE(vop, enable, DSP_HOLD_VALID_INTR, 0);
24138+	s->hdr.sdr2hdr_state.bt1886eotf_pre_conv_en = !!pre_sdr2hdr_state;
24139+	s->hdr.sdr2hdr_state.rgb2rgb_pre_conv_en = !!pre_sdr2hdr_state;
24140+	s->hdr.sdr2hdr_state.rgb2rgb_pre_conv_mode = pre_sdr2hdr_mode;
24141+	s->hdr.sdr2hdr_state.st2084oetf_pre_conv_en = !!pre_sdr2hdr_state;
24142 
24143-	spin_unlock_irqrestore(&vop->irq_lock, flags);
24144+	s->hdr.sdr2hdr_state.bt1886eotf_post_conv_en = !!post_sdr2hdr_state;
24145+	s->hdr.sdr2hdr_state.rgb2rgb_post_conv_en = !!post_sdr2hdr_state;
24146+	s->hdr.sdr2hdr_state.rgb2rgb_post_conv_mode = post_sdr2hdr_mode;
24147+	s->hdr.sdr2hdr_state.st2084oetf_post_conv_en = !!post_sdr2hdr_state;
24148+	s->hdr.sdr2hdr_state.sdr2hdr_func = sdr2hdr_func;
24149+
24150+	return 0;
24151+}
24152+
24153+static int to_vop_csc_mode(int csc_mode)
24154+{
24155+	switch (csc_mode) {
24156+	case V4L2_COLORSPACE_SMPTE170M:
24157+	case V4L2_COLORSPACE_470_SYSTEM_M:
24158+	case V4L2_COLORSPACE_470_SYSTEM_BG:
24159+		return CSC_BT601L;
24160+	case V4L2_COLORSPACE_REC709:
24161+	case V4L2_COLORSPACE_SMPTE240M:
24162+	case V4L2_COLORSPACE_DEFAULT:
24163+		return CSC_BT709L;
24164+	case V4L2_COLORSPACE_JPEG:
24165+		return CSC_BT601F;
24166+	case V4L2_COLORSPACE_BT2020:
24167+		return CSC_BT2020;
24168+	default:
24169+		return CSC_BT709L;
24170+	}
24171+}
24172+
24173+static void vop_disable_all_planes(struct vop *vop)
24174+{
24175+	bool active;
24176+	int ret;
24177+
24178+	vop_disable_allwin(vop);
24179+	vop_cfg_done(vop);
24180+	ret = readx_poll_timeout_atomic(vop_is_allwin_disabled,
24181+					vop, active, active,
24182+					0, 500 * 1000);
24183+	if (ret)
24184+		dev_err(vop->dev, "wait win close timeout\n");
24185 }
24186 
24187 /*
24188- * (1) each frame starts at the start of the Vsync pulse which is signaled by
24189- *     the "FRAME_SYNC" interrupt.
24190- * (2) the active data region of each frame ends at dsp_vact_end
24191- * (3) we should program this same number (dsp_vact_end) into dsp_line_frag_num,
24192- *      to get "LINE_FLAG" interrupt at the end of the active on screen data.
24193+ * rk3399 colorspace path:
24194+ *      Input        Win csc                     Output
24195+ * 1. YUV(2020)  --> Y2R->2020To709->R2Y   --> YUV_OUTPUT(601/709)
24196+ *    RGB        --> R2Y                  __/
24197  *
24198- * VOP_INTR_CTRL0.dsp_line_frag_num = VOP_DSP_VACT_ST_END.dsp_vact_end
24199- * Interrupts
24200- * LINE_FLAG -------------------------------+
24201- * FRAME_SYNC ----+                         |
24202+ * 2. YUV(2020)  --> bypasss               --> YUV_OUTPUT(2020)
24203+ *    RGB        --> 709To2020->R2Y       __/
24204+ *
24205+ * 3. YUV(2020)  --> Y2R->2020To709        --> RGB_OUTPUT(709)
24206+ *    RGB        --> R2Y                  __/
24207+ *
24208+ * 4. YUV(601/709)-> Y2R->709To2020->R2Y   --> YUV_OUTPUT(2020)
24209+ *    RGB        --> 709To2020->R2Y       __/
24210+ *
24211+ * 5. YUV(601/709)-> bypass                --> YUV_OUTPUT(709)
24212+ *    RGB        --> R2Y                  __/
24213+ *
24214+ * 6. YUV(601/709)-> bypass                --> YUV_OUTPUT(601)
24215+ *    RGB        --> R2Y(601)             __/
24216+ *
24217+ * 7. YUV        --> Y2R(709)              --> RGB_OUTPUT(709)
24218+ *    RGB        --> bypass               __/
24219+ *
24220+ * 8. RGB        --> 709To2020->R2Y        --> YUV_OUTPUT(2020)
24221+ *
24222+ * 9. RGB        --> R2Y(709)              --> YUV_OUTPUT(709)
24223+ *
24224+ * 10. RGB       --> R2Y(601)              --> YUV_OUTPUT(601)
24225+ *
24226+ * 11. RGB       --> bypass                --> RGB_OUTPUT(709)
24227+ */
24228+static int vop_setup_csc_table(const struct vop_csc_table *csc_table,
24229+			       bool is_input_yuv, bool is_output_yuv,
24230+			       int input_csc, int output_csc,
24231+			       const uint32_t **y2r_table,
24232+			       const uint32_t **r2r_table,
24233+			       const uint32_t **r2y_table)
24234+{
24235+	*y2r_table = NULL;
24236+	*r2r_table = NULL;
24237+	*r2y_table = NULL;
24238+
24239+	if (!csc_table)
24240+		return 0;
24241+
24242+	if (is_output_yuv) {
24243+		if (output_csc == V4L2_COLORSPACE_BT2020) {
24244+			if (is_input_yuv) {
24245+				if (input_csc == V4L2_COLORSPACE_BT2020)
24246+					return 0;
24247+				*y2r_table = csc_table->y2r_bt709;
24248+			}
24249+			if (input_csc != V4L2_COLORSPACE_BT2020)
24250+				*r2r_table = csc_table->r2r_bt709_to_bt2020;
24251+			*r2y_table = csc_table->r2y_bt2020;
24252+		} else {
24253+			if (is_input_yuv && input_csc == V4L2_COLORSPACE_BT2020)
24254+				*y2r_table = csc_table->y2r_bt2020;
24255+			if (input_csc == V4L2_COLORSPACE_BT2020)
24256+				*r2r_table = csc_table->r2r_bt2020_to_bt709;
24257+			if (!is_input_yuv || *y2r_table) {
24258+				if (output_csc == V4L2_COLORSPACE_REC709 ||
24259+				    output_csc == V4L2_COLORSPACE_SMPTE240M ||
24260+				    output_csc == V4L2_COLORSPACE_DEFAULT)
24261+					*r2y_table = csc_table->r2y_bt709;
24262+				else if (output_csc == V4L2_COLORSPACE_SMPTE170M ||
24263+					 output_csc == V4L2_COLORSPACE_470_SYSTEM_M ||
24264+					 output_csc == V4L2_COLORSPACE_470_SYSTEM_BG)
24265+					*r2y_table = csc_table->r2y_bt601_12_235; /* bt601 limit */
24266+				else
24267+					*r2y_table = csc_table->r2y_bt601; /* bt601 full */
24268+			}
24269+		}
24270+	} else {
24271+		if (!is_input_yuv)
24272+			return 0;
24273+
24274+		/*
24275+		 * Is it possible to use bt2020 in RGB output mode?
24276+		 */
24277+		if (WARN_ON(output_csc == V4L2_COLORSPACE_BT2020))
24278+			return -EINVAL;
24279+
24280+		if (input_csc == V4L2_COLORSPACE_BT2020)
24281+			*y2r_table = csc_table->y2r_bt2020;
24282+		else if (input_csc == V4L2_COLORSPACE_REC709 ||
24283+			 input_csc == V4L2_COLORSPACE_SMPTE240M ||
24284+			 input_csc == V4L2_COLORSPACE_DEFAULT)
24285+			*y2r_table = csc_table->y2r_bt709;
24286+		else if (input_csc == V4L2_COLORSPACE_SMPTE170M ||
24287+			 input_csc == V4L2_COLORSPACE_470_SYSTEM_M ||
24288+			 input_csc == V4L2_COLORSPACE_470_SYSTEM_BG)
24289+			*y2r_table = csc_table->y2r_bt601_12_235; /* bt601 limit */
24290+		else
24291+			*y2r_table = csc_table->y2r_bt601;  /* bt601 full */
24292+
24293+		if (input_csc == V4L2_COLORSPACE_BT2020)
24294+			/*
24295+			 * We don't have a bt601-to-bt709 table, so force bt709 here.
24296+			 */
24297+			*r2r_table = csc_table->r2r_bt2020_to_bt709;
24298+	}
24299+
24300+	return 0;
24301+}
24302+
24303+static void vop_setup_csc_mode(bool is_input_yuv, bool is_output_yuv,
24304+			       int input_csc, int output_csc,
24305+			       bool *y2r_en, bool *r2y_en, int *csc_mode)
24306+{
24307+	if (is_input_yuv && !is_output_yuv) {
24308+		*y2r_en = true;
24309+		*csc_mode =  to_vop_csc_mode(input_csc);
24310+	} else if (!is_input_yuv && is_output_yuv) {
24311+		*r2y_en = true;
24312+		*csc_mode = to_vop_csc_mode(output_csc);
24313+	}
24314+}
24315+
24316+static int vop_csc_atomic_check(struct drm_crtc *crtc,
24317+				struct drm_crtc_state *crtc_state)
24318+{
24319+	struct vop *vop = to_vop(crtc);
24320+	struct drm_atomic_state *state = crtc_state->state;
24321+	struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state);
24322+	const struct vop_csc_table *csc_table = vop->data->csc_table;
24323+	struct drm_plane_state *pstate;
24324+	struct drm_plane *plane;
24325+	bool is_input_yuv, is_output_yuv;
24326+	int ret;
24327+
24328+	is_output_yuv = is_yuv_output(s->bus_format);
24329+
24330+	drm_atomic_crtc_state_for_each_plane(plane, crtc_state) {
24331+		struct vop_plane_state *vop_plane_state;
24332+		struct vop_win *win = to_vop_win(plane);
24333+
24334+		pstate = drm_atomic_get_plane_state(state, plane);
24335+		if (IS_ERR(pstate))
24336+			return PTR_ERR(pstate);
24337+		vop_plane_state = to_vop_plane_state(pstate);
24338+
24339+		if (!pstate->fb)
24340+			continue;
24341+		is_input_yuv = is_yuv_support(pstate->fb->format->format);
24342+		vop_plane_state->y2r_en = false;
24343+		vop_plane_state->r2r_en = false;
24344+		vop_plane_state->r2y_en = false;
24345+
24346+		ret = vop_setup_csc_table(csc_table, is_input_yuv,
24347+					  is_output_yuv,
24348+					  vop_plane_state->color_space,
24349+					  s->color_space,
24350+					  &vop_plane_state->y2r_table,
24351+					  &vop_plane_state->r2r_table,
24352+					  &vop_plane_state->r2y_table);
24353+		if (ret)
24354+			return ret;
24355+
24356+		vop_setup_csc_mode(is_input_yuv, s->yuv_overlay,
24357+				   vop_plane_state->color_space, s->color_space,
24358+				   &vop_plane_state->y2r_en,
24359+				   &vop_plane_state->r2y_en,
24360+				   &vop_plane_state->csc_mode);
24361+
24362+		if (csc_table) {
24363+			vop_plane_state->y2r_en = !!vop_plane_state->y2r_table;
24364+			vop_plane_state->r2r_en = !!vop_plane_state->r2r_table;
24365+			vop_plane_state->r2y_en = !!vop_plane_state->r2y_table;
24366+			continue;
24367+		}
24368+
24369+		/*
24370+		 * Workaround for an IC design limitation: when hdr2sdr is
24371+		 * enabled on rk3328, the VOP cannot combine per-pixel alpha
24372+		 * with global alpha, so composition must fall back to the GPU.
24373+		 * The GPU cannot do hdr2sdr and outputs an HDR UI, so the VOP does:
24374+		 * UI(rgbx) -> yuv -> rgb -> hdr2sdr -> overlay -> output.
24375+		 */
24376+		if (s->hdr.hdr2sdr_en &&
24377+		    vop_plane_state->eotf == HDMI_EOTF_SMPTE_ST2084 &&
24378+		    !is_yuv_support(pstate->fb->format->format))
24379+			vop_plane_state->r2y_en = true;
24380+		if (win->feature & WIN_FEATURE_PRE_OVERLAY)
24381+			vop_plane_state->r2r_en =
24382+				s->hdr.sdr2hdr_state.rgb2rgb_pre_conv_en;
24383+		else if (win->feature & WIN_FEATURE_HDR2SDR)
24384+			vop_plane_state->r2r_en =
24385+				s->hdr.sdr2hdr_state.rgb2rgb_post_conv_en;
24386+	}
24387+
24388+	return 0;
24389+}
24390+
24391+static void vop_enable_debug_irq(struct drm_crtc *crtc)
24392+{
24393+	struct vop *vop = to_vop(crtc);
24394+	uint32_t irqs;
24395+
24396+	irqs = BUS_ERROR_INTR | WIN0_EMPTY_INTR | WIN1_EMPTY_INTR |
24397+		WIN2_EMPTY_INTR | WIN3_EMPTY_INTR | HWC_EMPTY_INTR |
24398+		POST_BUF_EMPTY_INTR;
24399+	VOP_INTR_SET_TYPE(vop, clear, irqs, 1);
24400+	VOP_INTR_SET_TYPE(vop, enable, irqs, 1);
24401+}
24402+
24403+static void vop_dsp_hold_valid_irq_enable(struct vop *vop)
24404+{
24405+	unsigned long flags;
24406+
24407+	if (WARN_ON(!vop->is_enabled))
24408+		return;
24409+
24410+	spin_lock_irqsave(&vop->irq_lock, flags);
24411+
24412+	VOP_INTR_SET_TYPE(vop, clear, DSP_HOLD_VALID_INTR, 1);
24413+	VOP_INTR_SET_TYPE(vop, enable, DSP_HOLD_VALID_INTR, 1);
24414+
24415+	spin_unlock_irqrestore(&vop->irq_lock, flags);
24416+}
24417+
24418+static void vop_dsp_hold_valid_irq_disable(struct vop *vop)
24419+{
24420+	unsigned long flags;
24421+
24422+	if (WARN_ON(!vop->is_enabled))
24423+		return;
24424+
24425+	spin_lock_irqsave(&vop->irq_lock, flags);
24426+
24427+	VOP_INTR_SET_TYPE(vop, enable, DSP_HOLD_VALID_INTR, 0);
24428+
24429+	spin_unlock_irqrestore(&vop->irq_lock, flags);
24430+}
24431+
24432+/*
24433+ * (1) each frame starts at the start of the Vsync pulse which is signaled by
24434+ *     the "FRAME_SYNC" interrupt.
24435+ * (2) the active data region of each frame ends at dsp_vact_end
24436+ * (3) we should program this same number (dsp_vact_end) into dsp_line_frag_num,
24437+ *      to get "LINE_FLAG" interrupt at the end of the active on screen data.
24438+ *
24439+ * VOP_INTR_CTRL0.dsp_line_frag_num = VOP_DSP_VACT_ST_END.dsp_vact_end
24440+ * Interrupts
24441+ * LINE_FLAG -------------------------------+
24442+ * FRAME_SYNC ----+                         |
24443  *                |                         |
24444  *                v                         v
24445  *                | Vsync | Vbp |  Vactive  | Vfp |
24446@@ -567,147 +1380,217 @@ static void vop_core_clks_disable(struct vop *vop)
24447 	clk_disable(vop->hclk);
24448 }
24449 
24450-static void vop_win_disable(struct vop *vop, const struct vop_win *vop_win)
24451+static void vop_crtc_load_lut(struct drm_crtc *crtc)
24452 {
24453-	const struct vop_win_data *win = vop_win->data;
24454+	struct vop *vop = to_vop(crtc);
24455+	int i, dle, lut_idx = 0;
24456 
24457-	if (win->phy->scl && win->phy->scl->ext) {
24458-		VOP_SCL_SET_EXT(vop, win, yrgb_hor_scl_mode, SCALE_NONE);
24459-		VOP_SCL_SET_EXT(vop, win, yrgb_ver_scl_mode, SCALE_NONE);
24460-		VOP_SCL_SET_EXT(vop, win, cbcr_hor_scl_mode, SCALE_NONE);
24461-		VOP_SCL_SET_EXT(vop, win, cbcr_ver_scl_mode, SCALE_NONE);
24462+	if (!vop->is_enabled || !vop->lut || !vop->lut_regs)
24463+		return;
24464+
24465+	if (WARN_ON(!drm_modeset_is_locked(&crtc->mutex)))
24466+		return;
24467+
24468+	if (!VOP_CTRL_SUPPORT(vop, update_gamma_lut)) {
24469+		spin_lock(&vop->reg_lock);
24470+		VOP_CTRL_SET(vop, dsp_lut_en, 0);
24471+		vop_cfg_done(vop);
24472+		spin_unlock(&vop->reg_lock);
24473+
24474+#define CTRL_GET(name) VOP_CTRL_GET(vop, name)
24475+		readx_poll_timeout(CTRL_GET, dsp_lut_en,
24476+				dle, !dle, 5, 33333);
24477+	} else {
24478+		lut_idx = CTRL_GET(lut_buffer_index);
24479 	}
24480 
24481-	VOP_WIN_SET(vop, win, enable, 0);
24482-	vop->win_enabled &= ~BIT(VOP_WIN_TO_INDEX(vop_win));
24483+	for (i = 0; i < vop->lut_len; i++)
24484+		vop_write_lut(vop, i << 2, vop->lut[i]);
24485+
24486+	spin_lock(&vop->reg_lock);
24487+
24488+	VOP_CTRL_SET(vop, dsp_lut_en, 1);
24489+	VOP_CTRL_SET(vop, update_gamma_lut, 1);
24490+	vop_cfg_done(vop);
24491+	vop->lut_active = true;
24492+
24493+	spin_unlock(&vop->reg_lock);
24494+
24495+	if (VOP_CTRL_SUPPORT(vop, update_gamma_lut)) {
24496+		readx_poll_timeout(CTRL_GET, lut_buffer_index,
24497+				   dle, dle != lut_idx, 5, 33333);
24498+		/* FIXME:
24499+		 * the update_gamma_lut bit is auto-cleared to 0 by the HW,
24500+		 * so it must not be backed up in regsbak.
24501+		 */
24502+		VOP_CTRL_SET(vop, update_gamma_lut, 0);
24503+	}
24504+#undef CTRL_GET
24505 }
24506 
24507-static int vop_enable(struct drm_crtc *crtc, struct drm_crtc_state *old_state)
24508+static void rockchip_vop_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red,
24509+					   u16 green, u16 blue, int regno)
24510 {
24511 	struct vop *vop = to_vop(crtc);
24512-	int ret, i;
24513+	u32 lut_len = vop->lut_len;
24514+	u32 r, g, b;
24515 
24516-	ret = pm_runtime_get_sync(vop->dev);
24517-	if (ret < 0) {
24518-		DRM_DEV_ERROR(vop->dev, "failed to get pm runtime: %d\n", ret);
24519-		return ret;
24520-	}
24521+	if (regno >= lut_len || !vop->lut)
24522+		return;
24523 
24524-	ret = vop_core_clks_enable(vop);
24525-	if (WARN_ON(ret < 0))
24526-		goto err_put_pm_runtime;
24527+	r = red * (lut_len - 1) / 0xffff;
24528+	g = green * (lut_len - 1) / 0xffff;
24529+	b = blue * (lut_len - 1) / 0xffff;
24530+	vop->lut[regno] = r * lut_len * lut_len + g * lut_len + b;
24531+}
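The gamma helpers above pack each entry as base-lut_len digits, r * lut_len^2 + g * lut_len + b, so with a 256-entry LUT each channel occupies 8 bits of the word (lut_len is assumed to be a power of two, as the mask arithmetic in the matching get helper requires). A quick sketch of the packing implemented by rockchip_vop_crtc_fb_gamma_set():

#include <stdint.h>
#include <stdio.h>

#define LUT_LEN 256u	/* assumed; the real value comes from vop->lut_len */

/* Same scaling and packing as rockchip_vop_crtc_fb_gamma_set(). */
static uint32_t pack_gamma(uint16_t red, uint16_t green, uint16_t blue)
{
	uint32_t r = red * (LUT_LEN - 1) / 0xffff;
	uint32_t g = green * (LUT_LEN - 1) / 0xffff;
	uint32_t b = blue * (LUT_LEN - 1) / 0xffff;

	return r * LUT_LEN * LUT_LEN + g * LUT_LEN + b;
}

int main(void)
{
	/* Full red, half green, no blue -> r=255, g=127, b=0 -> 0x00ff7f00. */
	printf("0x%08x\n", pack_gamma(0xffff, 0x7fff, 0x0000));
	return 0;
}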
24532 
24533-	ret = clk_enable(vop->dclk);
24534-	if (WARN_ON(ret < 0))
24535-		goto err_disable_core;
24536+static void rockchip_vop_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red,
24537+					   u16 *green, u16 *blue, int regno)
24538+{
24539+	struct vop *vop = to_vop(crtc);
24540+	u32 lut_len = vop->lut_len;
24541+	u32 r, g, b;
24542 
24543-	/*
24544-	 * Slave iommu shares power, irq and clock with vop.  It was associated
24545-	 * automatically with this master device via common driver code.
24546-	 * Now that we have enabled the clock we attach it to the shared drm
24547-	 * mapping.
24548-	 */
24549-	ret = rockchip_drm_dma_attach_device(vop->drm_dev, vop->dev);
24550-	if (ret) {
24551-		DRM_DEV_ERROR(vop->dev,
24552-			      "failed to attach dma mapping, %d\n", ret);
24553-		goto err_disable_dclk;
24554-	}
24555+	if (regno >= lut_len || !vop->lut)
24556+		return;
24557 
24558-	spin_lock(&vop->reg_lock);
24559-	for (i = 0; i < vop->len; i += 4)
24560-		writel_relaxed(vop->regsbak[i / 4], vop->regs + i);
24561+	r = (vop->lut[regno] / lut_len / lut_len) & (lut_len - 1);
24562+	g = (vop->lut[regno] / lut_len) & (lut_len - 1);
24563+	b = vop->lut[regno] & (lut_len - 1);
24564+	*red = r * 0xffff / (lut_len - 1);
24565+	*green = g * 0xffff / (lut_len - 1);
24566+	*blue = b * 0xffff / (lut_len - 1);
24567+}
24568 
24569-	/*
24570-	 * We need to make sure that all windows are disabled before we
24571-	 * enable the crtc. Otherwise we might try to scan from a destroyed
24572-	 * buffer later.
24573-	 *
24574-	 * In the case of enable-after-PSR, we don't need to worry about this
24575-	 * case since the buffer is guaranteed to be valid and disabling the
24576-	 * window will result in screen glitches on PSR exit.
24577-	 */
24578-	if (!old_state || !old_state->self_refresh_active) {
24579-		for (i = 0; i < vop->data->win_size; i++) {
24580-			struct vop_win *vop_win = &vop->win[i];
24581+static int vop_crtc_legacy_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
24582+				     u16 *blue, uint32_t size,
24583+				     struct drm_modeset_acquire_ctx *ctx)
24584+{
24585+	struct vop *vop = to_vop(crtc);
24586+	int len = min(size, vop->lut_len);
24587+	int i;
24588 
24589-			vop_win_disable(vop, vop_win);
24590-		}
24591-	}
24592+	if (!vop->lut)
24593+		return -EINVAL;
24594 
24595-	if (vop->data->afbc) {
24596-		struct rockchip_crtc_state *s;
24597-		/*
24598-		 * Disable AFBC and forget there was a vop window with AFBC
24599-		 */
24600-		VOP_AFBC_SET(vop, enable, 0);
24601-		s = to_rockchip_crtc_state(crtc->state);
24602-		s->enable_afbc = false;
24603+	for (i = 0; i < len; i++)
24604+		rockchip_vop_crtc_fb_gamma_set(crtc, red[i], green[i], blue[i], i);
24605+
24606+	vop_crtc_load_lut(crtc);
24607+
24608+	return 0;
24609+}
24610+
24611+static int vop_crtc_atomic_gamma_set(struct drm_crtc *crtc,
24612+				     struct drm_crtc_state *old_state)
24613+{
24614+	struct vop *vop = to_vop(crtc);
24615+	struct drm_color_lut *lut = vop->gamma_lut;
24616+	unsigned int i;
24617+
24618+	for (i = 0; i < vop->lut_len; i++)
24619+		rockchip_vop_crtc_fb_gamma_set(crtc, lut[i].red, lut[i].green,
24620+					       lut[i].blue, i);
24621+	vop_crtc_load_lut(crtc);
24622+
24623+	return 0;
24624+}
24625+
24626+static void vop_power_enable(struct drm_crtc *crtc)
24627+{
24628+	struct vop *vop = to_vop(crtc);
24629+	int ret;
24630+
24631+	ret = clk_prepare_enable(vop->hclk);
24632+	if (ret < 0) {
24633+		dev_err(vop->dev, "failed to enable hclk - %d\n", ret);
24634+		return;
24635 	}
24636 
24637-	vop_cfg_done(vop);
24638+	ret = clk_prepare_enable(vop->dclk);
24639+	if (ret < 0) {
24640+		dev_err(vop->dev, "failed to enable dclk - %d\n", ret);
24641+		goto err_disable_hclk;
24642+	}
24643 
24644-	spin_unlock(&vop->reg_lock);
24645+	ret = clk_prepare_enable(vop->aclk);
24646+	if (ret < 0) {
24647+		dev_err(vop->dev, "failed to enable aclk - %d\n", ret);
24648+		goto err_disable_dclk;
24649+	}
24650 
24651-	/*
24652-	 * At here, vop clock & iommu is enable, R/W vop regs would be safe.
24653-	 */
24654-	vop->is_enabled = true;
24655+	ret = pm_runtime_get_sync(vop->dev);
24656+	if (ret < 0) {
24657+		dev_err(vop->dev, "failed to get pm runtime: %d\n", ret);
24658+		return;
24659+	}
24660 
24661-	spin_lock(&vop->reg_lock);
24662+	memcpy(vop->regsbak, vop->regs, vop->len);
24663 
24664-	VOP_REG_SET(vop, common, standby, 1);
24665+	if (VOP_CTRL_SUPPORT(vop, version)) {
24666+		uint32_t version = VOP_CTRL_GET(vop, version);
24667 
24668-	spin_unlock(&vop->reg_lock);
24669+		/*
24670+		 * Fixup rk3288w version.
24671+		 */
24672+		if (version && version == 0x0a05)
24673+			vop->version = VOP_VERSION(3, 1);
24674+	}
24675 
24676-	drm_crtc_vblank_on(crtc);
24677+	vop->is_enabled = true;
24678 
24679-	return 0;
24680+	return;
24681 
24682 err_disable_dclk:
24683-	clk_disable(vop->dclk);
24684-err_disable_core:
24685-	vop_core_clks_disable(vop);
24686-err_put_pm_runtime:
24687-	pm_runtime_put_sync(vop->dev);
24688-	return ret;
24689+	clk_disable_unprepare(vop->dclk);
24690+err_disable_hclk:
24691+	clk_disable_unprepare(vop->hclk);
24692 }
24693 
24694-static void rockchip_drm_set_win_enabled(struct drm_crtc *crtc, bool enabled)
24695+static void vop_initial(struct drm_crtc *crtc)
24696 {
24697-        struct vop *vop = to_vop(crtc);
24698-        int i;
24699+	struct vop *vop = to_vop(crtc);
24700+	int i;
24701 
24702-        spin_lock(&vop->reg_lock);
24703+	vop_power_enable(crtc);
24704 
24705-        for (i = 0; i < vop->data->win_size; i++) {
24706-                struct vop_win *vop_win = &vop->win[i];
24707-                const struct vop_win_data *win = vop_win->data;
24708+	VOP_CTRL_SET(vop, global_regdone_en, 1);
24709+	VOP_CTRL_SET(vop, dsp_blank, 0);
24710+	VOP_CTRL_SET(vop, axi_outstanding_max_num, 30);
24711+	VOP_CTRL_SET(vop, axi_max_outstanding_en, 1);
24712+	VOP_CTRL_SET(vop, dither_up_en, 1);
24713 
24714-                VOP_WIN_SET(vop, win, enable,
24715-                            enabled && (vop->win_enabled & BIT(i)));
24716-        }
24717-        vop_cfg_done(vop);
24718+	/*
24719+	 * We need to make sure that all windows are disabled before resuming
24720+	 * the crtc. Otherwise we might try to scan from a destroyed
24721+	 * buffer later.
24722+	 */
24723+	for (i = 0; i < vop->num_wins; i++) {
24724+		struct vop_win *win = &vop->win[i];
24725+		int channel = i * 2 + 1;
24726 
24727-        spin_unlock(&vop->reg_lock);
24728+		VOP_WIN_SET(vop, win, channel, (channel + 1) << 4 | channel);
24729+	}
24730+	VOP_CTRL_SET(vop, afbdc_en, 0);
24731+	vop_enable_debug_irq(crtc);
24732 }
24733 
24734 static void vop_crtc_atomic_disable(struct drm_crtc *crtc,
24735 				    struct drm_crtc_state *old_state)
24736 {
24737 	struct vop *vop = to_vop(crtc);
24738+	int sys_status = drm_crtc_index(crtc) ?
24739+				SYS_STATUS_LCDC1 : SYS_STATUS_LCDC0;
24740 
24741 	WARN_ON(vop->event);
24742 
24743-	if (crtc->state->self_refresh_active)
24744-		rockchip_drm_set_win_enabled(crtc, false);
24745-
24746-	mutex_lock(&vop->vop_lock);
24747-
24748+	vop_lock(vop);
24749+	VOP_CTRL_SET(vop, reg_done_frm, 1);
24750+	VOP_CTRL_SET(vop, dsp_interlace, 0);
24751 	drm_crtc_vblank_off(crtc);
24752-
24753-	if (crtc->state->self_refresh_active)
24754-		goto out;
24755+	VOP_CTRL_SET(vop, out_mode, ROCKCHIP_OUT_MODE_P888);
24756+	VOP_CTRL_SET(vop, afbdc_en, 0);
24757+	vop_disable_all_planes(vop);
24758 
24759 	/*
24760 	 * Vop standby will take effect at end of current frame,
24761@@ -721,27 +1604,32 @@ static void vop_crtc_atomic_disable(struct drm_crtc *crtc,
24762 
24763 	spin_lock(&vop->reg_lock);
24764 
24765-	VOP_REG_SET(vop, common, standby, 1);
24766+	VOP_CTRL_SET(vop, standby, 1);
24767 
24768 	spin_unlock(&vop->reg_lock);
24769 
24770-	wait_for_completion(&vop->dsp_hold_completion);
24771+	WARN_ON(!wait_for_completion_timeout(&vop->dsp_hold_completion,
24772+					     msecs_to_jiffies(50)));
24773 
24774 	vop_dsp_hold_valid_irq_disable(vop);
24775 
24776 	vop->is_enabled = false;
24777+	if (vop->is_iommu_enabled) {
24778+		/*
24779+		 * vop standby complete, so iommu detach is safe.
24780+		 */
24781+		VOP_CTRL_SET(vop, dma_stop, 1);
24782+		rockchip_drm_dma_detach_device(vop->drm_dev, vop->dev);
24783+		vop->is_iommu_enabled = false;
24784+	}
24785 
24786-	/*
24787-	 * vop standby complete, so iommu detach is safe.
24788-	 */
24789-	rockchip_drm_dma_detach_device(vop->drm_dev, vop->dev);
24790-
24791-	clk_disable(vop->dclk);
24792-	vop_core_clks_disable(vop);
24793-	pm_runtime_put(vop->dev);
24794+	pm_runtime_put_sync(vop->dev);
24795+	clk_disable_unprepare(vop->dclk);
24796+	clk_disable_unprepare(vop->aclk);
24797+	clk_disable_unprepare(vop->hclk);
24798+	vop_unlock(vop);
24799 
24800-out:
24801-	mutex_unlock(&vop->vop_lock);
24802+	rockchip_clear_system_status(sys_status);
24803 
24804 	if (crtc->state->event && !crtc->state->active) {
24805 		spin_lock_irq(&crtc->dev->event_lock);
24806@@ -752,29 +1640,20 @@ static void vop_crtc_atomic_disable(struct drm_crtc *crtc,
24807 	}
24808 }
24809 
24810-static void vop_plane_destroy(struct drm_plane *plane)
24811+static int vop_plane_prepare_fb(struct drm_plane *plane,
24812+				struct drm_plane_state *new_state)
24813 {
24814-	drm_plane_cleanup(plane);
24815-}
24816+	if (plane->state->fb)
24817+		drm_framebuffer_get(plane->state->fb);
24818 
24819-static inline bool rockchip_afbc(u64 modifier)
24820-{
24821-	return modifier == ROCKCHIP_AFBC_MOD;
24822+	return 0;
24823 }
24824 
24825-static bool rockchip_mod_supported(struct drm_plane *plane,
24826-				   u32 format, u64 modifier)
24827+static void vop_plane_cleanup_fb(struct drm_plane *plane,
24828+				 struct drm_plane_state *old_state)
24829 {
24830-	if (modifier == DRM_FORMAT_MOD_LINEAR)
24831-		return true;
24832-
24833-	if (!rockchip_afbc(modifier)) {
24834-		DRM_DEBUG_KMS("Unsupported format modifier 0x%llx\n", modifier);
24835-
24836-		return false;
24837-	}
24838-
24839-	return vop_convert_afbc_format(format) >= 0;
24840+	if (old_state->fb)
24841+		drm_framebuffer_put(old_state->fb);
24842 }
24843 
24844 static int vop_plane_atomic_check(struct drm_plane *plane,
24845@@ -783,21 +1662,43 @@ static int vop_plane_atomic_check(struct drm_plane *plane,
24846 	struct drm_crtc *crtc = state->crtc;
24847 	struct drm_crtc_state *crtc_state;
24848 	struct drm_framebuffer *fb = state->fb;
24849-	struct vop_win *vop_win = to_vop_win(plane);
24850-	const struct vop_win_data *win = vop_win->data;
24851+	struct vop_win *win = to_vop_win(plane);
24852+	struct vop_plane_state *vop_plane_state = to_vop_plane_state(state);
24853+	const struct vop_data *vop_data;
24854+	struct vop *vop;
24855 	int ret;
24856+	struct drm_rect *dest = &vop_plane_state->dest;
24857+	struct drm_rect *src = &vop_plane_state->src;
24858+	struct drm_gem_object *obj, *uv_obj;
24859+	struct rockchip_gem_object *rk_obj, *rk_uv_obj;
24860 	int min_scale = win->phy->scl ? FRAC_16_16(1, 8) :
24861 					DRM_PLANE_HELPER_NO_SCALING;
24862 	int max_scale = win->phy->scl ? FRAC_16_16(8, 1) :
24863 					DRM_PLANE_HELPER_NO_SCALING;
24864+	unsigned long offset;
24865+	dma_addr_t dma_addr;
24866 
24867-	if (!crtc || WARN_ON(!fb))
24868+	crtc = crtc ? crtc : plane->state->crtc;
24869+	if (!crtc || !fb) {
24870+		plane->state->visible = false;
24871 		return 0;
24872+	}
24873 
24874 	crtc_state = drm_atomic_get_existing_crtc_state(state->state, crtc);
24875 	if (WARN_ON(!crtc_state))
24876 		return -EINVAL;
24877 
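+	/*
+	 * Cache the source rectangle (16.16 fixed point) and the destination
+	 * rectangle (whole pixels) in the vop plane state for atomic_update.
+	 */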
24878+	src->x1 = state->src_x;
24879+	src->y1 = state->src_y;
24880+	src->x2 = state->src_x + state->src_w;
24881+	src->y2 = state->src_y + state->src_h;
24882+	dest->x1 = state->crtc_x;
24883+	dest->y1 = state->crtc_y;
24884+	dest->x2 = state->crtc_x + state->crtc_w;
24885+	dest->y2 = state->crtc_y + state->crtc_h;
24886+	vop_plane_state->zpos = state->zpos;
24887+	vop_plane_state->blend_mode = state->pixel_blend_mode;
24888+
24889 	ret = drm_atomic_helper_check_plane_state(state, crtc_state,
24890 						  min_scale, max_scale,
24891 						  true, true);
24892@@ -807,13 +1708,34 @@ static int vop_plane_atomic_check(struct drm_plane *plane,
24893 	if (!state->visible)
24894 		return 0;
24895 
24896-	ret = vop_convert_format(fb->format->format);
24897-	if (ret < 0)
24898-		return ret;
24899+	vop_plane_state->format = vop_convert_format(fb->format->format);
24900+	if (vop_plane_state->format < 0)
24901+		return vop_plane_state->format;
24902 
24903-	/*
24904-	 * Src.x1 can be odd when do clip, but yuv plane start point
24905-	 * need align with 2 pixel.
24906+	vop = to_vop(crtc);
24907+	vop_data = vop->data;
24908+
24909+	if (state->src_w >> 16 < 4 || state->src_h >> 16 < 4 ||
24910+	    state->crtc_w < 4 || state->crtc_h < 4) {
24911+		DRM_ERROR("Invalid size: %dx%d->%dx%d, min size is 4x4\n",
24912+			  state->src_w >> 16, state->src_h >> 16,
24913+			  state->crtc_w, state->crtc_h);
24914+		return -EINVAL;
24915+	}
24916+
24917+	if (drm_rect_width(src) >> 16 > vop_data->max_input.width ||
24918+	    drm_rect_height(src) >> 16 > vop_data->max_input.height) {
24919+		DRM_ERROR("Invalid source: %dx%d. max input: %dx%d\n",
24920+			  drm_rect_width(src) >> 16,
24921+			  drm_rect_height(src) >> 16,
24922+			  vop_data->max_input.width,
24923+			  vop_data->max_input.height);
24924+		return -EINVAL;
24925+	}
24926+
24927+	/*
24928+	 * Src.x1 can be odd when do clip, but yuv plane start point
24929+	 * need align with 2 pixel.
24930 	 */
24931 	if (fb->format->is_yuv && ((state->src.x1 >> 16) % 2)) {
24932 		DRM_ERROR("Invalid Source: Yuv format not support odd xpos\n");
24933@@ -825,28 +1747,28 @@ static int vop_plane_atomic_check(struct drm_plane *plane,
24934 		return -EINVAL;
24935 	}
24936 
24937-	if (rockchip_afbc(fb->modifier)) {
24938-		struct vop *vop = to_vop(crtc);
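+	/*
+	 * Precompute the scan-out start addresses inside the framebuffer; with
+	 * Y mirroring the scan-out starts from the last line of the source.
+	 */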
24939+	offset = (src->x1 >> 16) * fb->format->cpp[0];
24940+	vop_plane_state->offset = offset + fb->offsets[0];
24941+	if (state->rotation & DRM_MODE_REFLECT_Y)
24942+		offset += ((src->y2 >> 16) - 1) * fb->pitches[0];
24943+	else
24944+		offset += (src->y1 >> 16) * fb->pitches[0];
24945 
24946-		if (!vop->data->afbc) {
24947-			DRM_ERROR("vop does not support AFBC\n");
24948-			return -EINVAL;
24949-		}
24950+	obj = fb->obj[0];
24951+	rk_obj = to_rockchip_obj(obj);
24952+	vop_plane_state->yrgb_mst = rk_obj->dma_addr + offset + fb->offsets[0];
24953+	if (fb->format->is_yuv) {
24954+		int hsub = fb->format->hsub;
24955+		int vsub = fb->format->vsub;
24956 
24957-		ret = vop_convert_afbc_format(fb->format->format);
24958-		if (ret < 0)
24959-			return ret;
24960+		offset = (src->x1 >> 16) * fb->format->cpp[1] / hsub;
24961+		offset += (src->y1 >> 16) * fb->pitches[1] / vsub;
24962 
24963-		if (state->src.x1 || state->src.y1) {
24964-			DRM_ERROR("AFBC does not support offset display, xpos=%d, ypos=%d, offset=%d\n", state->src.x1, state->src.y1, fb->offsets[0]);
24965-			return -EINVAL;
24966-		}
24967+		uv_obj = fb->obj[1];
24968+		rk_uv_obj = to_rockchip_obj(uv_obj);
24969 
24970-		if (state->rotation && state->rotation != DRM_MODE_ROTATE_0) {
24971-			DRM_ERROR("No rotation support in AFBC, rotation=%d\n",
24972-				  state->rotation);
24973-			return -EINVAL;
24974-		}
24975+		dma_addr = rk_uv_obj->dma_addr + offset + fb->offsets[1];
24976+		vop_plane_state->uv_mst = dma_addr;
24977 	}
24978 
24979 	return 0;
24980@@ -855,15 +1777,33 @@ static int vop_plane_atomic_check(struct drm_plane *plane,
24981 static void vop_plane_atomic_disable(struct drm_plane *plane,
24982 				     struct drm_plane_state *old_state)
24983 {
24984-	struct vop_win *vop_win = to_vop_win(plane);
24985+	struct vop_win *win = to_vop_win(plane);
24986 	struct vop *vop = to_vop(old_state->crtc);
24987+#if defined(CONFIG_ROCKCHIP_DRM_DEBUG)
24988+	struct vop_plane_state *vop_plane_state =
24989+					to_vop_plane_state(plane->state);
24990+#endif
24991 
24992 	if (!old_state->crtc)
24993 		return;
24994 
24995 	spin_lock(&vop->reg_lock);
24996 
24997-	vop_win_disable(vop, vop_win);
24998+	vop_win_disable(vop, win);
24999+
25000+	/*
25001+	 * IC design bug: under heavy memory bandwidth pressure, disabling win2
25002+	 * can make the VOP access already-freed memory and trigger an iommu
25003+	 * pagefault, so reset the window address as a workaround.
25004+	 */
25005+	if (VOP_MAJOR(vop->version) == 2 && VOP_MINOR(vop->version) == 5 &&
25006+	    win->win_id == 2)
25007+		VOP_WIN_SET(vop, win, yrgb_mst, 0);
25008+
25009+#if defined(CONFIG_ROCKCHIP_DRM_DEBUG)
25010+	kfree(vop_plane_state->planlist);
25011+	vop_plane_state->planlist = NULL;
25012+#endif
25013 
25014 	spin_unlock(&vop->reg_lock);
25015 }
25016@@ -873,26 +1813,46 @@ static void vop_plane_atomic_update(struct drm_plane *plane,
25017 {
25018 	struct drm_plane_state *state = plane->state;
25019 	struct drm_crtc *crtc = state->crtc;
25020-	struct vop_win *vop_win = to_vop_win(plane);
25021-	const struct vop_win_data *win = vop_win->data;
25022-	const struct vop_win_yuv2yuv_data *win_yuv2yuv = vop_win->yuv2yuv_data;
25023+	struct drm_display_mode *mode = NULL;
25024+	struct vop_win *win = to_vop_win(plane);
25025+	struct vop_plane_state *vop_plane_state = to_vop_plane_state(state);
25026+	struct drm_display_mode *adjusted_mode = &crtc->state->adjusted_mode;
25027+	struct rockchip_crtc_state *s;
25028 	struct vop *vop = to_vop(state->crtc);
25029 	struct drm_framebuffer *fb = state->fb;
25030-	unsigned int actual_w, actual_h;
25031+	unsigned int actual_w, actual_h, dsp_w, dsp_h;
25032 	unsigned int dsp_stx, dsp_sty;
25033 	uint32_t act_info, dsp_info, dsp_st;
25034-	struct drm_rect *src = &state->src;
25035-	struct drm_rect *dest = &state->dst;
25036-	struct drm_gem_object *obj, *uv_obj;
25037-	struct rockchip_gem_object *rk_obj, *rk_uv_obj;
25038-	unsigned long offset;
25039-	dma_addr_t dma_addr;
25040+	struct drm_rect *src = &vop_plane_state->src;
25041+	struct drm_rect *dest = &vop_plane_state->dest;
25042+	const uint32_t *y2r_table = vop_plane_state->y2r_table;
25043+	const uint32_t *r2r_table = vop_plane_state->r2r_table;
25044+	const uint32_t *r2y_table = vop_plane_state->r2y_table;
25045 	uint32_t val;
25046-	bool rb_swap;
25047-	int win_index = VOP_WIN_TO_INDEX(vop_win);
25048-	int format;
25049+	bool rb_swap, global_alpha_en;
25050 	int is_yuv = fb->format->is_yuv;
25051-	int i;
25052+
25053+#if defined(CONFIG_ROCKCHIP_DRM_DEBUG)
25054+	bool AFBC_flag = false;
25055+	struct vop_dump_list *planlist;
25056+	unsigned long num_pages;
25057+	struct page **pages;
25058+	struct drm_gem_object *obj;
25059+	struct rockchip_gem_object *rk_obj;
25060+
25061+	num_pages = 0;
25062+	pages = NULL;
25063+	obj = fb->obj[0];
25064+	rk_obj = to_rockchip_obj(obj);
25065+	if (rk_obj) {
25066+		num_pages = rk_obj->num_pages;
25067+		pages = rk_obj->pages;
25068+	}
25069+	if (fb->modifier == DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_16x16))
25070+		AFBC_flag = true;
25071+	else
25072+		AFBC_flag = false;
25073+#endif
25074 
25075 	/*
25076 	 * can't update plane when vop is disabled.
25077@@ -908,206 +1868,426 @@ static void vop_plane_atomic_update(struct drm_plane *plane,
25078 		return;
25079 	}
25080 
25081-	obj = fb->obj[0];
25082-	rk_obj = to_rockchip_obj(obj);
25083-
25084+	mode = &crtc->state->adjusted_mode;
25085 	actual_w = drm_rect_width(src) >> 16;
25086 	actual_h = drm_rect_height(src) >> 16;
25087-	act_info = (actual_h - 1) << 16 | ((actual_w - 1) & 0xffff);
25088-
25089-	dsp_info = (drm_rect_height(dest) - 1) << 16;
25090-	dsp_info |= (drm_rect_width(dest) - 1) & 0xffff;
25091 
25092-	dsp_stx = dest->x1 + crtc->mode.htotal - crtc->mode.hsync_start;
25093-	dsp_sty = dest->y1 + crtc->mode.vtotal - crtc->mode.vsync_start;
25094-	dsp_st = dsp_sty << 16 | (dsp_stx & 0xffff);
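+	/*
+	 * Clamp the destination rectangle to the active display area and shrink
+	 * the source proportionally; the hardware cannot scan out past
+	 * hdisplay/vdisplay.
+	 */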
25095+	dsp_w = drm_rect_width(dest);
25096+	if (dest->x1 + dsp_w > adjusted_mode->hdisplay) {
25097+		DRM_ERROR("%s win%d dest->x1[%d] + dsp_w[%d] exceeds mode hdisplay[%d]\n",
25098+			  crtc->name, win->win_id, dest->x1, dsp_w, adjusted_mode->hdisplay);
25099+		dsp_w = adjusted_mode->hdisplay - dest->x1;
25100+		if (dsp_w < 4)
25101+			dsp_w = 4;
25102+		actual_w = dsp_w * actual_w / drm_rect_width(dest);
25103+	}
25104+	dsp_h = drm_rect_height(dest);
25105+	if (dest->y1 + dsp_h > adjusted_mode->vdisplay) {
25106+		DRM_ERROR("%s win%d dest->y1[%d] + dsp_h[%d] exceeds mode vdisplay[%d]\n",
25107+			  crtc->name, win->win_id, dest->y1, dsp_h, adjusted_mode->vdisplay);
25108+		dsp_h = adjusted_mode->vdisplay - dest->y1;
25109+		if (dsp_h < 4)
25110+			dsp_h = 4;
25111+		actual_h = dsp_h * actual_h / drm_rect_height(dest);
25112+	}
25113 
25114-	offset = (src->x1 >> 16) * fb->format->cpp[0];
25115-	offset += (src->y1 >> 16) * fb->pitches[0];
25116-	dma_addr = rk_obj->dma_addr + offset + fb->offsets[0];
25117+	act_info = (actual_h - 1) << 16 | ((actual_w - 1) & 0xffff);
25118 
25119-	/*
25120-	 * For y-mirroring we need to move address
25121-	 * to the beginning of the last line.
25122-	 */
25123-	if (state->rotation & DRM_MODE_REFLECT_Y)
25124-		dma_addr += (actual_h - 1) * fb->pitches[0];
25125+	dsp_info = (dsp_h - 1) << 16;
25126+	dsp_info |= (dsp_w - 1) & 0xffff;
25127 
25128-	format = vop_convert_format(fb->format->format);
25129+	dsp_stx = dest->x1 + mode->crtc_htotal - mode->crtc_hsync_start;
25130+	dsp_sty = dest->y1 + mode->crtc_vtotal - mode->crtc_vsync_start;
25131+	dsp_st = dsp_sty << 16 | (dsp_stx & 0xffff);
25132 
25133+	s = to_rockchip_crtc_state(crtc->state);
25134 	spin_lock(&vop->reg_lock);
25135 
25136-	if (rockchip_afbc(fb->modifier)) {
25137-		int afbc_format = vop_convert_afbc_format(fb->format->format);
25138-
25139-		VOP_AFBC_SET(vop, format, afbc_format | AFBC_TILE_16x16);
25140-		VOP_AFBC_SET(vop, hreg_block_split, 0);
25141-		VOP_AFBC_SET(vop, win_sel, VOP_WIN_TO_INDEX(vop_win));
25142-		VOP_AFBC_SET(vop, hdr_ptr, dma_addr);
25143-		VOP_AFBC_SET(vop, pic_size, act_info);
25144-	}
25145-
25146-	VOP_WIN_SET(vop, win, format, format);
25147+	VOP_WIN_SET(vop, win, format, vop_plane_state->format);
25148 	VOP_WIN_SET(vop, win, yrgb_vir, DIV_ROUND_UP(fb->pitches[0], 4));
25149-	VOP_WIN_SET(vop, win, yrgb_mst, dma_addr);
25150-	VOP_WIN_YUV2YUV_SET(vop, win_yuv2yuv, y2r_en, is_yuv);
25151-	VOP_WIN_SET(vop, win, y_mir_en,
25152+	VOP_WIN_SET(vop, win, yrgb_mst, vop_plane_state->yrgb_mst);
25153+
25154+	VOP_WIN_SET(vop, win, ymirror,
25155 		    (state->rotation & DRM_MODE_REFLECT_Y) ? 1 : 0);
25156-	VOP_WIN_SET(vop, win, x_mir_en,
25157+	VOP_WIN_SET(vop, win, xmirror,
25158 		    (state->rotation & DRM_MODE_REFLECT_X) ? 1 : 0);
25159 
25160 	if (is_yuv) {
25161-		int hsub = fb->format->hsub;
25162-		int vsub = fb->format->vsub;
25163-		int bpp = fb->format->cpp[1];
25164-
25165-		uv_obj = fb->obj[1];
25166-		rk_uv_obj = to_rockchip_obj(uv_obj);
25167-
25168-		offset = (src->x1 >> 16) * bpp / hsub;
25169-		offset += (src->y1 >> 16) * fb->pitches[1] / vsub;
25170-
25171-		dma_addr = rk_uv_obj->dma_addr + offset + fb->offsets[1];
25172 		VOP_WIN_SET(vop, win, uv_vir, DIV_ROUND_UP(fb->pitches[1], 4));
25173-		VOP_WIN_SET(vop, win, uv_mst, dma_addr);
25174-
25175-		for (i = 0; i < NUM_YUV2YUV_COEFFICIENTS; i++) {
25176-			VOP_WIN_YUV2YUV_COEFFICIENT_SET(vop,
25177-							win_yuv2yuv,
25178-							y2r_coefficients[i],
25179-							bt601_yuv2rgb[i]);
25180-		}
25181+		VOP_WIN_SET(vop, win, uv_mst, vop_plane_state->uv_mst);
25182 	}
25183+	VOP_WIN_SET(vop, win, fmt_10, is_yuv_10bit(fb->format->format));
25184+	VOP_WIN_SET(vop, win, fmt_yuyv, is_yuyv_format(fb->format->format));
25185 
25186 	if (win->phy->scl)
25187 		scl_vop_cal_scl_fac(vop, win, actual_w, actual_h,
25188 				    drm_rect_width(dest), drm_rect_height(dest),
25189-				    fb->format);
25190+				    fb->format->format);
25191 
25192 	VOP_WIN_SET(vop, win, act_info, act_info);
25193 	VOP_WIN_SET(vop, win, dsp_info, dsp_info);
25194 	VOP_WIN_SET(vop, win, dsp_st, dsp_st);
25195 
25196 	rb_swap = has_rb_swapped(fb->format->format);
25197-	VOP_WIN_SET(vop, win, rb_swap, rb_swap);
25198-
25199 	/*
25200-	 * Blending win0 with the background color doesn't seem to work
25201-	 * correctly. We only get the background color, no matter the contents
25202-	 * of the win0 framebuffer.  However, blending pre-multiplied color
25203-	 * with the default opaque black default background color is a no-op,
25204-	 * so we can just disable blending to get the correct result.
25205+	 * The VOP full framework needs an extra rb swap to display rgb888/bgr888 colors correctly.
25206 	 */
25207-	if (fb->format->has_alpha && win_index > 0) {
25208+	if ((fb->format->format == DRM_FORMAT_RGB888 || fb->format->format == DRM_FORMAT_BGR888) &&
25209+	    VOP_MAJOR(vop->version) == 3)
25210+		rb_swap = !rb_swap;
25211+	VOP_WIN_SET(vop, win, rb_swap, rb_swap);
25212+
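+	/*
+	 * Enable alpha blending unless this window is selected as the bottom
+	 * layer; pick per-pixel, global or combined alpha based on the format
+	 * and the global alpha value.
+	 */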
25213+	global_alpha_en = (vop_plane_state->global_alpha == 0xff) ? 0 : 1;
25214+	if ((is_alpha_support(fb->format->format) || global_alpha_en) &&
25215+	    (s->dsp_layer_sel & 0x3) != win->win_id) {
25216+		int src_blend_m0;
25217+
25218+		if (is_alpha_support(fb->format->format) && global_alpha_en)
25219+			src_blend_m0 = ALPHA_PER_PIX_GLOBAL;
25220+		else if (is_alpha_support(fb->format->format))
25221+			src_blend_m0 = ALPHA_PER_PIX;
25222+		else
25223+			src_blend_m0 = ALPHA_GLOBAL;
25224+
25225 		VOP_WIN_SET(vop, win, dst_alpha_ctl,
25226 			    DST_FACTOR_M0(ALPHA_SRC_INVERSE));
25227 		val = SRC_ALPHA_EN(1) | SRC_COLOR_M0(ALPHA_SRC_PRE_MUL) |
25228 			SRC_ALPHA_M0(ALPHA_STRAIGHT) |
25229-			SRC_BLEND_M0(ALPHA_PER_PIX) |
25230-			SRC_ALPHA_CAL_M0(ALPHA_NO_SATURATION) |
25231-			SRC_FACTOR_M0(ALPHA_ONE);
25232+			SRC_BLEND_M0(src_blend_m0) |
25233+			SRC_ALPHA_CAL_M0(ALPHA_SATURATION) |
25234+			SRC_FACTOR_M0(global_alpha_en ?
25235+				      ALPHA_SRC_GLOBAL : ALPHA_ONE);
25236 		VOP_WIN_SET(vop, win, src_alpha_ctl, val);
25237-
25238-		VOP_WIN_SET(vop, win, alpha_pre_mul, ALPHA_SRC_PRE_MUL);
25239-		VOP_WIN_SET(vop, win, alpha_mode, ALPHA_PER_PIX);
25240+		VOP_WIN_SET(vop, win, alpha_pre_mul,
25241+			    vop_plane_state->blend_mode == DRM_MODE_BLEND_PREMULTI ? 1 : 0);
25242+		VOP_WIN_SET(vop, win, alpha_mode, 1);
25243 		VOP_WIN_SET(vop, win, alpha_en, 1);
25244 	} else {
25245 		VOP_WIN_SET(vop, win, src_alpha_ctl, SRC_ALPHA_EN(0));
25246 		VOP_WIN_SET(vop, win, alpha_en, 0);
25247 	}
25248-
25249+	VOP_WIN_SET(vop, win, global_alpha_val, vop_plane_state->global_alpha);
25250+
25251+	VOP_WIN_SET(vop, win, csc_mode, vop_plane_state->csc_mode);
25252+	if (win->csc) {
25253+		vop_load_csc_table(vop, win->csc->y2r_offset, y2r_table);
25254+		vop_load_csc_table(vop, win->csc->r2r_offset, r2r_table);
25255+		vop_load_csc_table(vop, win->csc->r2y_offset, r2y_table);
25256+		VOP_WIN_SET_EXT(vop, win, csc, y2r_en, vop_plane_state->y2r_en);
25257+		VOP_WIN_SET_EXT(vop, win, csc, r2r_en, vop_plane_state->r2r_en);
25258+		VOP_WIN_SET_EXT(vop, win, csc, r2y_en, vop_plane_state->r2y_en);
25259+		VOP_WIN_SET_EXT(vop, win, csc, csc_mode, vop_plane_state->csc_mode);
25260+	}
25261 	VOP_WIN_SET(vop, win, enable, 1);
25262-	vop->win_enabled |= BIT(win_index);
25263+	VOP_WIN_SET(vop, win, gate, 1);
25264 	spin_unlock(&vop->reg_lock);
25265+	/*
25266+	 * Hook point for an SPI panel interface; it would consume
25267+	 * (vop_plane_state->yrgb_kvaddr, fb->pixel_format, actual_w, actual_h).
25268+	 */
25269+	vop->is_iommu_needed = true;
25270+#if defined(CONFIG_ROCKCHIP_DRM_DEBUG)
25271+	kfree(vop_plane_state->planlist);
25272+	vop_plane_state->planlist = NULL;
25273+
25274+	planlist = kmalloc(sizeof(*planlist), GFP_KERNEL);
25275+	if (planlist) {
25276+		planlist->dump_info.AFBC_flag = AFBC_flag;
25277+		planlist->dump_info.area_id = win->area_id;
25278+		planlist->dump_info.win_id = win->win_id;
25279+		planlist->dump_info.yuv_format =
25280+			is_yuv_support(fb->format->format);
25281+		planlist->dump_info.num_pages = num_pages;
25282+		planlist->dump_info.pages = pages;
25283+		planlist->dump_info.offset = vop_plane_state->offset;
25284+		planlist->dump_info.pitches = fb->pitches[0];
25285+		planlist->dump_info.height = actual_h;
25286+		planlist->dump_info.format = fb->format;
25287+		list_add_tail(&planlist->entry, &vop->rockchip_crtc.vop_dump_list_head);
25288+		vop_plane_state->planlist = planlist;
25289+	} else {
25290+		DRM_ERROR("failed to allocate a vop dump planlist node\n");
25291+		return;
25292+	}
25293+	if (vop->rockchip_crtc.vop_dump_status == DUMP_KEEP ||
25294+	    vop->rockchip_crtc.vop_dump_times > 0) {
25295+		rockchip_drm_dump_plane_buffer(&planlist->dump_info, vop->rockchip_crtc.frame_count);
25296+		vop->rockchip_crtc.vop_dump_times--;
25297+	}
25298+#endif
25299 }
25300 
25301-static int vop_plane_atomic_async_check(struct drm_plane *plane,
25302-					struct drm_plane_state *state)
25303+static const struct drm_plane_helper_funcs plane_helper_funcs = {
25304+	.prepare_fb = vop_plane_prepare_fb,
25305+	.cleanup_fb = vop_plane_cleanup_fb,
25306+	.atomic_check = vop_plane_atomic_check,
25307+	.atomic_update = vop_plane_atomic_update,
25308+	.atomic_disable = vop_plane_atomic_disable,
25309+};
25310+
25311+/**
25312+ * rockchip_atomic_helper_update_plane - copied from drm_atomic_helper_update_plane
25313+ * and extended to support async commits from the DRM_IOCTL_MODE_SETPLANE ioctl.
25314+ * @plane: plane object to update
25315+ * @crtc: owning CRTC of owning plane
25316+ * @fb: framebuffer to flip onto plane
25317+ * @crtc_x: x offset of primary plane on crtc
25318+ * @crtc_y: y offset of primary plane on crtc
25319+ * @crtc_w: width of primary plane rectangle on crtc
25320+ * @crtc_h: height of primary plane rectangle on crtc
25321+ * @src_x: x offset of @fb for panning
25322+ * @src_y: y offset of @fb for panning
25323+ * @src_w: width of source rectangle in @fb
25324+ * @src_h: height of source rectangle in @fb
25325+ * @ctx: lock acquire context
25326+ *
25327+ * Provides a default plane update handler using the atomic driver interface.
25328+ *
25329+ * RETURNS:
25330+ * Zero on success, error code on failure
25331+ */
25332+static int __maybe_unused
25333+rockchip_atomic_helper_update_plane(struct drm_plane *plane,
25334+				    struct drm_crtc *crtc,
25335+				    struct drm_framebuffer *fb,
25336+				    int crtc_x, int crtc_y,
25337+				    unsigned int crtc_w, unsigned int crtc_h,
25338+				    uint32_t src_x, uint32_t src_y,
25339+				    uint32_t src_w, uint32_t src_h,
25340+				    struct drm_modeset_acquire_ctx *ctx)
25341 {
25342-	struct vop_win *vop_win = to_vop_win(plane);
25343-	const struct vop_win_data *win = vop_win->data;
25344-	int min_scale = win->phy->scl ? FRAC_16_16(1, 8) :
25345-					DRM_PLANE_HELPER_NO_SCALING;
25346-	int max_scale = win->phy->scl ? FRAC_16_16(8, 1) :
25347-					DRM_PLANE_HELPER_NO_SCALING;
25348-	struct drm_crtc_state *crtc_state;
25349+	struct drm_atomic_state *state;
25350+	struct drm_plane_state *plane_state;
25351+	struct vop_plane_state *vop_plane_state;
25352+	int ret = 0;
25353 
25354-	if (plane != state->crtc->cursor)
25355-		return -EINVAL;
25356+	state = drm_atomic_state_alloc(plane->dev);
25357+	if (!state)
25358+		return -ENOMEM;
25359 
25360-	if (!plane->state)
25361-		return -EINVAL;
25362+	state->acquire_ctx = ctx;
25363+	plane_state = drm_atomic_get_plane_state(state, plane);
25364+	if (IS_ERR(plane_state)) {
25365+		ret = PTR_ERR(plane_state);
25366+		goto fail;
25367+	}
25368 
25369-	if (!plane->state->fb)
25370-		return -EINVAL;
25371+	vop_plane_state = to_vop_plane_state(plane_state);
25372+
25373+	ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
25374+	if (ret != 0)
25375+		goto fail;
25376+	drm_atomic_set_fb_for_plane(plane_state, fb);
25377+	plane_state->crtc_x = crtc_x;
25378+	plane_state->crtc_y = crtc_y;
25379+	plane_state->crtc_w = crtc_w;
25380+	plane_state->crtc_h = crtc_h;
25381+	plane_state->src_x = src_x;
25382+	plane_state->src_y = src_y;
25383+	plane_state->src_w = src_w;
25384+	plane_state->src_h = src_h;
25385+
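+	/*
+	 * Cursor updates and planes flagged for async commit go through the
+	 * legacy cursor path so the commit does not block on vblank.
+	 */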
25386+	if (plane == crtc->cursor || vop_plane_state->async_commit)
25387+		state->legacy_cursor_update = true;
25388+
25389+	ret = drm_atomic_commit(state);
25390+fail:
25391+	drm_atomic_state_put(state);
25392+	return ret;
25393+}
25394+
25395+/**
25396+ * rockchip_atomic_helper_disable_plane - copied from drm_atomic_helper_disable_plane
25397+ * and extended to support async commits from the DRM_IOCTL_MODE_SETPLANE ioctl.
25398+ *
25399+ * @plane: plane to disable
25400+ * @ctx: lock acquire context
25401+ *
25402+ * Provides a default plane disable handler using the atomic driver interface.
25403+ *
25404+ * RETURNS:
25405+ * Zero on success, error code on failure
25406+ */
25407+static int __maybe_unused
25408+rockchip_atomic_helper_disable_plane(struct drm_plane *plane,
25409+				     struct drm_modeset_acquire_ctx *ctx)
25410+{
25411+	struct drm_atomic_state *state;
25412+	struct drm_plane_state *plane_state;
25413+	struct vop_plane_state *vop_plane_state;
25414+	int ret = 0;
25415+
25416+	state = drm_atomic_state_alloc(plane->dev);
25417+	if (!state)
25418+		return -ENOMEM;
25419+
25420+	state->acquire_ctx = ctx;
25421+	plane_state = drm_atomic_get_plane_state(state, plane);
25422+	if (IS_ERR(plane_state)) {
25423+		ret = PTR_ERR(plane_state);
25424+		goto fail;
25425+	}
25426+	vop_plane_state = to_vop_plane_state(plane_state);
25427+
25428+	if ((plane_state->crtc && plane_state->crtc->cursor == plane) ||
25429+	    vop_plane_state->async_commit)
25430+		plane_state->state->legacy_cursor_update = true;
25431 
25432-	if (state->state)
25433-		crtc_state = drm_atomic_get_existing_crtc_state(state->state,
25434-								state->crtc);
25435-	else /* Special case for asynchronous cursor updates. */
25436-		crtc_state = plane->crtc->state;
25437+	ret = __drm_atomic_helper_disable_plane(plane, plane_state);
25438+	if (ret != 0)
25439+		goto fail;
25440 
25441-	return drm_atomic_helper_check_plane_state(plane->state, crtc_state,
25442-						   min_scale, max_scale,
25443-						   true, true);
25444+	ret = drm_atomic_commit(state);
25445+fail:
25446+	drm_atomic_state_put(state);
25447+	return ret;
25448 }
25449 
25450-static void vop_plane_atomic_async_update(struct drm_plane *plane,
25451-					  struct drm_plane_state *new_state)
25452+static void vop_plane_destroy(struct drm_plane *plane)
25453 {
25454-	struct vop *vop = to_vop(plane->state->crtc);
25455-	struct drm_framebuffer *old_fb = plane->state->fb;
25456+	drm_plane_cleanup(plane);
25457+}
25458 
25459-	plane->state->crtc_x = new_state->crtc_x;
25460-	plane->state->crtc_y = new_state->crtc_y;
25461-	plane->state->crtc_h = new_state->crtc_h;
25462-	plane->state->crtc_w = new_state->crtc_w;
25463-	plane->state->src_x = new_state->src_x;
25464-	plane->state->src_y = new_state->src_y;
25465-	plane->state->src_h = new_state->src_h;
25466-	plane->state->src_w = new_state->src_w;
25467-	swap(plane->state->fb, new_state->fb);
25468+static void vop_atomic_plane_reset(struct drm_plane *plane)
25469+{
25470+	struct vop_plane_state *vop_plane_state =
25471+					to_vop_plane_state(plane->state);
25472+	struct vop_win *win = to_vop_win(plane);
25473+
25474+	if (plane->state && plane->state->fb)
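+	/* Free any previous state and start again from zeroed defaults */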
25475+		__drm_atomic_helper_plane_destroy_state(plane->state);
25476+	kfree(vop_plane_state);
25477+	vop_plane_state = kzalloc(sizeof(*vop_plane_state), GFP_KERNEL);
25478+	if (!vop_plane_state)
25479+		return;
25480 
25481-	if (vop->is_enabled) {
25482-		vop_plane_atomic_update(plane, plane->state);
25483-		spin_lock(&vop->reg_lock);
25484-		vop_cfg_done(vop);
25485-		spin_unlock(&vop->reg_lock);
25486+	__drm_atomic_helper_plane_reset(plane, &vop_plane_state->base);
25487+	win->state.zpos = win->zpos;
25488+	vop_plane_state->global_alpha = 0xff;
25489+}
25490 
25491-		/*
25492-		 * A scanout can still be occurring, so we can't drop the
25493-		 * reference to the old framebuffer. To solve this we get a
25494-		 * reference to old_fb and set a worker to release it later.
25495-		 * FIXME: if we perform 500 async_update calls before the
25496-		 * vblank, then we can have 500 different framebuffers waiting
25497-		 * to be released.
25498-		 */
25499-		if (old_fb && plane->state->fb != old_fb) {
25500-			drm_framebuffer_get(old_fb);
25501-			WARN_ON(drm_crtc_vblank_get(plane->state->crtc) != 0);
25502-			drm_flip_work_queue(&vop->fb_unref_work, old_fb);
25503-			set_bit(VOP_PENDING_FB_UNREF, &vop->pending);
25504-		}
25505+static struct drm_plane_state *
25506+vop_atomic_plane_duplicate_state(struct drm_plane *plane)
25507+{
25508+	struct vop_plane_state *old_vop_plane_state;
25509+	struct vop_plane_state *vop_plane_state;
25510+
25511+	if (WARN_ON(!plane->state))
25512+		return NULL;
25513+
25514+	old_vop_plane_state = to_vop_plane_state(plane->state);
25515+	vop_plane_state = kmemdup(old_vop_plane_state,
25516+				  sizeof(*vop_plane_state), GFP_KERNEL);
25517+	if (!vop_plane_state)
25518+		return NULL;
25519+
25520+	__drm_atomic_helper_plane_duplicate_state(plane,
25521+						  &vop_plane_state->base);
25522+
25523+	return &vop_plane_state->base;
25524+}
25525+
25526+static void vop_atomic_plane_destroy_state(struct drm_plane *plane,
25527+					   struct drm_plane_state *state)
25528+{
25529+	struct vop_plane_state *vop_state = to_vop_plane_state(state);
25530+
25531+	__drm_atomic_helper_plane_destroy_state(state);
25532+
25533+	kfree(vop_state);
25534+}
25535+
25536+static int vop_atomic_plane_set_property(struct drm_plane *plane,
25537+					 struct drm_plane_state *state,
25538+					 struct drm_property *property,
25539+					 uint64_t val)
25540+{
25541+	struct rockchip_drm_private *private = plane->dev->dev_private;
25542+	struct vop_win *win = to_vop_win(plane);
25543+	struct vop_plane_state *plane_state = to_vop_plane_state(state);
25544+
25545+	if (property == private->eotf_prop) {
25546+		plane_state->eotf = val;
25547+		return 0;
25548+	}
25549+
25550+	if (property == private->color_space_prop) {
25551+		plane_state->color_space = val;
25552+		return 0;
25553+	}
25554+
25555+	if (property == private->async_commit_prop) {
25556+		plane_state->async_commit = val;
25557+		return 0;
25558+	}
25559+
25560+	if (property == win->color_key_prop) {
25561+		plane_state->color_key = val;
25562+		return 0;
25563 	}
25564+
25565+	DRM_ERROR("failed to set vop plane property id:%d, name:%s\n",
25566+		   property->base.id, property->name);
25567+
25568+	return -EINVAL;
25569 }
25570 
25571-static const struct drm_plane_helper_funcs plane_helper_funcs = {
25572-	.atomic_check = vop_plane_atomic_check,
25573-	.atomic_update = vop_plane_atomic_update,
25574-	.atomic_disable = vop_plane_atomic_disable,
25575-	.atomic_async_check = vop_plane_atomic_async_check,
25576-	.atomic_async_update = vop_plane_atomic_async_update,
25577-	.prepare_fb = drm_gem_fb_prepare_fb,
25578-};
25579+static int vop_atomic_plane_get_property(struct drm_plane *plane,
25580+					 const struct drm_plane_state *state,
25581+					 struct drm_property *property,
25582+					 uint64_t *val)
25583+{
25584+	struct vop_plane_state *plane_state = to_vop_plane_state(state);
25585+	struct vop_win *win = to_vop_win(plane);
25586+	struct rockchip_drm_private *private = plane->dev->dev_private;
25587+
25588+	if (property == private->eotf_prop) {
25589+		*val = plane_state->eotf;
25590+		return 0;
25591+	}
25592+
25593+	if (property == private->color_space_prop) {
25594+		*val = plane_state->color_space;
25595+		return 0;
25596+	}
25597+
25598+	if (property == private->async_commit_prop) {
25599+		*val = plane_state->async_commit;
25600+		return 0;
25601+	}
25602+
25603+	if (property == private->share_id_prop) {
25604+		int i;
25605+		struct drm_mode_object *obj = &plane->base;
25606+
25607+		for (i = 0; i < obj->properties->count; i++) {
25608+			if (obj->properties->properties[i] == property) {
25609+				*val = obj->properties->values[i];
25610+				return 0;
25611+			}
25612+		}
25613+	}
25614+
25615+	if (property == win->color_key_prop) {
25616+		*val = plane_state->color_key;
25617+		return 0;
25618+	}
25619+
25620+	DRM_ERROR("failed to get vop plane property id:%d, name:%s\n",
25621+		   property->base.id, property->name);
25622+
25623+	return -EINVAL;
25624+}
25625 
25626 static const struct drm_plane_funcs vop_plane_funcs = {
25627-	.update_plane	= drm_atomic_helper_update_plane,
25628-	.disable_plane	= drm_atomic_helper_disable_plane,
25629+	.update_plane	= rockchip_atomic_helper_update_plane,
25630+	.disable_plane	= rockchip_atomic_helper_disable_plane,
25631 	.destroy = vop_plane_destroy,
25632-	.reset = drm_atomic_helper_plane_reset,
25633-	.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
25634-	.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
25635-	.format_mod_supported = rockchip_mod_supported,
25636+	.reset = vop_atomic_plane_reset,
25637+	.atomic_duplicate_state = vop_atomic_plane_duplicate_state,
25638+	.atomic_destroy_state = vop_atomic_plane_destroy_state,
25639+	.atomic_set_property = vop_atomic_plane_set_property,
25640+	.atomic_get_property = vop_atomic_plane_get_property,
25641 };
25642 
25643 static int vop_crtc_enable_vblank(struct drm_crtc *crtc)
25644@@ -1118,278 +2298,1434 @@ static int vop_crtc_enable_vblank(struct drm_crtc *crtc)
25645 	if (WARN_ON(!vop->is_enabled))
25646 		return -EPERM;
25647 
25648-	spin_lock_irqsave(&vop->irq_lock, flags);
25649+	spin_lock_irqsave(&vop->irq_lock, flags);
25650+
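+	/* VOP >= 3.7 reports frame start through the field interrupt */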
25651+	if (VOP_MAJOR(vop->version) == 3 && VOP_MINOR(vop->version) >= 7) {
25652+		VOP_INTR_SET_TYPE(vop, clear, FS_FIELD_INTR, 1);
25653+		VOP_INTR_SET_TYPE(vop, enable, FS_FIELD_INTR, 1);
25654+	} else {
25655+		VOP_INTR_SET_TYPE(vop, clear, FS_INTR, 1);
25656+		VOP_INTR_SET_TYPE(vop, enable, FS_INTR, 1);
25657+	}
25658+
25659+	spin_unlock_irqrestore(&vop->irq_lock, flags);
25660+
25661+	return 0;
25662+}
25663+
25664+static void vop_crtc_disable_vblank(struct drm_crtc *crtc)
25665+{
25666+	struct vop *vop = to_vop(crtc);
25667+	unsigned long flags;
25668+
25669+	if (WARN_ON(!vop->is_enabled))
25670+		return;
25671+
25672+	spin_lock_irqsave(&vop->irq_lock, flags);
25673+
25674+	if (VOP_MAJOR(vop->version) == 3 && VOP_MINOR(vop->version) >= 7)
25675+		VOP_INTR_SET_TYPE(vop, enable, FS_FIELD_INTR, 0);
25676+	else
25677+		VOP_INTR_SET_TYPE(vop, enable, FS_INTR, 0);
25678+
25679+	spin_unlock_irqrestore(&vop->irq_lock, flags);
25680+}
25681+
25682+static void vop_crtc_cancel_pending_vblank(struct drm_crtc *crtc,
25683+					   struct drm_file *file_priv)
25684+{
25685+	struct drm_device *drm = crtc->dev;
25686+	struct vop *vop = to_vop(crtc);
25687+	struct drm_pending_vblank_event *e;
25688+	unsigned long flags;
25689+
25690+	spin_lock_irqsave(&drm->event_lock, flags);
25691+	e = vop->event;
25692+	if (e && e->base.file_priv == file_priv) {
25693+		vop->event = NULL;
25694+
25695+		/* e->base.destroy(&e->base);//todo */
25696+		file_priv->event_space += sizeof(e->event);
25697+	}
25698+	spin_unlock_irqrestore(&drm->event_lock, flags);
25699+}
25700+
25701+static int vop_crtc_loader_protect(struct drm_crtc *crtc, bool on)
25702+{
25703+	struct rockchip_drm_private *private = crtc->dev->dev_private;
25704+	struct vop *vop = to_vop(crtc);
25705+	int sys_status = drm_crtc_index(crtc) ?
25706+				SYS_STATUS_LCDC1 : SYS_STATUS_LCDC0;
25707+
25708+	if (on == vop->loader_protect)
25709+		return 0;
25710+
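+	/*
+	 * Take over a display the bootloader already enabled: claim the dclk
+	 * parent PLL, mark the LCDC busy in the system status and bring the
+	 * crtc into an initialized state.
+	 */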
25711+	if (on) {
25712+		if (vop->dclk_source) {
25713+			struct clk *parent;
25714+
25715+			parent = clk_get_parent(vop->dclk_source);
25716+			if (parent) {
25717+				if (clk_is_match(private->default_pll.pll, parent))
25718+					vop->pll = &private->default_pll;
25719+				else if (clk_is_match(private->hdmi_pll.pll, parent))
25720+					vop->pll = &private->hdmi_pll;
25721+				if (vop->pll)
25722+					vop->pll->use_count++;
25723+			}
25724+		}
25725+
25726+		rockchip_set_system_status(sys_status);
25727+		vop_initial(crtc);
25728+		drm_crtc_vblank_on(crtc);
25729+		vop->loader_protect = true;
25730+	} else {
25731+		vop_crtc_atomic_disable(crtc, NULL);
25732+
25733+		if (vop->dclk_source && vop->pll) {
25734+			vop->pll->use_count--;
25735+			vop->pll = NULL;
25736+		}
25737+		vop->loader_protect = false;
25738+	}
25739+
25740+	return 0;
25741+}
25742+
25743+#define DEBUG_PRINT(args...) \
25744+		do { \
25745+			if (s) \
25746+				seq_printf(s, args); \
25747+			else \
25748+				pr_err(args); \
25749+		} while (0)
25750+
25751+static int vop_plane_info_dump(struct seq_file *s, struct drm_plane *plane)
25752+{
25753+	struct vop_win *win = to_vop_win(plane);
25754+	struct drm_plane_state *state = plane->state;
25755+	struct vop_plane_state *pstate = to_vop_plane_state(state);
25756+	struct drm_rect *src, *dest;
25757+	struct drm_framebuffer *fb = state->fb;
25758+	struct drm_format_name_buf format_name;
25759+	int i;
25760+	struct drm_gem_object *obj;
25761+	struct rockchip_gem_object *rk_obj;
25762+	dma_addr_t fb_addr;
25763+	u64 afbdc_format =
25764+		DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_16x16);
25765+
25766+	DEBUG_PRINT("    win%d-%d: %s\n", win->win_id, win->area_id,
25767+		    state->crtc ? "ACTIVE" : "DISABLED");
25768+	if (!fb)
25769+		return 0;
25770+
25771+	src = &pstate->src;
25772+	dest = &pstate->dest;
25773+
25774+	drm_get_format_name(fb->format->format, &format_name);
25775+	DEBUG_PRINT("\tformat: %s%s%s[%d] color_space[%d]\n",
25776+		    format_name.str,
25777+		    fb->modifier == afbdc_format ? "[AFBC]" : "",
25778+		    pstate->eotf ? " HDR" : " SDR", pstate->eotf,
25779+		    pstate->color_space);
25780+	DEBUG_PRINT("\tcsc: y2r[%d] r2r[%d] r2y[%d] csc mode[%d]\n",
25781+		    pstate->y2r_en, pstate->r2r_en, pstate->r2y_en,
25782+		    pstate->csc_mode);
25783+	DEBUG_PRINT("\tzpos: %d\n", pstate->zpos);
25784+	DEBUG_PRINT("\tsrc: pos[%dx%d] rect[%dx%d]\n", src->x1 >> 16,
25785+		    src->y1 >> 16, drm_rect_width(src) >> 16,
25786+		    drm_rect_height(src) >> 16);
25787+	DEBUG_PRINT("\tdst: pos[%dx%d] rect[%dx%d]\n", dest->x1, dest->y1,
25788+		    drm_rect_width(dest), drm_rect_height(dest));
25789+
25790+	for (i = 0; i < fb->format->num_planes; i++) {
25791+		obj = fb->obj[0];
25792+		rk_obj = to_rockchip_obj(obj);
25793+		fb_addr = rk_obj->dma_addr + fb->offsets[0];
25794+
25795+		DEBUG_PRINT("\tbuf[%d]: addr: %pad pitch: %d offset: %d\n",
25796+			    i, &fb_addr, fb->pitches[i], fb->offsets[i]);
25797+	}
25798+
25799+	return 0;
25800+}
25801+
25802+static void vop_dump_connector_on_crtc(struct drm_crtc *crtc, struct seq_file *s)
25803+{
25804+	struct drm_connector_list_iter conn_iter;
25805+	struct drm_connector *connector;
25806+
25807+	drm_connector_list_iter_begin(crtc->dev, &conn_iter);
25808+	drm_for_each_connector_iter(connector, &conn_iter) {
25809+		if (crtc->state->connector_mask & drm_connector_mask(connector))
25810+			DEBUG_PRINT("    Connector: %s\n", connector->name);
25811+
25812+	}
25813+	drm_connector_list_iter_end(&conn_iter);
25814+}
25815+
25816+static int vop_crtc_debugfs_dump(struct drm_crtc *crtc, struct seq_file *s)
25817+{
25818+	struct vop *vop = to_vop(crtc);
25819+	struct drm_crtc_state *crtc_state = crtc->state;
25820+	struct drm_display_mode *mode = &crtc->state->adjusted_mode;
25821+	struct rockchip_crtc_state *state = to_rockchip_crtc_state(crtc->state);
25822+	bool interlaced = !!(mode->flags & DRM_MODE_FLAG_INTERLACE);
25823+	struct drm_plane *plane;
25824+	int i;
25825+
25826+	DEBUG_PRINT("VOP [%s]: %s\n", dev_name(vop->dev),
25827+		    crtc_state->active ? "ACTIVE" : "DISABLED");
25828+
25829+	if (!crtc_state->active)
25830+		return 0;
25831+
25832+	vop_dump_connector_on_crtc(crtc, s);
25833+	DEBUG_PRINT("\tbus_format[%x]: %s\n", state->bus_format,
25834+		    drm_get_bus_format_name(state->bus_format));
25835+	DEBUG_PRINT("\toverlay_mode[%d] output_mode[%x]",
25836+		    state->yuv_overlay, state->output_mode);
25837+	DEBUG_PRINT(" color_space[%d]\n",
25838+		    state->color_space);
25839+	DEBUG_PRINT("    Display mode: %dx%d%s%d\n",
25840+		    mode->hdisplay, mode->vdisplay, interlaced ? "i" : "p",
25841+		    drm_mode_vrefresh(mode));
25842+	DEBUG_PRINT("\tclk[%d] real_clk[%d] type[%x] flag[%x]\n",
25843+		    mode->clock, mode->crtc_clock, mode->type, mode->flags);
25844+	DEBUG_PRINT("\tH: %d %d %d %d\n", mode->hdisplay, mode->hsync_start,
25845+		    mode->hsync_end, mode->htotal);
25846+	DEBUG_PRINT("\tV: %d %d %d %d\n", mode->vdisplay, mode->vsync_start,
25847+		    mode->vsync_end, mode->vtotal);
25848+
25849+	for (i = 0; i < vop->num_wins; i++) {
25850+		plane = &vop->win[i].base;
25851+		vop_plane_info_dump(s, plane);
25852+	}
25853+	DEBUG_PRINT("    post: sdr2hdr[%d] hdr2sdr[%d]\n",
25854+		    state->hdr.sdr2hdr_state.bt1886eotf_post_conv_en,
25855+		    state->hdr.hdr2sdr_en);
25856+	DEBUG_PRINT("    pre : sdr2hdr[%d]\n",
25857+		    state->hdr.sdr2hdr_state.bt1886eotf_pre_conv_en);
25858+	DEBUG_PRINT("    post CSC: r2y[%d] y2r[%d] CSC mode[%d]\n",
25859+		    state->post_r2y_en, state->post_y2r_en,
25860+		    state->post_csc_mode);
25861+
25862+	return 0;
25863+}
25864+
25865+static void vop_crtc_regs_dump(struct drm_crtc *crtc, struct seq_file *s)
25866+{
25867+	struct vop *vop = to_vop(crtc);
25868+	struct drm_crtc_state *crtc_state = crtc->state;
25869+	int dump_len = vop->len > 0x400 ? 0x400 : vop->len;
25870+	int i;
25871+
25872+	if (!crtc_state->active)
25873+		return;
25874+
25875+	for (i = 0; i < dump_len; i += 16) {
25876+		DEBUG_PRINT("0x%08x: %08x %08x %08x %08x\n", i,
25877+			    vop_readl(vop, i), vop_readl(vop, i + 4),
25878+			    vop_readl(vop, i + 8), vop_readl(vop, i + 12));
25879+	}
25880+}
25881+
25882+static int vop_gamma_show(struct seq_file *s, void *data)
25883+{
25884+	struct drm_info_node *node = s->private;
25885+	struct vop *vop = node->info_ent->data;
25886+	int i;
25887+
25888+	if (!vop->lut || !vop->lut_active || !vop->lut_regs)
25889+		return 0;
25890+
25891+	for (i = 0; i < vop->lut_len; i++) {
25892+		if (i % 8 == 0)
25893+			DEBUG_PRINT("\n");
25894+		DEBUG_PRINT("0x%08x ", vop->lut[i]);
25895+	}
25896+	DEBUG_PRINT("\n");
25897+
25898+	return 0;
25899+}
25900+
25901+#undef DEBUG_PRINT
25902+
25903+static struct drm_info_list vop_debugfs_files[] = {
25904+	{ "gamma_lut", vop_gamma_show, 0, NULL },
25905+};
25906+
25907+static int vop_crtc_debugfs_init(struct drm_minor *minor, struct drm_crtc *crtc)
25908+{
25909+	struct vop *vop = to_vop(crtc);
25910+	int ret, i;
25911+
25912+	vop->debugfs = debugfs_create_dir(dev_name(vop->dev),
25913+					  minor->debugfs_root);
25914+
25915+	if (!vop->debugfs)
25916+		return -ENOMEM;
25917+
25918+	vop->debugfs_files = kmemdup(vop_debugfs_files,
25919+				     sizeof(vop_debugfs_files),
25920+				     GFP_KERNEL);
25921+	if (!vop->debugfs_files) {
25922+		ret = -ENOMEM;
25923+		goto remove;
25924+	}
25925+#if defined(CONFIG_ROCKCHIP_DRM_DEBUG)
25926+	rockchip_drm_add_dump_buffer(crtc, vop->debugfs);
25927+#endif
25928+	for (i = 0; i < ARRAY_SIZE(vop_debugfs_files); i++)
25929+		vop->debugfs_files[i].data = vop;
25930+
25931+	drm_debugfs_create_files(vop->debugfs_files, ARRAY_SIZE(vop_debugfs_files),
25932+				 vop->debugfs, minor);
25933+
25934+	return 0;
25935+remove:
25936+	debugfs_remove(vop->debugfs);
25937+	vop->debugfs = NULL;
25938+	return ret;
25939+}
25940+
25941+static enum drm_mode_status
25942+vop_crtc_mode_valid(struct drm_crtc *crtc, const struct drm_display_mode *mode,
25943+		    int output_type)
25944+{
25945+	struct vop *vop = to_vop(crtc);
25946+	const struct vop_data *vop_data = vop->data;
25947+	int request_clock = mode->clock;
25948+	int clock;
25949+
25950+	if (mode->hdisplay > vop_data->max_output.width)
25951+		return MODE_BAD_HVALUE;
25952+
25953+	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) &&
25954+	    VOP_MAJOR(vop->version) == 3 &&
25955+	    VOP_MINOR(vop->version) <= 2)
25956+		return MODE_BAD;
25957+
25958+	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
25959+		request_clock *= 2;
25960+	clock = clk_round_rate(vop->dclk, request_clock * 1000) / 1000;
25961+
25962+	/*
25963+	 * HDMI and DisplayPort require an exact pixel clock.
25964+	 */
25965+	if (output_type == DRM_MODE_CONNECTOR_HDMIA ||
25966+	    output_type == DRM_MODE_CONNECTOR_DisplayPort)
25967+		if (clock != request_clock)
25968+			return MODE_CLOCK_RANGE;
25969+
25970+	return MODE_OK;
25971+}
25972+
25973+struct vop_bandwidth {
25974+	size_t bandwidth;
25975+	int y1;
25976+	int y2;
25977+};
25978+
25979+static int vop_bandwidth_cmp(const void *a, const void *b)
25980+{
25981+	struct vop_bandwidth *pa = (struct vop_bandwidth *)a;
25982+	struct vop_bandwidth *pb = (struct vop_bandwidth *)b;
25983+
25984+	return pa->y1 - pb->y2;
25985+}
25986+
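+/*
+ * Estimate the memory bandwidth a plane needs per output line: bytes per
+ * source line scaled by the horizontal and vertical scaling ratios, with a
+ * discount when vertical skip-line fetching is available.
+ */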
25987+static size_t vop_plane_line_bandwidth(struct drm_plane_state *pstate)
25988+{
25989+	struct vop_plane_state *vop_plane_state = to_vop_plane_state(pstate);
25990+	struct vop_win *win = to_vop_win(pstate->plane);
25991+	struct drm_crtc *crtc = pstate->crtc;
25992+	struct vop *vop = to_vop(crtc);
25993+	struct drm_framebuffer *fb = pstate->fb;
25994+	struct drm_rect *dest = &vop_plane_state->dest;
25995+	struct drm_rect *src = &vop_plane_state->src;
25996+	int bpp = fb->format->cpp[0] << 3;
25997+	int src_width = drm_rect_width(src) >> 16;
25998+	int src_height = drm_rect_height(src) >> 16;
25999+	int dest_width = drm_rect_width(dest);
26000+	int dest_height = drm_rect_height(dest);
26001+	int vskiplines = scl_get_vskiplines(src_height, dest_height);
26002+	size_t bandwidth;
26003+
26004+	if (src_width <= 0 || src_height <= 0 || dest_width <= 0 ||
26005+	    dest_height <= 0)
26006+		return 0;
26007+
26008+	bandwidth = src_width * bpp / 8;
26009+
26010+	bandwidth = bandwidth * src_width / dest_width;
26011+	bandwidth = bandwidth * src_height / dest_height;
26012+	if (vskiplines == 2 && VOP_WIN_SCL_EXT_SUPPORT(vop, win, vsd_yrgb_gt2))
26013+		bandwidth /= 2;
26014+	else if (vskiplines == 4 &&
26015+		 VOP_WIN_SCL_EXT_SUPPORT(vop, win, vsd_yrgb_gt4))
26016+		bandwidth /= 4;
26017+
26018+	return bandwidth;
26019+}
26020+
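+/*
+ * Find the worst-case sum of per-line bandwidths: recursively accumulate the
+ * bandwidth of planes whose vertical ranges overlap, since only planes that
+ * share scanlines are fetched on the same output line.
+ */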
26021+static u64 vop_calc_max_bandwidth(struct vop_bandwidth *bw, int start,
26022+				  int count, int y2)
26023+{
26024+	u64 max_bandwidth = 0;
26025+	int i;
26026+
26027+	for (i = start; i < count; i++) {
26028+		u64 bandwidth = 0;
26029+
26030+		if (bw[i].y1 > y2)
26031+			continue;
26032+		bandwidth = bw[i].bandwidth;
26033+		bandwidth += vop_calc_max_bandwidth(bw, i + 1, count,
26034+						    min(bw[i].y2, y2));
26035+
26036+		if (bandwidth > max_bandwidth)
26037+			max_bandwidth = bandwidth;
26038+	}
26039+
26040+	return max_bandwidth;
26041+}
26042+
26043+static size_t vop_crtc_bandwidth(struct drm_crtc *crtc,
26044+				 struct drm_crtc_state *crtc_state,
26045+				 struct dmcfreq_vop_info *vop_bw_info)
26046+{
26047+	struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
26048+	u16 htotal = adjusted_mode->crtc_htotal;
26049+	u16 vdisplay = adjusted_mode->crtc_vdisplay;
26050+	int clock = adjusted_mode->crtc_clock;
26051+	struct vop_plane_state *vop_plane_state;
26052+	struct drm_plane_state *pstate;
26053+	struct vop_bandwidth *pbandwidth;
26054+	struct drm_plane *plane;
26055+	u64 line_bw_mbyte = 0;
26056+	int cnt = 0, plane_num = 0;
26057+	struct drm_atomic_state *state = crtc_state->state;
26058+#if defined(CONFIG_ROCKCHIP_DRM_DEBUG)
26059+	struct vop_dump_list *pos, *n;
26060+	struct vop *vop = to_vop(crtc);
26061+#endif
26062+
26063+	if (!htotal || !vdisplay)
26064+		return 0;
26065+
26066+#if defined(CONFIG_ROCKCHIP_DRM_DEBUG)
26067+	if (!vop->rockchip_crtc.vop_dump_list_init_flag) {
26068+		INIT_LIST_HEAD(&vop->rockchip_crtc.vop_dump_list_head);
26069+		vop->rockchip_crtc.vop_dump_list_init_flag = true;
26070+	}
26071+	list_for_each_entry_safe(pos, n, &vop->rockchip_crtc.vop_dump_list_head, entry) {
26072+		list_del(&pos->entry);
26073+	}
26074+	if (vop->rockchip_crtc.vop_dump_status == DUMP_KEEP ||
26075+	    vop->rockchip_crtc.vop_dump_times > 0) {
26076+		vop->rockchip_crtc.frame_count++;
26077+	}
26078+#endif
26079+
26080+	drm_atomic_crtc_state_for_each_plane(plane, crtc_state)
26081+		plane_num++;
26082+
26083+	vop_bw_info->plane_num += plane_num;
26084+	pbandwidth = kmalloc_array(plane_num, sizeof(*pbandwidth),
26085+				   GFP_KERNEL);
26086+	if (!pbandwidth)
26087+		return -ENOMEM;
26088+
26089+	drm_atomic_crtc_state_for_each_plane(plane, crtc_state) {
26090+		int act_w, act_h, cpp, afbc_fac;
26091+
26092+		pstate = drm_atomic_get_existing_plane_state(state, plane);
26093+		if (pstate->crtc != crtc || !pstate->fb)
26094+			continue;
26095+
26096+		/* Empirically, AFBC roughly halves the framebuffer traffic, so divide by 2 */
26097+		afbc_fac = rockchip_afbc(plane, pstate->fb->modifier) ? 2 : 1;
26098+
26099+		vop_plane_state = to_vop_plane_state(pstate);
26100+		pbandwidth[cnt].y1 = vop_plane_state->dest.y1;
26101+		pbandwidth[cnt].y2 = vop_plane_state->dest.y2;
26102+		pbandwidth[cnt++].bandwidth = vop_plane_line_bandwidth(pstate) / afbc_fac;
26103+
26104+		act_w = drm_rect_width(&pstate->src) >> 16;
26105+		act_h = drm_rect_height(&pstate->src) >> 16;
26106+		cpp = pstate->fb->format->cpp[0];
26107+
26108+		vop_bw_info->frame_bw_mbyte += act_w * act_h / 1000 * cpp * drm_mode_vrefresh(adjusted_mode) / 1000;
26109+
26110+	}
26111+
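+	/* Order the per-plane entries by vertical position before accumulating overlaps */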
26112+	sort(pbandwidth, cnt, sizeof(pbandwidth[0]), vop_bandwidth_cmp, NULL);
26113+
26114+	line_bw_mbyte = vop_calc_max_bandwidth(pbandwidth, 0, cnt, vdisplay);
26115+	kfree(pbandwidth);
26116+	/*
26117+	 * line_bandwidth(MB/s)
26118+	 *    = line_bandwidth / line_time
26119+	 *    = line_bandwidth(Byte) * clock(KHZ) / 1000 / htotal
26120+	 */
26121+	line_bw_mbyte *= clock;
26122+	do_div(line_bw_mbyte, htotal * 1000);
26123+	vop_bw_info->line_bw_mbyte = line_bw_mbyte;
26124+
26125+	return vop_bw_info->line_bw_mbyte;
26126+}
26127+
26128+static void vop_crtc_close(struct drm_crtc *crtc)
26129+{
26130+	struct vop *vop = NULL;
26131+
26132+	if (!crtc)
26133+		return;
26134+	vop = to_vop(crtc);
26135+	mutex_lock(&vop->vop_lock);
26136+	if (!vop->is_enabled) {
26137+		mutex_unlock(&vop->vop_lock);
26138+		return;
26139+	}
26140+
26141+	vop_disable_all_planes(vop);
26142+	mutex_unlock(&vop->vop_lock);
26143+}
26144+
26145+static u32 vop_mode_done(struct vop *vop)
26146+{
26147+	return VOP_CTRL_GET(vop, out_mode);
26148+}
26149+
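+/* Switch the output mode and poll until the hardware has latched the new value */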
26150+static void vop_set_out_mode(struct vop *vop, u32 mode)
26151+{
26152+	int ret;
26153+	u32 val;
26154+
26155+	VOP_CTRL_SET(vop, out_mode, mode);
26156+	vop_cfg_done(vop);
26157+	ret = readx_poll_timeout(vop_mode_done, vop, val, val == mode,
26158+				 1000, 500 * 1000);
26159+	if (ret)
26160+		dev_err(vop->dev, "wait mode 0x%x timeout\n", mode);
26161+
26162+}
26163+
26164+static void vop_crtc_send_mcu_cmd(struct drm_crtc *crtc,  u32 type, u32 value)
26165+{
26166+	struct rockchip_crtc_state *state;
26167+	struct vop *vop = NULL;
26168+
26169+	if (!crtc)
26170+		return;
26171+
26172+	vop = to_vop(crtc);
26173+	state = to_rockchip_crtc_state(crtc->state);
26174+
26175+	/*
26176+	 * Switch the output mode to P888 before starting to send MCU commands.
26177+	 */
26178+	if ((type == MCU_SETBYPASS) && value)
26179+		vop_set_out_mode(vop, ROCKCHIP_OUT_MODE_P888);
26180+	mutex_lock(&vop->vop_lock);
26181+	if (vop && vop->is_enabled) {
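+		/* RS low selects the MCU command register, RS high the data register */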
26182+		switch (type) {
26183+		case MCU_WRCMD:
26184+			VOP_CTRL_SET(vop, mcu_rs, 0);
26185+			VOP_CTRL_SET(vop, mcu_rw_bypass_port, value);
26186+			VOP_CTRL_SET(vop, mcu_rs, 1);
26187+			break;
26188+		case MCU_WRDATA:
26189+			VOP_CTRL_SET(vop, mcu_rs, 1);
26190+			VOP_CTRL_SET(vop, mcu_rw_bypass_port, value);
26191+			break;
26192+		case MCU_SETBYPASS:
26193+			VOP_CTRL_SET(vop, mcu_bypass, value ? 1 : 0);
26194+			break;
26195+		default:
26196+			break;
26197+		}
26198+	}
26199+	mutex_unlock(&vop->vop_lock);
26200+
26201+	/*
26202+	 * Restore the original output mode once the command sequence is done.
26203+	 */
26204+	if ((type == MCU_SETBYPASS) && !value)
26205+		vop_set_out_mode(vop, state->output_mode);
26206+}
26207+
26208+static const struct rockchip_crtc_funcs private_crtc_funcs = {
26209+	.loader_protect = vop_crtc_loader_protect,
26210+	.cancel_pending_vblank = vop_crtc_cancel_pending_vblank,
26211+	.debugfs_init = vop_crtc_debugfs_init,
26212+	.debugfs_dump = vop_crtc_debugfs_dump,
26213+	.regs_dump = vop_crtc_regs_dump,
26214+	.mode_valid = vop_crtc_mode_valid,
26215+	.bandwidth = vop_crtc_bandwidth,
26216+	.crtc_close = vop_crtc_close,
26217+	.crtc_send_mcu_cmd = vop_crtc_send_mcu_cmd,
26218+};
26219+
26220+static bool vop_crtc_mode_fixup(struct drm_crtc *crtc,
26221+				const struct drm_display_mode *mode,
26222+				struct drm_display_mode *adj_mode)
26223+{
26224+	struct vop *vop = to_vop(crtc);
26225+	const struct vop_data *vop_data = vop->data;
26226+
26227+	if (mode->hdisplay > vop_data->max_output.width)
26228+		return false;
26229+
26230+	drm_mode_set_crtcinfo(adj_mode,
26231+			      CRTC_INTERLACE_HALVE_V | CRTC_STEREO_DOUBLE);
26232+
26233+	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
26234+		adj_mode->crtc_clock *= 2;
26235+
26236+	adj_mode->crtc_clock =
26237+		DIV_ROUND_UP(clk_round_rate(vop->dclk, adj_mode->crtc_clock * 1000),
26238+			     1000);
26239+
26240+	return true;
26241+}
26242+
26243+static void vop_dither_setup(struct drm_crtc *crtc)
26244+{
26245+	struct rockchip_crtc_state *s =
26246+			to_rockchip_crtc_state(crtc->state);
26247+	struct vop *vop = to_vop(crtc);
26248+
26249+	/*
26250+	 * The VOP MCU interface does not work correctly with dithering enabled:
26251+	 * (1) MCU commands would be treated as pixel data and altered by the dither logic
26252+	 * (2) the dither algorithm itself misbehaves in MCU mode
26253+	 */
26254+	if (vop->mcu_timing.mcu_pix_total)
26255+		return;
26256+
26257+	switch (s->bus_format) {
26258+	case MEDIA_BUS_FMT_RGB565_1X16:
26259+		VOP_CTRL_SET(vop, dither_down_en, 1);
26260+		VOP_CTRL_SET(vop, dither_down_mode, RGB888_TO_RGB565);
26261+		break;
26262+	case MEDIA_BUS_FMT_RGB666_1X18:
26263+	case MEDIA_BUS_FMT_RGB666_1X24_CPADHI:
26264+	case MEDIA_BUS_FMT_RGB666_1X7X3_SPWG:
26265+		VOP_CTRL_SET(vop, dither_down_en, 1);
26266+		VOP_CTRL_SET(vop, dither_down_mode, RGB888_TO_RGB666);
26267+		break;
26268+	case MEDIA_BUS_FMT_YUV8_1X24:
26269+	case MEDIA_BUS_FMT_UYYVYY8_0_5X24:
26270+		VOP_CTRL_SET(vop, dither_down_en, 0);
26271+		VOP_CTRL_SET(vop, pre_dither_down_en, 1);
26272+		break;
26273+	case MEDIA_BUS_FMT_YUV10_1X30:
26274+	case MEDIA_BUS_FMT_UYYVYY10_0_5X30:
26275+		VOP_CTRL_SET(vop, dither_down_en, 0);
26276+		VOP_CTRL_SET(vop, pre_dither_down_en, 0);
26277+		break;
26278+	case MEDIA_BUS_FMT_RGB888_3X8:
26279+	case MEDIA_BUS_FMT_RGB888_DUMMY_4X8:
26280+	case MEDIA_BUS_FMT_RGB888_1X24:
26281+	case MEDIA_BUS_FMT_RGB888_1X7X4_SPWG:
26282+	case MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA:
26283+	default:
26284+		VOP_CTRL_SET(vop, dither_down_en, 0);
26285+		VOP_CTRL_SET(vop, pre_dither_down_en, 0);
26286+		break;
26287+	}
26288+
26289+	VOP_CTRL_SET(vop, pre_dither_down_en,
26290+		     s->output_mode == ROCKCHIP_OUT_MODE_AAAA ? 0 : 1);
26291+	VOP_CTRL_SET(vop, dither_down_sel, DITHER_DOWN_ALLEGRO);
26292+}
26293+
26294+static void vop_update_csc(struct drm_crtc *crtc)
26295+{
26296+	struct rockchip_crtc_state *s =
26297+			to_rockchip_crtc_state(crtc->state);
26298+	struct vop *vop = to_vop(crtc);
26299+	u32 val;
26300+
26301+	if (s->output_mode == ROCKCHIP_OUT_MODE_AAAA &&
26302+	    !(vop->data->feature & VOP_FEATURE_OUTPUT_10BIT))
26303+		s->output_mode = ROCKCHIP_OUT_MODE_P888;
26304+
26305+	if (is_uv_swap(s->bus_format, s->output_mode))
26306+		VOP_CTRL_SET(vop, dsp_data_swap, DSP_RB_SWAP);
26307+	else
26308+		VOP_CTRL_SET(vop, dsp_data_swap, 0);
26309+
26310+	VOP_CTRL_SET(vop, out_mode, s->output_mode);
26311+
26312+	vop_dither_setup(crtc);
26313+	VOP_CTRL_SET(vop, dclk_ddr,
26314+		     s->output_mode == ROCKCHIP_OUT_MODE_YUV420 ? 1 : 0);
26315+	VOP_CTRL_SET(vop, hdmi_dclk_out_en,
26316+		     s->output_mode == ROCKCHIP_OUT_MODE_YUV420 ? 1 : 0);
26317+
26318+	VOP_CTRL_SET(vop, overlay_mode, s->yuv_overlay);
26319+	VOP_CTRL_SET(vop, dsp_out_yuv, is_yuv_output(s->bus_format));
26320+
26321+	/*
26322+	 * The background color has 10-bit depth on VOP version >= 3.5.
26323+	 */
26324+	if (!is_yuv_output(s->bus_format))
26325+		val = 0;
26326+	else if (VOP_MAJOR(vop->version) == 3 && VOP_MINOR(vop->version) == 8 &&
26327+		 s->hdr.pre_overlay)
26328+		val = 0;
26329+	else if (VOP_MAJOR(vop->version) == 3 && VOP_MINOR(vop->version) >= 5)
26330+		val = 0x20010200;
26331+	else
26332+		val = 0x801080;
26333+	VOP_CTRL_SET(vop, dsp_background, val);
26334+}
26335+
26336+/*
26337+ * Return true if the hardware timings or pixel clock differ from the adjusted mode (a mode update is needed), else false.
26338+ */
26339+static bool vop_crtc_mode_update(struct drm_crtc *crtc)
26340+{
26341+	struct vop *vop = to_vop(crtc);
26342+	struct drm_display_mode *adjusted_mode = &crtc->state->adjusted_mode;
26343+	u16 hsync_len = adjusted_mode->crtc_hsync_end -
26344+				adjusted_mode->crtc_hsync_start;
26345+	u16 hdisplay = adjusted_mode->crtc_hdisplay;
26346+	u16 htotal = adjusted_mode->crtc_htotal;
26347+	u16 hact_st = adjusted_mode->crtc_htotal -
26348+				adjusted_mode->crtc_hsync_start;
26349+	u16 hact_end = hact_st + hdisplay;
26350+	u16 vdisplay = adjusted_mode->crtc_vdisplay;
26351+	u16 vtotal = adjusted_mode->crtc_vtotal;
26352+	u16 vsync_len = adjusted_mode->crtc_vsync_end -
26353+				adjusted_mode->crtc_vsync_start;
26354+	u16 vact_st = adjusted_mode->crtc_vtotal -
26355+				adjusted_mode->crtc_vsync_start;
26356+	u16 vact_end = vact_st + vdisplay;
26357+	u32 htotal_sync = htotal << 16 | hsync_len;
26358+	u32 hactive_st_end = hact_st << 16 | hact_end;
26359+	u32 vtotal_sync = vtotal << 16 | vsync_len;
26360+	u32 vactive_st_end = vact_st << 16 | vact_end;
26361+	u32 crtc_clock = adjusted_mode->crtc_clock * 100;
26362+
26363+	if (htotal_sync != VOP_CTRL_GET(vop, htotal_pw) ||
26364+	    hactive_st_end != VOP_CTRL_GET(vop, hact_st_end) ||
26365+	    vtotal_sync != VOP_CTRL_GET(vop, vtotal_pw) ||
26366+	    vactive_st_end != VOP_CTRL_GET(vop, vact_st_end) ||
26367+	    crtc_clock != clk_get_rate(vop->dclk))
26368+		return true;
26369+
26370+	return false;
26371+}
26372+
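+/* Program the MCU panel interface timing registers from vop->mcu_timing. */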
26373+static void vop_mcu_mode(struct drm_crtc *crtc)
26374+{
26375+	struct vop *vop = to_vop(crtc);
26376+
26377+	VOP_CTRL_SET(vop, mcu_clk_sel, 1);
26378+	VOP_CTRL_SET(vop, mcu_type, 1);
26379+
26380+	VOP_CTRL_SET(vop, mcu_hold_mode, 1);
26381+	VOP_CTRL_SET(vop, mcu_pix_total, vop->mcu_timing.mcu_pix_total);
26382+	VOP_CTRL_SET(vop, mcu_cs_pst, vop->mcu_timing.mcu_cs_pst);
26383+	VOP_CTRL_SET(vop, mcu_cs_pend, vop->mcu_timing.mcu_cs_pend);
26384+	VOP_CTRL_SET(vop, mcu_rw_pst, vop->mcu_timing.mcu_rw_pst);
26385+	VOP_CTRL_SET(vop, mcu_rw_pend, vop->mcu_timing.mcu_rw_pend);
26386+}
26387+
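+/*
+ * Enable the CRTC: set the system status, program the output interface,
+ * sync polarities and display timings from the adjusted mode, set the
+ * dclk rate and commit the configuration.
+ */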
26388+static void vop_crtc_atomic_enable(struct drm_crtc *crtc,
26389+				   struct drm_crtc_state *old_state)
26390+{
26391+	struct vop *vop = to_vop(crtc);
26392+	struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc->state);
26393+	struct drm_display_mode *adjusted_mode = &crtc->state->adjusted_mode;
26394+	u16 hsync_len = adjusted_mode->crtc_hsync_end - adjusted_mode->crtc_hsync_start;
26395+	u16 hdisplay = adjusted_mode->crtc_hdisplay;
26396+	u16 htotal = adjusted_mode->crtc_htotal;
26397+	u16 hact_st = adjusted_mode->crtc_htotal - adjusted_mode->crtc_hsync_start;
26398+	u16 hact_end = hact_st + hdisplay;
26399+	u16 vdisplay = adjusted_mode->crtc_vdisplay;
26400+	u16 vtotal = adjusted_mode->crtc_vtotal;
26401+	u16 vsync_len = adjusted_mode->crtc_vsync_end - adjusted_mode->crtc_vsync_start;
26402+	u16 vact_st = adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vsync_start;
26403+	u16 vact_end = vact_st + vdisplay;
26404+	int sys_status = drm_crtc_index(crtc) ?
26405+				SYS_STATUS_LCDC1 : SYS_STATUS_LCDC0;
26406+	uint32_t val;
26407+	int act_end;
26408+	bool interlaced = !!(adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE);
26409+	int for_ddr_freq = 0;
26410+	bool dclk_inv, yc_swap = false;
26411+
26412+	rockchip_set_system_status(sys_status);
26413+	vop_lock(vop);
26414+	DRM_DEV_INFO(vop->dev, "Update mode to %dx%d%s%d, type: %d\n",
26415+		     hdisplay, vdisplay, interlaced ? "i" : "p",
26416+		     drm_mode_vrefresh(adjusted_mode), s->output_type);
26417+	vop_initial(crtc);
26418+	vop_disable_allwin(vop);
26419+	VOP_CTRL_SET(vop, standby, 0);
26420+	s->mode_update = vop_crtc_mode_update(crtc);
26421+	if (s->mode_update)
26422+		vop_disable_all_planes(vop);
26423+	/*
26424+	 * Restore the gamma LUT.
26425+	 */
26426+	if (vop->lut_active)
26427+		vop_crtc_load_lut(crtc);
26428+
26429+	if (vop->mcu_timing.mcu_pix_total)
26430+		vop_mcu_mode(crtc);
26431+
26432+	dclk_inv = (s->bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE) ? 1 : 0;
26433+
26434+	VOP_CTRL_SET(vop, dclk_pol, dclk_inv);
26435+	val = (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC) ?
26436+		   0 : BIT(HSYNC_POSITIVE);
26437+	val |= (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC) ?
26438+		   0 : BIT(VSYNC_POSITIVE);
26439+	VOP_CTRL_SET(vop, pin_pol, val);
26440+
26441+	if (vop->dclk_source && vop->pll && vop->pll->pll) {
26442+		if (clk_set_parent(vop->dclk_source, vop->pll->pll))
26443+			DRM_DEV_ERROR(vop->dev,
26444+				      "failed to set dclk's parents\n");
26445+	}
26446+
26447+	switch (s->output_type) {
26448+	case DRM_MODE_CONNECTOR_DPI:
26449+	case DRM_MODE_CONNECTOR_LVDS:
26450+		VOP_CTRL_SET(vop, rgb_en, 1);
26451+		VOP_CTRL_SET(vop, rgb_pin_pol, val);
26452+		VOP_CTRL_SET(vop, rgb_dclk_pol, dclk_inv);
26453+		VOP_CTRL_SET(vop, lvds_en, 1);
26454+		VOP_CTRL_SET(vop, lvds_pin_pol, val);
26455+		VOP_CTRL_SET(vop, lvds_dclk_pol, dclk_inv);
26456+		VOP_GRF_SET(vop, grf_dclk_inv, dclk_inv);
26457+		if (s->output_if & VOP_OUTPUT_IF_BT1120) {
26458+			VOP_CTRL_SET(vop, bt1120_en, 1);
26459+			yc_swap = is_yc_swap(s->bus_format);
26460+			VOP_CTRL_SET(vop, bt1120_yc_swap, yc_swap);
26461+			VOP_CTRL_SET(vop, yuv_clip, 1);
26462+		}
26463+		break;
26464+	case DRM_MODE_CONNECTOR_eDP:
26465+		VOP_CTRL_SET(vop, edp_en, 1);
26466+		VOP_CTRL_SET(vop, edp_pin_pol, val);
26467+		VOP_CTRL_SET(vop, edp_dclk_pol, dclk_inv);
26468+		break;
26469+	case DRM_MODE_CONNECTOR_HDMIA:
26470+		VOP_CTRL_SET(vop, hdmi_en, 1);
26471+		VOP_CTRL_SET(vop, hdmi_pin_pol, val);
26472+		VOP_CTRL_SET(vop, hdmi_dclk_pol, 1);
26473+		break;
26474+	case DRM_MODE_CONNECTOR_DSI:
26475+		VOP_CTRL_SET(vop, mipi_en, 1);
26476+		VOP_CTRL_SET(vop, mipi_pin_pol, val);
26477+		VOP_CTRL_SET(vop, mipi_dclk_pol, dclk_inv);
26478+		VOP_CTRL_SET(vop, mipi_dual_channel_en,
26479+			!!(s->output_flags & ROCKCHIP_OUTPUT_DUAL_CHANNEL_LEFT_RIGHT_MODE));
26480+		VOP_CTRL_SET(vop, data01_swap,
26481+			!!(s->output_flags & ROCKCHIP_OUTPUT_DATA_SWAP) ||
26482+			vop->dual_channel_swap);
26483+		break;
26484+	case DRM_MODE_CONNECTOR_DisplayPort:
26485+		VOP_CTRL_SET(vop, dp_dclk_pol, 0);
26486+		VOP_CTRL_SET(vop, dp_pin_pol, val);
26487+		VOP_CTRL_SET(vop, dp_en, 1);
26488+		break;
26489+	case DRM_MODE_CONNECTOR_TV:
26490+		if (vdisplay == CVBS_PAL_VDISPLAY)
26491+			VOP_CTRL_SET(vop, tve_sw_mode, 1);
26492+		else
26493+			VOP_CTRL_SET(vop, tve_sw_mode, 0);
26494+
26495+		VOP_CTRL_SET(vop, tve_dclk_pol, 1);
26496+		VOP_CTRL_SET(vop, tve_dclk_en, 1);
26497+		/* TV out shares the sync polarity register with HDMI */
26498+		VOP_CTRL_SET(vop, hdmi_pin_pol, val);
26499+		VOP_CTRL_SET(vop, sw_genlock, 1);
26500+		VOP_CTRL_SET(vop, sw_uv_offset_en, 1);
26501+		VOP_CTRL_SET(vop, dither_up_en, 1);
26502+		break;
26503+	default:
26504+		DRM_ERROR("unsupported connector_type[%d]\n", s->output_type);
26505+	}
26506+	vop_update_csc(crtc);
26507+	VOP_CTRL_SET(vop, htotal_pw, (htotal << 16) | hsync_len);
26508+	val = hact_st << 16;
26509+	val |= hact_end;
26510+	VOP_CTRL_SET(vop, hact_st_end, val);
26511+	VOP_CTRL_SET(vop, hpost_st_end, val);
26512+
26513+	val = vact_st << 16;
26514+	val |= vact_end;
26515+	VOP_CTRL_SET(vop, vact_st_end, val);
26516+	VOP_CTRL_SET(vop, vpost_st_end, val);
26517+
26518+	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
26519+		u16 vact_st_f1 = vtotal + vact_st + 1;
26520+		u16 vact_end_f1 = vact_st_f1 + vdisplay;
26521+
26522+		val = vact_st_f1 << 16 | vact_end_f1;
26523+		VOP_CTRL_SET(vop, vact_st_end_f1, val);
26524+		VOP_CTRL_SET(vop, vpost_st_end_f1, val);
26525+
26526+		val = vtotal << 16 | (vtotal + vsync_len);
26527+		VOP_CTRL_SET(vop, vs_st_end_f1, val);
26528+		VOP_CTRL_SET(vop, dsp_interlace, 1);
26529+		VOP_CTRL_SET(vop, p2i_en, 1);
26530+		vtotal += vtotal + 1;
26531+		act_end = vact_end_f1;
26532+	} else {
26533+		VOP_CTRL_SET(vop, dsp_interlace, 0);
26534+		VOP_CTRL_SET(vop, p2i_en, 0);
26535+		act_end = vact_end;
26536+	}
26537+
26538+	if (VOP_MAJOR(vop->version) == 3 &&
26539+	    (VOP_MINOR(vop->version) == 2 || VOP_MINOR(vop->version) == 8))
26540+		for_ddr_freq = 1000;
26541+	VOP_INTR_SET(vop, line_flag_num[0], act_end);
26542+	VOP_INTR_SET(vop, line_flag_num[1],
26543+		     act_end - us_to_vertical_line(adjusted_mode, for_ddr_freq));
26544+
26545+	VOP_CTRL_SET(vop, vtotal_pw, vtotal << 16 | vsync_len);
26546+
26547+	VOP_CTRL_SET(vop, core_dclk_div,
26548+		     !!(adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK));
26549+
26550+	VOP_CTRL_SET(vop, win_csc_mode_sel, 1);
26551+
26552+	clk_set_rate(vop->dclk, adjusted_mode->crtc_clock * 1000);
26553+
26554+
26555+	vop_cfg_done(vop);
26556+
26557+	drm_crtc_vblank_on(crtc);
26558+	vop_unlock(vop);
26559+}
26560+
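+/* sort() comparator: order windows by ascending zpos. */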
26561+static int vop_zpos_cmp(const void *a, const void *b)
26562+{
26563+	struct vop_zpos *pa = (struct vop_zpos *)a;
26564+	struct vop_zpos *pb = (struct vop_zpos *)b;
26565+
26566+	return pa->zpos - pb->zpos;
26567+}
26568+
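+/*
+ * Allow at most one plane with an AFBC framebuffer and record its format,
+ * geometry and address in the crtc state for the commit path.
+ */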
26569+static int vop_afbdc_atomic_check(struct drm_crtc *crtc,
26570+				  struct drm_crtc_state *crtc_state)
26571+{
26572+	struct vop *vop = to_vop(crtc);
26573+	struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state);
26574+	struct drm_atomic_state *state = crtc_state->state;
26575+	struct drm_plane *plane;
26576+	struct drm_plane_state *pstate;
26577+	struct vop_plane_state *plane_state;
26578+	struct drm_framebuffer *fb;
26579+	struct drm_rect *src;
26580+	struct vop_win *win;
26581+	int afbdc_format;
26582+
26583+	s->afbdc_en = 0;
26584+
26585+	drm_atomic_crtc_state_for_each_plane(plane, crtc_state) {
26586+		pstate = drm_atomic_get_existing_plane_state(state, plane);
26587+		/*
26588+		 * plane might not have changed, in which case take
26589+		 * current state:
26590+		 */
26591+		if (!pstate)
26592+			pstate = plane->state;
26593+
26594+		fb = pstate->fb;
26595+
26596+		if (pstate->crtc != crtc || !fb)
26597+			continue;
26598+		if (fb->modifier !=
26599+			DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_16x16))
26600+			continue;
26601+
26602+		if (!VOP_CTRL_SUPPORT(vop, afbdc_en)) {
26603+			DRM_INFO("not support afbdc\n");
26604+			return -EINVAL;
26605+		}
26606+
26607+		plane_state = to_vop_plane_state(pstate);
26608+
26609+		switch (plane_state->format) {
26610+		case VOP_FMT_ARGB8888:
26611+			afbdc_format = AFBDC_FMT_U8U8U8U8;
26612+			break;
26613+		case VOP_FMT_RGB888:
26614+			afbdc_format = AFBDC_FMT_U8U8U8;
26615+			break;
26616+		case VOP_FMT_RGB565:
26617+			afbdc_format = AFBDC_FMT_RGB565;
26618+			break;
26619+		default:
26620+			return -EINVAL;
26621+		}
26622+
26623+		if (s->afbdc_en) {
26624+			DRM_ERROR("vop only support one afbc layer\n");
26625+			return -EINVAL;
26626+		}
26627+
26628+		win = to_vop_win(plane);
26629+		src = &plane_state->src;
26630+		if (!(win->feature & WIN_FEATURE_AFBDC)) {
26631+			DRM_ERROR("win[%d] feature:0x%llx, not support afbdc\n",
26632+				  win->win_id, win->feature);
26633+			return -EINVAL;
26634+		}
26635+		if (!IS_ALIGNED(fb->width, 16)) {
26636+			DRM_ERROR("win[%d] afbdc must 16 align, width: %d\n",
26637+				  win->win_id, fb->width);
26638+			return -EINVAL;
26639+		}
26640+
26641+		if (VOP_CTRL_SUPPORT(vop, afbdc_pic_vir_width)) {
26642+			u32 align_x1, align_x2, align_y1, align_y2, align_val;
26643+			struct drm_gem_object *obj;
26644+			struct rockchip_gem_object *rk_obj;
26645+			dma_addr_t fb_addr;
26646+
26647+			obj = fb->obj[0];
26648+			rk_obj = to_rockchip_obj(obj);
26649+			fb_addr = rk_obj->dma_addr + fb->offsets[0];
26650+
26651+			s->afbdc_win_format = afbdc_format;
26652+			s->afbdc_win_id = win->win_id;
26653+			s->afbdc_win_ptr = fb_addr;
26654+			s->afbdc_win_vir_width = fb->width;
26655+			s->afbdc_win_xoffset = (src->x1 >> 16);
26656+			s->afbdc_win_yoffset = (src->y1 >> 16);
26657+
26658+			align_x1 = (src->x1 >> 16) - ((src->x1 >> 16) % 16);
26659+			align_y1 = (src->y1 >> 16) - ((src->y1 >> 16) % 16);
26660+
26661+			align_val = (src->x2 >> 16) % 16;
26662+			if (align_val)
26663+				align_x2 = (src->x2 >> 16) + (16 - align_val);
26664+			else
26665+				align_x2 = src->x2 >> 16;
26666+
26667+			align_val = (src->y2 >> 16) % 16;
26668+			if (align_val)
26669+				align_y2 = (src->y2 >> 16) + (16 - align_val);
26670+			else
26671+				align_y2 = src->y2 >> 16;
26672+
26673+			s->afbdc_win_width = align_x2 - align_x1 - 1;
26674+			s->afbdc_win_height = align_y2 - align_y1 - 1;
26675+
26676+			s->afbdc_en = 1;
26677+
26678+			break;
26679+		}
26680+		if (src->x1 || src->y1 || fb->offsets[0]) {
26681+			DRM_ERROR("win[%d] afbdc not support offset display\n",
26682+				  win->win_id);
26683+			DRM_ERROR("xpos=%d, ypos=%d, offset=%d\n",
26684+				  src->x1, src->y1, fb->offsets[0]);
26685+			return -EINVAL;
26686+		}
26687+		s->afbdc_win_format = afbdc_format;
26688+		s->afbdc_win_width = fb->width - 1;
26689+		s->afbdc_win_height = (drm_rect_height(src) >> 16) - 1;
26690+		s->afbdc_win_id = win->win_id;
26691+		s->afbdc_win_ptr = plane_state->yrgb_mst;
26692+		s->afbdc_en = 1;
26693+	}
26694+
26695+	return 0;
26696+}
26697+
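+/*
+ * Select the dclk parent PLL (default or HDMI PLL) for this output and
+ * force a full mode set when the selection changes.
+ */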
26698+static void vop_dclk_source_generate(struct drm_crtc *crtc,
26699+				     struct drm_crtc_state *crtc_state)
26700+{
26701+	struct rockchip_drm_private *private = crtc->dev->dev_private;
26702+	struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state);
26703+	struct rockchip_crtc_state *old_s = to_rockchip_crtc_state(crtc->state);
26704+	struct vop *vop = to_vop(crtc);
26705+	struct rockchip_dclk_pll *old_pll = vop->pll;
26706+
26707+	if (!vop->dclk_source)
26708+		return;
26709+
26710+	if (crtc_state->active) {
26711+		WARN_ON(vop->pll && !vop->pll->use_count);
26712+		if (!vop->pll || vop->pll->use_count > 1 ||
26713+		    s->output_type != old_s->output_type) {
26714+			if (vop->pll)
26715+				vop->pll->use_count--;
26716+
26717+			if (s->output_type != DRM_MODE_CONNECTOR_HDMIA &&
26718+			    !private->default_pll.use_count)
26719+				vop->pll = &private->default_pll;
26720+			else
26721+				vop->pll = &private->hdmi_pll;
26722+
26723+			vop->pll->use_count++;
26724+		}
26725+	} else if (vop->pll) {
26726+		vop->pll->use_count--;
26727+		vop->pll = NULL;
26728+	}
26729+	if (vop->pll != old_pll)
26730+		crtc_state->mode_changed = true;
26731+}
26732+
26733+static int vop_crtc_atomic_check(struct drm_crtc *crtc,
26734+				 struct drm_crtc_state *crtc_state)
26735+{
26736+	struct drm_atomic_state *state = crtc_state->state;
26737+	struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state);
26738+	struct vop *vop = to_vop(crtc);
26739+	const struct vop_data *vop_data = vop->data;
26740+	struct drm_plane *plane;
26741+	struct drm_plane_state *pstate;
26742+	struct vop_plane_state *plane_state;
26743+	struct vop_zpos *pzpos;
26744+	int dsp_layer_sel = 0;
26745+	int i, j, cnt = 0, ret = 0;
26746+
26747+	ret = vop_afbdc_atomic_check(crtc, crtc_state);
26748+	if (ret)
26749+		return ret;
26750+
26751+	s->yuv_overlay = 0;
26752+	if (VOP_CTRL_SUPPORT(vop, overlay_mode))
26753+		s->yuv_overlay = is_yuv_output(s->bus_format);
26754+
26755+	ret = vop_hdr_atomic_check(crtc, crtc_state);
26756+	if (ret)
26757+		return ret;
26758+	ret = vop_csc_atomic_check(crtc, crtc_state);
26759+	if (ret)
26760+		return ret;
26761+
26762+	pzpos = kmalloc_array(vop_data->win_size, sizeof(*pzpos), GFP_KERNEL);
26763+	if (!pzpos)
26764+		return -ENOMEM;
26765+
26766+	for (i = 0; i < vop_data->win_size; i++) {
26767+		const struct vop_win_data *win_data = &vop_data->win[i];
26768+		struct vop_win *win;
26769+
26770+		if (!win_data->phy)
26771+			continue;
26772 
26773-	VOP_INTR_SET_TYPE(vop, clear, FS_INTR, 1);
26774-	VOP_INTR_SET_TYPE(vop, enable, FS_INTR, 1);
26775+		for (j = 0; j < vop->num_wins; j++) {
26776+			win = &vop->win[j];
26777 
26778-	spin_unlock_irqrestore(&vop->irq_lock, flags);
26779+			if (win->win_id == i && !win->area_id)
26780+				break;
26781+		}
26782+		if (WARN_ON(j >= vop->num_wins)) {
26783+			ret = -EINVAL;
26784+			goto err_free_pzpos;
26785+		}
26786 
26787-	return 0;
26788-}
26789+		plane = &win->base;
26790+		pstate = state->planes[drm_plane_index(plane)].state;
26791+		/*
26792+		 * plane might not have changed, in which case take
26793+		 * current state:
26794+		 */
26795+		if (!pstate)
26796+			pstate = plane->state;
26797+		plane_state = to_vop_plane_state(pstate);
26798 
26799-static void vop_crtc_disable_vblank(struct drm_crtc *crtc)
26800-{
26801-	struct vop *vop = to_vop(crtc);
26802-	unsigned long flags;
26803+		if (!pstate->visible)
26804+			pzpos[cnt].zpos = INT_MAX;
26805+		else
26806+			pzpos[cnt].zpos = plane_state->zpos;
26807+		pzpos[cnt++].win_id = win->win_id;
26808+	}
26809 
26810-	if (WARN_ON(!vop->is_enabled))
26811-		return;
26812+	sort(pzpos, cnt, sizeof(pzpos[0]), vop_zpos_cmp, NULL);
26813 
26814-	spin_lock_irqsave(&vop->irq_lock, flags);
26815+	for (i = 0, cnt = 0; i < vop_data->win_size; i++) {
26816+		const struct vop_win_data *win_data = &vop_data->win[i];
26817+		int shift = i * 2;
26818 
26819-	VOP_INTR_SET_TYPE(vop, enable, FS_INTR, 0);
26820+		if (win_data->phy) {
26821+			struct vop_zpos *zpos = &pzpos[cnt++];
26822 
26823-	spin_unlock_irqrestore(&vop->irq_lock, flags);
26824-}
26825+			dsp_layer_sel |= zpos->win_id << shift;
26826+		} else {
26827+			dsp_layer_sel |= i << shift;
26828+		}
26829+	}
26830 
26831-static bool vop_crtc_mode_fixup(struct drm_crtc *crtc,
26832-				const struct drm_display_mode *mode,
26833-				struct drm_display_mode *adjusted_mode)
26834-{
26835-	struct vop *vop = to_vop(crtc);
26836-	unsigned long rate;
26837+	s->dsp_layer_sel = dsp_layer_sel;
26838 
26839-	/*
26840-	 * Clock craziness.
26841-	 *
26842-	 * Key points:
26843-	 *
26844-	 * - DRM works in in kHz.
26845-	 * - Clock framework works in Hz.
26846-	 * - Rockchip's clock driver picks the clock rate that is the
26847-	 *   same _OR LOWER_ than the one requested.
26848-	 *
26849-	 * Action plan:
26850-	 *
26851-	 * 1. When DRM gives us a mode, we should add 999 Hz to it.  That way
26852-	 *    if the clock we need is 60000001 Hz (~60 MHz) and DRM tells us to
26853-	 *    make 60000 kHz then the clock framework will actually give us
26854-	 *    the right clock.
26855-	 *
26856-	 *    NOTE: if the PLL (maybe through a divider) could actually make
26857-	 *    a clock rate 999 Hz higher instead of the one we want then this
26858-	 *    could be a problem.  Unfortunately there's not much we can do
26859-	 *    since it's baked into DRM to use kHz.  It shouldn't matter in
26860-	 *    practice since Rockchip PLLs are controlled by tables and
26861-	 *    even if there is a divider in the middle I wouldn't expect PLL
26862-	 *    rates in the table that are just a few kHz different.
26863-	 *
26864-	 * 2. Get the clock framework to round the rate for us to tell us
26865-	 *    what it will actually make.
26866-	 *
26867-	 * 3. Store the rounded up rate so that we don't need to worry about
26868-	 *    this in the actual clk_set_rate().
26869-	 */
26870-	rate = clk_round_rate(vop->dclk, adjusted_mode->clock * 1000 + 999);
26871-	adjusted_mode->clock = DIV_ROUND_UP(rate, 1000);
26872+	vop_dclk_source_generate(crtc, crtc_state);
26873 
26874-	return true;
26875+err_free_pzpos:
26876+	kfree(pzpos);
26877+	return ret;
26878 }
26879 
26880-static bool vop_dsp_lut_is_enabled(struct vop *vop)
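+/*
+ * Apply the overscan margins: compute the post-scaler destination window
+ * and program the post scaling factors.
+ */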
26881+static void vop_post_config(struct drm_crtc *crtc)
26882 {
26883-	return vop_read_reg(vop, 0, &vop->data->common->dsp_lut_en);
26884+	struct vop *vop = to_vop(crtc);
26885+	struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc->state);
26886+	struct drm_display_mode *mode = &crtc->state->adjusted_mode;
26887+	u16 vtotal = mode->crtc_vtotal;
26888+	u16 hdisplay = mode->crtc_hdisplay;
26889+	u16 hact_st = mode->crtc_htotal - mode->crtc_hsync_start;
26890+	u16 vdisplay = mode->crtc_vdisplay;
26891+	u16 vact_st = mode->crtc_vtotal - mode->crtc_vsync_start;
26892+	u16 hsize = hdisplay * (s->left_margin + s->right_margin) / 200;
26893+	u16 vsize = vdisplay * (s->top_margin + s->bottom_margin) / 200;
26894+	u16 hact_end, vact_end;
26895+	u32 val;
26896+
26897+	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
26898+		vsize = rounddown(vsize, 2);
26899+
26900+	hact_st += hdisplay * (100 - s->left_margin) / 200;
26901+	hact_end = hact_st + hsize;
26902+	val = hact_st << 16;
26903+	val |= hact_end;
26904+	VOP_CTRL_SET(vop, hpost_st_end, val);
26905+	vact_st += vdisplay * (100 - s->top_margin) / 200;
26906+	vact_end = vact_st + vsize;
26907+	val = vact_st << 16;
26908+	val |= vact_end;
26909+	VOP_CTRL_SET(vop, vpost_st_end, val);
26910+	val = scl_cal_scale2(vdisplay, vsize) << 16;
26911+	val |= scl_cal_scale2(hdisplay, hsize);
26912+	VOP_CTRL_SET(vop, post_scl_factor, val);
26913+
26914+#define POST_HORIZONTAL_SCALEDOWN_EN(x)		((x) << 0)
26915+#define POST_VERTICAL_SCALEDOWN_EN(x)		((x) << 1)
26916+	VOP_CTRL_SET(vop, post_scl_ctrl,
26917+		     POST_HORIZONTAL_SCALEDOWN_EN(hdisplay != hsize) |
26918+		     POST_VERTICAL_SCALEDOWN_EN(vdisplay != vsize));
26919+	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
26920+		u16 vact_st_f1 = vtotal + vact_st + 1;
26921+		u16 vact_end_f1 = vact_st_f1 + vsize;
26922+
26923+		val = vact_st_f1 << 16 | vact_end_f1;
26924+		VOP_CTRL_SET(vop, vpost_st_end_f1, val);
26925+	}
26926 }
26927 
26928-static void vop_crtc_write_gamma_lut(struct vop *vop, struct drm_crtc *crtc)
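+/*
+ * Program the HDR paths (hdr2sdr and sdr2hdr conversion tables and
+ * enables) from the state prepared during atomic check.
+ */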
26929+static void vop_update_hdr(struct drm_crtc *crtc,
26930+			   struct drm_crtc_state *old_crtc_state)
26931 {
26932-	struct drm_color_lut *lut = crtc->state->gamma_lut->data;
26933-	unsigned int i;
26934+	struct rockchip_crtc_state *s =
26935+			to_rockchip_crtc_state(crtc->state);
26936+	struct vop *vop = to_vop(crtc);
26937+	struct rockchip_sdr2hdr_state *sdr2hdr_state = &s->hdr.sdr2hdr_state;
26938 
26939-	for (i = 0; i < crtc->gamma_size; i++) {
26940-		u32 word;
26941+	if (!vop->data->hdr_table)
26942+		return;
26943 
26944-		word = (drm_color_lut_extract(lut[i].red, 10) << 20) |
26945-		       (drm_color_lut_extract(lut[i].green, 10) << 10) |
26946-			drm_color_lut_extract(lut[i].blue, 10);
26947-		writel(word, vop->lut_regs + i * 4);
26948+	if (s->hdr.hdr2sdr_en) {
26949+		vop_load_hdr2sdr_table(vop);
26950+		/* Due to an IC design bug, the overlay works in the RGB domain
26951+		 * in hdr2sdr mode, so win0 would do yuv2rgb; in this case win0's
26952+		 * y2r conversion must be disabled.
26953+		 */
26954+		VOP_CTRL_SET(vop, hdr2sdr_en_win0_csc, 0);
26955 	}
26956+	VOP_CTRL_SET(vop, hdr2sdr_en, s->hdr.hdr2sdr_en);
26957+
26958+	VOP_CTRL_SET(vop, bt1886eotf_pre_conv_en,
26959+		     sdr2hdr_state->bt1886eotf_pre_conv_en);
26960+	VOP_CTRL_SET(vop, bt1886eotf_post_conv_en,
26961+		     sdr2hdr_state->bt1886eotf_post_conv_en);
26962+
26963+	VOP_CTRL_SET(vop, rgb2rgb_pre_conv_en,
26964+		     sdr2hdr_state->rgb2rgb_pre_conv_en);
26965+	VOP_CTRL_SET(vop, rgb2rgb_pre_conv_mode,
26966+		     sdr2hdr_state->rgb2rgb_pre_conv_mode);
26967+	VOP_CTRL_SET(vop, st2084oetf_pre_conv_en,
26968+		     sdr2hdr_state->st2084oetf_pre_conv_en);
26969+
26970+	VOP_CTRL_SET(vop, rgb2rgb_post_conv_en,
26971+		     sdr2hdr_state->rgb2rgb_post_conv_en);
26972+	VOP_CTRL_SET(vop, rgb2rgb_post_conv_mode,
26973+		     sdr2hdr_state->rgb2rgb_post_conv_mode);
26974+	VOP_CTRL_SET(vop, st2084oetf_post_conv_en,
26975+		     sdr2hdr_state->st2084oetf_post_conv_en);
26976+
26977+	if (sdr2hdr_state->bt1886eotf_pre_conv_en ||
26978+	    sdr2hdr_state->bt1886eotf_post_conv_en)
26979+		vop_load_sdr2hdr_table(vop, sdr2hdr_state->sdr2hdr_func);
26980+	VOP_CTRL_SET(vop, win_csc_mode_sel, 1);
26981 }
26982 
26983-static void vop_crtc_gamma_set(struct vop *vop, struct drm_crtc *crtc,
26984-			       struct drm_crtc_state *old_state)
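+/*
+ * Update the BCSH (brightness/contrast/saturation/hue) block and the
+ * post r2y/y2r conversions from the connector's TV state.
+ */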
26985+static void vop_tv_config_update(struct drm_crtc *crtc,
26986+				 struct drm_crtc_state *old_crtc_state)
26987 {
26988-	struct drm_crtc_state *state = crtc->state;
26989-	unsigned int idle;
26990-	int ret;
26991+	struct rockchip_crtc_state *s =
26992+			to_rockchip_crtc_state(crtc->state);
26993+	struct rockchip_crtc_state *old_s =
26994+			to_rockchip_crtc_state(old_crtc_state);
26995+	int brightness, contrast, saturation, hue, sin_hue, cos_hue;
26996+	struct vop *vop = to_vop(crtc);
26997+	const struct vop_data *vop_data = vop->data;
26998 
26999-	if (!vop->lut_regs)
27000+	if (!s->tv_state)
27001 		return;
27002-	/*
27003-	 * To disable gamma (gamma_lut is null) or to write
27004-	 * an update to the LUT, clear dsp_lut_en.
27005-	 */
27006-	spin_lock(&vop->reg_lock);
27007-	VOP_REG_SET(vop, common, dsp_lut_en, 0);
27008-	vop_cfg_done(vop);
27009-	spin_unlock(&vop->reg_lock);
27010 
27011 	/*
27012-	 * In order to write the LUT to the internal memory,
27013-	 * we need to first make sure the dsp_lut_en bit is cleared.
27014+	 * BCSH only needs to be configured once, unless one of the following
27015+	 * conditions changes:
27016+	 *   1. tv_state: brightness, contrast, saturation and hue;
27017+	 *   2. yuv_overlay: affects the BCSH r2y module;
27018+	 *   3. mode_update: indicates a mode change or a resume from suspend;
27019+	 *   4. bcsh_en: whether the BCSH module is enabled or disabled;
27020+	 *   5. bus_format: affects the BCSH y2r module.
27021 	 */
27022-	ret = readx_poll_timeout(vop_dsp_lut_is_enabled, vop,
27023-				 idle, !idle, 5, 30 * 1000);
27024-	if (ret) {
27025-		DRM_DEV_ERROR(vop->dev, "display LUT RAM enable timeout!\n");
27026+	if (!memcmp(s->tv_state,
27027+		    &vop->active_tv_state, sizeof(*s->tv_state)) &&
27028+	    s->yuv_overlay == old_s->yuv_overlay && s->mode_update &&
27029+	    s->bcsh_en == old_s->bcsh_en && s->bus_format == old_s->bus_format)
27030 		return;
27031+
27032+	memcpy(&vop->active_tv_state, s->tv_state, sizeof(*s->tv_state));
27033+	/* post BCSH CSC */
27034+	s->post_r2y_en = 0;
27035+	s->post_y2r_en = 0;
27036+	s->bcsh_en = 0;
27037+	if (s->tv_state) {
27038+		if (s->tv_state->brightness != 50 ||
27039+		    s->tv_state->contrast != 50 ||
27040+		    s->tv_state->saturation != 50 || s->tv_state->hue != 50)
27041+			s->bcsh_en = 1;
27042+	}
27043+
27044+	if (s->bcsh_en) {
27045+		if (!s->yuv_overlay)
27046+			s->post_r2y_en = 1;
27047+		if (!is_yuv_output(s->bus_format))
27048+			s->post_y2r_en = 1;
27049+	} else {
27050+		if (!s->yuv_overlay && is_yuv_output(s->bus_format))
27051+			s->post_r2y_en = 1;
27052+		if (s->yuv_overlay && !is_yuv_output(s->bus_format))
27053+			s->post_y2r_en = 1;
27054 	}
27055 
27056-	if (!state->gamma_lut)
27057+	s->post_csc_mode = to_vop_csc_mode(s->color_space);
27058+	VOP_CTRL_SET(vop, bcsh_r2y_en, s->post_r2y_en);
27059+	VOP_CTRL_SET(vop, bcsh_y2r_en, s->post_y2r_en);
27060+	VOP_CTRL_SET(vop, bcsh_r2y_csc_mode, s->post_csc_mode);
27061+	VOP_CTRL_SET(vop, bcsh_y2r_csc_mode, s->post_csc_mode);
27062+	if (!s->bcsh_en) {
27063+		VOP_CTRL_SET(vop, bcsh_en, s->bcsh_en);
27064 		return;
27065+	}
27066 
27067-	spin_lock(&vop->reg_lock);
27068-	vop_crtc_write_gamma_lut(vop, crtc);
27069-	VOP_REG_SET(vop, common, dsp_lut_en, 1);
27070-	vop_cfg_done(vop);
27071-	spin_unlock(&vop->reg_lock);
27072-}
27073+	if (vop_data->feature & VOP_FEATURE_OUTPUT_10BIT)
27074+		brightness = interpolate(0, -128, 100, 127, s->tv_state->brightness);
27075+	else if (VOP_MAJOR(vop->version) == 2 && VOP_MINOR(vop->version) == 6) /* px30 vopb */
27076+		brightness = interpolate(0, -64, 100, 63, s->tv_state->brightness);
27077+	else
27078+		brightness = interpolate(0, -32, 100, 31, s->tv_state->brightness);
27079 
27080-static void vop_crtc_atomic_begin(struct drm_crtc *crtc,
27081-				  struct drm_crtc_state *old_crtc_state)
27082-{
27083-	struct vop *vop = to_vop(crtc);
27084+	if ((VOP_MAJOR(vop->version) == 3) ||
27085+	    (VOP_MAJOR(vop->version) == 2 && VOP_MINOR(vop->version) == 6)) { /* px30 vopb */
27086+		contrast = interpolate(0, 0, 100, 511, s->tv_state->contrast);
27087+		saturation = interpolate(0, 0, 100, 511, s->tv_state->saturation);
27088+		/*
27089+		 *  a:[-30~0]:
27090+		 *    sin_hue = 0x100 - sin(a)*256;
27091+		 *    cos_hue = cos(a)*256;
27092+		 *  a:[0~30]
27093+		 *    sin_hue = sin(a)*256;
27094+		 *    cos_hue = cos(a)*256;
27095+		 */
27096+		hue = interpolate(0, -30, 100, 30, s->tv_state->hue);
27097+		sin_hue = fixp_sin32(hue) >> 23;
27098+		cos_hue = fixp_cos32(hue) >> 23;
27099+		VOP_CTRL_SET(vop, bcsh_sat_con, saturation * contrast / 0x100);
27100 
27101-	/*
27102-	 * Only update GAMMA if the 'active' flag is not changed,
27103-	 * otherwise it's updated by .atomic_enable.
27104-	 */
27105-	if (crtc->state->color_mgmt_changed &&
27106-	    !crtc->state->active_changed)
27107-		vop_crtc_gamma_set(vop, crtc, old_crtc_state);
27108+	} else {
27109+		contrast = interpolate(0, 0, 100, 255, s->tv_state->contrast);
27110+		saturation = interpolate(0, 0, 100, 255, s->tv_state->saturation);
27111+		/*
27112+		 *  a:[-30~0]:
27113+		 *    sin_hue = 0x100 - sin(a)*128;
27114+		 *    cos_hue = cos(a)*128;
27115+		 *  a:[0~30]
27116+		 *    sin_hue = sin(a)*128;
27117+		 *    cos_hue = cos(a)*128;
27118+		 */
27119+		hue = interpolate(0, -30, 100, 30, s->tv_state->hue);
27120+		sin_hue = fixp_sin32(hue) >> 24;
27121+		cos_hue = fixp_cos32(hue) >> 24;
27122+		VOP_CTRL_SET(vop, bcsh_sat_con, saturation * contrast / 0x80);
27123+	}
27124+
27125+	VOP_CTRL_SET(vop, bcsh_brightness, brightness);
27126+	VOP_CTRL_SET(vop, bcsh_contrast, contrast);
27127+	VOP_CTRL_SET(vop, bcsh_sin_hue, sin_hue);
27128+	VOP_CTRL_SET(vop, bcsh_cos_hue, cos_hue);
27129+	VOP_CTRL_SET(vop, bcsh_out_mode, BCSH_OUT_MODE_NORMAL_VIDEO);
27130+	if (VOP_MAJOR(vop->version) == 3 && VOP_MINOR(vop->version) == 0)
27131+		VOP_CTRL_SET(vop, auto_gate_en, 0);
27132+	VOP_CTRL_SET(vop, bcsh_en, s->bcsh_en);
27133 }
27134 
27135-static void vop_crtc_atomic_enable(struct drm_crtc *crtc,
27136-				   struct drm_crtc_state *old_state)
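+/*
+ * Apply the per-commit configuration under reg_lock: output CSC, BCSH,
+ * AFBC decoder setup, window z-order and optional overscan post scaling.
+ */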
27137+static void vop_cfg_update(struct drm_crtc *crtc,
27138+			   struct drm_crtc_state *old_crtc_state)
27139 {
27140+	struct rockchip_crtc_state *s =
27141+			to_rockchip_crtc_state(crtc->state);
27142 	struct vop *vop = to_vop(crtc);
27143 	const struct vop_data *vop_data = vop->data;
27144-	struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc->state);
27145-	struct drm_display_mode *adjusted_mode = &crtc->state->adjusted_mode;
27146-	u16 hsync_len = adjusted_mode->hsync_end - adjusted_mode->hsync_start;
27147-	u16 hdisplay = adjusted_mode->hdisplay;
27148-	u16 htotal = adjusted_mode->htotal;
27149-	u16 hact_st = adjusted_mode->htotal - adjusted_mode->hsync_start;
27150-	u16 hact_end = hact_st + hdisplay;
27151-	u16 vdisplay = adjusted_mode->vdisplay;
27152-	u16 vtotal = adjusted_mode->vtotal;
27153-	u16 vsync_len = adjusted_mode->vsync_end - adjusted_mode->vsync_start;
27154-	u16 vact_st = adjusted_mode->vtotal - adjusted_mode->vsync_start;
27155-	u16 vact_end = vact_st + vdisplay;
27156-	uint32_t pin_pol, val;
27157-	int dither_bpc = s->output_bpc ? s->output_bpc : 10;
27158-	int ret;
27159-
27160-	if (old_state && old_state->self_refresh_active) {
27161-		drm_crtc_vblank_on(crtc);
27162-		rockchip_drm_set_win_enabled(crtc, true);
27163-		return;
27164-	}
27165-
27166-	/*
27167-	 * If we have a GAMMA LUT in the state, then let's make sure
27168-	 * it's updated. We might be coming out of suspend,
27169-	 * which means the LUT internal memory needs to be re-written.
27170-	 */
27171-	if (crtc->state->gamma_lut)
27172-		vop_crtc_gamma_set(vop, crtc, old_state);
27173-
27174-	mutex_lock(&vop->vop_lock);
27175 
27176-	WARN_ON(vop->event);
27177+	spin_lock(&vop->reg_lock);
27178 
27179-	ret = vop_enable(crtc, old_state);
27180-	if (ret) {
27181-		mutex_unlock(&vop->vop_lock);
27182-		DRM_DEV_ERROR(vop->dev, "Failed to enable vop (%d)\n", ret);
27183-		return;
27184-	}
27185-	pin_pol = (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) ?
27186-		   BIT(HSYNC_POSITIVE) : 0;
27187-	pin_pol |= (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) ?
27188-		   BIT(VSYNC_POSITIVE) : 0;
27189-	VOP_REG_SET(vop, output, pin_pol, pin_pol);
27190-	VOP_REG_SET(vop, output, mipi_dual_channel_en, 0);
27191+	vop_update_csc(crtc);
27192 
27193-	switch (s->output_type) {
27194-	case DRM_MODE_CONNECTOR_LVDS:
27195-		VOP_REG_SET(vop, output, rgb_dclk_pol, 1);
27196-		VOP_REG_SET(vop, output, rgb_pin_pol, pin_pol);
27197-		VOP_REG_SET(vop, output, rgb_en, 1);
27198-		break;
27199-	case DRM_MODE_CONNECTOR_eDP:
27200-		VOP_REG_SET(vop, output, edp_dclk_pol, 1);
27201-		VOP_REG_SET(vop, output, edp_pin_pol, pin_pol);
27202-		VOP_REG_SET(vop, output, edp_en, 1);
27203-		break;
27204-	case DRM_MODE_CONNECTOR_HDMIA:
27205-		VOP_REG_SET(vop, output, hdmi_dclk_pol, 1);
27206-		VOP_REG_SET(vop, output, hdmi_pin_pol, pin_pol);
27207-		VOP_REG_SET(vop, output, hdmi_en, 1);
27208-		break;
27209-	case DRM_MODE_CONNECTOR_DSI:
27210-		VOP_REG_SET(vop, output, mipi_dclk_pol, 1);
27211-		VOP_REG_SET(vop, output, mipi_pin_pol, pin_pol);
27212-		VOP_REG_SET(vop, output, mipi_en, 1);
27213-		VOP_REG_SET(vop, output, mipi_dual_channel_en,
27214-			    !!(s->output_flags & ROCKCHIP_OUTPUT_DSI_DUAL));
27215-		break;
27216-	case DRM_MODE_CONNECTOR_DisplayPort:
27217-		VOP_REG_SET(vop, output, dp_dclk_pol, 0);
27218-		VOP_REG_SET(vop, output, dp_pin_pol, pin_pol);
27219-		VOP_REG_SET(vop, output, dp_en, 1);
27220-		break;
27221-	default:
27222-		DRM_DEV_ERROR(vop->dev, "unsupported connector_type [%d]\n",
27223-			      s->output_type);
27224-	}
27225+	vop_tv_config_update(crtc, old_crtc_state);
27226 
27227-	/*
27228-	 * if vop is not support RGB10 output, need force RGB10 to RGB888.
27229-	 */
27230-	if (s->output_mode == ROCKCHIP_OUT_MODE_AAAA &&
27231-	    !(vop_data->feature & VOP_FEATURE_OUTPUT_RGB10))
27232-		s->output_mode = ROCKCHIP_OUT_MODE_P888;
27233+	if (s->afbdc_en) {
27234+		u32 pic_size, pic_offset;
27235 
27236-	if (s->output_mode == ROCKCHIP_OUT_MODE_AAAA && dither_bpc <= 8)
27237-		VOP_REG_SET(vop, common, pre_dither_down, 1);
27238-	else
27239-		VOP_REG_SET(vop, common, pre_dither_down, 0);
27240+		VOP_CTRL_SET(vop, afbdc_format, s->afbdc_win_format | 1 << 4);
27241+		VOP_CTRL_SET(vop, afbdc_hreg_block_split, 0);
27242+		VOP_CTRL_SET(vop, afbdc_sel, s->afbdc_win_id);
27243+		VOP_CTRL_SET(vop, afbdc_hdr_ptr, s->afbdc_win_ptr);
27244+		pic_size = (s->afbdc_win_width & 0xffff);
27245+		pic_size |= s->afbdc_win_height << 16;
27246+		VOP_CTRL_SET(vop, afbdc_pic_size, pic_size);
27247 
27248-	if (dither_bpc == 6) {
27249-		VOP_REG_SET(vop, common, dither_down_sel, DITHER_DOWN_ALLEGRO);
27250-		VOP_REG_SET(vop, common, dither_down_mode, RGB888_TO_RGB666);
27251-		VOP_REG_SET(vop, common, dither_down_en, 1);
27252-	} else {
27253-		VOP_REG_SET(vop, common, dither_down_en, 0);
27254+		VOP_CTRL_SET(vop, afbdc_pic_vir_width, s->afbdc_win_vir_width);
27255+		pic_offset = (s->afbdc_win_xoffset & 0xffff);
27256+		pic_offset |= s->afbdc_win_yoffset << 16;
27257+		VOP_CTRL_SET(vop, afbdc_pic_offset, pic_offset);
27258 	}
27259 
27260-	VOP_REG_SET(vop, common, out_mode, s->output_mode);
27261-
27262-	VOP_REG_SET(vop, modeset, htotal_pw, (htotal << 16) | hsync_len);
27263-	val = hact_st << 16;
27264-	val |= hact_end;
27265-	VOP_REG_SET(vop, modeset, hact_st_end, val);
27266-	VOP_REG_SET(vop, modeset, hpost_st_end, val);
27267-
27268-	VOP_REG_SET(vop, modeset, vtotal_pw, (vtotal << 16) | vsync_len);
27269-	val = vact_st << 16;
27270-	val |= vact_end;
27271-	VOP_REG_SET(vop, modeset, vact_st_end, val);
27272-	VOP_REG_SET(vop, modeset, vpost_st_end, val);
27273-
27274-	VOP_REG_SET(vop, intr, line_flag_num[0], vact_end);
27275+	VOP_CTRL_SET(vop, afbdc_en, s->afbdc_en);
27276 
27277-	clk_set_rate(vop->dclk, adjusted_mode->clock * 1000);
27278+	VOP_CTRL_SET(vop, dsp_layer_sel, s->dsp_layer_sel);
27279+	if (vop_data->feature & VOP_FEATURE_OVERSCAN)
27280+		vop_post_config(crtc);
27281 
27282-	VOP_REG_SET(vop, common, standby, 0);
27283-	mutex_unlock(&vop->vop_lock);
27284+	spin_unlock(&vop->reg_lock);
27285 }
27286 
27287 static bool vop_fs_irq_is_pending(struct vop *vop)
27288 {
27289-	return VOP_INTR_GET_TYPE(vop, status, FS_INTR);
27290+	if (VOP_MAJOR(vop->version) == 3 && VOP_MINOR(vop->version) >= 7)
27291+		return VOP_INTR_GET_TYPE(vop, status, FS_FIELD_INTR);
27292+	else
27293+		return VOP_INTR_GET_TYPE(vop, status, FS_INTR);
27294 }
27295 
27296 static void vop_wait_for_irq_handler(struct vop *vop)
27297@@ -1413,72 +3749,66 @@ static void vop_wait_for_irq_handler(struct vop *vop)
27298 	synchronize_irq(vop->irq);
27299 }
27300 
27301-static int vop_crtc_atomic_check(struct drm_crtc *crtc,
27302-				 struct drm_crtc_state *crtc_state)
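+/*
+ * Flush a commit: write the pending configuration, re-attach the IOMMU if
+ * needed, update HDR and gamma state, then latch the registers with
+ * cfg_done.
+ */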
27303+static void vop_crtc_atomic_flush(struct drm_crtc *crtc,
27304+				  struct drm_crtc_state *old_crtc_state)
27305 {
27306+	struct drm_atomic_state *old_state = old_crtc_state->state;
27307+	struct drm_plane_state *old_plane_state;
27308 	struct vop *vop = to_vop(crtc);
27309 	struct drm_plane *plane;
27310-	struct drm_plane_state *plane_state;
27311-	struct rockchip_crtc_state *s;
27312-	int afbc_planes = 0;
27313+	int i;
27314+	unsigned long flags;
27315+	struct rockchip_crtc_state *s =
27316+		to_rockchip_crtc_state(crtc->state);
27317 
27318-	if (vop->lut_regs && crtc_state->color_mgmt_changed &&
27319-	    crtc_state->gamma_lut) {
27320-		unsigned int len;
27321+	vop_cfg_update(crtc, old_crtc_state);
27322 
27323-		len = drm_color_lut_size(crtc_state->gamma_lut);
27324-		if (len != crtc->gamma_size) {
27325-			DRM_DEBUG_KMS("Invalid LUT size; got %d, expected %d\n",
27326-				      len, crtc->gamma_size);
27327-			return -EINVAL;
27328-		}
27329-	}
27330+	if (!vop->is_iommu_enabled && vop->is_iommu_needed) {
27331+		int ret;
27332 
27333-	drm_atomic_crtc_state_for_each_plane(plane, crtc_state) {
27334-		plane_state =
27335-			drm_atomic_get_plane_state(crtc_state->state, plane);
27336-		if (IS_ERR(plane_state)) {
27337-			DRM_DEBUG_KMS("Cannot get plane state for plane %s\n",
27338-				      plane->name);
27339-			return PTR_ERR(plane_state);
27340-		}
27341+		if (s->mode_update)
27342+			VOP_CTRL_SET(vop, dma_stop, 1);
27343 
27344-		if (drm_is_afbc(plane_state->fb->modifier))
27345-			++afbc_planes;
27346+		ret = rockchip_drm_dma_attach_device(vop->drm_dev, vop->dev);
27347+		if (ret) {
27348+			vop->is_iommu_enabled = false;
27349+			vop_disable_all_planes(vop);
27350+			dev_err(vop->dev, "failed to attach dma mapping, %d\n",
27351+				ret);
27352+		} else {
27353+			vop->is_iommu_enabled = true;
27354+			VOP_CTRL_SET(vop, dma_stop, 0);
27355+		}
27356 	}
27357 
27358-	if (afbc_planes > 1) {
27359-		DRM_DEBUG_KMS("Invalid number of AFBC planes; got %d, expected at most 1\n", afbc_planes);
27360-		return -EINVAL;
27361+	vop_update_hdr(crtc, old_crtc_state);
27362+	if (old_crtc_state->color_mgmt_changed || old_crtc_state->active_changed) {
27363+		if (crtc->state->gamma_lut || vop->gamma_lut) {
27364+			if (old_crtc_state->gamma_lut)
27365+				vop->gamma_lut = old_crtc_state->gamma_lut->data;
27366+			vop_crtc_atomic_gamma_set(crtc, old_crtc_state);
27367+		}
27368 	}
27369 
27370-	s = to_rockchip_crtc_state(crtc_state);
27371-	s->enable_afbc = afbc_planes > 0;
27372-
27373-	return 0;
27374-}
27375-
27376-static void vop_crtc_atomic_flush(struct drm_crtc *crtc,
27377-				  struct drm_crtc_state *old_crtc_state)
27378-{
27379-	struct drm_atomic_state *old_state = old_crtc_state->state;
27380-	struct drm_plane_state *old_plane_state, *new_plane_state;
27381-	struct vop *vop = to_vop(crtc);
27382-	struct drm_plane *plane;
27383-	struct rockchip_crtc_state *s;
27384-	int i;
27385-
27386-	if (WARN_ON(!vop->is_enabled))
27387-		return;
27388-
27389-	spin_lock(&vop->reg_lock);
27390-
27391-	/* Enable AFBC if there is some AFBC window, disable otherwise. */
27392-	s = to_rockchip_crtc_state(crtc->state);
27393-	VOP_AFBC_SET(vop, enable, s->enable_afbc);
27394+	spin_lock_irqsave(&vop->irq_lock, flags);
27395+	vop->pre_overlay = s->hdr.pre_overlay;
27396 	vop_cfg_done(vop);
27397+	/*
27398+	 * On rk322x and rk332x the odd/even fields can get mixed up in
27399+	 * interlaced mode. Switch to frame-effective register updates before a
27400+	 * screen switch and back to field-effective updates once it completes.
27401+	 */
27402+	if (VOP_MAJOR(vop->version) == 3 &&
27403+	    (VOP_MINOR(vop->version) == 7 || VOP_MINOR(vop->version) == 8)) {
27404+		if (!s->mode_update && VOP_CTRL_GET(vop, reg_done_frm))
27405+			VOP_CTRL_SET(vop, reg_done_frm, 0);
27406+	} else {
27407+		VOP_CTRL_SET(vop, reg_done_frm, 0);
27408+	}
27409+	if (vop->mcu_timing.mcu_pix_total)
27410+		VOP_CTRL_SET(vop, mcu_hold_mode, 0);
27411 
27412-	spin_unlock(&vop->reg_lock);
27413+	spin_unlock_irqrestore(&vop->irq_lock, flags);
27414 
27415 	/*
27416 	 * There is a (rather unlikely) possiblity that a vblank interrupt
27417@@ -1496,13 +3826,11 @@ static void vop_crtc_atomic_flush(struct drm_crtc *crtc,
27418 		crtc->state->event = NULL;
27419 	}
27420 	spin_unlock_irq(&crtc->dev->event_lock);
27421-
27422-	for_each_oldnew_plane_in_state(old_state, plane, old_plane_state,
27423-				       new_plane_state, i) {
27424+	for_each_old_plane_in_state(old_state, plane, old_plane_state, i) {
27425 		if (!old_plane_state->fb)
27426 			continue;
27427 
27428-		if (old_plane_state->fb == new_plane_state->fb)
27429+		if (old_plane_state->fb == plane->state->fb)
27430 			continue;
27431 
27432 		drm_framebuffer_get(old_plane_state->fb);
27433@@ -1515,7 +3843,6 @@ static void vop_crtc_atomic_flush(struct drm_crtc *crtc,
27434 static const struct drm_crtc_helper_funcs vop_crtc_helper_funcs = {
27435 	.mode_fixup = vop_crtc_mode_fixup,
27436 	.atomic_check = vop_crtc_atomic_check,
27437-	.atomic_begin = vop_crtc_atomic_begin,
27438 	.atomic_flush = vop_crtc_atomic_flush,
27439 	.atomic_enable = vop_crtc_atomic_enable,
27440 	.atomic_disable = vop_crtc_atomic_disable,
27441@@ -1526,14 +3853,33 @@ static void vop_crtc_destroy(struct drm_crtc *crtc)
27442 	drm_crtc_cleanup(crtc);
27443 }
27444 
27445-static struct drm_crtc_state *vop_crtc_duplicate_state(struct drm_crtc *crtc)
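+/* Reset the CRTC state; overscan margins default to 100 (no overscan). */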
27446+static void vop_crtc_reset(struct drm_crtc *crtc)
27447 {
27448-	struct rockchip_crtc_state *rockchip_state;
27449+	struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc->state);
27450 
27451-	if (WARN_ON(!crtc->state))
27452-		return NULL;
27453+	if (crtc->state) {
27454+		__drm_atomic_helper_crtc_destroy_state(crtc->state);
27455+		kfree(s);
27456+	}
27457+
27458+	s = kzalloc(sizeof(*s), GFP_KERNEL);
27459+	if (!s)
27460+		return;
27461+	crtc->state = &s->base;
27462+	crtc->state->crtc = crtc;
27463+
27464+	s->left_margin = 100;
27465+	s->right_margin = 100;
27466+	s->top_margin = 100;
27467+	s->bottom_margin = 100;
27468+}
27469+
27470+static struct drm_crtc_state *vop_crtc_duplicate_state(struct drm_crtc *crtc)
27471+{
27472+	struct rockchip_crtc_state *rockchip_state, *old_state;
27473 
27474-	rockchip_state = kzalloc(sizeof(*rockchip_state), GFP_KERNEL);
27475+	old_state = to_rockchip_crtc_state(crtc->state);
27476+	rockchip_state = kmemdup(old_state, sizeof(*old_state), GFP_KERNEL);
27477 	if (!rockchip_state)
27478 		return NULL;
27479 
27480@@ -1550,17 +3896,6 @@ static void vop_crtc_destroy_state(struct drm_crtc *crtc,
27481 	kfree(s);
27482 }
27483 
27484-static void vop_crtc_reset(struct drm_crtc *crtc)
27485-{
27486-	struct rockchip_crtc_state *crtc_state =
27487-		kzalloc(sizeof(*crtc_state), GFP_KERNEL);
27488-
27489-	if (crtc->state)
27490-		vop_crtc_destroy_state(crtc, crtc->state);
27491-
27492-	__drm_atomic_helper_crtc_reset(crtc, &crtc_state->base);
27493-}
27494-
27495 #ifdef CONFIG_DRM_ANALOGIX_DP
27496 static struct drm_connector *vop_get_edp_connector(struct vop *vop)
27497 {
27498@@ -1611,33 +3946,131 @@ vop_crtc_verify_crc_source(struct drm_crtc *crtc, const char *source_name,
27499 	return 0;
27500 }
27501 
27502-#else
27503-static int vop_crtc_set_crc_source(struct drm_crtc *crtc,
27504-				   const char *source_name)
27505-{
27506-	return -ENODEV;
27507-}
27508+#else
27509+static int vop_crtc_set_crc_source(struct drm_crtc *crtc,
27510+				   const char *source_name)
27511+{
27512+	return -ENODEV;
27513+}
27514+
27515+static int
27516+vop_crtc_verify_crc_source(struct drm_crtc *crtc, const char *source_name,
27517+			   size_t *values_cnt)
27518+{
27519+	return -ENODEV;
27520+}
27521+#endif
27522+
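+/*
+ * Read back the Rockchip-specific CRTC properties: overscan margins,
+ * aclk rate, background color and line flag.
+ */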
27523+static int vop_crtc_atomic_get_property(struct drm_crtc *crtc,
27524+					const struct drm_crtc_state *state,
27525+					struct drm_property *property,
27526+					uint64_t *val)
27527+{
27528+	struct drm_device *drm_dev = crtc->dev;
27529+	struct rockchip_drm_private *private = drm_dev->dev_private;
27530+	struct drm_mode_config *mode_config = &drm_dev->mode_config;
27531+	struct rockchip_crtc_state *s = to_rockchip_crtc_state(state);
27532+	struct vop *vop = to_vop(crtc);
27533+
27534+	if (property == mode_config->tv_left_margin_property) {
27535+		*val = s->left_margin;
27536+		return 0;
27537+	}
27538+
27539+	if (property == mode_config->tv_right_margin_property) {
27540+		*val = s->right_margin;
27541+		return 0;
27542+	}
27543+
27544+	if (property == mode_config->tv_top_margin_property) {
27545+		*val = s->top_margin;
27546+		return 0;
27547+	}
27548+
27549+	if (property == mode_config->tv_bottom_margin_property) {
27550+		*val = s->bottom_margin;
27551+		return 0;
27552+	}
27553+
27554+	if (property == private->aclk_prop) {
27555+		/* KHZ, keep align with mode->clock */
27556+		/* kHz, kept aligned with mode->clock */
27557+		return 0;
27558+	}
27559+
27560+	if (property == private->bg_prop) {
27561+		*val = vop->background;
27562+		return 0;
27563+	}
27564+
27565+	if (property == private->line_flag_prop) {
27566+		*val = vop->line_flag;
27567+		return 0;
27568+	}
27569+
27570+	DRM_ERROR("failed to get vop crtc property\n");
27571+	return -EINVAL;
27572+}
27573+
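+/*
+ * Store writes to the Rockchip-specific CRTC properties (margins in the
+ * state, background and line flag on the vop).
+ */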
27574+static int vop_crtc_atomic_set_property(struct drm_crtc *crtc,
27575+					struct drm_crtc_state *state,
27576+					struct drm_property *property,
27577+					uint64_t val)
27578+{
27579+	struct drm_device *drm_dev = crtc->dev;
27580+	struct rockchip_drm_private *private = drm_dev->dev_private;
27581+	struct drm_mode_config *mode_config = &drm_dev->mode_config;
27582+	struct rockchip_crtc_state *s = to_rockchip_crtc_state(state);
27583+	struct vop *vop = to_vop(crtc);
27584+
27585+	if (property == mode_config->tv_left_margin_property) {
27586+		s->left_margin = val;
27587+		return 0;
27588+	}
27589+
27590+	if (property == mode_config->tv_right_margin_property) {
27591+		s->right_margin = val;
27592+		return 0;
27593+	}
27594+
27595+	if (property == mode_config->tv_top_margin_property) {
27596+		s->top_margin = val;
27597+		return 0;
27598+	}
27599+
27600+	if (property == mode_config->tv_bottom_margin_property) {
27601+		s->bottom_margin = val;
27602+		return 0;
27603+	}
27604+
27605+	if (property == private->bg_prop) {
27606+		vop->background = val;
27607+		return 0;
27608+	}
27609+
27610+	if (property == private->line_flag_prop) {
27611+		vop->line_flag = val;
27612+		return 0;
27613+	}
27614 
27615-static int
27616-vop_crtc_verify_crc_source(struct drm_crtc *crtc, const char *source_name,
27617-			   size_t *values_cnt)
27618-{
27619-	return -ENODEV;
27620+	DRM_ERROR("failed to set vop crtc property\n");
27621+	return -EINVAL;
27622 }
27623-#endif
27624 
27625 static const struct drm_crtc_funcs vop_crtc_funcs = {
27626+	.gamma_set = vop_crtc_legacy_gamma_set,
27627 	.set_config = drm_atomic_helper_set_config,
27628 	.page_flip = drm_atomic_helper_page_flip,
27629 	.destroy = vop_crtc_destroy,
27630 	.reset = vop_crtc_reset,
27631+	.atomic_get_property = vop_crtc_atomic_get_property,
27632+	.atomic_set_property = vop_crtc_atomic_set_property,
27633 	.atomic_duplicate_state = vop_crtc_duplicate_state,
27634 	.atomic_destroy_state = vop_crtc_destroy_state,
27635 	.enable_vblank = vop_crtc_enable_vblank,
27636 	.disable_vblank = vop_crtc_disable_vblank,
27637 	.set_crc_source = vop_crtc_set_crc_source,
27638 	.verify_crc_source = vop_crtc_verify_crc_source,
27639-	.gamma_set = drm_atomic_helper_legacy_gamma_set,
27640 };
27641 
27642 static void vop_fb_unref_worker(struct drm_flip_work *work, void *val)
27643@@ -1645,22 +4078,23 @@ static void vop_fb_unref_worker(struct drm_flip_work *work, void *val)
27644 	struct vop *vop = container_of(work, struct vop, fb_unref_work);
27645 	struct drm_framebuffer *fb = val;
27646 
27647-	drm_crtc_vblank_put(&vop->crtc);
27648+	drm_crtc_vblank_put(&vop->rockchip_crtc.crtc);
27649 	drm_framebuffer_put(fb);
27650 }
27651 
27652 static void vop_handle_vblank(struct vop *vop)
27653 {
27654 	struct drm_device *drm = vop->drm_dev;
27655-	struct drm_crtc *crtc = &vop->crtc;
27656+	struct drm_crtc *crtc = &vop->rockchip_crtc.crtc;
27657+	unsigned long flags;
27658 
27659-	spin_lock(&drm->event_lock);
27660+	spin_lock_irqsave(&drm->event_lock, flags);
27661 	if (vop->event) {
27662 		drm_crtc_send_vblank_event(crtc, vop->event);
27663 		drm_crtc_vblank_put(crtc);
27664 		vop->event = NULL;
27665 	}
27666-	spin_unlock(&drm->event_lock);
27667+	spin_unlock_irqrestore(&drm->event_lock, flags);
27668 
27669 	if (test_and_clear_bit(VOP_PENDING_FB_UNREF, &vop->pending))
27670 		drm_flip_work_commit(&vop->fb_unref_work, system_unbound_wq);
27671@@ -1669,8 +4103,9 @@ static void vop_handle_vblank(struct vop *vop)
27672 static irqreturn_t vop_isr(int irq, void *data)
27673 {
27674 	struct vop *vop = data;
27675-	struct drm_crtc *crtc = &vop->crtc;
27676+	struct drm_crtc *crtc = &vop->rockchip_crtc.crtc;
27677 	uint32_t active_irqs;
27678+	unsigned long flags;
27679 	int ret = IRQ_NONE;
27680 
27681 	/*
27682@@ -1689,14 +4124,14 @@ static irqreturn_t vop_isr(int irq, void *data)
27683 	 * interrupt register has interrupt status, enable and clear bits, we
27684 	 * must hold irq_lock to avoid a race with enable/disable_vblank().
27685 	*/
27686-	spin_lock(&vop->irq_lock);
27687+	spin_lock_irqsave(&vop->irq_lock, flags);
27688 
27689 	active_irqs = VOP_INTR_GET_TYPE(vop, status, INTR_MASK);
27690 	/* Clear all active interrupt sources */
27691 	if (active_irqs)
27692 		VOP_INTR_SET_TYPE(vop, clear, active_irqs, 1);
27693 
27694-	spin_unlock(&vop->irq_lock);
27695+	spin_unlock_irqrestore(&vop->irq_lock, flags);
27696 
27697 	/* This is expected for vop iommu irqs, since the irq is shared */
27698 	if (!active_irqs)
27699@@ -1714,17 +4149,41 @@ static irqreturn_t vop_isr(int irq, void *data)
27700 		ret = IRQ_HANDLED;
27701 	}
27702 
27703-	if (active_irqs & FS_INTR) {
27704+	if ((active_irqs & FS_INTR) || (active_irqs & FS_FIELD_INTR)) {
27705+		/* IC design quirk: these two register bits should take effect per
27706+		 * frame, but they actually take effect immediately, so configure
27707+		 * them at frame start instead.
27708+		 */
27709+		spin_lock_irqsave(&vop->irq_lock, flags);
27710+		VOP_CTRL_SET(vop, level2_overlay_en, vop->pre_overlay);
27711+		VOP_CTRL_SET(vop, alpha_hard_calc, vop->pre_overlay);
27712+		spin_unlock_irqrestore(&vop->irq_lock, flags);
27713 		drm_crtc_handle_vblank(crtc);
27714 		vop_handle_vblank(vop);
27715-		active_irqs &= ~FS_INTR;
27716+		active_irqs &= ~(FS_INTR | FS_FIELD_INTR);
27717 		ret = IRQ_HANDLED;
27718 	}
27719 
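+/* Report (rate-limited) and acknowledge the error interrupts below. */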
27720+#define ERROR_HANDLER(x) \
27721+	do { \
27722+		if (active_irqs & x##_INTR) {\
27723+			DRM_DEV_ERROR_RATELIMITED(vop->dev, #x " irq err\n"); \
27724+			active_irqs &= ~x##_INTR; \
27725+			ret = IRQ_HANDLED; \
27726+		} \
27727+	} while (0)
27728+
27729+	ERROR_HANDLER(BUS_ERROR);
27730+	ERROR_HANDLER(WIN0_EMPTY);
27731+	ERROR_HANDLER(WIN1_EMPTY);
27732+	ERROR_HANDLER(WIN2_EMPTY);
27733+	ERROR_HANDLER(WIN3_EMPTY);
27734+	ERROR_HANDLER(HWC_EMPTY);
27735+	ERROR_HANDLER(POST_BUF_EMPTY);
27736+
27737 	/* Unhandled irqs are spurious. */
27738 	if (active_irqs)
27739-		DRM_DEV_ERROR(vop->dev, "Unknown VOP IRQs: %#02x\n",
27740-			      active_irqs);
27741+		DRM_ERROR("Unknown VOP IRQs: %#02x\n", active_irqs);
27742 
27743 out_disable:
27744 	vop_core_clks_disable(vop);
27745@@ -1733,27 +4192,254 @@ static irqreturn_t vop_isr(int irq, void *data)
27746 	return ret;
27747 }
27748 
27749-static void vop_plane_add_properties(struct drm_plane *plane,
27750-				     const struct vop_win_data *win_data)
27751+static void vop_plane_add_properties(struct vop *vop,
27752+				     struct drm_plane *plane,
27753+				     const struct vop_win *win)
27754 {
27755 	unsigned int flags = 0;
27756 
27757-	flags |= VOP_WIN_HAS_REG(win_data, x_mir_en) ? DRM_MODE_REFLECT_X : 0;
27758-	flags |= VOP_WIN_HAS_REG(win_data, y_mir_en) ? DRM_MODE_REFLECT_Y : 0;
27759+	flags |= (VOP_WIN_SUPPORT(vop, win, xmirror)) ? DRM_MODE_REFLECT_X : 0;
27760+	flags |= (VOP_WIN_SUPPORT(vop, win, ymirror)) ? DRM_MODE_REFLECT_Y : 0;
27761+
27762 	if (flags)
27763 		drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
27764 						   DRM_MODE_ROTATE_0 | flags);
27765 }
27766 
27767-static int vop_create_crtc(struct vop *vop)
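+/* Attach an immutable NAME bitmask property that identifies this plane. */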
27768+static int vop_plane_create_name_property(struct vop *vop, struct vop_win *win)
27769+{
27770+	struct drm_prop_enum_list *props = vop->plane_name_list;
27771+	struct drm_property *prop;
27772+	uint64_t bits = BIT_ULL(win->plane_id);
27773+
27774+	prop = drm_property_create_bitmask(vop->drm_dev,
27775+					   DRM_MODE_PROP_IMMUTABLE, "NAME",
27776+					   props, vop->num_wins, bits);
27777+	if (!prop) {
27778+		DRM_DEV_ERROR(vop->dev, "create Name prop for %s failed\n", win->name);
27779+		return -ENOMEM;
27780+	}
27781+	win->name_prop = prop;
27782+	drm_object_attach_property(&win->base.base, win->name_prop, bits);
27783+
27784+	return 0;
27785+}
27786+
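+/*
+ * Register the plane with DRM and attach the Rockchip-specific properties
+ * (feature bits, eotf, color space, blend mode, zpos, name, size limits,
+ * scale rate and color key).
+ */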
27787+static int vop_plane_init(struct vop *vop, struct vop_win *win,
27788+			  unsigned long possible_crtcs)
27789+{
27790+	struct rockchip_drm_private *private = vop->drm_dev->dev_private;
27791+	unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) | BIT(DRM_MODE_BLEND_PREMULTI) |
27792+				  BIT(DRM_MODE_BLEND_COVERAGE);
27793+	const struct vop_data *vop_data = vop->data;
27794+	uint64_t feature = 0;
27795+	int ret;
27796+
27797+	ret = drm_universal_plane_init(vop->drm_dev, &win->base, possible_crtcs, &vop_plane_funcs,
27798+				       win->data_formats, win->nformats, win->format_modifiers,
27799+				       win->type, win->name);
27800+	if (ret) {
27801+		DRM_ERROR("failed to initialize plane %d\n", ret);
27802+		return ret;
27803+	}
27804+	drm_plane_helper_add(&win->base, &plane_helper_funcs);
27805+
27806+	if (win->phy->scl)
27807+		feature |= BIT(ROCKCHIP_DRM_PLANE_FEATURE_SCALE);
27808+	if (VOP_WIN_SUPPORT(vop, win, src_alpha_ctl) ||
27809+	    VOP_WIN_SUPPORT(vop, win, alpha_en))
27810+		feature |= BIT(ROCKCHIP_DRM_PLANE_FEATURE_ALPHA);
27811+	if (win->feature & WIN_FEATURE_HDR2SDR)
27812+		feature |= BIT(ROCKCHIP_DRM_PLANE_FEATURE_HDR2SDR);
27813+	if (win->feature & WIN_FEATURE_SDR2HDR)
27814+		feature |= BIT(ROCKCHIP_DRM_PLANE_FEATURE_SDR2HDR);
27815+	if (win->feature & WIN_FEATURE_AFBDC)
27816+		feature |= BIT(ROCKCHIP_DRM_PLANE_FEATURE_AFBDC);
27817+
27818+	drm_object_attach_property(&win->base.base, vop->plane_feature_prop,
27819+				   feature);
27820+	drm_object_attach_property(&win->base.base, private->eotf_prop, 0);
27821+	drm_object_attach_property(&win->base.base,
27822+				   private->color_space_prop, 0);
27823+	if (VOP_WIN_SUPPORT(vop, win, global_alpha_val))
27824+		drm_plane_create_alpha_property(&win->base);
27825+	drm_object_attach_property(&win->base.base,
27826+				   private->async_commit_prop, 0);
27827+
27828+	if (win->parent)
27829+		drm_object_attach_property(&win->base.base, private->share_id_prop,
27830+					   win->parent->base.base.id);
27831+	else
27832+		drm_object_attach_property(&win->base.base, private->share_id_prop,
27833+					   win->base.base.id);
27834+
27835+	drm_plane_create_blend_mode_property(&win->base, blend_caps);
27836+	drm_plane_create_zpos_property(&win->base, win->win_id, 0, vop->num_wins - 1);
27837+	vop_plane_create_name_property(vop, win);
27838+
27839+
27840+	win->input_width_prop = drm_property_create_range(vop->drm_dev, DRM_MODE_PROP_IMMUTABLE,
27841+							  "INPUT_WIDTH", 0, vop_data->max_input.width);
27842+	win->input_height_prop = drm_property_create_range(vop->drm_dev, DRM_MODE_PROP_IMMUTABLE,
27843+							   "INPUT_HEIGHT", 0, vop_data->max_input.height);
27844+
27845+	win->output_width_prop = drm_property_create_range(vop->drm_dev, DRM_MODE_PROP_IMMUTABLE,
27846+							   "OUTPUT_WIDTH", 0, vop_data->max_input.width);
27847+	win->output_height_prop = drm_property_create_range(vop->drm_dev, DRM_MODE_PROP_IMMUTABLE,
27848+							    "OUTPUT_HEIGHT", 0, vop_data->max_input.height);
27849+
27850+	win->scale_prop = drm_property_create_range(vop->drm_dev, DRM_MODE_PROP_IMMUTABLE,
27851+						    "SCALE_RATE", 8, 8);
27852+	/*
27853+	 * Support a 24-bit (RGB888) or 16-bit (RGB565) color key.
27854+	 * Bit 31 is used as a flag to disable (0) or enable
27855+	 * color keying (1).
27856+	 */
27857+	win->color_key_prop = drm_property_create_range(vop->drm_dev, 0,
27858+							"colorkey", 0, 0x80ffffff);
27859+	if (!win->input_width_prop || !win->input_height_prop ||
27860+	    !win->scale_prop || !win->color_key_prop) {
27861+		DRM_ERROR("failed to create property\n");
27862+		return -ENOMEM;
27863+	}
27864+
27865+	drm_object_attach_property(&win->base.base, win->input_width_prop, 0);
27866+	drm_object_attach_property(&win->base.base, win->input_height_prop, 0);
27867+	drm_object_attach_property(&win->base.base, win->output_width_prop, 0);
27868+	drm_object_attach_property(&win->base.base, win->output_height_prop, 0);
27869+	drm_object_attach_property(&win->base.base, win->scale_prop, 0);
27870+	drm_object_attach_property(&win->base.base, win->color_key_prop, 0);
27871+
27872+	return 0;
27873+}
27874+
27875+static int vop_of_init_display_lut(struct vop *vop)
27876+{
27877+	struct device_node *node = vop->dev->of_node;
27878+	struct device_node *dsp_lut;
27879+	u32 lut_len = vop->lut_len;
27880+	struct property *prop;
27881+	int length, i, j;
27882+	int ret;
27883+
27884+	if (!vop->lut)
27885+		return -ENOMEM;
27886+
27887+	dsp_lut = of_parse_phandle(node, "dsp-lut", 0);
27888+	if (!dsp_lut)
27889+		return -ENXIO;
27890+
27891+	prop = of_find_property(dsp_lut, "gamma-lut", &length);
27892+	if (!prop) {
27893+		dev_err(vop->dev, "failed to find gamma-lut\n");
27894+		return -ENXIO;
27895+	}
27896+
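+	/* of_find_property() reports the size in bytes; each entry is a u32 */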
27897+	length >>= 2;
27898+
27899+	if (length != lut_len) {
27900+		u32 r, g, b;
27901+		u32 *lut = kmalloc_array(length, sizeof(*lut), GFP_KERNEL);
27902+
27903+		if (!lut)
27904+			return -ENOMEM;
27905+		ret = of_property_read_u32_array(dsp_lut, "gamma-lut", lut,
27906+						 length);
27907+		if (ret) {
27908+			dev_err(vop->dev, "load gamma-lut failed\n");
27909+			kfree(lut);
27910+			return -EINVAL;
27911+		}
27912+
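+		/*
+		 * Each DT entry packs R/G/B as base-'length' digits; unpack each
+		 * channel, rescale it to the hardware LUT depth, and repack the
+		 * result as base-'lut_len' digits.
+		 */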
27913+		for (i = 0; i < lut_len; i++) {
27914+			j = i * length / lut_len;
27915+			r = lut[j] / length / length * lut_len / length;
27916+			g = lut[j] / length % length * lut_len / length;
27917+			b = lut[j] % length * lut_len / length;
27918+
27919+			vop->lut[i] = r * lut_len * lut_len + g * lut_len + b;
27920+		}
27921+
27922+		kfree(lut);
27923+	} else {
27924+		of_property_read_u32_array(dsp_lut, "gamma-lut",
27925+					   vop->lut, vop->lut_len);
27926+	}
27927+	vop->lut_active = true;
27928+
27929+	return 0;
27930+}
27931+
27932+static int vop_crtc_create_plane_mask_property(struct vop *vop, struct drm_crtc *crtc)
27933+{
27934+	struct drm_property *prop;
27935+
27936+	static const struct drm_prop_enum_list props[] = {
27937+		{ ROCKCHIP_VOP_WIN0, "Win0" },
27938+		{ ROCKCHIP_VOP_WIN1, "Win1" },
27939+		{ ROCKCHIP_VOP_WIN2, "Win2" },
27940+		{ ROCKCHIP_VOP_WIN3, "Win3" },
27941+	};
27942+
27943+	prop = drm_property_create_bitmask(vop->drm_dev,
27944+					   DRM_MODE_PROP_IMMUTABLE, "PLANE_MASK",
27945+					   props, ARRAY_SIZE(props),
27946+					   0xffffffff);
27947+	if (!prop) {
27948+		DRM_DEV_ERROR(vop->dev, "create plane_mask prop for vp%d failed\n", vop->id);
27949+		return -ENOMEM;
27950+	}
27951+
27952+	vop->plane_mask_prop = prop;
27953+	drm_object_attach_property(&crtc->base, vop->plane_mask_prop, vop->plane_mask);
27954+
27955+	return 0;
27956+}
27957+
27958+static int vop_crtc_create_feature_property(struct vop *vop, struct drm_crtc *crtc)
27959 {
27960 	const struct vop_data *vop_data = vop->data;
27961+
27962+	struct drm_property *prop;
27963+	u64 feature = 0;
27964+
27965+	static const struct drm_prop_enum_list props[] = {
27966+		{ ROCKCHIP_DRM_CRTC_FEATURE_ALPHA_SCALE, "ALPHA_SCALE" },
27967+		{ ROCKCHIP_DRM_CRTC_FEATURE_HDR10, "HDR10" },
27968+		{ ROCKCHIP_DRM_CRTC_FEATURE_NEXT_HDR, "NEXT_HDR" },
27969+	};
27970+
27971+	if (vop_data->feature & VOP_FEATURE_ALPHA_SCALE)
27972+		feature |= BIT(ROCKCHIP_DRM_CRTC_FEATURE_ALPHA_SCALE);
27973+	if (vop_data->feature & VOP_FEATURE_HDR10)
27974+		feature |= BIT(ROCKCHIP_DRM_CRTC_FEATURE_HDR10);
27975+	if (vop_data->feature & VOP_FEATURE_NEXT_HDR)
27976+		feature |= BIT(ROCKCHIP_DRM_CRTC_FEATURE_NEXT_HDR);
27977+
27978+	prop = drm_property_create_bitmask(vop->drm_dev,
27979+					   DRM_MODE_PROP_IMMUTABLE, "FEATURE",
27980+					   props, ARRAY_SIZE(props),
27981+					   0xffffffff);
27982+	if (!prop) {
27983+		DRM_DEV_ERROR(vop->dev, "create FEATURE prop for vop%d failed\n", vop->id);
27984+		return -ENOMEM;
27985+	}
27986+
27987+	vop->feature_prop = prop;
27988+	drm_object_attach_property(&crtc->base, vop->feature_prop, feature);
27989+
27990+	return 0;
27991+}
27992+
27993+static int vop_create_crtc(struct vop *vop)
27994+{
27995 	struct device *dev = vop->dev;
27996 	struct drm_device *drm_dev = vop->drm_dev;
27997+	struct rockchip_drm_private *private = drm_dev->dev_private;
27998 	struct drm_plane *primary = NULL, *cursor = NULL, *plane, *tmp;
27999-	struct drm_crtc *crtc = &vop->crtc;
28000+	struct drm_crtc *crtc = &vop->rockchip_crtc.crtc;
28001 	struct device_node *port;
28002-	int ret;
28003+	int ret = 0;
28004 	int i;
28005 
28006 	/*
28007@@ -1761,29 +4447,19 @@ static int vop_create_crtc(struct vop *vop)
28008 	 * to pass them to drm_crtc_init_with_planes, which sets the
28009 	 * "possible_crtcs" to the newly initialized crtc.
28010 	 */
28011-	for (i = 0; i < vop_data->win_size; i++) {
28012-		struct vop_win *vop_win = &vop->win[i];
28013-		const struct vop_win_data *win_data = vop_win->data;
28014+	for (i = 0; i < vop->num_wins; i++) {
28015+		struct vop_win *win = &vop->win[i];
28016 
28017-		if (win_data->type != DRM_PLANE_TYPE_PRIMARY &&
28018-		    win_data->type != DRM_PLANE_TYPE_CURSOR)
28019+		if (win->type != DRM_PLANE_TYPE_PRIMARY &&
28020+		    win->type != DRM_PLANE_TYPE_CURSOR)
28021 			continue;
28022 
28023-		ret = drm_universal_plane_init(vop->drm_dev, &vop_win->base,
28024-					       0, &vop_plane_funcs,
28025-					       win_data->phy->data_formats,
28026-					       win_data->phy->nformats,
28027-					       win_data->phy->format_modifiers,
28028-					       win_data->type, NULL);
28029-		if (ret) {
28030-			DRM_DEV_ERROR(vop->dev, "failed to init plane %d\n",
28031-				      ret);
28032+		if (vop_plane_init(vop, win, 0)) {
28033+			DRM_DEV_ERROR(vop->dev, "failed to init plane\n");
28034 			goto err_cleanup_planes;
28035 		}
28036 
28037-		plane = &vop_win->base;
28038-		drm_plane_helper_add(plane, &plane_helper_funcs);
28039-		vop_plane_add_properties(plane, win_data);
28040+		plane = &win->base;
28041 		if (plane->type == DRM_PLANE_TYPE_PRIMARY)
28042 			primary = plane;
28043 		else if (plane->type == DRM_PLANE_TYPE_CURSOR)
28044@@ -1796,37 +4472,23 @@ static int vop_create_crtc(struct vop *vop)
28045 		goto err_cleanup_planes;
28046 
28047 	drm_crtc_helper_add(crtc, &vop_crtc_helper_funcs);
28048-	if (vop->lut_regs) {
28049-		drm_mode_crtc_set_gamma_size(crtc, vop_data->lut_size);
28050-		drm_crtc_enable_color_mgmt(crtc, 0, false, vop_data->lut_size);
28051-	}
28052 
28053 	/*
28054 	 * Create drm_planes for overlay windows with possible_crtcs restricted
28055 	 * to the newly created crtc.
28056 	 */
28057-	for (i = 0; i < vop_data->win_size; i++) {
28058-		struct vop_win *vop_win = &vop->win[i];
28059-		const struct vop_win_data *win_data = vop_win->data;
28060+	for (i = 0; i < vop->num_wins; i++) {
28061+		struct vop_win *win = &vop->win[i];
28062 		unsigned long possible_crtcs = drm_crtc_mask(crtc);
28063 
28064-		if (win_data->type != DRM_PLANE_TYPE_OVERLAY)
28065+		if (win->type != DRM_PLANE_TYPE_OVERLAY)
28066 			continue;
28067 
28068-		ret = drm_universal_plane_init(vop->drm_dev, &vop_win->base,
28069-					       possible_crtcs,
28070-					       &vop_plane_funcs,
28071-					       win_data->phy->data_formats,
28072-					       win_data->phy->nformats,
28073-					       win_data->phy->format_modifiers,
28074-					       win_data->type, NULL);
28075-		if (ret) {
28076-			DRM_DEV_ERROR(vop->dev, "failed to init overlay %d\n",
28077-				      ret);
28078+		if (vop_plane_init(vop, win, possible_crtcs)) {
28079+			DRM_DEV_ERROR(vop->dev, "failed to init overlay\n");
28080 			goto err_cleanup_crtc;
28081 		}
28082-		drm_plane_helper_add(&vop_win->base, &plane_helper_funcs);
28083-		vop_plane_add_properties(&vop_win->base, win_data);
28084+		vop_plane_add_properties(vop, &win->base, win);
28085 	}
28086 
28087 	port = of_get_child_by_name(dev->of_node, "port");
28088@@ -1843,15 +4505,60 @@ static int vop_create_crtc(struct vop *vop)
28089 	init_completion(&vop->dsp_hold_completion);
28090 	init_completion(&vop->line_flag_completion);
28091 	crtc->port = port;
28092+	rockchip_register_crtc_funcs(crtc, &private_crtc_funcs);
28093 
28094-	ret = drm_self_refresh_helper_init(crtc);
28095-	if (ret)
28096-		DRM_DEV_DEBUG_KMS(vop->dev,
28097-			"Failed to init %s with SR helpers %d, ignoring\n",
28098-			crtc->name, ret);
28099+	drm_object_attach_property(&crtc->base, private->soc_id_prop, vop->soc_id);
28100+	drm_object_attach_property(&crtc->base, private->port_id_prop, vop->id);
28101+	drm_object_attach_property(&crtc->base, private->aclk_prop, 0);
28102+	drm_object_attach_property(&crtc->base, private->bg_prop, 0);
28103+	drm_object_attach_property(&crtc->base, private->line_flag_prop, 0);
28104+
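+/* Attach a drm mode_config property to this CRTC with an initial value. */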
28105+#define VOP_ATTACH_MODE_CONFIG_PROP(prop, v) \
28106+	drm_object_attach_property(&crtc->base, drm_dev->mode_config.prop, v)
28107+
28108+	VOP_ATTACH_MODE_CONFIG_PROP(tv_left_margin_property, 100);
28109+	VOP_ATTACH_MODE_CONFIG_PROP(tv_right_margin_property, 100);
28110+	VOP_ATTACH_MODE_CONFIG_PROP(tv_top_margin_property, 100);
28111+	VOP_ATTACH_MODE_CONFIG_PROP(tv_bottom_margin_property, 100);
28112+#undef VOP_ATTACH_MODE_CONFIG_PROP
28113+	vop_crtc_create_plane_mask_property(vop, crtc);
28114+	vop_crtc_create_feature_property(vop, crtc);
28115 
28116+	if (vop->lut_regs) {
28117+		u16 *r_base, *g_base, *b_base;
28118+		u32 lut_len = vop->lut_len;
28119+
28120+		vop->lut = devm_kmalloc_array(dev, lut_len, sizeof(*vop->lut),
28121+					      GFP_KERNEL);
28122+		if (!vop->lut)
28123+			goto err_unregister_crtc_funcs;
28124+
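+		/* If no usable gamma LUT is found in the DT, fall back to a linear (identity) ramp. */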
28125+		if (vop_of_init_display_lut(vop)) {
28126+			for (i = 0; i < lut_len; i++) {
28127+				u32 r = i * lut_len * lut_len;
28128+				u32 g = i * lut_len;
28129+				u32 b = i;
28130+
28131+				vop->lut[i] = r | g | b;
28132+			}
28133+		}
28134+
28135+		drm_mode_crtc_set_gamma_size(crtc, lut_len);
28136+		drm_crtc_enable_color_mgmt(crtc, 0, false, lut_len);
28137+		r_base = crtc->gamma_store;
28138+		g_base = r_base + crtc->gamma_size;
28139+		b_base = g_base + crtc->gamma_size;
28140+
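+		/* Populate the CRTC gamma_store from the current LUT. */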
28141+		for (i = 0; i < lut_len; i++) {
28142+			rockchip_vop_crtc_fb_gamma_get(crtc, &r_base[i],
28143+						       &g_base[i], &b_base[i],
28144+						       i);
28145+		}
28146+	}
28147 	return 0;
28148 
28149+err_unregister_crtc_funcs:
28150+	rockchip_unregister_crtc_funcs(crtc);
28151 err_cleanup_crtc:
28152 	drm_crtc_cleanup(crtc);
28153 err_cleanup_planes:
28154@@ -1863,12 +4570,10 @@ static int vop_create_crtc(struct vop *vop)
28155 
28156 static void vop_destroy_crtc(struct vop *vop)
28157 {
28158-	struct drm_crtc *crtc = &vop->crtc;
28159+	struct drm_crtc *crtc = &vop->rockchip_crtc.crtc;
28160 	struct drm_device *drm_dev = vop->drm_dev;
28161 	struct drm_plane *plane, *tmp;
28162 
28163-	drm_self_refresh_helper_cleanup(crtc);
28164-
28165 	of_node_put(crtc->port);
28166 
28167 	/*
28168@@ -1891,137 +4596,129 @@ static void vop_destroy_crtc(struct vop *vop)
28169 	drm_flip_work_cleanup(&vop->fb_unref_work);
28170 }
28171 
28172-static int vop_initial(struct vop *vop)
28173+/*
28174+ * win_id is the index into the vop_win_data array and corresponds
28175+ * to the actual hardware plane.
28176+ * On Linux, however, video playback and camera preview can only be
28177+ * shown on the NV12-capable plane,
28178+ * so set the zpos order to PRIMARY < OVERLAY (if any) < CURSOR (if any).
28179+ */
28180+static int vop_plane_get_zpos(enum drm_plane_type type, unsigned int size)
28181 {
28182-	struct reset_control *ahb_rst;
28183-	int i, ret;
28184-
28185-	vop->hclk = devm_clk_get(vop->dev, "hclk_vop");
28186-	if (IS_ERR(vop->hclk)) {
28187-		DRM_DEV_ERROR(vop->dev, "failed to get hclk source\n");
28188-		return PTR_ERR(vop->hclk);
28189-	}
28190-	vop->aclk = devm_clk_get(vop->dev, "aclk_vop");
28191-	if (IS_ERR(vop->aclk)) {
28192-		DRM_DEV_ERROR(vop->dev, "failed to get aclk source\n");
28193-		return PTR_ERR(vop->aclk);
28194-	}
28195-	vop->dclk = devm_clk_get(vop->dev, "dclk_vop");
28196-	if (IS_ERR(vop->dclk)) {
28197-		DRM_DEV_ERROR(vop->dev, "failed to get dclk source\n");
28198-		return PTR_ERR(vop->dclk);
28199-	}
28200-
28201-	ret = pm_runtime_get_sync(vop->dev);
28202-	if (ret < 0) {
28203-		DRM_DEV_ERROR(vop->dev, "failed to get pm runtime: %d\n", ret);
28204-		return ret;
28205-	}
28206-
28207-	ret = clk_prepare(vop->dclk);
28208-	if (ret < 0) {
28209-		DRM_DEV_ERROR(vop->dev, "failed to prepare dclk\n");
28210-		goto err_put_pm_runtime;
28211-	}
28212-
28213-	/* Enable both the hclk and aclk to setup the vop */
28214-	ret = clk_prepare_enable(vop->hclk);
28215-	if (ret < 0) {
28216-		DRM_DEV_ERROR(vop->dev, "failed to prepare/enable hclk\n");
28217-		goto err_unprepare_dclk;
28218+	switch (type) {
28219+	case DRM_PLANE_TYPE_PRIMARY:
28220+		return 0;
28221+	case DRM_PLANE_TYPE_OVERLAY:
28222+		return 1;
28223+	case DRM_PLANE_TYPE_CURSOR:
28224+		return size - 1;
28225 	}
28226+	return 0;
28227+}
28228 
28229-	ret = clk_prepare_enable(vop->aclk);
28230-	if (ret < 0) {
28231-		DRM_DEV_ERROR(vop->dev, "failed to prepare/enable aclk\n");
28232-		goto err_disable_hclk;
28233-	}
28234+/*
28235+ * Initialize the vop->win array elements.
28236+ */
28237+static int vop_win_init(struct vop *vop)
28238+{
28239+	const struct vop_data *vop_data = vop->data;
28240+	unsigned int i, j;
28241+	unsigned int num_wins = 0;
28242+	char name[DRM_PROP_NAME_LEN];
28243+	uint8_t plane_id = 0;
28244+	struct drm_prop_enum_list *plane_name_list;
28245+	static const struct drm_prop_enum_list props[] = {
28246+		{ ROCKCHIP_DRM_PLANE_FEATURE_SCALE, "scale" },
28247+		{ ROCKCHIP_DRM_PLANE_FEATURE_ALPHA, "alpha" },
28248+		{ ROCKCHIP_DRM_PLANE_FEATURE_HDR2SDR, "hdr2sdr" },
28249+		{ ROCKCHIP_DRM_PLANE_FEATURE_SDR2HDR, "sdr2hdr" },
28250+		{ ROCKCHIP_DRM_PLANE_FEATURE_AFBDC, "afbdc" },
28251+	};
28252 
28253-	/*
28254-	 * do hclk_reset, reset all vop registers.
28255-	 */
28256-	ahb_rst = devm_reset_control_get(vop->dev, "ahb");
28257-	if (IS_ERR(ahb_rst)) {
28258-		DRM_DEV_ERROR(vop->dev, "failed to get ahb reset\n");
28259-		ret = PTR_ERR(ahb_rst);
28260-		goto err_disable_aclk;
28261-	}
28262-	reset_control_assert(ahb_rst);
28263-	usleep_range(10, 20);
28264-	reset_control_deassert(ahb_rst);
28265+	for (i = 0; i < vop_data->win_size; i++) {
28266+		struct vop_win *vop_win = &vop->win[num_wins];
28267+		const struct vop_win_data *win_data = &vop_data->win[i];
28268 
28269-	VOP_INTR_SET_TYPE(vop, clear, INTR_MASK, 1);
28270-	VOP_INTR_SET_TYPE(vop, enable, INTR_MASK, 0);
28271+		if (!win_data->phy)
28272+			continue;
28273 
28274-	for (i = 0; i < vop->len; i += sizeof(u32))
28275-		vop->regsbak[i / 4] = readl_relaxed(vop->regs + i);
28276+		vop_win->phy = win_data->phy;
28277+		vop_win->csc = win_data->csc;
28278+		vop_win->offset = win_data->base;
28279+		vop_win->type = win_data->type;
28280+		vop_win->data_formats = win_data->phy->data_formats;
28281+		vop_win->nformats = win_data->phy->nformats;
28282+		vop_win->format_modifiers = win_data->format_modifiers;
28283+		vop_win->feature = win_data->feature;
28284+		vop_win->vop = vop;
28285+		vop_win->win_id = i;
28286+		vop_win->area_id = 0;
28287+		vop_win->plane_id = plane_id++;
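+		/* The plane name encodes the VOP id, window id and area id, e.g. "VOP0-win0-0" */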
28288+		snprintf(name, sizeof(name), "VOP%d-win%d-%d", vop->id, vop_win->win_id, vop_win->area_id);
28289+		vop_win->name = devm_kstrdup(vop->dev, name, GFP_KERNEL);
28290+		vop_win->zpos = vop_plane_get_zpos(win_data->type,
28291+						   vop_data->win_size);
28292 
28293-	VOP_REG_SET(vop, misc, global_regdone_en, 1);
28294-	VOP_REG_SET(vop, common, dsp_blank, 0);
28295+		num_wins++;
28296 
28297-	for (i = 0; i < vop->data->win_size; i++) {
28298-		struct vop_win *vop_win = &vop->win[i];
28299-		const struct vop_win_data *win = vop_win->data;
28300-		int channel = i * 2 + 1;
28301+		if (!vop->support_multi_area)
28302+			continue;
28303 
28304-		VOP_WIN_SET(vop, win, channel, (channel + 1) << 4 | channel);
28305-		vop_win_disable(vop, vop_win);
28306-		VOP_WIN_SET(vop, win, gate, 1);
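+		/*
+		 * Expose each sub-area of a multi-area window as an extra overlay
+		 * plane that shares the parent window's register base and formats.
+		 */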
28307+		for (j = 0; j < win_data->area_size; j++) {
28308+			struct vop_win *vop_area = &vop->win[num_wins];
28309+			const struct vop_win_phy *area = win_data->area[j];
28310+
28311+			vop_area->parent = vop_win;
28312+			vop_area->offset = vop_win->offset;
28313+			vop_area->phy = area;
28314+			vop_area->type = DRM_PLANE_TYPE_OVERLAY;
28315+			vop_area->data_formats = vop_win->data_formats;
28316+			vop_area->nformats = vop_win->nformats;
28317+			vop_area->format_modifiers = win_data->format_modifiers;
28318+			vop_area->vop = vop;
28319+			vop_area->win_id = i;
28320+			vop_area->area_id = j + 1;
28321+			vop_area->plane_id = plane_id++;
28322+			snprintf(name, sizeof(name), "VOP%d-win%d-%d", vop->id, vop_area->win_id, vop_area->area_id);
28323+			vop_area->name = devm_kstrdup(vop->dev, name, GFP_KERNEL);
28324+			num_wins++;
28325+		}
28326+		vop->plane_mask |= BIT(vop_win->win_id);
28327 	}
28328 
28329-	vop_cfg_done(vop);
28330+	vop->num_wins = num_wins;
28331+
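+	/* Per-plane FEATURE property advertising scale/alpha/HDR-conversion/AFBDC support to userspace. */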
28332+	vop->plane_feature_prop = drm_property_create_bitmask(vop->drm_dev,
28333+				DRM_MODE_PROP_IMMUTABLE, "FEATURE",
28334+				props, ARRAY_SIZE(props),
28335+				BIT(ROCKCHIP_DRM_PLANE_FEATURE_SCALE) |
28336+				BIT(ROCKCHIP_DRM_PLANE_FEATURE_ALPHA) |
28337+				BIT(ROCKCHIP_DRM_PLANE_FEATURE_HDR2SDR) |
28338+				BIT(ROCKCHIP_DRM_PLANE_FEATURE_SDR2HDR) |
28339+				BIT(ROCKCHIP_DRM_PLANE_FEATURE_AFBDC));
28340+	if (!vop->plane_feature_prop) {
28341+		DRM_ERROR("failed to create feature property\n");
28342+		return -EINVAL;
28343+	}
28344 
28345-	/*
28346-	 * do dclk_reset, let all config take affect.
28347-	 */
28348-	vop->dclk_rst = devm_reset_control_get(vop->dev, "dclk");
28349-	if (IS_ERR(vop->dclk_rst)) {
28350-		DRM_DEV_ERROR(vop->dev, "failed to get dclk reset\n");
28351-		ret = PTR_ERR(vop->dclk_rst);
28352-		goto err_disable_aclk;
28353+	plane_name_list = devm_kzalloc(vop->dev,
28354+				       vop->num_wins * sizeof(*plane_name_list),
28355+				       GFP_KERNEL);
28356+	if (!plane_name_list) {
28357+		DRM_DEV_ERROR(vop->dev, "failed to alloc memory for plane_name_list\n");
28358+		return -ENOMEM;
28359 	}
28360-	reset_control_assert(vop->dclk_rst);
28361-	usleep_range(10, 20);
28362-	reset_control_deassert(vop->dclk_rst);
28363 
28364-	clk_disable(vop->hclk);
28365-	clk_disable(vop->aclk);
28366+	for (i = 0; i < vop->num_wins; i++) {
28367+		struct vop_win *vop_win = &vop->win[i];
28368 
28369-	vop->is_enabled = false;
28370+		plane_name_list[i].type = vop_win->plane_id;
28371+		plane_name_list[i].name = vop_win->name;
28372+	}
28373 
28374-	pm_runtime_put_sync(vop->dev);
28375+	vop->plane_name_list = plane_name_list;
28376 
28377 	return 0;
28378-
28379-err_disable_aclk:
28380-	clk_disable_unprepare(vop->aclk);
28381-err_disable_hclk:
28382-	clk_disable_unprepare(vop->hclk);
28383-err_unprepare_dclk:
28384-	clk_unprepare(vop->dclk);
28385-err_put_pm_runtime:
28386-	pm_runtime_put_sync(vop->dev);
28387-	return ret;
28388-}
28389-
28390-/*
28391- * Initialize the vop->win array elements.
28392- */
28393-static void vop_win_init(struct vop *vop)
28394-{
28395-	const struct vop_data *vop_data = vop->data;
28396-	unsigned int i;
28397-
28398-	for (i = 0; i < vop_data->win_size; i++) {
28399-		struct vop_win *vop_win = &vop->win[i];
28400-		const struct vop_win_data *win_data = &vop_data->win[i];
28401-
28402-		vop_win->data = win_data;
28403-		vop_win->vop = vop;
28404-
28405-		if (vop_data->win_yuv2yuv)
28406-			vop_win->yuv2yuv_data = &vop_data->win_yuv2yuv[i];
28407-	}
28408 }
28409 
28410 /**
28411@@ -2080,46 +4777,97 @@ static int vop_bind(struct device *dev, struct device *master, void *data)
28412 	struct drm_device *drm_dev = data;
28413 	struct vop *vop;
28414 	struct resource *res;
28415-	int ret, irq;
28416+	size_t alloc_size;
28417+	int ret, irq, i;
28418+	int num_wins = 0;
28419+	bool dual_channel_swap = false;
28420+	struct device_node *mcu = NULL;
28421 
28422 	vop_data = of_device_get_match_data(dev);
28423 	if (!vop_data)
28424 		return -ENODEV;
28425 
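+	/* Count the planes to allocate: each hardware window plus all of its sub-areas. */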
28426+	for (i = 0; i < vop_data->win_size; i++) {
28427+		const struct vop_win_data *win_data = &vop_data->win[i];
28428+
28429+		num_wins += win_data->area_size + 1;
28430+	}
28431+
28432 	/* Allocate vop struct and its vop_win array */
28433-	vop = devm_kzalloc(dev, struct_size(vop, win, vop_data->win_size),
28434-			   GFP_KERNEL);
28435+	alloc_size = sizeof(*vop) + sizeof(*vop->win) * num_wins;
28436+	vop = devm_kzalloc(dev, alloc_size, GFP_KERNEL);
28437 	if (!vop)
28438 		return -ENOMEM;
28439 
28440 	vop->dev = dev;
28441 	vop->data = vop_data;
28442 	vop->drm_dev = drm_dev;
28443+	vop->num_wins = num_wins;
28444+	vop->version = vop_data->version;
28445+	vop->soc_id = vop_data->soc_id;
28446+	vop->id = vop_data->vop_id;
28447 	dev_set_drvdata(dev, vop);
28448+	vop->support_multi_area = of_property_read_bool(dev->of_node, "support-multi-area");
28449 
28450-	vop_win_init(vop);
28451+	ret = vop_win_init(vop);
28452+	if (ret)
28453+		return ret;
28454 
28455-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
28456+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
28457+	if (!res) {
28458+		dev_warn(vop->dev, "failed to get vop register resource by name\n");
28459+		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
28460+	}
28461 	vop->regs = devm_ioremap_resource(dev, res);
28462 	if (IS_ERR(vop->regs))
28463 		return PTR_ERR(vop->regs);
28464 	vop->len = resource_size(res);
28465 
28466-	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
28467+	vop->regsbak = devm_kzalloc(dev, vop->len, GFP_KERNEL);
28468+	if (!vop->regsbak)
28469+		return -ENOMEM;
28470+
28471+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gamma_lut");
28472 	if (res) {
28473-		if (!vop_data->lut_size) {
28474-			DRM_DEV_ERROR(dev, "no gamma LUT size defined\n");
28475+		vop->lut_len = resource_size(res) / sizeof(*vop->lut);
28476+		if (vop->lut_len != 256 && vop->lut_len != 1024) {
28477+			dev_err(vop->dev, "unsupported lut size %d\n",
28478+				vop->lut_len);
28479 			return -EINVAL;
28480 		}
28481+
28482 		vop->lut_regs = devm_ioremap_resource(dev, res);
28483 		if (IS_ERR(vop->lut_regs))
28484 			return PTR_ERR(vop->lut_regs);
28485 	}
28486-
28487-	vop->regsbak = devm_kzalloc(dev, vop->len, GFP_KERNEL);
28488-	if (!vop->regsbak)
28489-		return -ENOMEM;
28490-
28491+	vop->grf = syscon_regmap_lookup_by_phandle(dev->of_node,
28492+						   "rockchip,grf");
28493+	if (IS_ERR(vop->grf))
28494+		dev_err(dev, "missing rockchip,grf property\n");
28495+	vop->hclk = devm_clk_get(vop->dev, "hclk_vop");
28496+	if (IS_ERR(vop->hclk)) {
28497+		dev_err(vop->dev, "failed to get hclk source\n");
28498+		return PTR_ERR(vop->hclk);
28499+	}
28500+	vop->aclk = devm_clk_get(vop->dev, "aclk_vop");
28501+	if (IS_ERR(vop->aclk)) {
28502+		dev_err(vop->dev, "failed to get aclk source\n");
28503+		return PTR_ERR(vop->aclk);
28504+	}
28505+	vop->dclk = devm_clk_get(vop->dev, "dclk_vop");
28506+	if (IS_ERR(vop->dclk)) {
28507+		dev_err(vop->dev, "failed to get dclk source\n");
28508+		return PTR_ERR(vop->dclk);
28509+	}
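+	/* The dclk parent clock is optional; -ENOENT just means it is not provided. */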
28510+	vop->dclk_source = devm_clk_get(vop->dev, "dclk_source");
28511+	if (PTR_ERR(vop->dclk_source) == -ENOENT) {
28512+		vop->dclk_source = NULL;
28513+	} else if (PTR_ERR(vop->dclk_source) == -EPROBE_DEFER) {
28514+		return -EPROBE_DEFER;
28515+	} else if (IS_ERR(vop->dclk_source)) {
28516+		dev_err(vop->dev, "failed to get dclk source parent\n");
28517+		return PTR_ERR(vop->dclk_source);
28518+	}
28519 	irq = platform_get_irq(pdev, 0);
28520 	if (irq < 0) {
28521 		DRM_DEV_ERROR(dev, "cannot find irq for vop\n");
28522@@ -2131,53 +4879,51 @@ static int vop_bind(struct device *dev, struct device *master, void *data)
28523 	spin_lock_init(&vop->irq_lock);
28524 	mutex_init(&vop->vop_lock);
28525 
28526+	ret = devm_request_irq(dev, vop->irq, vop_isr,
28527+			       IRQF_SHARED, dev_name(dev), vop);
28528+	if (ret)
28529+		return ret;
28530 	ret = vop_create_crtc(vop);
28531 	if (ret)
28532 		return ret;
28533 
28534 	pm_runtime_enable(&pdev->dev);
28535 
28536-	ret = vop_initial(vop);
28537-	if (ret < 0) {
28538-		DRM_DEV_ERROR(&pdev->dev,
28539-			      "cannot initial vop dev - err %d\n", ret);
28540-		goto err_disable_pm_runtime;
28541-	}
28542-
28543-	ret = devm_request_irq(dev, vop->irq, vop_isr,
28544-			       IRQF_SHARED, dev_name(dev), vop);
28545-	if (ret)
28546-		goto err_disable_pm_runtime;
28547 
28548-	if (vop->data->feature & VOP_FEATURE_INTERNAL_RGB) {
28549-		vop->rgb = rockchip_rgb_init(dev, &vop->crtc, vop->drm_dev);
28550-		if (IS_ERR(vop->rgb)) {
28551-			ret = PTR_ERR(vop->rgb);
28552-			goto err_disable_pm_runtime;
28553-		}
28554+	mcu = of_get_child_by_name(dev->of_node, "mcu-timing");
28555+	if (!mcu) {
28556+		dev_dbg(dev, "no mcu-timing node found in %s\n",
28557+			dev->of_node->full_name);
28558+	} else {
28559+		u32 val;
28560+
28561+		if (!of_property_read_u32(mcu, "mcu-pix-total", &val))
28562+			vop->mcu_timing.mcu_pix_total = val;
28563+		if (!of_property_read_u32(mcu, "mcu-cs-pst", &val))
28564+			vop->mcu_timing.mcu_cs_pst = val;
28565+		if (!of_property_read_u32(mcu, "mcu-cs-pend", &val))
28566+			vop->mcu_timing.mcu_cs_pend = val;
28567+		if (!of_property_read_u32(mcu, "mcu-rw-pst", &val))
28568+			vop->mcu_timing.mcu_rw_pst = val;
28569+		if (!of_property_read_u32(mcu, "mcu-rw-pend", &val))
28570+			vop->mcu_timing.mcu_rw_pend = val;
28571+		if (!of_property_read_u32(mcu, "mcu-hold-mode", &val))
28572+			vop->mcu_timing.mcu_hold_mode = val;
28573 	}
28574 
28575-	return 0;
28576+	dual_channel_swap = of_property_read_bool(dev->of_node,
28577+						  "rockchip,dual-channel-swap");
28578+	vop->dual_channel_swap = dual_channel_swap;
28579 
28580-err_disable_pm_runtime:
28581-	pm_runtime_disable(&pdev->dev);
28582-	vop_destroy_crtc(vop);
28583-	return ret;
28584+	return 0;
28585 }
28586 
28587 static void vop_unbind(struct device *dev, struct device *master, void *data)
28588 {
28589 	struct vop *vop = dev_get_drvdata(dev);
28590 
28591-	if (vop->rgb)
28592-		rockchip_rgb_fini(vop->rgb);
28593-
28594 	pm_runtime_disable(dev);
28595 	vop_destroy_crtc(vop);
28596-
28597-	clk_unprepare(vop->aclk);
28598-	clk_unprepare(vop->hclk);
28599-	clk_unprepare(vop->dclk);
28600 }
28601 
28602 const struct component_ops vop_component_ops = {
28603diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
28604index 857d97cdc..8c2b38419 100644
28605--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
28606+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
28607@@ -7,6 +7,9 @@
28608 #ifndef _ROCKCHIP_DRM_VOP_H
28609 #define _ROCKCHIP_DRM_VOP_H
28610 
28611+#include <drm/drm_plane.h>
28612+#include <drm/drm_modes.h>
28613+
28614 /*
28615  * major: IP major version, used for IP structure
28616  * minor: big feature change under same structure
28617@@ -15,104 +18,334 @@
28618 #define VOP_MAJOR(version)		((version) >> 8)
28619 #define VOP_MINOR(version)		((version) & 0xff)
28620 
28621-#define NUM_YUV2YUV_COEFFICIENTS 12
28622+#define VOP_VERSION_RK3568	VOP_VERSION(0x40, 0x15)
28623+#define VOP_VERSION_RK3588	VOP_VERSION(0x40, 0x17)
28624 
28625-/* AFBC supports a number of configurable modes. Relevant to us is block size
28626- * (16x16 or 32x8), storage modifiers (SPARSE, SPLIT), and the YUV-like
28627- * colourspace transform (YTR). 16x16 SPARSE mode is always used. SPLIT mode
28628- * could be enabled via the hreg_block_split register, but is not currently
28629- * handled. The colourspace transform is implicitly always assumed by the
28630- * decoder, so consumers must use this transform as well.
28631- *
28632- * Failure to match modifiers will cause errors displaying AFBC buffers
28633- * produced by conformant AFBC producers, including Mesa.
28634+#define ROCKCHIP_OUTPUT_DUAL_CHANNEL_LEFT_RIGHT_MODE	BIT(0)
28635+#define ROCKCHIP_OUTPUT_DUAL_CHANNEL_ODD_EVEN_MODE	BIT(1)
28636+#define ROCKCHIP_OUTPUT_DATA_SWAP			BIT(2)
28637+/* MIPI DSI DataStream(cmd) mode on rk3588 */
28638+#define ROCKCHIP_OUTPUT_MIPI_DS_MODE			BIT(3)
28639+
28640+#define AFBDC_FMT_RGB565	0x0
28641+#define AFBDC_FMT_U8U8U8U8	0x5
28642+#define AFBDC_FMT_U8U8U8	0x4
28643+
28644+#define VOP_FEATURE_OUTPUT_RGB10	BIT(0)
28645+#define VOP_FEATURE_INTERNAL_RGB	BIT(1)
28646+#define VOP_FEATURE_ALPHA_SCALE		BIT(2)
28647+#define VOP_FEATURE_HDR10		BIT(3)
28648+#define VOP_FEATURE_NEXT_HDR		BIT(4)
28649+/* a feature to splice two windows and two vps to support resolution > 4096 */
28650+#define VOP_FEATURE_SPLICE		BIT(5)
28651+#define VOP_FEATURE_OVERSCAN		BIT(6)
28652+
28653+#define VOP_FEATURE_OUTPUT_10BIT	VOP_FEATURE_OUTPUT_RGB10
28654+
28655+
28656+#define WIN_FEATURE_HDR2SDR		BIT(0)
28657+#define WIN_FEATURE_SDR2HDR		BIT(1)
28658+#define WIN_FEATURE_PRE_OVERLAY		BIT(2)
28659+#define WIN_FEATURE_AFBDC		BIT(3)
28660+#define WIN_FEATURE_CLUSTER_MAIN	BIT(4)
28661+#define WIN_FEATURE_CLUSTER_SUB		BIT(5)
28662+/* Left win in splice mode */
28663+#define WIN_FEATURE_SPLICE_LEFT		BIT(6)
28664+/* A mirror win can only get its fb address
28665+ * from its source win:
28666+ * Cluster1---->Cluster0
28667+ * Esmart1 ---->Esmart0
28668+ * Smart1  ---->Smart0
28669+ * This is a feature of rk3566.
28670  */
28671-#define ROCKCHIP_AFBC_MOD \
28672-	DRM_FORMAT_MOD_ARM_AFBC( \
28673-		AFBC_FORMAT_MOD_BLOCK_SIZE_16x16 | AFBC_FORMAT_MOD_SPARSE \
28674-			| AFBC_FORMAT_MOD_YTR \
28675-	)
28676+#define WIN_FEATURE_MIRROR		BIT(6)
28677+#define WIN_FEATURE_MULTI_AREA		BIT(7)
28678+
28679+
28680+#define VOP2_SOC_VARIANT		4
28681+
28682+#define ROCKCHIP_DSC_PPS_SIZE_BYTE	88
28683+
28684+enum vop_win_phy_id {
28685+	ROCKCHIP_VOP_WIN0 = 0,
28686+	ROCKCHIP_VOP_WIN1,
28687+	ROCKCHIP_VOP_WIN2,
28688+	ROCKCHIP_VOP_WIN3,
28689+	ROCKCHIP_VOP_PHY_ID_INVALID = -1,
28690+};
28691+
28692+enum bcsh_out_mode {
28693+	BCSH_OUT_MODE_BLACK,
28694+	BCSH_OUT_MODE_BLUE,
28695+	BCSH_OUT_MODE_COLOR_BAR,
28696+	BCSH_OUT_MODE_NORMAL_VIDEO,
28697+};
28698+
28699+enum cabc_stage_mode {
28700+	LAST_FRAME_PWM_VAL	= 0x0,
28701+	CUR_FRAME_PWM_VAL	= 0x1,
28702+	STAGE_BY_STAGE		= 0x2
28703+};
28704+
28705+enum cabc_stage_up_mode {
28706+	MUL_MODE,
28707+	ADD_MODE,
28708+};
28709+
28710+/*
28711+ * The delay value of a window in each mode.
28712+ */
28713+enum vop2_win_dly_mode {
28714+	VOP2_DLY_MODE_DEFAULT,   /**< default mode */
28715+	VOP2_DLY_MODE_HISO_S,    /**< HDR in, SDR out mode, as an SDR window */
28716+	VOP2_DLY_MODE_HIHO_H,    /**< HDR in, HDR out mode, as an HDR window */
28717+	VOP2_DLY_MODE_MAX,
28718+};
28719+
28720+/*
28721+ * vop2 internal power domain ids
28722+ * must all be non-zero; 0 is
28723+ * treated as invalid.
28724+ */
28725+#define VOP2_PD_CLUSTER0	BIT(0)
28726+#define VOP2_PD_CLUSTER1	BIT(1)
28727+#define VOP2_PD_CLUSTER2	BIT(2)
28728+#define VOP2_PD_CLUSTER3	BIT(3)
28729+#define VOP2_PD_DSC_8K		BIT(5)
28730+#define VOP2_PD_DSC_4K		BIT(6)
28731+#define VOP2_PD_ESMART0		BIT(7)
28732+
28733+/*
28734+ * vop2 submem power gate ids
28735+ * must all be non-zero; 0 is
28736+ * treated as invalid.
28737+ */
28738+#define VOP2_MEM_PG_VP0		BIT(0)
28739+#define VOP2_MEM_PG_VP1		BIT(1)
28740+#define VOP2_MEM_PG_VP2		BIT(2)
28741+#define VOP2_MEM_PG_VP3		BIT(3)
28742+#define VOP2_MEM_PG_DB0		BIT(4)
28743+#define VOP2_MEM_PG_DB1		BIT(5)
28744+#define VOP2_MEM_PG_DB2		BIT(6)
28745+#define VOP2_MEM_PG_WB		BIT(7)
28746+
28747+#define DSP_BG_SWAP		0x1
28748+#define DSP_RB_SWAP		0x2
28749+#define DSP_RG_SWAP		0x4
28750+#define DSP_DELTA_SWAP		0x8
28751+
28752+enum vop_csc_format {
28753+	CSC_BT601L,
28754+	CSC_BT709L,
28755+	CSC_BT601F,
28756+	CSC_BT2020,
28757+};
28758+
28759+enum vop_csc_mode {
28760+	CSC_RGB,
28761+	CSC_YUV,
28762+};
28763 
28764 enum vop_data_format {
28765 	VOP_FMT_ARGB8888 = 0,
28766 	VOP_FMT_RGB888,
28767-	VOP_FMT_RGB565,
28768+	VOP_FMT_RGB565 = 2,
28769+	VOP_FMT_YUYV = 2,
28770 	VOP_FMT_YUV420SP = 4,
28771 	VOP_FMT_YUV422SP,
28772 	VOP_FMT_YUV444SP,
28773 };
28774 
28775+enum vop_dsc_interface_mode {
28776+	VOP_DSC_IF_DISABLE = 0,
28777+	VOP_DSC_IF_HDMI = 1,
28778+	VOP_DSC_IF_MIPI_DS_MODE = 2,
28779+	VOP_DSC_IF_MIPI_VIDEO_MODE = 3,
28780+};
28781+
28782+struct vop_reg_data {
28783+	uint32_t offset;
28784+	uint32_t value;
28785+};
28786+
28787 struct vop_reg {
28788 	uint32_t mask;
28789-	uint16_t offset;
28790-	uint8_t shift;
28791-	bool write_mask;
28792-	bool relaxed;
28793+	uint32_t offset:17;
28794+	uint32_t shift:5;
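+	/* begin_minor/end_minor/major limit the IP versions this register definition applies to */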
28795+	uint32_t begin_minor:4;
28796+	uint32_t end_minor:4;
28797+	uint32_t reserved:2;
28798+	uint32_t major:3;
28799+	uint32_t write_mask:1;
28800 };
28801 
28802-struct vop_afbc {
28803-	struct vop_reg enable;
28804-	struct vop_reg win_sel;
28805-	struct vop_reg format;
28806-	struct vop_reg hreg_block_split;
28807-	struct vop_reg pic_size;
28808-	struct vop_reg hdr_ptr;
28809-	struct vop_reg rstn;
28810+struct vop_csc {
28811+	struct vop_reg y2r_en;
28812+	struct vop_reg r2r_en;
28813+	struct vop_reg r2y_en;
28814+	struct vop_reg csc_mode;
28815+
28816+	uint32_t y2r_offset;
28817+	uint32_t r2r_offset;
28818+	uint32_t r2y_offset;
28819 };
28820 
28821-struct vop_modeset {
28822+struct vop_rect {
28823+	int width;
28824+	int height;
28825+};
28826+
28827+struct vop_ctrl {
28828+	struct vop_reg version;
28829+	struct vop_reg standby;
28830+	struct vop_reg dma_stop;
28831+	struct vop_reg axi_outstanding_max_num;
28832+	struct vop_reg axi_max_outstanding_en;
28833 	struct vop_reg htotal_pw;
28834 	struct vop_reg hact_st_end;
28835-	struct vop_reg hpost_st_end;
28836 	struct vop_reg vtotal_pw;
28837 	struct vop_reg vact_st_end;
28838+	struct vop_reg vact_st_end_f1;
28839+	struct vop_reg vs_st_end_f1;
28840+	struct vop_reg hpost_st_end;
28841 	struct vop_reg vpost_st_end;
28842-};
28843-
28844-struct vop_output {
28845-	struct vop_reg pin_pol;
28846-	struct vop_reg dp_pin_pol;
28847-	struct vop_reg dp_dclk_pol;
28848-	struct vop_reg edp_pin_pol;
28849-	struct vop_reg edp_dclk_pol;
28850-	struct vop_reg hdmi_pin_pol;
28851-	struct vop_reg hdmi_dclk_pol;
28852-	struct vop_reg mipi_pin_pol;
28853-	struct vop_reg mipi_dclk_pol;
28854-	struct vop_reg rgb_pin_pol;
28855-	struct vop_reg rgb_dclk_pol;
28856-	struct vop_reg dp_en;
28857+	struct vop_reg vpost_st_end_f1;
28858+	struct vop_reg post_scl_factor;
28859+	struct vop_reg post_scl_ctrl;
28860+	struct vop_reg dsp_interlace;
28861+	struct vop_reg global_regdone_en;
28862+	struct vop_reg auto_gate_en;
28863+	struct vop_reg post_lb_mode;
28864+	struct vop_reg dsp_layer_sel;
28865+	struct vop_reg overlay_mode;
28866+	struct vop_reg core_dclk_div;
28867+	struct vop_reg dclk_ddr;
28868+	struct vop_reg p2i_en;
28869+	struct vop_reg hdmi_dclk_out_en;
28870+	struct vop_reg rgb_en;
28871+	struct vop_reg lvds_en;
28872 	struct vop_reg edp_en;
28873 	struct vop_reg hdmi_en;
28874 	struct vop_reg mipi_en;
28875+	struct vop_reg data01_swap;
28876 	struct vop_reg mipi_dual_channel_en;
28877-	struct vop_reg rgb_en;
28878-};
28879-
28880-struct vop_common {
28881-	struct vop_reg cfg_done;
28882-	struct vop_reg dsp_blank;
28883-	struct vop_reg data_blank;
28884-	struct vop_reg pre_dither_down;
28885+	struct vop_reg dp_en;
28886+	struct vop_reg dclk_pol;
28887+	struct vop_reg pin_pol;
28888+	struct vop_reg rgb_dclk_pol;
28889+	struct vop_reg rgb_pin_pol;
28890+	struct vop_reg lvds_dclk_pol;
28891+	struct vop_reg lvds_pin_pol;
28892+	struct vop_reg hdmi_dclk_pol;
28893+	struct vop_reg hdmi_pin_pol;
28894+	struct vop_reg edp_dclk_pol;
28895+	struct vop_reg edp_pin_pol;
28896+	struct vop_reg mipi_dclk_pol;
28897+	struct vop_reg mipi_pin_pol;
28898+	struct vop_reg dp_dclk_pol;
28899+	struct vop_reg dp_pin_pol;
28900 	struct vop_reg dither_down_sel;
28901 	struct vop_reg dither_down_mode;
28902 	struct vop_reg dither_down_en;
28903-	struct vop_reg dither_up;
28904+	struct vop_reg pre_dither_down_en;
28905+	struct vop_reg dither_up_en;
28906+
28907+	struct vop_reg sw_dac_sel;
28908+	struct vop_reg tve_sw_mode;
28909+	struct vop_reg tve_dclk_pol;
28910+	struct vop_reg tve_dclk_en;
28911+	struct vop_reg sw_genlock;
28912+	struct vop_reg sw_uv_offset_en;
28913+	struct vop_reg dsp_out_yuv;
28914+	struct vop_reg dsp_data_swap;
28915+	struct vop_reg yuv_clip;
28916+	struct vop_reg dsp_ccir656_avg;
28917+	struct vop_reg dsp_black;
28918+	struct vop_reg dsp_blank;
28919+	struct vop_reg dsp_outzero;
28920+	struct vop_reg update_gamma_lut;
28921+	struct vop_reg lut_buffer_index;
28922 	struct vop_reg dsp_lut_en;
28923-	struct vop_reg gate_en;
28924-	struct vop_reg mmu_en;
28925+
28926 	struct vop_reg out_mode;
28927-	struct vop_reg standby;
28928-};
28929 
28930-struct vop_misc {
28931-	struct vop_reg global_regdone_en;
28932+	struct vop_reg xmirror;
28933+	struct vop_reg ymirror;
28934+	struct vop_reg dsp_background;
28935+
28936+	/* AFBDC */
28937+	struct vop_reg afbdc_en;
28938+	struct vop_reg afbdc_sel;
28939+	struct vop_reg afbdc_format;
28940+	struct vop_reg afbdc_hreg_block_split;
28941+	struct vop_reg afbdc_pic_size;
28942+	struct vop_reg afbdc_hdr_ptr;
28943+	struct vop_reg afbdc_rstn;
28944+	struct vop_reg afbdc_pic_vir_width;
28945+	struct vop_reg afbdc_pic_offset;
28946+	struct vop_reg afbdc_axi_ctrl;
28947+
28948+	/* BCSH */
28949+	struct vop_reg bcsh_brightness;
28950+	struct vop_reg bcsh_contrast;
28951+	struct vop_reg bcsh_sat_con;
28952+	struct vop_reg bcsh_sin_hue;
28953+	struct vop_reg bcsh_cos_hue;
28954+	struct vop_reg bcsh_r2y_csc_mode;
28955+	struct vop_reg bcsh_r2y_en;
28956+	struct vop_reg bcsh_y2r_csc_mode;
28957+	struct vop_reg bcsh_y2r_en;
28958+	struct vop_reg bcsh_color_bar;
28959+	struct vop_reg bcsh_out_mode;
28960+	struct vop_reg bcsh_en;
28961+
28962+	/* HDR */
28963+	struct vop_reg level2_overlay_en;
28964+	struct vop_reg alpha_hard_calc;
28965+	struct vop_reg hdr2sdr_en;
28966+	struct vop_reg hdr2sdr_en_win0_csc;
28967+	struct vop_reg hdr2sdr_src_min;
28968+	struct vop_reg hdr2sdr_src_max;
28969+	struct vop_reg hdr2sdr_normfaceetf;
28970+	struct vop_reg hdr2sdr_dst_min;
28971+	struct vop_reg hdr2sdr_dst_max;
28972+	struct vop_reg hdr2sdr_normfacgamma;
28973+
28974+	struct vop_reg bt1886eotf_pre_conv_en;
28975+	struct vop_reg rgb2rgb_pre_conv_en;
28976+	struct vop_reg rgb2rgb_pre_conv_mode;
28977+	struct vop_reg st2084oetf_pre_conv_en;
28978+	struct vop_reg bt1886eotf_post_conv_en;
28979+	struct vop_reg rgb2rgb_post_conv_en;
28980+	struct vop_reg rgb2rgb_post_conv_mode;
28981+	struct vop_reg st2084oetf_post_conv_en;
28982+	struct vop_reg win_csc_mode_sel;
28983+
28984+	/* MCU OUTPUT */
28985+	struct vop_reg mcu_pix_total;
28986+	struct vop_reg mcu_cs_pst;
28987+	struct vop_reg mcu_cs_pend;
28988+	struct vop_reg mcu_rw_pst;
28989+	struct vop_reg mcu_rw_pend;
28990+	struct vop_reg mcu_clk_sel;
28991+	struct vop_reg mcu_hold_mode;
28992+	struct vop_reg mcu_frame_st;
28993+	struct vop_reg mcu_rs;
28994+	struct vop_reg mcu_bypass;
28995+	struct vop_reg mcu_type;
28996+	struct vop_reg mcu_rw_bypass_port;
28997+
28998+	/* bt1120 */
28999+	struct vop_reg bt1120_yc_swap;
29000+	struct vop_reg bt1120_en;
29001+
29002+	struct vop_reg reg_done_frm;
29003+	struct vop_reg cfg_done;
29004 };
29005 
29006 struct vop_intr {
29007 	const int *intrs;
29008 	uint32_t nintrs;
29009-
29010 	struct vop_reg line_flag_num[2];
29011 	struct vop_reg enable;
29012 	struct vop_reg clear;
29013@@ -152,19 +385,129 @@ struct vop_scl_regs {
29014 	struct vop_reg scale_cbcr_y;
29015 };
29016 
29017-struct vop_yuv2yuv_phy {
29018-	struct vop_reg y2r_coefficients[NUM_YUV2YUV_COEFFICIENTS];
29019+struct vop_afbc {
29020+	struct vop_reg enable;
29021+	struct vop_reg win_sel;
29022+	struct vop_reg format;
29023+	struct vop_reg rb_swap;
29024+	struct vop_reg uv_swap;
29025+	struct vop_reg auto_gating_en;
29026+	struct vop_reg rotate;
29027+	struct vop_reg block_split_en;
29028+	struct vop_reg pic_vir_width;
29029+	struct vop_reg tile_num;
29030+	struct vop_reg pic_offset;
29031+	struct vop_reg pic_size;
29032+	struct vop_reg dsp_offset;
29033+	struct vop_reg transform_offset;
29034+	struct vop_reg hdr_ptr;
29035+	struct vop_reg half_block_en;
29036+	struct vop_reg xmirror;
29037+	struct vop_reg ymirror;
29038+	struct vop_reg rotate_270;
29039+	struct vop_reg rotate_90;
29040+	struct vop_reg rstn;
29041+};
29042+
29043+struct vop_csc_table {
29044+	const uint32_t *y2r_bt601;
29045+	const uint32_t *y2r_bt601_12_235;
29046+	const uint32_t *y2r_bt601_10bit;
29047+	const uint32_t *y2r_bt601_10bit_12_235;
29048+	const uint32_t *r2y_bt601;
29049+	const uint32_t *r2y_bt601_12_235;
29050+	const uint32_t *r2y_bt601_10bit;
29051+	const uint32_t *r2y_bt601_10bit_12_235;
29052+
29053+	const uint32_t *y2r_bt709;
29054+	const uint32_t *y2r_bt709_10bit;
29055+	const uint32_t *r2y_bt709;
29056+	const uint32_t *r2y_bt709_10bit;
29057+
29058+	const uint32_t *y2r_bt2020;
29059+	const uint32_t *r2y_bt2020;
29060+
29061+	const uint32_t *r2r_bt709_to_bt2020;
29062+	const uint32_t *r2r_bt2020_to_bt709;
29063+};
29064+
29065+struct vop_hdr_table {
29066+	const uint32_t hdr2sdr_eetf_oetf_y0_offset;
29067+	const uint32_t hdr2sdr_eetf_oetf_y1_offset;
29068+	const uint32_t *hdr2sdr_eetf_yn;
29069+	const uint32_t *hdr2sdr_bt1886oetf_yn;
29070+	const uint32_t hdr2sdr_sat_y0_offset;
29071+	const uint32_t hdr2sdr_sat_y1_offset;
29072+	const uint32_t *hdr2sdr_sat_yn;
29073+
29074+	const uint32_t hdr2sdr_src_range_min;
29075+	const uint32_t hdr2sdr_src_range_max;
29076+	const uint32_t hdr2sdr_normfaceetf;
29077+	const uint32_t hdr2sdr_dst_range_min;
29078+	const uint32_t hdr2sdr_dst_range_max;
29079+	const uint32_t hdr2sdr_normfacgamma;
29080+
29081+	const uint32_t sdr2hdr_eotf_oetf_y0_offset;
29082+	const uint32_t sdr2hdr_eotf_oetf_y1_offset;
29083+	const uint32_t *sdr2hdr_bt1886eotf_yn_for_hlg_hdr;
29084+	const uint32_t *sdr2hdr_bt1886eotf_yn_for_bt2020;
29085+	const uint32_t *sdr2hdr_bt1886eotf_yn_for_hdr;
29086+	const uint32_t *sdr2hdr_st2084oetf_yn_for_hlg_hdr;
29087+	const uint32_t *sdr2hdr_st2084oetf_yn_for_bt2020;
29088+	const uint32_t *sdr2hdr_st2084oetf_yn_for_hdr;
29089+	const uint32_t sdr2hdr_oetf_dx_dxpow1_offset;
29090+	const uint32_t *sdr2hdr_st2084oetf_dxn_pow2;
29091+	const uint32_t *sdr2hdr_st2084oetf_dxn;
29092+	const uint32_t sdr2hdr_oetf_xn1_offset;
29093+	const uint32_t *sdr2hdr_st2084oetf_xn;
29094+};
29095+
29096+enum {
29097+	VOP_CSC_Y2R_BT601,
29098+	VOP_CSC_Y2R_BT709,
29099+	VOP_CSC_Y2R_BT2020,
29100+	VOP_CSC_R2Y_BT601,
29101+	VOP_CSC_R2Y_BT709,
29102+	VOP_CSC_R2Y_BT2020,
29103+	VOP_CSC_R2R_BT2020_TO_BT709,
29104+	VOP_CSC_R2R_BT709_TO_2020,
29105+};
29106+
29107+enum _vop_overlay_mode {
29108+	VOP_RGB_DOMAIN,
29109+	VOP_YUV_DOMAIN
29110+};
29111+
29112+enum _vop_sdr2hdr_func {
29113+	SDR2HDR_FOR_BT2020,
29114+	SDR2HDR_FOR_HDR,
29115+	SDR2HDR_FOR_HLG_HDR,
29116+};
29117+
29118+enum _vop_rgb2rgb_conv_mode {
29119+	BT709_TO_BT2020,
29120+	BT2020_TO_BT709,
29121+};
29122+
29123+enum _MCU_IOCTL {
29124+	MCU_WRCMD = 0,
29125+	MCU_WRDATA,
29126+	MCU_SETBYPASS,
29127 };
29128 
29129 struct vop_win_phy {
29130 	const struct vop_scl_regs *scl;
29131 	const uint32_t *data_formats;
29132 	uint32_t nformats;
29133-	const uint64_t *format_modifiers;
29134 
29135-	struct vop_reg enable;
29136 	struct vop_reg gate;
29137+	struct vop_reg enable;
29138 	struct vop_reg format;
29139+	struct vop_reg fmt_10;
29140+	struct vop_reg fmt_yuyv;
29141+	struct vop_reg csc_mode;
29142+	struct vop_reg xmirror;
29143+	struct vop_reg ymirror;
29144 	struct vop_reg rb_swap;
29145 	struct vop_reg act_info;
29146 	struct vop_reg dsp_info;
29147@@ -173,56 +516,609 @@ struct vop_win_phy {
29148 	struct vop_reg uv_mst;
29149 	struct vop_reg yrgb_vir;
29150 	struct vop_reg uv_vir;
29151-	struct vop_reg y_mir_en;
29152-	struct vop_reg x_mir_en;
29153 
29154+	struct vop_reg channel;
29155 	struct vop_reg dst_alpha_ctl;
29156 	struct vop_reg src_alpha_ctl;
29157-	struct vop_reg alpha_pre_mul;
29158 	struct vop_reg alpha_mode;
29159 	struct vop_reg alpha_en;
29160-	struct vop_reg channel;
29161+	struct vop_reg alpha_pre_mul;
29162+	struct vop_reg global_alpha_val;
29163+	struct vop_reg key_color;
29164+	struct vop_reg key_en;
29165 };
29166 
29167-struct vop_win_yuv2yuv_data {
29168+struct vop_win_data {
29169 	uint32_t base;
29170-	const struct vop_yuv2yuv_phy *phy;
29171+	enum drm_plane_type type;
29172+	const struct vop_win_phy *phy;
29173+	const struct vop_win_phy **area;
29174+	const uint64_t *format_modifiers;
29175+	const struct vop_csc *csc;
29176+	unsigned int area_size;
29177+	u64 feature;
29178+};
29179+
29180+struct vop2_cluster_regs {
29181+	struct vop_reg enable;
29182+	struct vop_reg afbc_enable;
29183+	struct vop_reg lb_mode;
29184+};
29185+
29186+struct vop2_scl_regs {
29187+	struct vop_reg scale_yrgb_x;
29188+	struct vop_reg scale_yrgb_y;
29189+	struct vop_reg scale_cbcr_x;
29190+	struct vop_reg scale_cbcr_y;
29191+	struct vop_reg yrgb_hor_scl_mode;
29192+	struct vop_reg yrgb_hscl_filter_mode;
29193+	struct vop_reg yrgb_ver_scl_mode;
29194+	struct vop_reg yrgb_vscl_filter_mode;
29195+	struct vop_reg cbcr_ver_scl_mode;
29196+	struct vop_reg cbcr_hscl_filter_mode;
29197+	struct vop_reg cbcr_hor_scl_mode;
29198+	struct vop_reg cbcr_vscl_filter_mode;
29199+	struct vop_reg vsd_cbcr_gt2;
29200+	struct vop_reg vsd_cbcr_gt4;
29201+	struct vop_reg vsd_yrgb_gt2;
29202+	struct vop_reg vsd_yrgb_gt4;
29203+	struct vop_reg bic_coe_sel;
29204+};
29205+
29206+struct vop2_win_regs {
29207+	const struct vop2_scl_regs *scl;
29208+	const struct vop2_cluster_regs *cluster;
29209+	const struct vop_afbc *afbc;
29210+
29211+	struct vop_reg gate;
29212+	struct vop_reg enable;
29213+	struct vop_reg format;
29214+	struct vop_reg csc_mode;
29215+	struct vop_reg xmirror;
29216+	struct vop_reg ymirror;
29217+	struct vop_reg rb_swap;
29218+	struct vop_reg uv_swap;
29219+	struct vop_reg act_info;
29220+	struct vop_reg dsp_info;
29221+	struct vop_reg dsp_st;
29222+	struct vop_reg yrgb_mst;
29223+	struct vop_reg uv_mst;
29224+	struct vop_reg yrgb_vir;
29225+	struct vop_reg uv_vir;
29226+	struct vop_reg yuv_clip;
29227+	struct vop_reg lb_mode;
29228 	struct vop_reg y2r_en;
29229+	struct vop_reg r2y_en;
29230+	struct vop_reg channel;
29231+	struct vop_reg dst_alpha_ctl;
29232+	struct vop_reg src_alpha_ctl;
29233+	struct vop_reg alpha_mode;
29234+	struct vop_reg alpha_en;
29235+	struct vop_reg global_alpha_val;
29236+	struct vop_reg color_key;
29237+	struct vop_reg color_key_en;
29238+	struct vop_reg dither_up;
29239+	struct vop_reg axi_id;
29240+	struct vop_reg axi_yrgb_id;
29241+	struct vop_reg axi_uv_id;
29242 };
29243 
29244-struct vop_win_data {
29245+struct vop2_video_port_regs {
29246+	struct vop_reg cfg_done;
29247+	struct vop_reg overlay_mode;
29248+	struct vop_reg dsp_background;
29249+	struct vop_reg port_mux;
29250+	struct vop_reg out_mode;
29251+	struct vop_reg standby;
29252+	struct vop_reg dsp_interlace;
29253+	struct vop_reg dsp_filed_pol;
29254+	struct vop_reg dsp_data_swap;
29255+	struct vop_reg post_dsp_out_r2y;
29256+	struct vop_reg pre_scan_htiming;
29257+	struct vop_reg htotal_pw;
29258+	struct vop_reg hact_st_end;
29259+	struct vop_reg vtotal_pw;
29260+	struct vop_reg vact_st_end;
29261+	struct vop_reg vact_st_end_f1;
29262+	struct vop_reg vs_st_end_f1;
29263+	struct vop_reg hpost_st_end;
29264+	struct vop_reg vpost_st_end;
29265+	struct vop_reg vpost_st_end_f1;
29266+	struct vop_reg post_scl_factor;
29267+	struct vop_reg post_scl_ctrl;
29268+	struct vop_reg dither_down_sel;
29269+	struct vop_reg dither_down_mode;
29270+	struct vop_reg dither_down_en;
29271+	struct vop_reg pre_dither_down_en;
29272+	struct vop_reg dither_up_en;
29273+	struct vop_reg bg_dly;
29274+
29275+	struct vop_reg core_dclk_div;
29276+	struct vop_reg p2i_en;
29277+	struct vop_reg dual_channel_en;
29278+	struct vop_reg dual_channel_swap;
29279+	struct vop_reg dsp_lut_en;
29280+
29281+	struct vop_reg dclk_div2;
29282+	struct vop_reg dclk_div2_phase_lock;
29283+
29284+	struct vop_reg hdr10_en;
29285+	struct vop_reg hdr_lut_update_en;
29286+	struct vop_reg hdr_lut_mode;
29287+	struct vop_reg hdr_lut_mst;
29288+	struct vop_reg sdr2hdr_eotf_en;
29289+	struct vop_reg sdr2hdr_r2r_en;
29290+	struct vop_reg sdr2hdr_r2r_mode;
29291+	struct vop_reg sdr2hdr_oetf_en;
29292+	struct vop_reg sdr2hdr_bypass_en;
29293+	struct vop_reg sdr2hdr_auto_gating_en;
29294+	struct vop_reg sdr2hdr_path_en;
29295+	struct vop_reg hdr2sdr_en;
29296+	struct vop_reg hdr2sdr_bypass_en;
29297+	struct vop_reg hdr2sdr_auto_gating_en;
29298+	struct vop_reg hdr2sdr_src_min;
29299+	struct vop_reg hdr2sdr_src_max;
29300+	struct vop_reg hdr2sdr_normfaceetf;
29301+	struct vop_reg hdr2sdr_dst_min;
29302+	struct vop_reg hdr2sdr_dst_max;
29303+	struct vop_reg hdr2sdr_normfacgamma;
29304+	uint32_t hdr2sdr_eetf_oetf_y0_offset;
29305+	uint32_t hdr2sdr_sat_y0_offset;
29306+	uint32_t sdr2hdr_eotf_oetf_y0_offset;
29307+	uint32_t sdr2hdr_oetf_dx_pow1_offset;
29308+	uint32_t sdr2hdr_oetf_xn1_offset;
29309+	struct vop_reg hdr_src_color_ctrl;
29310+	struct vop_reg hdr_dst_color_ctrl;
29311+	struct vop_reg hdr_src_alpha_ctrl;
29312+	struct vop_reg hdr_dst_alpha_ctrl;
29313+	struct vop_reg bg_mix_ctrl;
29314+
29315+	/* BCSH */
29316+	struct vop_reg bcsh_brightness;
29317+	struct vop_reg bcsh_contrast;
29318+	struct vop_reg bcsh_sat_con;
29319+	struct vop_reg bcsh_sin_hue;
29320+	struct vop_reg bcsh_cos_hue;
29321+	struct vop_reg bcsh_r2y_csc_mode;
29322+	struct vop_reg bcsh_r2y_en;
29323+	struct vop_reg bcsh_y2r_csc_mode;
29324+	struct vop_reg bcsh_y2r_en;
29325+	struct vop_reg bcsh_out_mode;
29326+	struct vop_reg bcsh_en;
29327+
29328+	/* 3d lut */
29329+	struct vop_reg cubic_lut_en;
29330+	struct vop_reg cubic_lut_update_en;
29331+	struct vop_reg cubic_lut_mst;
29332+
29333+	/* cru */
29334+	struct vop_reg dclk_core_div;
29335+	struct vop_reg dclk_out_div;
29336+	struct vop_reg dclk_src_sel;
29337+
29338+	struct vop_reg splice_en;
29339+
29340+	struct vop_reg edpi_wms_hold_en;
29341+	struct vop_reg edpi_te_en;
29342+	struct vop_reg edpi_wms_fs;
29343+	struct vop_reg gamma_update_en;
29344+	struct vop_reg lut_dma_rid;
29345+};
29346+
29347+struct vop2_power_domain_regs {
29348+	struct vop_reg pd;
29349+	struct vop_reg status;
29350+	struct vop_reg bisr_en_status;
29351+	struct vop_reg pmu_status;
29352+};
29353+
29354+struct vop2_dsc_regs {
29355+	/* DSC SYS CTRL */
29356+	struct vop_reg dsc_port_sel;
29357+	struct vop_reg dsc_man_mode;
29358+	struct vop_reg dsc_interface_mode;
29359+	struct vop_reg dsc_pixel_num;
29360+	struct vop_reg dsc_pxl_clk_div;
29361+	struct vop_reg dsc_cds_clk_div;
29362+	struct vop_reg dsc_txp_clk_div;
29363+	struct vop_reg dsc_init_dly_mode;
29364+	struct vop_reg dsc_scan_en;
29365+	struct vop_reg dsc_halt_en;
29366+	struct vop_reg rst_deassert;
29367+	struct vop_reg dsc_flush;
29368+	struct vop_reg dsc_cfg_done;
29369+	struct vop_reg dsc_init_dly_num;
29370+	struct vop_reg scan_timing_para_imd_en;
29371+	struct vop_reg dsc_htotal_pw;
29372+	struct vop_reg dsc_hact_st_end;
29373+	struct vop_reg dsc_vtotal_pw;
29374+	struct vop_reg dsc_vact_st_end;
29375+	struct vop_reg dsc_error_status;
29376+
29377+	/* DSC encoder */
29378+	struct vop_reg dsc_pps0_3;
29379+	struct vop_reg dsc_en;
29380+	struct vop_reg dsc_rbit;
29381+	struct vop_reg dsc_rbyt;
29382+	struct vop_reg dsc_flal;
29383+	struct vop_reg dsc_mer;
29384+	struct vop_reg dsc_epb;
29385+	struct vop_reg dsc_epl;
29386+	struct vop_reg dsc_nslc;
29387+	struct vop_reg dsc_sbo;
29388+	struct vop_reg dsc_ifep;
29389+	struct vop_reg dsc_pps_upd;
29390+	struct vop_reg dsc_status;
29391+	struct vop_reg dsc_ecw;
29392+};
29393+
29394+struct vop2_wb_regs {
29395+	struct vop_reg enable;
29396+	struct vop_reg format;
29397+	struct vop_reg dither_en;
29398+	struct vop_reg r2y_en;
29399+	struct vop_reg yrgb_mst;
29400+	struct vop_reg uv_mst;
29401+	struct vop_reg vp_id;
29402+	struct vop_reg fifo_throd;
29403+	struct vop_reg scale_x_factor;
29404+	struct vop_reg scale_x_en;
29405+	struct vop_reg scale_y_en;
29406+	struct vop_reg axi_yrgb_id;
29407+	struct vop_reg axi_uv_id;
29408+};
29409+
29410+struct vop2_power_domain_data {
29411+	uint8_t id;
29412+	uint8_t parent_id;
29413+	const struct vop2_power_domain_regs *regs;
29414+};
29415+
29416+/*
29417+ * connector interface(RGB/HDMI/eDP/DP/MIPI) data
29418+ */
29419+struct vop2_connector_if_data {
29420+	u32 id;
29421+	const char *clk_src_name;
29422+	const char *clk_parent_name;
29423+	const char *pixclk_name;
29424+	const char *dclk_name;
29425+	u32 post_proc_div_shift;
29426+	u32 if_div_shift;
29427+	u32 if_div_yuv420_shift;
29428+	u32 bus_div_shift;
29429+	u32 pixel_clk_div_shift;
29430+};
29431+
29432+struct vop2_win_data {
29433+	const char *name;
29434+	uint8_t phys_id;
29435+	uint8_t splice_win_id;
29436+	uint8_t pd_id;
29437+	uint8_t axi_id;
29438+	uint8_t axi_yrgb_id;
29439+	uint8_t axi_uv_id;
29440+
29441 	uint32_t base;
29442-	const struct vop_win_phy *phy;
29443 	enum drm_plane_type type;
29444+
29445+	uint32_t nformats;
29446+	const uint32_t *formats;
29447+	const uint64_t *format_modifiers;
29448+	const unsigned int supported_rotations;
29449+
29450+	const struct vop2_win_regs *regs;
29451+	const struct vop2_win_regs **area;
29452+	unsigned int area_size;
29453+
29454+	/*
29455+	 * vertical/horizontal scale up/down filter mode
29456+	 */
29457+	const u8 hsu_filter_mode;
29458+	const u8 hsd_filter_mode;
29459+	const u8 vsu_filter_mode;
29460+	const u8 vsd_filter_mode;
29461+	/**
29462+	 * @layer_sel_id: defined by register OVERLAY_LAYER_SEL of VOP2
29463+	 */
29464+	int layer_sel_id;
29465+	uint64_t feature;
29466+
29467+	unsigned int max_upscale_factor;
29468+	unsigned int max_downscale_factor;
29469+	const uint8_t dly[VOP2_DLY_MODE_MAX];
29470+};
29471+
29472+struct dsc_error_info {
29473+	u32 dsc_error_val;
29474+	char dsc_error_info[50];
29475+};
29476+
29477+struct vop2_dsc_data {
29478+	uint8_t id;
29479+	uint8_t pd_id;
29480+	uint8_t max_slice_num;
29481+	uint8_t max_linebuf_depth;	/* used to generate the bitstream */
29482+	uint8_t min_bits_per_pixel;	/* minimum bits per pixel after compression */
29483+	const char *dsc_txp_clk_src_name;
29484+	const char *dsc_txp_clk_name;
29485+	const char *dsc_pxl_clk_name;
29486+	const char *dsc_cds_clk_name;
29487+	const struct vop2_dsc_regs *regs;
29488+};
29489+
29490+struct vop2_wb_data {
29491+	uint32_t nformats;
29492+	const uint32_t *formats;
29493+	struct vop_rect max_output;
29494+	const struct vop2_wb_regs *regs;
29495+	uint32_t fifo_depth;
29496+};
29497+
29498+struct vop2_video_port_data {
29499+	char id;
29500+	uint8_t splice_vp_id;
29501+	uint16_t lut_dma_rid;
29502+	uint32_t feature;
29503+	uint64_t soc_id[VOP2_SOC_VARIANT];
29504+	uint16_t gamma_lut_len;
29505+	uint16_t cubic_lut_len;
29506+	unsigned long dclk_max;
29507+	struct vop_rect max_output;
29508+	const u8 pre_scan_max_dly[4];
29509+	const struct vop_intr *intr;
29510+	const struct vop_hdr_table *hdr_table;
29511+	const struct vop2_video_port_regs *regs;
29512+};
29513+
29514+struct vop2_layer_regs {
29515+	struct vop_reg layer_sel;
29516+};
29517+
29518+/**
29519+ * struct vop2_layer_data - The logical graphics layer in vop2
29520+ *
29521+ * The zorder:
29522+ *   LAYERn
29523+ *   LAYERn-1
29524+ *     .
29525+ *     .
29526+ *     .
29527+ *   LAYER5
29528+ *   LAYER4
29529+ *   LAYER3
29530+ *   LAYER2
29531+ *   LAYER1
29532+ *   LAYER0
29533+ *
29534+ * Each layer can select an unused window as input and then feed it
29535+ * to the mixer for overlay.
29536+ *
29537+ * The pipeline in vop2:
29538+ *
29539+ * win-->layer-->mixer-->vp--->connector(RGB/LVDS/HDMI/MIPI)
29540+ *
29541+ */
29542+struct vop2_layer_data {
29543+	char id;
29544+	const struct vop2_layer_regs *regs;
29545+};
29546+
29547+struct vop_grf_ctrl {
29548+	struct vop_reg grf_dclk_inv;
29549+	struct vop_reg grf_bt1120_clk_inv;
29550+	struct vop_reg grf_bt656_clk_inv;
29551+	struct vop_reg grf_edp0_en;
29552+	struct vop_reg grf_edp1_en;
29553+	struct vop_reg grf_hdmi0_en;
29554+	struct vop_reg grf_hdmi1_en;
29555+	struct vop_reg grf_hdmi0_dsc_en;
29556+	struct vop_reg grf_hdmi1_dsc_en;
29557+	struct vop_reg grf_hdmi0_pin_pol;
29558+	struct vop_reg grf_hdmi1_pin_pol;
29559 };
29560 
29561 struct vop_data {
29562-	uint32_t version;
29563+	const struct vop_reg_data *init_table;
29564+	unsigned int table_size;
29565+	const struct vop_ctrl *ctrl;
29566 	const struct vop_intr *intr;
29567-	const struct vop_common *common;
29568-	const struct vop_misc *misc;
29569-	const struct vop_modeset *modeset;
29570-	const struct vop_output *output;
29571-	const struct vop_afbc *afbc;
29572-	const struct vop_win_yuv2yuv_data *win_yuv2yuv;
29573 	const struct vop_win_data *win;
29574+	const struct vop_csc_table *csc_table;
29575+	const struct vop_hdr_table *hdr_table;
29576+	const struct vop_grf_ctrl *grf_ctrl;
29577 	unsigned int win_size;
29578-	unsigned int lut_size;
29579-
29580-#define VOP_FEATURE_OUTPUT_RGB10	BIT(0)
29581-#define VOP_FEATURE_INTERNAL_RGB	BIT(1)
29582+	uint32_t version;
29583+	struct vop_rect max_input;
29584+	struct vop_rect max_output;
29585 	u64 feature;
29586+	u64 soc_id;
29587+	u8 vop_id;
29588 };
29589 
29590+struct vop2_ctrl {
29591+	struct vop_reg cfg_done_en;
29592+	struct vop_reg wb_cfg_done;
29593+	struct vop_reg auto_gating_en;
29594+	struct vop_reg ovl_cfg_done_port;
29595+	struct vop_reg ovl_port_mux_cfg_done_imd;
29596+	struct vop_reg ovl_port_mux_cfg;
29597+	struct vop_reg if_ctrl_cfg_done_imd;
29598+	struct vop_reg version;
29599+	struct vop_reg standby;
29600+	struct vop_reg dma_stop;
29601+	struct vop_reg lut_dma_en;
29602+	struct vop_reg axi_outstanding_max_num;
29603+	struct vop_reg axi_max_outstanding_en;
29604+	struct vop_reg hdmi_dclk_out_en;
29605+	struct vop_reg rgb_en;
29606+	struct vop_reg hdmi0_en;
29607+	struct vop_reg hdmi1_en;
29608+	struct vop_reg dp0_en;
29609+	struct vop_reg dp1_en;
29610+	struct vop_reg edp0_en;
29611+	struct vop_reg edp1_en;
29612+	struct vop_reg mipi0_en;
29613+	struct vop_reg mipi1_en;
29614+	struct vop_reg lvds0_en;
29615+	struct vop_reg lvds1_en;
29616+	struct vop_reg bt656_en;
29617+	struct vop_reg bt1120_en;
29618+	struct vop_reg dclk_pol;
29619+	struct vop_reg pin_pol;
29620+	struct vop_reg rgb_dclk_pol;
29621+	struct vop_reg rgb_pin_pol;
29622+	struct vop_reg lvds_dclk_pol;
29623+	struct vop_reg lvds_pin_pol;
29624+	struct vop_reg hdmi_dclk_pol;
29625+	struct vop_reg hdmi_pin_pol;
29626+	struct vop_reg edp_dclk_pol;
29627+	struct vop_reg edp_pin_pol;
29628+	struct vop_reg mipi_dclk_pol;
29629+	struct vop_reg mipi_pin_pol;
29630+	struct vop_reg dp0_dclk_pol;
29631+	struct vop_reg dp0_pin_pol;
29632+	struct vop_reg dp1_dclk_pol;
29633+	struct vop_reg dp1_pin_pol;
29634+
29635+	/* This will be referenced by win_phy_id */
29636+	struct vop_reg win_vp_id[16];
29637+	struct vop_reg win_dly[16];
29638+
29639+	/* connector mux */
29640+	struct vop_reg rgb_mux;
29641+	struct vop_reg hdmi0_mux;
29642+	struct vop_reg hdmi1_mux;
29643+	struct vop_reg dp0_mux;
29644+	struct vop_reg dp1_mux;
29645+	struct vop_reg edp0_mux;
29646+	struct vop_reg edp1_mux;
29647+	struct vop_reg mipi0_mux;
29648+	struct vop_reg mipi1_mux;
29649+	struct vop_reg lvds0_mux;
29650+	struct vop_reg lvds1_mux;
29651+
29652+	struct vop_reg lvds_dual_en;
29653+	struct vop_reg lvds_dual_mode;
29654+	struct vop_reg lvds_dual_channel_swap;
29655+
29656+	struct vop_reg dp_dual_en;
29657+	struct vop_reg edp_dual_en;
29658+	struct vop_reg hdmi_dual_en;
29659+	struct vop_reg mipi_dual_en;
29660+
29661+	struct vop_reg hdmi0_dclk_div;
29662+	struct vop_reg hdmi0_pixclk_div;
29663+	struct vop_reg edp0_dclk_div;
29664+	struct vop_reg edp0_pixclk_div;
29665+
29666+	struct vop_reg hdmi1_dclk_div;
29667+	struct vop_reg hdmi1_pixclk_div;
29668+	struct vop_reg edp1_dclk_div;
29669+	struct vop_reg edp1_pixclk_div;
29670+
29671+	struct vop_reg mipi0_pixclk_div;
29672+	struct vop_reg mipi1_pixclk_div;
29673+	struct vop_reg mipi0_ds_mode;
29674+	struct vop_reg mipi1_ds_mode;
29675+
29676+	struct vop_reg cluster0_src_color_ctrl;
29677+	struct vop_reg cluster0_dst_color_ctrl;
29678+	struct vop_reg cluster0_src_alpha_ctrl;
29679+	struct vop_reg cluster0_dst_alpha_ctrl;
29680+	struct vop_reg src_color_ctrl;
29681+	struct vop_reg dst_color_ctrl;
29682+	struct vop_reg src_alpha_ctrl;
29683+	struct vop_reg dst_alpha_ctrl;
29684+
29685+	struct vop_reg bt1120_yc_swap;
29686+	struct vop_reg bt656_yc_swap;
29687+	struct vop_reg gamma_port_sel;
29688+	struct vop_reg pd_off_imd;
29689+
29690+	struct vop_reg otp_en;
29691+	struct vop_reg reg_done_frm;
29692+	struct vop_reg cfg_done;
29693+};
29694+
29695+/**
29696+ * struct vop2_data - VOP2 data structure
29697+ *
29698+ * @version: VOP IP version
29699+ * @win_size: number of hardware windows
29700+ */
29701+struct vop2_data {
29702+	uint32_t version;
29703+	uint32_t feature;
29704+	uint8_t nr_dscs;
29705+	uint8_t nr_dsc_ecw;
29706+	uint8_t nr_dsc_buffer_flow;
29707+	uint8_t nr_vps;
29708+	uint8_t nr_mixers;
29709+	uint8_t nr_layers;
29710+	uint8_t nr_axi_intr;
29711+	uint8_t nr_gammas;
29712+	uint8_t nr_conns;
29713+	uint8_t nr_pds;
29714+	uint8_t nr_mem_pgs;
29715+	bool delayed_pd;
29716+	const struct vop_intr *axi_intr;
29717+	const struct vop2_ctrl *ctrl;
29718+	const struct vop2_dsc_data *dsc;
29719+	const struct dsc_error_info *dsc_error_ecw;
29720+	const struct dsc_error_info *dsc_error_buffer_flow;
29721+	const struct vop2_win_data *win;
29722+	const struct vop2_video_port_data *vp;
29723+	const struct vop2_connector_if_data *conn;
29724+	const struct vop2_wb_data *wb;
29725+	const struct vop2_layer_data *layer;
29726+	const struct vop2_power_domain_data *pd;
29727+	const struct vop2_power_domain_data *mem_pg;
29728+	const struct vop_csc_table *csc_table;
29729+	const struct vop_hdr_table *hdr_table;
29730+	const struct vop_grf_ctrl *sys_grf;
29731+	const struct vop_grf_ctrl *grf;
29732+	const struct vop_grf_ctrl *vo0_grf;
29733+	const struct vop_grf_ctrl *vo1_grf;
29734+	struct vop_rect max_input;
29735+	struct vop_rect max_output;
29736+
29737+	unsigned int win_size;
29738+};
29739+
29740+#define CVBS_PAL_VDISPLAY		288
29741+
29742 /* interrupt define */
29743-#define DSP_HOLD_VALID_INTR		(1 << 0)
29744-#define FS_INTR				(1 << 1)
29745-#define LINE_FLAG_INTR			(1 << 2)
29746-#define BUS_ERROR_INTR			(1 << 3)
29747+#define DSP_HOLD_VALID_INTR		BIT(0)
29748+#define FS_INTR				BIT(1)
29749+#define LINE_FLAG_INTR			BIT(2)
29750+#define BUS_ERROR_INTR			BIT(3)
29751+#define FS_NEW_INTR			BIT(4)
29752+#define ADDR_SAME_INTR			BIT(5)
29753+#define LINE_FLAG1_INTR			BIT(6)
29754+#define WIN0_EMPTY_INTR			BIT(7)
29755+#define WIN1_EMPTY_INTR			BIT(8)
29756+#define WIN2_EMPTY_INTR			BIT(9)
29757+#define WIN3_EMPTY_INTR			BIT(10)
29758+#define HWC_EMPTY_INTR			BIT(11)
29759+#define POST_BUF_EMPTY_INTR		BIT(12)
29760+#define PWM_GEN_INTR			BIT(13)
29761+#define DMA_FINISH_INTR			BIT(14)
29762+#define FS_FIELD_INTR			BIT(15)
29763+#define FE_INTR				BIT(16)
29764+#define WB_UV_FIFO_FULL_INTR		BIT(17)
29765+#define WB_YRGB_FIFO_FULL_INTR		BIT(18)
29766+#define WB_COMPLETE_INTR		BIT(19)
29767 
29768 #define INTR_MASK			(DSP_HOLD_VALID_INTR | FS_INTR | \
29769-					 LINE_FLAG_INTR | BUS_ERROR_INTR)
29770-
29771+					 LINE_FLAG_INTR | BUS_ERROR_INTR | \
29772+					 FS_NEW_INTR | LINE_FLAG1_INTR | \
29773+					 WIN0_EMPTY_INTR | WIN1_EMPTY_INTR | \
29774+					 WIN2_EMPTY_INTR | WIN3_EMPTY_INTR | \
29775+					 HWC_EMPTY_INTR | \
29776+					 POST_BUF_EMPTY_INTR | \
29777+					 DMA_FINISH_INTR | FS_FIELD_INTR | \
29778+					 FE_INTR)
29779 #define DSP_HOLD_VALID_INTR_EN(x)	((x) << 4)
29780 #define FS_INTR_EN(x)			((x) << 5)
29781 #define LINE_FLAG_INTR_EN(x)		((x) << 6)
29782@@ -256,14 +1152,19 @@ struct vop_data {
29783 /*
29784  * display output interface supported by rockchip lcdc
29785  */
29786-#define ROCKCHIP_OUT_MODE_P888	0
29787-#define ROCKCHIP_OUT_MODE_P666	1
29788-#define ROCKCHIP_OUT_MODE_P565	2
29789+#define ROCKCHIP_OUT_MODE_P888		0
29790+#define ROCKCHIP_OUT_MODE_BT1120	0
29791+#define ROCKCHIP_OUT_MODE_P666		1
29792+#define ROCKCHIP_OUT_MODE_P565		2
29793+#define ROCKCHIP_OUT_MODE_BT656		5
29794+#define ROCKCHIP_OUT_MODE_S888		8
29795+#define ROCKCHIP_OUT_MODE_S888_DUMMY	12
29796+#define ROCKCHIP_OUT_MODE_YUV420	14
29797 /* for use special outface */
29798-#define ROCKCHIP_OUT_MODE_AAAA	15
29799+#define ROCKCHIP_OUT_MODE_AAAA		15
29800 
29801-/* output flags */
29802-#define ROCKCHIP_OUTPUT_DSI_DUAL	BIT(0)
29803+#define ROCKCHIP_OUT_MODE_TYPE(x)	((x) >> 16)
29804+#define ROCKCHIP_OUT_MODE(x)		((x) & 0xffff)
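+/*
+ * The two helpers above split a packed output-mode value: the optional
+ * type lives in bits [31:16] and the mode itself in bits [15:0].
+ */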
29805 
29806 enum alpha_mode {
29807 	ALPHA_STRAIGHT,
29808@@ -292,6 +1193,25 @@ enum factor_mode {
29809 	ALPHA_SRC,
29810 	ALPHA_SRC_INVERSE,
29811 	ALPHA_SRC_GLOBAL,
29812+	ALPHA_DST_GLOBAL,
29813+};
29814+
29815+enum src_factor_mode {
29816+	SRC_FAC_ALPHA_ZERO,
29817+	SRC_FAC_ALPHA_ONE,
29818+	SRC_FAC_ALPHA_DST,
29819+	SRC_FAC_ALPHA_DST_INVERSE,
29820+	SRC_FAC_ALPHA_SRC,
29821+	SRC_FAC_ALPHA_SRC_GLOBAL,
29822+};
29823+
29824+enum dst_factor_mode {
29825+	DST_FAC_ALPHA_ZERO,
29826+	DST_FAC_ALPHA_ONE,
29827+	DST_FAC_ALPHA_SRC,
29828+	DST_FAC_ALPHA_SRC_INVERSE,
29829+	DST_FAC_ALPHA_DST,
29830+	DST_FAC_ALPHA_DST_GLOBAL,
29831 };
29832 
29833 enum scale_mode {
29834@@ -319,6 +1239,18 @@ enum scale_down_mode {
29835 	SCALE_DOWN_AVG = 0x1
29836 };
29837 
29838+enum vop2_scale_up_mode {
29839+	VOP2_SCALE_UP_NRST_NBOR,
29840+	VOP2_SCALE_UP_BIL,
29841+	VOP2_SCALE_UP_BIC,
29842+};
29843+
29844+enum vop2_scale_down_mode {
29845+	VOP2_SCALE_DOWN_NRST_NBOR,
29846+	VOP2_SCALE_DOWN_BIL,
29847+	VOP2_SCALE_DOWN_AVG,
29848+};
29849+
29850 enum dither_down_mode {
29851 	RGB888_TO_RGB565 = 0x0,
29852 	RGB888_TO_RGB666 = 0x1
29853@@ -332,9 +1264,11 @@ enum dither_down_mode_sel {
29854 enum vop_pol {
29855 	HSYNC_POSITIVE = 0,
29856 	VSYNC_POSITIVE = 1,
29857-	DEN_NEGATIVE   = 2
29858+	DEN_NEGATIVE   = 2,
29859+	DCLK_INVERT    = 3
29860 };
29861 
29862+
29863 #define FRAC_16_16(mult, div)    (((mult) << 16) / (div))
29864 #define SCL_FT_DEFAULT_FIXPOINT_SHIFT	12
29865 #define SCL_MAX_VSKIPLINES		4
29866@@ -359,7 +1293,7 @@ static inline uint16_t scl_get_bili_dn_vskip(int src_h, int dst_h,
29867 {
29868 	int act_height;
29869 
29870-	act_height = DIV_ROUND_UP(src_h, vskiplines);
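+	/*
+	 * Open-coded DIV_ROUND_UP(src_h, vskiplines): the number of active
+	 * source lines left after vertical line skipping.
+	 */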
29871+	act_height = (src_h + vskiplines - 1) / vskiplines;
29872 
29873 	if (act_height == dst_h)
29874 		return GET_SCL_FT_BILI_DN(src_h, dst_h) / vskiplines;
29875@@ -409,5 +1343,17 @@ static inline int scl_vop_cal_lb_mode(int width, bool is_yuv)
29876 	return lb_mode;
29877 }
29878 
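+/*
+ * Convert a duration in microseconds to a number of scanlines:
+ * lines = us * pixel_clock[kHz] / htotal / 1000 (mode->clock is in kHz).
+ */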
29879+static inline int us_to_vertical_line(struct drm_display_mode *mode, int us)
29880+{
29881+	return us * mode->clock / mode->htotal / 1000;
29882+}
29883+
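+/*
+ * Linear interpolation between (x1, y1) and (x2, y2) evaluated at x.
+ * Example with illustrative values: interpolate(0, 10, 100, 20, 50)
+ * returns 10 + (20 - 10) * (50 - 0) / (100 - 0) = 15.
+ */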
29884+static inline int interpolate(int x1, int y1, int x2, int y2, int x)
29885+{
29886+	return y1 + (y2 - y1) * (x - x1) / (x2 - x1);
29887+}
29888+
29889+extern void vop2_standby(struct drm_crtc *crtc, bool standby);
29890 extern const struct component_ops vop_component_ops;
29891+extern const struct component_ops vop2_component_ops;
29892 #endif /* _ROCKCHIP_DRM_VOP_H */
29893diff --git a/drivers/gpu/drm/rockchip/rockchip_lvds.c b/drivers/gpu/drm/rockchip/rockchip_lvds.c
29894index e2487937c..ca2838db8 100644
29895--- a/drivers/gpu/drm/rockchip/rockchip_lvds.c
29896+++ b/drivers/gpu/drm/rockchip/rockchip_lvds.c
29897@@ -6,98 +6,141 @@
29898  *      Sandy Huang <hjc@rock-chips.com>
29899  */
29900 
29901-#include <linux/clk.h>
29902 #include <linux/component.h>
29903 #include <linux/mfd/syscon.h>
29904 #include <linux/of_graph.h>
29905 #include <linux/phy/phy.h>
29906-#include <linux/pinctrl/devinfo.h>
29907-#include <linux/platform_device.h>
29908-#include <linux/pm_runtime.h>
29909+#include <linux/of_platform.h>
29910 #include <linux/regmap.h>
29911-#include <linux/reset.h>
29912-
29913 #include <drm/drm_atomic_helper.h>
29914 #include <drm/drm_bridge.h>
29915-#include <drm/drm_dp_helper.h>
29916 #include <drm/drm_of.h>
29917 #include <drm/drm_panel.h>
29918 #include <drm/drm_probe_helper.h>
29919 #include <drm/drm_simple_kms_helper.h>
29920 
29921+#include <uapi/linux/videodev2.h>
29922+
29923 #include "rockchip_drm_drv.h"
29924 #include "rockchip_drm_vop.h"
29925-#include "rockchip_lvds.h"
29926 
29927-#define DISPLAY_OUTPUT_RGB		0
29928-#define DISPLAY_OUTPUT_LVDS		1
29929-#define DISPLAY_OUTPUT_DUAL_LVDS	2
29930+#define HIWORD_UPDATE(v, h, l)  (((v) << (l)) | (GENMASK(h, l) << 16))
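+/*
+ * Rockchip GRF registers treat the upper 16 bits as a write-enable mask for
+ * the lower 16 bits, so e.g. HIWORD_UPDATE(1, 12, 12) sets bit 12 and its
+ * write-enable bit 28 while leaving every other bit untouched.
+ */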
29931+
29932+#define PX30_GRF_PD_VO_CON1		0x0438
29933+#define PX30_LVDS_SELECT(x)		HIWORD_UPDATE(x, 14, 13)
29934+#define PX30_LVDS_MODE_EN(x)		HIWORD_UPDATE(x, 12, 12)
29935+#define PX30_LVDS_MSBSEL(x)		HIWORD_UPDATE(x, 11, 11)
29936+#define PX30_LVDS_P2S_EN(x)		HIWORD_UPDATE(x,  6,  6)
29937+#define PX30_LVDS_VOP_SEL(x)		HIWORD_UPDATE(x,  1,  1)
29938+
29939+#define RK3126_GRF_LVDS_CON0		0x0150
29940+#define RK3126_LVDS_P2S_EN(x)		HIWORD_UPDATE(x,  9,  9)
29941+#define RK3126_LVDS_MODE_EN(x)		HIWORD_UPDATE(x,  6,  6)
29942+#define RK3126_LVDS_MSBSEL(x)		HIWORD_UPDATE(x,  3,  3)
29943+#define RK3126_LVDS_SELECT(x)		HIWORD_UPDATE(x,  2,  1)
29944+
29945+#define RK3288_GRF_SOC_CON6		0x025c
29946+#define RK3288_LVDS_LCDC_SEL(x)		HIWORD_UPDATE(x,  3,  3)
29947+#define RK3288_GRF_SOC_CON7		0x0260
29948+#define RK3288_LVDS_PWRDWN(x)		HIWORD_UPDATE(x, 15, 15)
29949+#define RK3288_LVDS_CON_ENABLE_2(x)	HIWORD_UPDATE(x, 12, 12)
29950+#define RK3288_LVDS_CON_ENABLE_1(x)	HIWORD_UPDATE(x, 11, 11)
29951+#define RK3288_LVDS_CON_DEN_POL(x)	HIWORD_UPDATE(x, 10, 10)
29952+#define RK3288_LVDS_CON_HS_POL(x)	HIWORD_UPDATE(x,  9,  9)
29953+#define RK3288_LVDS_CON_CLKINV(x)	HIWORD_UPDATE(x,  8,  8)
29954+#define RK3288_LVDS_CON_STARTPHASE(x)	HIWORD_UPDATE(x,  7,  7)
29955+#define RK3288_LVDS_CON_TTL_EN(x)	HIWORD_UPDATE(x,  6,  6)
29956+#define RK3288_LVDS_CON_STARTSEL(x)	HIWORD_UPDATE(x,  5,  5)
29957+#define RK3288_LVDS_CON_CHASEL(x)	HIWORD_UPDATE(x,  4,  4)
29958+#define RK3288_LVDS_CON_MSBSEL(x)	HIWORD_UPDATE(x,  3,  3)
29959+#define RK3288_LVDS_CON_SELECT(x)	HIWORD_UPDATE(x,  2,  0)
29960+
29961+#define RK3368_GRF_SOC_CON7		0x041c
29962+#define RK3368_LVDS_SELECT(x)		HIWORD_UPDATE(x, 14, 13)
29963+#define RK3368_LVDS_MODE_EN(x)		HIWORD_UPDATE(x, 12, 12)
29964+#define RK3368_LVDS_MSBSEL(x)		HIWORD_UPDATE(x, 11, 11)
29965+#define RK3368_LVDS_P2S_EN(x)		HIWORD_UPDATE(x,  6,  6)
29966+
29967+#define RK3568_GRF_VO_CON0		0x0360
29968+#define RK3568_LVDS1_SELECT(x)		HIWORD_UPDATE(x, 13, 12)
29969+#define RK3568_LVDS1_MSBSEL(x)		HIWORD_UPDATE(x, 11, 11)
29970+#define RK3568_LVDS0_SELECT(x)		HIWORD_UPDATE(x,  5,  4)
29971+#define RK3568_LVDS0_MSBSEL(x)		HIWORD_UPDATE(x,  3,  3)
29972+#define RK3568_GRF_VO_CON2		0x0368
29973+#define RK3568_LVDS0_DCLK_INV_SEL(x)	HIWORD_UPDATE(x,  9,  9)
29974+#define RK3568_LVDS0_DCLK_DIV2_SEL(x)	HIWORD_UPDATE(x,  8,  8)
29975+#define RK3568_LVDS0_MODE_EN(x)		HIWORD_UPDATE(x,  1,  1)
29976+#define RK3568_LVDS0_P2S_EN(x)		HIWORD_UPDATE(x,  0,  0)
29977+#define RK3568_GRF_VO_CON3		0x036c
29978+#define RK3568_LVDS1_DCLK_INV_SEL(x)	HIWORD_UPDATE(x,  9,  9)
29979+#define RK3568_LVDS1_DCLK_DIV2_SEL(x)	HIWORD_UPDATE(x,  8,  8)
29980+#define RK3568_LVDS1_MODE_EN(x)		HIWORD_UPDATE(x,  1,  1)
29981+#define RK3568_LVDS1_P2S_EN(x)		HIWORD_UPDATE(x,  0,  0)
29982+
29983+enum lvds_format {
29984+	LVDS_8BIT_MODE_FORMAT_1,
29985+	LVDS_8BIT_MODE_FORMAT_2,
29986+	LVDS_8BIT_MODE_FORMAT_3,
29987+	LVDS_6BIT_MODE,
29988+	LVDS_10BIT_MODE_FORMAT_1,
29989+	LVDS_10BIT_MODE_FORMAT_2,
29990+};
29991 
29992 struct rockchip_lvds;
29993 
29994-#define connector_to_lvds(c) \
29995-		container_of(c, struct rockchip_lvds, connector)
29996-
29997-#define encoder_to_lvds(c) \
29998-		container_of(c, struct rockchip_lvds, encoder)
29999-
30000-/**
30001- * rockchip_lvds_soc_data - rockchip lvds Soc private data
30002- * @probe: LVDS platform probe function
30003- * @helper_funcs: LVDS connector helper functions
30004- */
30005-struct rockchip_lvds_soc_data {
30006-	int (*probe)(struct platform_device *pdev, struct rockchip_lvds *lvds);
30007-	const struct drm_encoder_helper_funcs *helper_funcs;
30008+struct rockchip_lvds_funcs {
30009+	int (*probe)(struct rockchip_lvds *lvds);
30010+	void (*enable)(struct rockchip_lvds *lvds);
30011+	void (*disable)(struct rockchip_lvds *lvds);
30012 };
30013 
30014 struct rockchip_lvds {
30015+	int id;
30016 	struct device *dev;
30017-	void __iomem *regs;
30018+	struct phy *phy;
30019 	struct regmap *grf;
30020-	struct clk *pclk;
30021-	struct phy *dphy;
30022-	const struct rockchip_lvds_soc_data *soc_data;
30023-	int output; /* rgb lvds or dual lvds output */
30024-	int format; /* vesa or jeida format */
30025-	struct drm_device *drm_dev;
30026+	const struct rockchip_lvds_funcs *funcs;
30027+	enum lvds_format format;
30028+	bool data_swap;
30029+	bool dual_channel;
30030+	enum drm_lvds_dual_link_pixels pixel_order;
30031+
30032+	struct rockchip_lvds *primary;
30033+	struct rockchip_lvds *secondary;
30034+
30035 	struct drm_panel *panel;
30036 	struct drm_bridge *bridge;
30037 	struct drm_connector connector;
30038 	struct drm_encoder encoder;
30039-	struct dev_pin_info *pins;
30040+	struct drm_display_mode mode;
30041+	struct rockchip_drm_sub_dev sub_dev;
30042 };
30043 
30044-static inline void rk3288_writel(struct rockchip_lvds *lvds, u32 offset,
30045-				 u32 val)
30046+static inline struct rockchip_lvds *connector_to_lvds(struct drm_connector *c)
30047 {
30048-	writel_relaxed(val, lvds->regs + offset);
30049-	if (lvds->output == DISPLAY_OUTPUT_LVDS)
30050-		return;
30051-	writel_relaxed(val, lvds->regs + offset + RK3288_LVDS_CH1_OFFSET);
30052+	return container_of(c, struct rockchip_lvds, connector);
30053 }
30054 
30055-static inline int rockchip_lvds_name_to_format(const char *s)
30056+static inline struct rockchip_lvds *encoder_to_lvds(struct drm_encoder *e)
30057 {
30058-	if (strncmp(s, "jeida-18", 8) == 0)
30059-		return LVDS_JEIDA_18;
30060-	else if (strncmp(s, "jeida-24", 8) == 0)
30061-		return LVDS_JEIDA_24;
30062-	else if (strncmp(s, "vesa-24", 7) == 0)
30063-		return LVDS_VESA_24;
30064-
30065-	return -EINVAL;
30066+	return container_of(e, struct rockchip_lvds, encoder);
30067 }
30068 
30069-static inline int rockchip_lvds_name_to_output(const char *s)
30070+static int
30071+rockchip_lvds_atomic_connector_get_property(struct drm_connector *connector,
30072+					    const struct drm_connector_state *state,
30073+					    struct drm_property *property,
30074+					    uint64_t *val)
30075 {
30076-	if (strncmp(s, "rgb", 3) == 0)
30077-		return DISPLAY_OUTPUT_RGB;
30078-	else if (strncmp(s, "lvds", 4) == 0)
30079-		return DISPLAY_OUTPUT_LVDS;
30080-	else if (strncmp(s, "duallvds", 8) == 0)
30081-		return DISPLAY_OUTPUT_DUAL_LVDS;
30082+	struct rockchip_lvds *lvds = connector_to_lvds(connector);
30083+	struct rockchip_drm_private *private = connector->dev->dev_private;
30084 
30085+	if (property == private->connector_id_prop) {
30086+		*val = lvds->id;
30087+		return 0;
30088+	}
30089+
30090+	DRM_ERROR("failed to get rockchip LVDS property\n");
30091 	return -EINVAL;
30092 }
30093 
30094@@ -107,6 +150,7 @@ static const struct drm_connector_funcs rockchip_lvds_connector_funcs = {
30095 	.reset = drm_atomic_helper_connector_reset,
30096 	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
30097 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
30098+	.atomic_get_property = rockchip_lvds_atomic_connector_get_property,
30099 };
30100 
30101 static int rockchip_lvds_connector_get_modes(struct drm_connector *connector)
30102@@ -122,504 +166,235 @@ struct drm_connector_helper_funcs rockchip_lvds_connector_helper_funcs = {
30103 	.get_modes = rockchip_lvds_connector_get_modes,
30104 };
30105 
30106-static int
30107-rockchip_lvds_encoder_atomic_check(struct drm_encoder *encoder,
30108-				   struct drm_crtc_state *crtc_state,
30109-				   struct drm_connector_state *conn_state)
30110-{
30111-	struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state);
30112-
30113-	s->output_mode = ROCKCHIP_OUT_MODE_P888;
30114-	s->output_type = DRM_MODE_CONNECTOR_LVDS;
30115-
30116-	return 0;
30117-}
30118-
30119-static int rk3288_lvds_poweron(struct rockchip_lvds *lvds)
30120+static void
30121+rockchip_lvds_encoder_atomic_mode_set(struct drm_encoder *encoder,
30122+				      struct drm_crtc_state *crtc_state,
30123+				      struct drm_connector_state *conn_state)
30124 {
30125-	int ret;
30126-	u32 val;
30127-
30128-	ret = clk_enable(lvds->pclk);
30129-	if (ret < 0) {
30130-		DRM_DEV_ERROR(lvds->dev, "failed to enable lvds pclk %d\n", ret);
30131-		return ret;
30132-	}
30133-	ret = pm_runtime_resume_and_get(lvds->dev);
30134-	if (ret < 0) {
30135-		DRM_DEV_ERROR(lvds->dev, "failed to get pm runtime: %d\n", ret);
30136-		clk_disable(lvds->pclk);
30137-		return ret;
30138-	}
30139-	val = RK3288_LVDS_CH0_REG0_LANE4_EN | RK3288_LVDS_CH0_REG0_LANE3_EN |
30140-		RK3288_LVDS_CH0_REG0_LANE2_EN | RK3288_LVDS_CH0_REG0_LANE1_EN |
30141-		RK3288_LVDS_CH0_REG0_LANE0_EN;
30142-	if (lvds->output == DISPLAY_OUTPUT_RGB) {
30143-		val |= RK3288_LVDS_CH0_REG0_TTL_EN |
30144-			RK3288_LVDS_CH0_REG0_LANECK_EN;
30145-		rk3288_writel(lvds, RK3288_LVDS_CH0_REG0, val);
30146-		rk3288_writel(lvds, RK3288_LVDS_CH0_REG2,
30147-			      RK3288_LVDS_PLL_FBDIV_REG2(0x46));
30148-		rk3288_writel(lvds, RK3288_LVDS_CH0_REG4,
30149-			      RK3288_LVDS_CH0_REG4_LANECK_TTL_MODE |
30150-			      RK3288_LVDS_CH0_REG4_LANE4_TTL_MODE |
30151-			      RK3288_LVDS_CH0_REG4_LANE3_TTL_MODE |
30152-			      RK3288_LVDS_CH0_REG4_LANE2_TTL_MODE |
30153-			      RK3288_LVDS_CH0_REG4_LANE1_TTL_MODE |
30154-			      RK3288_LVDS_CH0_REG4_LANE0_TTL_MODE);
30155-		rk3288_writel(lvds, RK3288_LVDS_CH0_REG5,
30156-			      RK3288_LVDS_CH0_REG5_LANECK_TTL_DATA |
30157-			      RK3288_LVDS_CH0_REG5_LANE4_TTL_DATA |
30158-			      RK3288_LVDS_CH0_REG5_LANE3_TTL_DATA |
30159-			      RK3288_LVDS_CH0_REG5_LANE2_TTL_DATA |
30160-			      RK3288_LVDS_CH0_REG5_LANE1_TTL_DATA |
30161-			      RK3288_LVDS_CH0_REG5_LANE0_TTL_DATA);
30162-	} else {
30163-		val |= RK3288_LVDS_CH0_REG0_LVDS_EN |
30164-			    RK3288_LVDS_CH0_REG0_LANECK_EN;
30165-		rk3288_writel(lvds, RK3288_LVDS_CH0_REG0, val);
30166-		rk3288_writel(lvds, RK3288_LVDS_CH0_REG1,
30167-			      RK3288_LVDS_CH0_REG1_LANECK_BIAS |
30168-			      RK3288_LVDS_CH0_REG1_LANE4_BIAS |
30169-			      RK3288_LVDS_CH0_REG1_LANE3_BIAS |
30170-			      RK3288_LVDS_CH0_REG1_LANE2_BIAS |
30171-			      RK3288_LVDS_CH0_REG1_LANE1_BIAS |
30172-			      RK3288_LVDS_CH0_REG1_LANE0_BIAS);
30173-		rk3288_writel(lvds, RK3288_LVDS_CH0_REG2,
30174-			      RK3288_LVDS_CH0_REG2_RESERVE_ON |
30175-			      RK3288_LVDS_CH0_REG2_LANECK_LVDS_MODE |
30176-			      RK3288_LVDS_CH0_REG2_LANE4_LVDS_MODE |
30177-			      RK3288_LVDS_CH0_REG2_LANE3_LVDS_MODE |
30178-			      RK3288_LVDS_CH0_REG2_LANE2_LVDS_MODE |
30179-			      RK3288_LVDS_CH0_REG2_LANE1_LVDS_MODE |
30180-			      RK3288_LVDS_CH0_REG2_LANE0_LVDS_MODE |
30181-			      RK3288_LVDS_PLL_FBDIV_REG2(0x46));
30182-		rk3288_writel(lvds, RK3288_LVDS_CH0_REG4, 0x00);
30183-		rk3288_writel(lvds, RK3288_LVDS_CH0_REG5, 0x00);
30184+	struct rockchip_lvds *lvds = encoder_to_lvds(encoder);
30185+	struct drm_connector *connector = &lvds->connector;
30186+	struct drm_display_info *info = &connector->display_info;
30187+	u32 bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG;
30188+
30189+	if (info->num_bus_formats)
30190+		bus_format = info->bus_formats[0];
30191+
30192+	switch (bus_format) {
30193+	case MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA:	/* jeida-24 */
30194+		lvds->format = LVDS_8BIT_MODE_FORMAT_2;
30195+		break;
30196+	case MEDIA_BUS_FMT_RGB101010_1X7X5_JEIDA: /* jeida-30 */
30197+		lvds->format = LVDS_10BIT_MODE_FORMAT_2;
30198+		break;
30199+	case MEDIA_BUS_FMT_RGB666_1X7X3_SPWG:	/* vesa-18 */
30200+		lvds->format = LVDS_8BIT_MODE_FORMAT_3;
30201+		break;
30202+	case MEDIA_BUS_FMT_RGB101010_1X7X5_SPWG: /* vesa-30 */
30203+		lvds->format = LVDS_10BIT_MODE_FORMAT_1;
30204+		break;
30205+	case MEDIA_BUS_FMT_RGB888_1X7X4_SPWG:	/* vesa-24 */
30206+	default:
30207+		lvds->format = LVDS_8BIT_MODE_FORMAT_1;
30208+		break;
30209 	}
30210-	rk3288_writel(lvds, RK3288_LVDS_CH0_REG3,
30211-		      RK3288_LVDS_PLL_FBDIV_REG3(0x46));
30212-	rk3288_writel(lvds, RK3288_LVDS_CH0_REGD,
30213-		      RK3288_LVDS_PLL_PREDIV_REGD(0x0a));
30214-	rk3288_writel(lvds, RK3288_LVDS_CH0_REG20,
30215-		      RK3288_LVDS_CH0_REG20_LSB);
30216-
30217-	rk3288_writel(lvds, RK3288_LVDS_CFG_REGC,
30218-		      RK3288_LVDS_CFG_REGC_PLL_ENABLE);
30219-	rk3288_writel(lvds, RK3288_LVDS_CFG_REG21,
30220-		      RK3288_LVDS_CFG_REG21_TX_ENABLE);
30221-
30222-	return 0;
30223-}
30224 
30225-static void rk3288_lvds_poweroff(struct rockchip_lvds *lvds)
30226-{
30227-	int ret;
30228-	u32 val;
30229+	if (lvds->secondary)
30230+		lvds->secondary->format = lvds->format;
30231 
30232-	rk3288_writel(lvds, RK3288_LVDS_CFG_REG21,
30233-		      RK3288_LVDS_CFG_REG21_TX_ENABLE);
30234-	rk3288_writel(lvds, RK3288_LVDS_CFG_REGC,
30235-		      RK3288_LVDS_CFG_REGC_PLL_ENABLE);
30236-	val = LVDS_DUAL | LVDS_TTL_EN | LVDS_CH0_EN | LVDS_CH1_EN | LVDS_PWRDN;
30237-	val |= val << 16;
30238-	ret = regmap_write(lvds->grf, RK3288_LVDS_GRF_SOC_CON7, val);
30239-	if (ret != 0)
30240-		DRM_DEV_ERROR(lvds->dev, "Could not write to GRF: %d\n", ret);
30241-
30242-	pm_runtime_put(lvds->dev);
30243-	clk_disable(lvds->pclk);
30244+	drm_mode_copy(&lvds->mode, &crtc_state->adjusted_mode);
30245 }
30246 
30247-static int rk3288_lvds_grf_config(struct drm_encoder *encoder,
30248-				  struct drm_display_mode *mode)
30249+static int
30250+rockchip_lvds_encoder_atomic_check(struct drm_encoder *encoder,
30251+				   struct drm_crtc_state *crtc_state,
30252+				   struct drm_connector_state *conn_state)
30253 {
30254+	struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state);
30255 	struct rockchip_lvds *lvds = encoder_to_lvds(encoder);
30256-	u8 pin_hsync = (mode->flags & DRM_MODE_FLAG_PHSYNC) ? 1 : 0;
30257-	u8 pin_dclk = (mode->flags & DRM_MODE_FLAG_PCSYNC) ? 1 : 0;
30258-	u32 val;
30259-	int ret;
30260-
30261-	/* iomux to LCD data/sync mode */
30262-	if (lvds->output == DISPLAY_OUTPUT_RGB)
30263-		if (lvds->pins && !IS_ERR(lvds->pins->default_state))
30264-			pinctrl_select_state(lvds->pins->p,
30265-					     lvds->pins->default_state);
30266-	val = lvds->format | LVDS_CH0_EN;
30267-	if (lvds->output == DISPLAY_OUTPUT_RGB)
30268-		val |= LVDS_TTL_EN | LVDS_CH1_EN;
30269-	else if (lvds->output == DISPLAY_OUTPUT_DUAL_LVDS)
30270-		val |= LVDS_DUAL | LVDS_CH1_EN;
30271-
30272-	if ((mode->htotal - mode->hsync_start) & 0x01)
30273-		val |= LVDS_START_PHASE_RST_1;
30274-
30275-	val |= (pin_dclk << 8) | (pin_hsync << 9);
30276-	val |= (0xffff << 16);
30277-	ret = regmap_write(lvds->grf, RK3288_LVDS_GRF_SOC_CON7, val);
30278-	if (ret)
30279-		DRM_DEV_ERROR(lvds->dev, "Could not write to GRF: %d\n", ret);
30280-
30281-	return ret;
30282-}
30283+	struct drm_connector *connector = conn_state->connector;
30284+	struct drm_display_info *info = &connector->display_info;
30285 
30286-static int rk3288_lvds_set_vop_source(struct rockchip_lvds *lvds,
30287-				      struct drm_encoder *encoder)
30288-{
30289-	u32 val;
30290-	int ret;
30291+	if (info->num_bus_formats)
30292+		s->bus_format = info->bus_formats[0];
30293+	else
30294+		s->bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG;
30295 
30296-	ret = drm_of_encoder_active_endpoint_id(lvds->dev->of_node, encoder);
30297-	if (ret < 0)
30298-		return ret;
30299+	s->output_mode = ROCKCHIP_OUT_MODE_P888;
30300 
30301-	val = RK3288_LVDS_SOC_CON6_SEL_VOP_LIT << 16;
30302-	if (ret)
30303-		val |= RK3288_LVDS_SOC_CON6_SEL_VOP_LIT;
30304+	if (s->bus_format == MEDIA_BUS_FMT_RGB101010_1X7X5_SPWG ||
30305+	    s->bus_format == MEDIA_BUS_FMT_RGB101010_1X7X5_JEIDA)
30306+		s->output_mode = ROCKCHIP_OUT_MODE_AAAA;
30307 
30308-	ret = regmap_write(lvds->grf, RK3288_LVDS_GRF_SOC_CON6, val);
30309-	if (ret < 0)
30310-		return ret;
30311+	s->output_type = DRM_MODE_CONNECTOR_LVDS;
30312+	s->bus_flags = info->bus_flags;
30313+	s->tv_state = &conn_state->tv;
30314+	s->eotf = HDMI_EOTF_TRADITIONAL_GAMMA_SDR;
30315+	s->color_space = V4L2_COLORSPACE_DEFAULT;
30316+
30317+	switch (lvds->pixel_order) {
30318+	case DRM_LVDS_DUAL_LINK_ODD_EVEN_PIXELS:
30319+		s->output_flags |= ROCKCHIP_OUTPUT_DUAL_CHANNEL_ODD_EVEN_MODE;
30320+		s->output_if |= VOP_OUTPUT_IF_LVDS1 | VOP_OUTPUT_IF_LVDS0;
30321+		break;
30322+	case DRM_LVDS_DUAL_LINK_EVEN_ODD_PIXELS:
30323+		s->output_flags |= ROCKCHIP_OUTPUT_DUAL_CHANNEL_ODD_EVEN_MODE;
30324+		s->output_flags |= ROCKCHIP_OUTPUT_DATA_SWAP;
30325+		s->output_if |= VOP_OUTPUT_IF_LVDS1 | VOP_OUTPUT_IF_LVDS0;
30326+		break;
30327+/*
30328+ * FIXME: implement this with a GKI-compatible version.
30329+ */
30330+#if 0
30331+	case DRM_LVDS_DUAL_LINK_LEFT_RIGHT_PIXELS:
30332+		s->output_flags |= ROCKCHIP_OUTPUT_DUAL_CHANNEL_LEFT_RIGHT_MODE;
30333+		s->output_if |= VOP_OUTPUT_IF_LVDS1 | VOP_OUTPUT_IF_LVDS0;
30334+		break;
30335+	case DRM_LVDS_DUAL_LINK_RIGHT_LEFT_PIXELS:
30336+		s->output_flags |= ROCKCHIP_OUTPUT_DUAL_CHANNEL_LEFT_RIGHT_MODE;
30337+		s->output_flags |= ROCKCHIP_OUTPUT_DATA_SWAP;
30338+		s->output_if |= VOP_OUTPUT_IF_LVDS1 | VOP_OUTPUT_IF_LVDS0;
30339+		break;
30340+#endif
30341+	default:
30342+		if (lvds->id)
30343+			s->output_if |= VOP_OUTPUT_IF_LVDS1;
30344+		else
30345+			s->output_if |= VOP_OUTPUT_IF_LVDS0;
30346+		break;
30347+	}
30348 
30349 	return 0;
30350 }
30351 
30352-static void rk3288_lvds_encoder_enable(struct drm_encoder *encoder)
30353+static void rockchip_lvds_enable(struct rockchip_lvds *lvds)
30354 {
30355-	struct rockchip_lvds *lvds = encoder_to_lvds(encoder);
30356-	struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
30357 	int ret;
30358 
30359-	drm_panel_prepare(lvds->panel);
30360+	if (lvds->funcs->enable)
30361+		lvds->funcs->enable(lvds);
30362 
30363-	ret = rk3288_lvds_poweron(lvds);
30364-	if (ret < 0) {
30365-		DRM_DEV_ERROR(lvds->dev, "failed to power on LVDS: %d\n", ret);
30366-		drm_panel_unprepare(lvds->panel);
30367-		return;
30368-	}
30369-
30370-	ret = rk3288_lvds_grf_config(encoder, mode);
30371+	ret = phy_set_mode(lvds->phy, PHY_MODE_LVDS);
30372 	if (ret) {
30373-		DRM_DEV_ERROR(lvds->dev, "failed to configure LVDS: %d\n", ret);
30374-		drm_panel_unprepare(lvds->panel);
30375+		DRM_DEV_ERROR(lvds->dev, "failed to set phy mode: %d\n", ret);
30376 		return;
30377 	}
30378 
30379-	ret = rk3288_lvds_set_vop_source(lvds, encoder);
30380-	if (ret) {
30381-		DRM_DEV_ERROR(lvds->dev, "failed to set VOP source: %d\n", ret);
30382-		drm_panel_unprepare(lvds->panel);
30383-		return;
30384-	}
30385+	phy_power_on(lvds->phy);
30386 
30387-	drm_panel_enable(lvds->panel);
30388+	if (lvds->secondary)
30389+		rockchip_lvds_enable(lvds->secondary);
30390 }
30391 
30392-static void rk3288_lvds_encoder_disable(struct drm_encoder *encoder)
30393+static void rockchip_lvds_disable(struct rockchip_lvds *lvds)
30394 {
30395-	struct rockchip_lvds *lvds = encoder_to_lvds(encoder);
30396+	if (lvds->funcs->disable)
30397+		lvds->funcs->disable(lvds);
30398 
30399-	drm_panel_disable(lvds->panel);
30400-	rk3288_lvds_poweroff(lvds);
30401-	drm_panel_unprepare(lvds->panel);
30402-}
30403+	phy_power_off(lvds->phy);
30404 
30405-static int px30_lvds_poweron(struct rockchip_lvds *lvds)
30406-{
30407-	int ret;
30408-
30409-	ret = pm_runtime_resume_and_get(lvds->dev);
30410-	if (ret < 0) {
30411-		DRM_DEV_ERROR(lvds->dev, "failed to get pm runtime: %d\n", ret);
30412-		return ret;
30413-	}
30414-
30415-	/* Enable LVDS mode */
30416-	ret = regmap_update_bits(lvds->grf, PX30_LVDS_GRF_PD_VO_CON1,
30417-				  PX30_LVDS_MODE_EN(1) | PX30_LVDS_P2S_EN(1),
30418-				  PX30_LVDS_MODE_EN(1) | PX30_LVDS_P2S_EN(1));
30419-	if (ret)
30420-		pm_runtime_put(lvds->dev);
30421-
30422-	return ret;
30423-}
30424-
30425-static void px30_lvds_poweroff(struct rockchip_lvds *lvds)
30426-{
30427-	regmap_update_bits(lvds->grf, PX30_LVDS_GRF_PD_VO_CON1,
30428-			   PX30_LVDS_MODE_EN(1) | PX30_LVDS_P2S_EN(1),
30429-			   PX30_LVDS_MODE_EN(0) | PX30_LVDS_P2S_EN(0));
30430-
30431-	pm_runtime_put(lvds->dev);
30432+	if (lvds->secondary)
30433+		rockchip_lvds_disable(lvds->secondary);
30434 }
30435 
30436-static int px30_lvds_grf_config(struct drm_encoder *encoder,
30437-				struct drm_display_mode *mode)
30438+static void rockchip_lvds_encoder_enable(struct drm_encoder *encoder)
30439 {
30440 	struct rockchip_lvds *lvds = encoder_to_lvds(encoder);
30441 
30442-	if (lvds->output != DISPLAY_OUTPUT_LVDS) {
30443-		DRM_DEV_ERROR(lvds->dev, "Unsupported display output %d\n",
30444-			      lvds->output);
30445-		return -EINVAL;
30446-	}
30447-
30448-	/* Set format */
30449-	return regmap_update_bits(lvds->grf, PX30_LVDS_GRF_PD_VO_CON1,
30450-				  PX30_LVDS_FORMAT(lvds->format),
30451-				  PX30_LVDS_FORMAT(lvds->format));
30452-}
30453-
30454-static int px30_lvds_set_vop_source(struct rockchip_lvds *lvds,
30455-				    struct drm_encoder *encoder)
30456-{
30457-	int vop;
30458-
30459-	vop = drm_of_encoder_active_endpoint_id(lvds->dev->of_node, encoder);
30460-	if (vop < 0)
30461-		return vop;
30462-
30463-	return regmap_update_bits(lvds->grf, PX30_LVDS_GRF_PD_VO_CON1,
30464-				  PX30_LVDS_VOP_SEL(1),
30465-				  PX30_LVDS_VOP_SEL(vop));
30466+	if (lvds->panel)
30467+		drm_panel_prepare(lvds->panel);
30468+	rockchip_lvds_enable(lvds);
30469+	if (lvds->panel)
30470+		drm_panel_enable(lvds->panel);
30471 }
30472 
30473-static void px30_lvds_encoder_enable(struct drm_encoder *encoder)
30474+static void rockchip_lvds_encoder_disable(struct drm_encoder *encoder)
30475 {
30476 	struct rockchip_lvds *lvds = encoder_to_lvds(encoder);
30477-	struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
30478-	int ret;
30479-
30480-	drm_panel_prepare(lvds->panel);
30481-
30482-	ret = px30_lvds_poweron(lvds);
30483-	if (ret) {
30484-		DRM_DEV_ERROR(lvds->dev, "failed to power on LVDS: %d\n", ret);
30485-		drm_panel_unprepare(lvds->panel);
30486-		return;
30487-	}
30488-
30489-	ret = px30_lvds_grf_config(encoder, mode);
30490-	if (ret) {
30491-		DRM_DEV_ERROR(lvds->dev, "failed to configure LVDS: %d\n", ret);
30492-		drm_panel_unprepare(lvds->panel);
30493-		return;
30494-	}
30495 
30496-	ret = px30_lvds_set_vop_source(lvds, encoder);
30497-	if (ret) {
30498-		DRM_DEV_ERROR(lvds->dev, "failed to set VOP source: %d\n", ret);
30499+	if (lvds->panel)
30500+		drm_panel_disable(lvds->panel);
30501+	rockchip_lvds_disable(lvds);
30502+	if (lvds->panel)
30503 		drm_panel_unprepare(lvds->panel);
30504-		return;
30505-	}
30506-
30507-	drm_panel_enable(lvds->panel);
30508 }
30509 
30510-static void px30_lvds_encoder_disable(struct drm_encoder *encoder)
30511+static void rockchip_lvds_encoder_loader_protect(struct drm_encoder *encoder,
30512+						 bool on)
30513 {
30514 	struct rockchip_lvds *lvds = encoder_to_lvds(encoder);
30515 
30516-	drm_panel_disable(lvds->panel);
30517-	px30_lvds_poweroff(lvds);
30518-	drm_panel_unprepare(lvds->panel);
30519+	if (lvds->panel)
30520+		panel_simple_loader_protect(lvds->panel);
30521 }
30522 
30523 static const
30524-struct drm_encoder_helper_funcs rk3288_lvds_encoder_helper_funcs = {
30525-	.enable = rk3288_lvds_encoder_enable,
30526-	.disable = rk3288_lvds_encoder_disable,
30527+struct drm_encoder_helper_funcs rockchip_lvds_encoder_helper_funcs = {
30528+	.enable = rockchip_lvds_encoder_enable,
30529+	.disable = rockchip_lvds_encoder_disable,
30530 	.atomic_check = rockchip_lvds_encoder_atomic_check,
30531+	.atomic_mode_set = rockchip_lvds_encoder_atomic_mode_set,
30532 };
30533 
30534-static const
30535-struct drm_encoder_helper_funcs px30_lvds_encoder_helper_funcs = {
30536-	.enable = px30_lvds_encoder_enable,
30537-	.disable = px30_lvds_encoder_disable,
30538-	.atomic_check = rockchip_lvds_encoder_atomic_check,
30539+static const struct drm_encoder_funcs rockchip_lvds_encoder_funcs = {
30540+	.destroy = drm_encoder_cleanup,
30541 };
30542 
30543-static int rk3288_lvds_probe(struct platform_device *pdev,
30544-			     struct rockchip_lvds *lvds)
30545+static int rockchip_lvds_match_by_id(struct device *dev, const void *data)
30546 {
30547-	struct resource *res;
30548-	int ret;
30549-
30550-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
30551-	lvds->regs = devm_ioremap_resource(lvds->dev, res);
30552-	if (IS_ERR(lvds->regs))
30553-		return PTR_ERR(lvds->regs);
30554-
30555-	lvds->pclk = devm_clk_get(lvds->dev, "pclk_lvds");
30556-	if (IS_ERR(lvds->pclk)) {
30557-		DRM_DEV_ERROR(lvds->dev, "could not get pclk_lvds\n");
30558-		return PTR_ERR(lvds->pclk);
30559-	}
30560-
30561-	lvds->pins = devm_kzalloc(lvds->dev, sizeof(*lvds->pins),
30562-				  GFP_KERNEL);
30563-	if (!lvds->pins)
30564-		return -ENOMEM;
30565-
30566-	lvds->pins->p = devm_pinctrl_get(lvds->dev);
30567-	if (IS_ERR(lvds->pins->p)) {
30568-		DRM_DEV_ERROR(lvds->dev, "no pinctrl handle\n");
30569-		devm_kfree(lvds->dev, lvds->pins);
30570-		lvds->pins = NULL;
30571-	} else {
30572-		lvds->pins->default_state =
30573-			pinctrl_lookup_state(lvds->pins->p, "lcdc");
30574-		if (IS_ERR(lvds->pins->default_state)) {
30575-			DRM_DEV_ERROR(lvds->dev, "no default pinctrl state\n");
30576-			devm_kfree(lvds->dev, lvds->pins);
30577-			lvds->pins = NULL;
30578-		}
30579-	}
30580-
30581-	ret = clk_prepare(lvds->pclk);
30582-	if (ret < 0) {
30583-		DRM_DEV_ERROR(lvds->dev, "failed to prepare pclk_lvds\n");
30584-		return ret;
30585-	}
30586+	struct rockchip_lvds *lvds = dev_get_drvdata(dev);
30587+	unsigned int *id = (unsigned int *)data;
30588 
30589-	return 0;
30590+	return lvds->id == *id;
30591 }
30592 
30593-static int px30_lvds_probe(struct platform_device *pdev,
30594-			   struct rockchip_lvds *lvds)
30595+static struct rockchip_lvds *rockchip_lvds_find_by_id(struct device_driver *drv,
30596+						      unsigned int id)
30597 {
30598-	int ret;
30599-
30600-	/* MSB */
30601-	ret =  regmap_update_bits(lvds->grf, PX30_LVDS_GRF_PD_VO_CON1,
30602-				  PX30_LVDS_MSBSEL(1),
30603-				  PX30_LVDS_MSBSEL(1));
30604-	if (ret)
30605-		return ret;
30606-
30607-	/* PHY */
30608-	lvds->dphy = devm_phy_get(&pdev->dev, "dphy");
30609-	if (IS_ERR(lvds->dphy))
30610-		return PTR_ERR(lvds->dphy);
30611-
30612-	ret = phy_init(lvds->dphy);
30613-	if (ret)
30614-		return ret;
30615+	struct device *dev;
30616 
30617-	ret = phy_set_mode(lvds->dphy, PHY_MODE_LVDS);
30618-	if (ret)
30619-		return ret;
30620+	dev = driver_find_device(drv, NULL, &id, rockchip_lvds_match_by_id);
30621+	if (!dev)
30622+		return NULL;
30623 
30624-	return phy_power_on(lvds->dphy);
30625+	return dev_get_drvdata(dev);
30626 }
30627 
30628-static const struct rockchip_lvds_soc_data rk3288_lvds_data = {
30629-	.probe = rk3288_lvds_probe,
30630-	.helper_funcs = &rk3288_lvds_encoder_helper_funcs,
30631-};
30632-
30633-static const struct rockchip_lvds_soc_data px30_lvds_data = {
30634-	.probe = px30_lvds_probe,
30635-	.helper_funcs = &px30_lvds_encoder_helper_funcs,
30636-};
30637-
30638-static const struct of_device_id rockchip_lvds_dt_ids[] = {
30639-	{
30640-		.compatible = "rockchip,rk3288-lvds",
30641-		.data = &rk3288_lvds_data
30642-	},
30643-	{
30644-		.compatible = "rockchip,px30-lvds",
30645-		.data = &px30_lvds_data
30646-	},
30647-	{}
30648-};
30649-MODULE_DEVICE_TABLE(of, rockchip_lvds_dt_ids);
30650-
30651 static int rockchip_lvds_bind(struct device *dev, struct device *master,
30652 			      void *data)
30653 {
30654 	struct rockchip_lvds *lvds = dev_get_drvdata(dev);
30655 	struct drm_device *drm_dev = data;
30656-	struct drm_encoder *encoder;
30657-	struct drm_connector *connector;
30658-	struct device_node *remote = NULL;
30659-	struct device_node  *port, *endpoint;
30660-	int ret = 0, child_count = 0;
30661-	const char *name;
30662-	u32 endpoint_id = 0;
30663-
30664-	lvds->drm_dev = drm_dev;
30665-	port = of_graph_get_port_by_id(dev->of_node, 1);
30666-	if (!port) {
30667-		DRM_DEV_ERROR(dev,
30668-			      "can't found port point, please init lvds panel port!\n");
30669-		return -EINVAL;
30670-	}
30671-	for_each_child_of_node(port, endpoint) {
30672-		child_count++;
30673-		of_property_read_u32(endpoint, "reg", &endpoint_id);
30674-		ret = drm_of_find_panel_or_bridge(dev->of_node, 1, endpoint_id,
30675-						  &lvds->panel, &lvds->bridge);
30676-		if (!ret) {
30677-			of_node_put(endpoint);
30678-			break;
30679-		}
30680-	}
30681-	if (!child_count) {
30682-		DRM_DEV_ERROR(dev, "lvds port does not have any children\n");
30683-		ret = -EINVAL;
30684-		goto err_put_port;
30685-	} else if (ret) {
30686-		DRM_DEV_ERROR(dev, "failed to find panel and bridge node\n");
30687-		ret = -EPROBE_DEFER;
30688-		goto err_put_port;
30689-	}
30690-	if (lvds->panel)
30691-		remote = lvds->panel->dev->of_node;
30692-	else
30693-		remote = lvds->bridge->of_node;
30694-	if (of_property_read_string(dev->of_node, "rockchip,output", &name))
30695-		/* default set it as output rgb */
30696-		lvds->output = DISPLAY_OUTPUT_RGB;
30697-	else
30698-		lvds->output = rockchip_lvds_name_to_output(name);
30699-
30700-	if (lvds->output < 0) {
30701-		DRM_DEV_ERROR(dev, "invalid output type [%s]\n", name);
30702-		ret = lvds->output;
30703-		goto err_put_remote;
30704-	}
30705+	struct drm_encoder *encoder = &lvds->encoder;
30706+	struct drm_connector *connector = &lvds->connector;
30707+	int ret;
30708 
30709-	if (of_property_read_string(remote, "data-mapping", &name))
30710-		/* default set it as format vesa 18 */
30711-		lvds->format = LVDS_VESA_18;
30712-	else
30713-		lvds->format = rockchip_lvds_name_to_format(name);
30714+	/*
30715+	 * Dual-channel LVDS mode only needs to register one connector.
30716+	 */
30717+	if (lvds->primary)
30718+		return 0;
30719 
30720-	if (lvds->format < 0) {
30721-		DRM_DEV_ERROR(dev, "invalid data-mapping format [%s]\n", name);
30722-		ret = lvds->format;
30723-		goto err_put_remote;
30724-	}
30725+	ret = drm_of_find_panel_or_bridge(dev->of_node, 1, -1,
30726+					  &lvds->panel, &lvds->bridge);
30727+	if (ret)
30728+		return ret;
30729 
30730-	encoder = &lvds->encoder;
30731-	encoder->possible_crtcs = drm_of_find_possible_crtcs(drm_dev,
30732-							     dev->of_node);
30733+	encoder->possible_crtcs = rockchip_drm_of_find_possible_crtcs(drm_dev,
30734+								      dev->of_node);
30735 
30736-	ret = drm_simple_encoder_init(drm_dev, encoder, DRM_MODE_ENCODER_LVDS);
30737+	ret = drm_encoder_init(drm_dev, encoder, &rockchip_lvds_encoder_funcs,
30738+			       DRM_MODE_ENCODER_LVDS, NULL);
30739 	if (ret < 0) {
30740-		DRM_DEV_ERROR(drm_dev->dev,
30741+		DRM_DEV_ERROR(lvds->dev,
30742 			      "failed to initialize encoder: %d\n", ret);
30743-		goto err_put_remote;
30744+		return ret;
30745 	}
30746 
30747-	drm_encoder_helper_add(encoder, lvds->soc_data->helper_funcs);
30748+	drm_encoder_helper_add(encoder, &rockchip_lvds_encoder_helper_funcs);
30749 
30750 	if (lvds->panel) {
30751-		connector = &lvds->connector;
30752-		connector->dpms = DRM_MODE_DPMS_OFF;
30753+		struct rockchip_drm_private *private = drm_dev->dev_private;
30754+
30755 		ret = drm_connector_init(drm_dev, connector,
30756 					 &rockchip_lvds_connector_funcs,
30757 					 DRM_MODE_CONNECTOR_LVDS);
30758@@ -634,34 +409,31 @@ static int rockchip_lvds_bind(struct device *dev, struct device *master,
30759 
30760 		ret = drm_connector_attach_encoder(connector, encoder);
30761 		if (ret < 0) {
30762-			DRM_DEV_ERROR(drm_dev->dev,
30763+			DRM_DEV_ERROR(lvds->dev,
30764 				      "failed to attach encoder: %d\n", ret);
30765 			goto err_free_connector;
30766 		}
30767+
30768+		lvds->sub_dev.connector = &lvds->connector;
30769+		lvds->sub_dev.of_node = lvds->dev->of_node;
30770+		lvds->sub_dev.loader_protect = rockchip_lvds_encoder_loader_protect;
30771+		rockchip_drm_register_sub_dev(&lvds->sub_dev);
30772+		drm_object_attach_property(&connector->base, private->connector_id_prop, 0);
30773 	} else {
30774 		ret = drm_bridge_attach(encoder, lvds->bridge, NULL, 0);
30775 		if (ret) {
30776-			DRM_DEV_ERROR(drm_dev->dev,
30777+			DRM_DEV_ERROR(lvds->dev,
30778 				      "failed to attach bridge: %d\n", ret);
30779 			goto err_free_encoder;
30780 		}
30781 	}
30782 
30783-	pm_runtime_enable(dev);
30784-	of_node_put(remote);
30785-	of_node_put(port);
30786-
30787 	return 0;
30788 
30789 err_free_connector:
30790 	drm_connector_cleanup(connector);
30791 err_free_encoder:
30792 	drm_encoder_cleanup(encoder);
30793-err_put_remote:
30794-	of_node_put(remote);
30795-err_put_port:
30796-	of_node_put(port);
30797-
30798 	return ret;
30799 }
30800 
30801@@ -669,13 +441,14 @@ static void rockchip_lvds_unbind(struct device *dev, struct device *master,
30802 				void *data)
30803 {
30804 	struct rockchip_lvds *lvds = dev_get_drvdata(dev);
30805-	const struct drm_encoder_helper_funcs *encoder_funcs;
30806 
30807-	encoder_funcs = lvds->soc_data->helper_funcs;
30808-	encoder_funcs->disable(&lvds->encoder);
30809-	pm_runtime_disable(dev);
30810-	drm_connector_cleanup(&lvds->connector);
30811-	drm_encoder_cleanup(&lvds->encoder);
30812+	if (lvds->sub_dev.connector)
30813+		rockchip_drm_unregister_sub_dev(&lvds->sub_dev);
30814+	if (lvds->panel)
30815+		drm_connector_cleanup(&lvds->connector);
30816+
30817+	if (lvds->encoder.dev)
30818+		drm_encoder_cleanup(&lvds->encoder);
30819 }
30820 
30821 static const struct component_ops rockchip_lvds_component_ops = {
30822@@ -687,56 +460,219 @@ static int rockchip_lvds_probe(struct platform_device *pdev)
30823 {
30824 	struct device *dev = &pdev->dev;
30825 	struct rockchip_lvds *lvds;
30826-	const struct of_device_id *match;
30827 	int ret;
30828 
30829 	if (!dev->of_node)
30830 		return -ENODEV;
30831 
30832-	lvds = devm_kzalloc(&pdev->dev, sizeof(*lvds), GFP_KERNEL);
30833+	lvds = devm_kzalloc(dev, sizeof(*lvds), GFP_KERNEL);
30834 	if (!lvds)
30835 		return -ENOMEM;
30836 
30837+	lvds->id = of_alias_get_id(dev->of_node, "lvds");
30838+	if (lvds->id < 0)
30839+		lvds->id = 0;
30840+
30841 	lvds->dev = dev;
30842-	match = of_match_node(rockchip_lvds_dt_ids, dev->of_node);
30843-	if (!match)
30844-		return -ENODEV;
30845-	lvds->soc_data = match->data;
30846+	lvds->funcs = of_device_get_match_data(dev);
30847+	platform_set_drvdata(pdev, lvds);
30848+
30849+	lvds->dual_channel = of_property_read_bool(dev->of_node,
30850+						   "dual-channel");
30851+	lvds->data_swap = of_property_read_bool(dev->of_node,
30852+						"rockchip,data-swap");
30853+
30854+	lvds->phy = devm_phy_get(dev, "phy");
30855+	if (IS_ERR(lvds->phy)) {
30856+		ret = PTR_ERR(lvds->phy);
30857+		DRM_DEV_ERROR(dev, "failed to get phy: %d\n", ret);
30858+		return ret;
30859+	}
30860 
30861-	lvds->grf = syscon_regmap_lookup_by_phandle(dev->of_node,
30862-						    "rockchip,grf");
30863+	lvds->grf = syscon_node_to_regmap(dev->parent->of_node);
30864 	if (IS_ERR(lvds->grf)) {
30865-		DRM_DEV_ERROR(dev, "missing rockchip,grf property\n");
30866-		return PTR_ERR(lvds->grf);
30867+		ret = PTR_ERR(lvds->grf);
30868+		DRM_DEV_ERROR(dev, "Unable to get grf: %d\n", ret);
30869+		return ret;
30870 	}
30871 
30872-	ret = lvds->soc_data->probe(pdev, lvds);
30873-	if (ret) {
30874-		DRM_DEV_ERROR(dev, "Platform initialization failed\n");
30875-		return ret;
30876+	lvds->pixel_order = -1;
30877+	if (lvds->funcs->probe) {
30878+		ret = lvds->funcs->probe(lvds);
30879+		if (ret)
30880+			return ret;
30881 	}
30882 
30883-	dev_set_drvdata(dev, lvds);
30884+	return component_add(dev, &rockchip_lvds_component_ops);
30885+}
30886 
30887-	ret = component_add(&pdev->dev, &rockchip_lvds_component_ops);
30888-	if (ret < 0) {
30889-		DRM_DEV_ERROR(dev, "failed to add component\n");
30890-		clk_unprepare(lvds->pclk);
30891+static int rockchip_lvds_remove(struct platform_device *pdev)
30892+{
30893+	component_del(&pdev->dev, &rockchip_lvds_component_ops);
30894+
30895+	return 0;
30896+}
30897+
30898+static void px30_lvds_enable(struct rockchip_lvds *lvds)
30899+{
30900+	int pipe = drm_of_encoder_active_endpoint_id(lvds->dev->of_node,
30901+						     &lvds->encoder);
30902+
30903+	regmap_write(lvds->grf, PX30_GRF_PD_VO_CON1,
30904+		     PX30_LVDS_SELECT(lvds->format) |
30905+		     PX30_LVDS_MODE_EN(1) | PX30_LVDS_MSBSEL(1) |
30906+		     PX30_LVDS_P2S_EN(1) | PX30_LVDS_VOP_SEL(pipe));
30907+}
30908+
30909+static void px30_lvds_disable(struct rockchip_lvds *lvds)
30910+{
30911+	regmap_write(lvds->grf, PX30_GRF_PD_VO_CON1,
30912+		     PX30_LVDS_MODE_EN(0) | PX30_LVDS_P2S_EN(0));
30913+}
30914+
30915+static const struct rockchip_lvds_funcs px30_lvds_funcs = {
30916+	.enable = px30_lvds_enable,
30917+	.disable = px30_lvds_disable,
30918+};
30919+
30920+static void rk3126_lvds_enable(struct rockchip_lvds *lvds)
30921+{
30922+	regmap_write(lvds->grf, RK3126_GRF_LVDS_CON0,
30923+		     RK3126_LVDS_P2S_EN(1) | RK3126_LVDS_MODE_EN(1) |
30924+		     RK3126_LVDS_MSBSEL(1) | RK3126_LVDS_SELECT(lvds->format));
30925+}
30926+
30927+static void rk3126_lvds_disable(struct rockchip_lvds *lvds)
30928+{
30929+	regmap_write(lvds->grf, RK3126_GRF_LVDS_CON0,
30930+		     RK3126_LVDS_P2S_EN(0) | RK3126_LVDS_MODE_EN(0));
30931+}
30932+
30933+static const struct rockchip_lvds_funcs rk3126_lvds_funcs = {
30934+	.enable = rk3126_lvds_enable,
30935+	.disable = rk3126_lvds_disable,
30936+};
30937+
30938+static void rk3288_lvds_enable(struct rockchip_lvds *lvds)
30939+{
30940+	struct drm_display_mode *mode = &lvds->mode;
30941+	int pipe;
30942+	u32 val;
30943+
30944+	pipe = drm_of_encoder_active_endpoint_id(lvds->dev->of_node,
30945+						 &lvds->encoder);
30946+	regmap_write(lvds->grf, RK3288_GRF_SOC_CON6,
30947+		     RK3288_LVDS_LCDC_SEL(pipe));
30948+
30949+	val = RK3288_LVDS_PWRDWN(0) | RK3288_LVDS_CON_CLKINV(0) |
30950+	      RK3288_LVDS_CON_CHASEL(lvds->dual_channel) |
30951+	      RK3288_LVDS_CON_SELECT(lvds->format);
30952+
30953+	if (lvds->dual_channel) {
30954+		u32 h_bp = mode->htotal - mode->hsync_start;
30955+
30956+		val |= RK3288_LVDS_CON_ENABLE_2(1) |
30957+		       RK3288_LVDS_CON_ENABLE_1(1) |
30958+		       RK3288_LVDS_CON_STARTSEL(lvds->data_swap);
30959+
30960+		if (h_bp % 2)
30961+			val |= RK3288_LVDS_CON_STARTPHASE(1);
30962+		else
30963+			val |= RK3288_LVDS_CON_STARTPHASE(0);
30964+
30965+	} else {
30966+		val |= RK3288_LVDS_CON_ENABLE_2(0) |
30967+		       RK3288_LVDS_CON_ENABLE_1(1);
30968 	}
30969 
30970-	return ret;
30971+	regmap_write(lvds->grf, RK3288_GRF_SOC_CON7, val);
30972+
30973+	phy_set_bus_width(lvds->phy, lvds->dual_channel ? 2 : 1);
30974 }
30975 
30976-static int rockchip_lvds_remove(struct platform_device *pdev)
30977+static void rk3288_lvds_disable(struct rockchip_lvds *lvds)
30978 {
30979-	struct rockchip_lvds *lvds = dev_get_drvdata(&pdev->dev);
30980+	regmap_write(lvds->grf, RK3288_GRF_SOC_CON7, RK3288_LVDS_PWRDWN(1));
30981+}
30982 
30983-	component_del(&pdev->dev, &rockchip_lvds_component_ops);
30984-	clk_unprepare(lvds->pclk);
30985+static const struct rockchip_lvds_funcs rk3288_lvds_funcs = {
30986+	.enable = rk3288_lvds_enable,
30987+	.disable = rk3288_lvds_disable,
30988+};
30989+
30990+static void rk3368_lvds_enable(struct rockchip_lvds *lvds)
30991+{
30992+	regmap_write(lvds->grf, RK3368_GRF_SOC_CON7,
30993+		     RK3368_LVDS_SELECT(lvds->format) |
30994+		     RK3368_LVDS_MODE_EN(1) | RK3368_LVDS_MSBSEL(1) |
30995+		     RK3368_LVDS_P2S_EN(1));
30996+}
30997+
30998+static void rk3368_lvds_disable(struct rockchip_lvds *lvds)
30999+{
31000+	regmap_write(lvds->grf, RK3368_GRF_SOC_CON7,
31001+		     RK3368_LVDS_MODE_EN(0) | RK3368_LVDS_P2S_EN(0));
31002+}
31003+
31004+static const struct rockchip_lvds_funcs rk3368_lvds_funcs = {
31005+	.enable = rk3368_lvds_enable,
31006+	.disable = rk3368_lvds_disable,
31007+};
31008+
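+/*
+ * For dual-channel panels the instance running this probe becomes the
+ * primary channel; it looks up the LVDS instance with id 1 as secondary and
+ * derives the dual-link pixel order from the two port 1 OF graph nodes.
+ */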
31009+static int __maybe_unused rockchip_secondary_lvds_probe(struct rockchip_lvds *lvds)
31010+{
31011+	if (lvds->dual_channel) {
31012+		struct rockchip_lvds *secondary = NULL;
31013+		struct device_node *port0, *port1;
31014+		int pixel_order;
31015+
31016+		secondary = rockchip_lvds_find_by_id(lvds->dev->driver, 1);
31017+		if (!secondary)
31018+			return -EPROBE_DEFER;
31019+
31020+		port0 = of_graph_get_port_by_id(lvds->dev->of_node, 1);
31021+		port1 = of_graph_get_port_by_id(secondary->dev->of_node, 1);
31022+		pixel_order = drm_of_lvds_get_dual_link_pixel_order(port0, port1);
31023+		of_node_put(port1);
31024+		of_node_put(port0);
31025+
31026+		secondary->primary = lvds;
31027+		lvds->secondary = secondary;
31028+		lvds->pixel_order = pixel_order >= 0 ? pixel_order : 0;
31029+	}
31030 
31031 	return 0;
31032 }
31033 
31034+static void rk3568_lvds_enable(struct rockchip_lvds *lvds)
31035+{
31036+	regmap_write(lvds->grf, RK3568_GRF_VO_CON2,
31037+		     RK3568_LVDS0_MODE_EN(1) | RK3568_LVDS0_P2S_EN(1) |
31038+		     RK3568_LVDS0_DCLK_INV_SEL(1));
31039+	regmap_write(lvds->grf, RK3568_GRF_VO_CON0,
31040+		     RK3568_LVDS0_SELECT(lvds->format) | RK3568_LVDS0_MSBSEL(1));
31041+}
31042+
31043+static void rk3568_lvds_disable(struct rockchip_lvds *lvds)
31044+{
31045+	regmap_write(lvds->grf, RK3568_GRF_VO_CON2, RK3568_LVDS0_MODE_EN(0));
31046+}
31047+
31048+static const struct rockchip_lvds_funcs rk3568_lvds_funcs = {
31049+	.enable = rk3568_lvds_enable,
31050+	.disable = rk3568_lvds_disable,
31051+};
31052+
31053+static const struct of_device_id rockchip_lvds_dt_ids[] = {
31054+	{ .compatible = "rockchip,px30-lvds", .data = &px30_lvds_funcs },
31055+	{ .compatible = "rockchip,rk3126-lvds", .data = &rk3126_lvds_funcs },
31056+	{ .compatible = "rockchip,rk3288-lvds", .data = &rk3288_lvds_funcs },
31057+	{ .compatible = "rockchip,rk3368-lvds", .data = &rk3368_lvds_funcs },
31058+	{ .compatible = "rockchip,rk3568-lvds", .data = &rk3568_lvds_funcs },
31059+	{}
31060+};
31061+MODULE_DEVICE_TABLE(of, rockchip_lvds_dt_ids);
31062+
31063 struct platform_driver rockchip_lvds_driver = {
31064 	.probe = rockchip_lvds_probe,
31065 	.remove = rockchip_lvds_remove,
31066diff --git a/drivers/gpu/drm/rockchip/rockchip_rgb.c b/drivers/gpu/drm/rockchip/rockchip_rgb.c
31067index 9a771af5d..654de19f7 100644
31068--- a/drivers/gpu/drm/rockchip/rockchip_rgb.c
31069+++ b/drivers/gpu/drm/rockchip/rockchip_rgb.c
31070@@ -6,29 +6,172 @@
31071  */
31072 
31073 #include <linux/component.h>
31074+#include <linux/of_device.h>
31075 #include <linux/of_graph.h>
31076+#include <linux/regmap.h>
31077+#include <linux/mfd/syscon.h>
31078+#include <linux/phy/phy.h>
31079+#include <linux/pinctrl/consumer.h>
31080 
31081 #include <drm/drm_atomic_helper.h>
31082-#include <drm/drm_bridge.h>
31083+#include <drm/drm_crtc_helper.h>
31084 #include <drm/drm_dp_helper.h>
31085 #include <drm/drm_of.h>
31086 #include <drm/drm_panel.h>
31087 #include <drm/drm_probe_helper.h>
31088-#include <drm/drm_simple_kms_helper.h>
31089+
31090+#include <uapi/linux/videodev2.h>
31091 
31092 #include "rockchip_drm_drv.h"
31093 #include "rockchip_drm_vop.h"
31094 
31095-#define encoder_to_rgb(c) container_of(c, struct rockchip_rgb, encoder)
31096+#define HIWORD_UPDATE(v, l, h)	(((v) << (l)) | (GENMASK(h, l) << 16))
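+/*
+ * Same GRF high-word write-mask scheme as in rockchip_lvds.c, but note the
+ * (v, l, h) parameter order here instead of (v, h, l).
+ */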
31097+
31098+#define PX30_GRF_PD_VO_CON1		0x0438
31099+#define PX30_RGB_DATA_SYNC_BYPASS(v)	HIWORD_UPDATE(v, 3, 3)
31100+#define PX30_RGB_VOP_SEL(v)		HIWORD_UPDATE(v, 2, 2)
31101+
31102+#define RK1808_GRF_PD_VO_CON1		0x0444
31103+#define RK1808_RGB_DATA_SYNC_BYPASS(v)	HIWORD_UPDATE(v, 3, 3)
31104+
31105+#define RV1126_GRF_IOFUNC_CON3		0x1026c
31106+#define RV1126_LCDC_IO_BYPASS(v)	HIWORD_UPDATE(v, 0, 0)
31107+
31108+#define RK3288_GRF_SOC_CON6		0x025c
31109+#define RK3288_LVDS_LCDC_SEL(x)		HIWORD_UPDATE(x,  3,  3)
31110+#define RK3288_GRF_SOC_CON7		0x0260
31111+#define RK3288_LVDS_PWRDWN(x)		HIWORD_UPDATE(x, 15, 15)
31112+#define RK3288_LVDS_CON_ENABLE_2(x)	HIWORD_UPDATE(x, 12, 12)
31113+#define RK3288_LVDS_CON_ENABLE_1(x)	HIWORD_UPDATE(x, 11, 11)
31114+#define RK3288_LVDS_CON_CLKINV(x)	HIWORD_UPDATE(x,  8,  8)
31115+#define RK3288_LVDS_CON_TTL_EN(x)	HIWORD_UPDATE(x,  6,  6)
31116+
31117+#define RK3568_GRF_VO_CON1		0x0364
31118+#define RK3568_RGB_DATA_BYPASS(v)	HIWORD_UPDATE(v, 6, 6)
31119+
31120+struct rockchip_rgb;
31121+
31122+struct rockchip_rgb_funcs {
31123+	void (*enable)(struct rockchip_rgb *rgb);
31124+	void (*disable)(struct rockchip_rgb *rgb);
31125+};
31126 
31127 struct rockchip_rgb {
31128+	u8 id;
31129 	struct device *dev;
31130-	struct drm_device *drm_dev;
31131+	struct drm_panel *panel;
31132 	struct drm_bridge *bridge;
31133+	struct drm_connector connector;
31134 	struct drm_encoder encoder;
31135-	int output_mode;
31136+	struct phy *phy;
31137+	struct regmap *grf;
31138+	bool data_sync_bypass;
31139+	const struct rockchip_rgb_funcs *funcs;
31140+	struct rockchip_drm_sub_dev sub_dev;
31141 };
31142 
31143+static inline struct rockchip_rgb *connector_to_rgb(struct drm_connector *c)
31144+{
31145+	return container_of(c, struct rockchip_rgb, connector);
31146+}
31147+
31148+static inline struct rockchip_rgb *encoder_to_rgb(struct drm_encoder *e)
31149+{
31150+	return container_of(e, struct rockchip_rgb, encoder);
31151+}
31152+
31153+static enum drm_connector_status
31154+rockchip_rgb_connector_detect(struct drm_connector *connector, bool force)
31155+{
31156+	return connector_status_connected;
31157+}
31158+
31159+static int
31160+rockchip_rgb_atomic_connector_get_property(struct drm_connector *connector,
31161+					   const struct drm_connector_state *state,
31162+					   struct drm_property *property,
31163+					   uint64_t *val)
31164+{
31165+	struct rockchip_rgb *rgb = connector_to_rgb(connector);
31166+	struct rockchip_drm_private *private = connector->dev->dev_private;
31167+
31168+	if (property == private->connector_id_prop) {
31169+		*val = rgb->id;
31170+		return 0;
31171+	}
31172+
31173+	DRM_ERROR("failed to get rockchip RGB property\n");
31174+	return -EINVAL;
31175+}
31176+
31177+static const struct drm_connector_funcs rockchip_rgb_connector_funcs = {
31178+	.detect = rockchip_rgb_connector_detect,
31179+	.fill_modes = drm_helper_probe_single_connector_modes,
31180+	.destroy = drm_connector_cleanup,
31181+	.reset = drm_atomic_helper_connector_reset,
31182+	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
31183+	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
31184+	.atomic_get_property = rockchip_rgb_atomic_connector_get_property,
31185+};
31186+
31187+static int rockchip_rgb_connector_get_modes(struct drm_connector *connector)
31188+{
31189+	struct rockchip_rgb *rgb = connector_to_rgb(connector);
31190+	struct drm_panel *panel = rgb->panel;
31191+
31192+	return drm_panel_get_modes(panel, connector);
31193+}
31194+
31195+static struct drm_encoder *
31196+rockchip_rgb_connector_best_encoder(struct drm_connector *connector)
31197+{
31198+	struct rockchip_rgb *rgb = connector_to_rgb(connector);
31199+
31200+	return &rgb->encoder;
31201+}
31202+
31203+static const
31204+struct drm_connector_helper_funcs rockchip_rgb_connector_helper_funcs = {
31205+	.get_modes = rockchip_rgb_connector_get_modes,
31206+	.best_encoder = rockchip_rgb_connector_best_encoder,
31207+};
31208+
31209+static void rockchip_rgb_encoder_enable(struct drm_encoder *encoder)
31210+{
31211+	struct rockchip_rgb *rgb = encoder_to_rgb(encoder);
31212+
31213+	pinctrl_pm_select_default_state(rgb->dev);
31214+
31215+	if (rgb->funcs && rgb->funcs->enable)
31216+		rgb->funcs->enable(rgb);
31217+
31218+	if (rgb->phy)
31219+		phy_power_on(rgb->phy);
31220+
31221+	if (rgb->panel) {
31222+		drm_panel_prepare(rgb->panel);
31223+		drm_panel_enable(rgb->panel);
31224+	}
31225+}
31226+
31227+static void rockchip_rgb_encoder_disable(struct drm_encoder *encoder)
31228+{
31229+	struct rockchip_rgb *rgb = encoder_to_rgb(encoder);
31230+
31231+	if (rgb->panel) {
31232+		drm_panel_disable(rgb->panel);
31233+		drm_panel_unprepare(rgb->panel);
31234+	}
31235+
31236+	if (rgb->phy)
31237+		phy_power_off(rgb->phy);
31238+
31239+	if (rgb->funcs && rgb->funcs->disable)
31240+		rgb->funcs->disable(rgb);
31241+
31242+	pinctrl_pm_select_sleep_state(rgb->dev);
31243+}
31244+
31245 static int
31246 rockchip_rgb_encoder_atomic_check(struct drm_encoder *encoder,
31247 				   struct drm_crtc_state *crtc_state,
31248@@ -37,128 +180,310 @@ rockchip_rgb_encoder_atomic_check(struct drm_encoder *encoder,
31249 	struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state);
31250 	struct drm_connector *connector = conn_state->connector;
31251 	struct drm_display_info *info = &connector->display_info;
31252-	u32 bus_format;
31253 
31254 	if (info->num_bus_formats)
31255-		bus_format = info->bus_formats[0];
31256+		s->bus_format = info->bus_formats[0];
31257 	else
31258-		bus_format = MEDIA_BUS_FMT_RGB888_1X24;
31259+		s->bus_format = MEDIA_BUS_FMT_RGB888_1X24;
31260 
31261-	switch (bus_format) {
31262+	switch (s->bus_format) {
31263 	case MEDIA_BUS_FMT_RGB666_1X18:
31264 		s->output_mode = ROCKCHIP_OUT_MODE_P666;
31265+		s->output_if = VOP_OUTPUT_IF_RGB;
31266 		break;
31267 	case MEDIA_BUS_FMT_RGB565_1X16:
31268 		s->output_mode = ROCKCHIP_OUT_MODE_P565;
31269+		s->output_if = VOP_OUTPUT_IF_RGB;
31270+		break;
31271+	case MEDIA_BUS_FMT_RGB888_3X8:
31272+		s->output_mode = ROCKCHIP_OUT_MODE_S888;
31273+		s->output_if = VOP_OUTPUT_IF_RGB;
31274+		break;
31275+	case MEDIA_BUS_FMT_RGB888_DUMMY_4X8:
31276+		s->output_mode = ROCKCHIP_OUT_MODE_S888_DUMMY;
31277+		s->output_if = VOP_OUTPUT_IF_RGB;
31278+		break;
31279+	case MEDIA_BUS_FMT_YUYV8_2X8:
31280+	case MEDIA_BUS_FMT_YVYU8_2X8:
31281+	case MEDIA_BUS_FMT_UYVY8_2X8:
31282+	case MEDIA_BUS_FMT_VYUY8_2X8:
31283+		s->output_mode = ROCKCHIP_OUT_MODE_BT656;
31284+		s->output_if = VOP_OUTPUT_IF_BT656;
31285+		break;
31286+	case MEDIA_BUS_FMT_YUYV8_1X16:
31287+	case MEDIA_BUS_FMT_YVYU8_1X16:
31288+	case MEDIA_BUS_FMT_UYVY8_1X16:
31289+	case MEDIA_BUS_FMT_VYUY8_1X16:
31290+		s->output_mode = ROCKCHIP_OUT_MODE_BT1120;
31291+		s->output_if = VOP_OUTPUT_IF_BT1120;
31292 		break;
31293 	case MEDIA_BUS_FMT_RGB888_1X24:
31294 	case MEDIA_BUS_FMT_RGB666_1X24_CPADHI:
31295 	default:
31296 		s->output_mode = ROCKCHIP_OUT_MODE_P888;
31297+		s->output_if = VOP_OUTPUT_IF_RGB;
31298 		break;
31299 	}
31300 
31301-	s->output_type = DRM_MODE_CONNECTOR_LVDS;
31302+	s->output_type = DRM_MODE_CONNECTOR_DPI;
31303+	s->bus_flags = info->bus_flags;
31304+	s->tv_state = &conn_state->tv;
31305+	s->eotf = HDMI_EOTF_TRADITIONAL_GAMMA_SDR;
31306+	s->color_space = V4L2_COLORSPACE_DEFAULT;
31307 
31308 	return 0;
31309 }
31310 
31311+static void rockchip_rgb_encoder_loader_protect(struct drm_encoder *encoder,
31312+						bool on)
31313+{
31314+	struct rockchip_rgb *rgb = encoder_to_rgb(encoder);
31315+
31316+	if (rgb->panel)
31317+		panel_simple_loader_protect(rgb->panel);
31318+}
31319+
31320 static const
31321 struct drm_encoder_helper_funcs rockchip_rgb_encoder_helper_funcs = {
31322+	.enable = rockchip_rgb_encoder_enable,
31323+	.disable = rockchip_rgb_encoder_disable,
31324 	.atomic_check = rockchip_rgb_encoder_atomic_check,
31325 };
31326 
31327-struct rockchip_rgb *rockchip_rgb_init(struct device *dev,
31328-				       struct drm_crtc *crtc,
31329-				       struct drm_device *drm_dev)
31330+static const struct drm_encoder_funcs rockchip_rgb_encoder_funcs = {
31331+	.destroy = drm_encoder_cleanup,
31332+};
31333+
31334+static int rockchip_rgb_bind(struct device *dev, struct device *master,
31335+			     void *data)
31336+{
31337+	struct rockchip_rgb *rgb = dev_get_drvdata(dev);
31338+	struct drm_device *drm_dev = data;
31339+	struct drm_encoder *encoder = &rgb->encoder;
31340+	struct drm_connector *connector;
31341+	int ret;
31342+
31343+	ret = drm_of_find_panel_or_bridge(dev->of_node, 1, -1,
31344+					  &rgb->panel, &rgb->bridge);
31345+	if (ret) {
31346+		DRM_DEV_ERROR(dev, "failed to find panel or bridge: %d\n", ret);
31347+		return ret;
31348+	}
31349+
31350+	encoder->possible_crtcs = rockchip_drm_of_find_possible_crtcs(drm_dev,
31351+								      dev->of_node);
31352+
31353+	ret = drm_encoder_init(drm_dev, encoder, &rockchip_rgb_encoder_funcs,
31354+			       DRM_MODE_ENCODER_DPI, NULL);
31355+	if (ret < 0) {
31356+		DRM_DEV_ERROR(dev, "failed to initialize encoder: %d\n", ret);
31357+		return ret;
31358+	}
31359+
31360+	drm_encoder_helper_add(encoder, &rockchip_rgb_encoder_helper_funcs);
31361+
31362+	if (rgb->panel) {
31363+		struct rockchip_drm_private *private = drm_dev->dev_private;
31364+
31365+		connector = &rgb->connector;
31366+		connector->interlace_allowed = true;
31367+		ret = drm_connector_init(drm_dev, connector,
31368+					 &rockchip_rgb_connector_funcs,
31369+					 DRM_MODE_CONNECTOR_DPI);
31370+		if (ret < 0) {
31371+			DRM_DEV_ERROR(dev,
31372+				      "failed to initialize connector: %d\n",
31373+				      ret);
31374+			goto err_free_encoder;
31375+		}
31376+
31377+		drm_connector_helper_add(connector,
31378+					 &rockchip_rgb_connector_helper_funcs);
31379+
31380+		ret = drm_connector_attach_encoder(connector, encoder);
31381+		if (ret < 0) {
31382+			DRM_DEV_ERROR(dev,
31383+				      "failed to attach encoder: %d\n", ret);
31384+			goto err_free_connector;
31385+		}
31386+		rgb->sub_dev.connector = &rgb->connector;
31387+		rgb->sub_dev.of_node = rgb->dev->of_node;
31388+		rgb->sub_dev.loader_protect = rockchip_rgb_encoder_loader_protect;
31389+		drm_object_attach_property(&connector->base, private->connector_id_prop, 0);
31390+		rockchip_drm_register_sub_dev(&rgb->sub_dev);
31391+	} else {
31392+		rgb->bridge->encoder = encoder;
31393+		ret = drm_bridge_attach(encoder, rgb->bridge, NULL, 0);
31394+		if (ret) {
31395+			DRM_DEV_ERROR(dev,
31396+				      "failed to attach bridge: %d\n", ret);
31397+			goto err_free_encoder;
31398+		}
31399+	}
31400+
31401+	return 0;
31402+
31403+err_free_connector:
31404+	drm_connector_cleanup(connector);
31405+err_free_encoder:
31406+	drm_encoder_cleanup(encoder);
31407+	return ret;
31408+}
31409+
31410+static void rockchip_rgb_unbind(struct device *dev, struct device *master,
31411+				void *data)
31412+{
31413+	struct rockchip_rgb *rgb = dev_get_drvdata(dev);
31414+
31415+	if (rgb->sub_dev.connector)
31416+		rockchip_drm_unregister_sub_dev(&rgb->sub_dev);
31417+	if (rgb->panel)
31418+		drm_connector_cleanup(&rgb->connector);
31419+
31420+	drm_encoder_cleanup(&rgb->encoder);
31421+}
31422+
31423+static const struct component_ops rockchip_rgb_component_ops = {
31424+	.bind = rockchip_rgb_bind,
31425+	.unbind = rockchip_rgb_unbind,
31426+};
31427+
31428+static int rockchip_rgb_probe(struct platform_device *pdev)
31429 {
31430+	struct device *dev = &pdev->dev;
31431 	struct rockchip_rgb *rgb;
31432-	struct drm_encoder *encoder;
31433-	struct device_node *port, *endpoint;
31434-	u32 endpoint_id;
31435-	int ret = 0, child_count = 0;
31436-	struct drm_panel *panel;
31437-	struct drm_bridge *bridge;
31438+	int ret, id;
31439 
31440-	rgb = devm_kzalloc(dev, sizeof(*rgb), GFP_KERNEL);
31441+	rgb = devm_kzalloc(&pdev->dev, sizeof(*rgb), GFP_KERNEL);
31442 	if (!rgb)
31443-		return ERR_PTR(-ENOMEM);
31444+		return -ENOMEM;
31445+
31446+	id = of_alias_get_id(dev->of_node, "rgb");
31447+	if (id < 0)
31448+		id = 0;
31449 
31450+	rgb->id = id;
31451 	rgb->dev = dev;
31452-	rgb->drm_dev = drm_dev;
31453-
31454-	port = of_graph_get_port_by_id(dev->of_node, 0);
31455-	if (!port)
31456-		return ERR_PTR(-EINVAL);
31457-
31458-	for_each_child_of_node(port, endpoint) {
31459-		if (of_property_read_u32(endpoint, "reg", &endpoint_id))
31460-			endpoint_id = 0;
31461-
31462-		/* if subdriver (> 0) or error case (< 0), ignore entry */
31463-		if (rockchip_drm_endpoint_is_subdriver(endpoint) != 0)
31464-			continue;
31465-
31466-		child_count++;
31467-		ret = drm_of_find_panel_or_bridge(dev->of_node, 0, endpoint_id,
31468-						  &panel, &bridge);
31469-		if (!ret) {
31470-			of_node_put(endpoint);
31471-			break;
31472+	rgb->funcs = of_device_get_match_data(dev);
31473+	platform_set_drvdata(pdev, rgb);
31474+
31475+	rgb->data_sync_bypass =
31476+	    of_property_read_bool(dev->of_node, "rockchip,data-sync-bypass");
31477+
31478+	if (dev->parent && dev->parent->of_node) {
31479+		rgb->grf = syscon_node_to_regmap(dev->parent->of_node);
31480+		if (IS_ERR(rgb->grf)) {
31481+			ret = PTR_ERR(rgb->grf);
31482+			dev_err(dev, "Unable to get grf: %d\n", ret);
31483+			return ret;
31484 		}
31485 	}
31486 
31487-	of_node_put(port);
31488+	rgb->phy = devm_phy_optional_get(dev, "phy");
31489+	if (IS_ERR(rgb->phy)) {
31490+		ret = PTR_ERR(rgb->phy);
31491+		dev_err(dev, "failed to get phy: %d\n", ret);
31492+		return ret;
31493+	}
31494 
31495-	/* if the rgb output is not connected to anything, just return */
31496-	if (!child_count)
31497-		return NULL;
31498+	return component_add(dev, &rockchip_rgb_component_ops);
31499+}
31500 
31501-	if (ret < 0) {
31502-		if (ret != -EPROBE_DEFER)
31503-			DRM_DEV_ERROR(dev, "failed to find panel or bridge %d\n", ret);
31504-		return ERR_PTR(ret);
31505-	}
31506+static int rockchip_rgb_remove(struct platform_device *pdev)
31507+{
31508+	component_del(&pdev->dev, &rockchip_rgb_component_ops);
31509 
31510-	encoder = &rgb->encoder;
31511-	encoder->possible_crtcs = drm_crtc_mask(crtc);
31512+	return 0;
31513+}
31514 
31515-	ret = drm_simple_encoder_init(drm_dev, encoder, DRM_MODE_ENCODER_NONE);
31516-	if (ret < 0) {
31517-		DRM_DEV_ERROR(drm_dev->dev,
31518-			      "failed to initialize encoder: %d\n", ret);
31519-		return ERR_PTR(ret);
31520-	}
31521+static void px30_rgb_enable(struct rockchip_rgb *rgb)
31522+{
31523+	int pipe = drm_of_encoder_active_endpoint_id(rgb->dev->of_node,
31524+						     &rgb->encoder);
31525 
31526-	drm_encoder_helper_add(encoder, &rockchip_rgb_encoder_helper_funcs);
31527+	regmap_write(rgb->grf, PX30_GRF_PD_VO_CON1, PX30_RGB_VOP_SEL(pipe) |
31528+		     PX30_RGB_DATA_SYNC_BYPASS(rgb->data_sync_bypass));
31529+}
31530 
31531-	if (panel) {
31532-		bridge = drm_panel_bridge_add_typed(panel,
31533-						    DRM_MODE_CONNECTOR_LVDS);
31534-		if (IS_ERR(bridge))
31535-			return ERR_CAST(bridge);
31536-	}
31537+static const struct rockchip_rgb_funcs px30_rgb_funcs = {
31538+	.enable = px30_rgb_enable,
31539+};
31540 
31541-	rgb->bridge = bridge;
31542+static void rk1808_rgb_enable(struct rockchip_rgb *rgb)
31543+{
31544+	regmap_write(rgb->grf, RK1808_GRF_PD_VO_CON1,
31545+		     RK1808_RGB_DATA_SYNC_BYPASS(rgb->data_sync_bypass));
31546+}
31547 
31548-	ret = drm_bridge_attach(encoder, rgb->bridge, NULL, 0);
31549-	if (ret) {
31550-		DRM_DEV_ERROR(drm_dev->dev,
31551-			      "failed to attach bridge: %d\n", ret);
31552-		goto err_free_encoder;
31553-	}
31554+static const struct rockchip_rgb_funcs rk1808_rgb_funcs = {
31555+	.enable = rk1808_rgb_enable,
31556+};
31557 
31558-	return rgb;
31559+static void rk3288_rgb_enable(struct rockchip_rgb *rgb)
31560+{
31561+	int pipe = drm_of_encoder_active_endpoint_id(rgb->dev->of_node,
31562+						     &rgb->encoder);
31563 
31564-err_free_encoder:
31565-	drm_encoder_cleanup(encoder);
31566-	return ERR_PTR(ret);
31567+	regmap_write(rgb->grf, RK3288_GRF_SOC_CON6, RK3288_LVDS_LCDC_SEL(pipe));
31568+	regmap_write(rgb->grf, RK3288_GRF_SOC_CON7,
31569+		     RK3288_LVDS_PWRDWN(0) | RK3288_LVDS_CON_ENABLE_2(1) |
31570+		     RK3288_LVDS_CON_ENABLE_1(1) | RK3288_LVDS_CON_CLKINV(0) |
31571+		     RK3288_LVDS_CON_TTL_EN(1));
31572 }
31573-EXPORT_SYMBOL_GPL(rockchip_rgb_init);
31574 
31575-void rockchip_rgb_fini(struct rockchip_rgb *rgb)
31576+static void rk3288_rgb_disable(struct rockchip_rgb *rgb)
31577 {
31578-	drm_panel_bridge_remove(rgb->bridge);
31579-	drm_encoder_cleanup(&rgb->encoder);
31580+	regmap_write(rgb->grf, RK3288_GRF_SOC_CON7,
31581+		     RK3288_LVDS_PWRDWN(1) | RK3288_LVDS_CON_ENABLE_2(0) |
31582+		     RK3288_LVDS_CON_ENABLE_1(0) | RK3288_LVDS_CON_TTL_EN(0));
31583 }
31584-EXPORT_SYMBOL_GPL(rockchip_rgb_fini);
31585+
31586+static const struct rockchip_rgb_funcs rk3288_rgb_funcs = {
31587+	.enable = rk3288_rgb_enable,
31588+	.disable = rk3288_rgb_disable,
31589+};
31590+
31591+static void rk3568_rgb_enable(struct rockchip_rgb *rgb)
31592+{
31593+	regmap_write(rgb->grf, RK3568_GRF_VO_CON1,
31594+		     RK3568_RGB_DATA_BYPASS(rgb->data_sync_bypass));
31595+}
31596+
31597+static const struct rockchip_rgb_funcs rk3568_rgb_funcs = {
31598+	.enable = rk3568_rgb_enable,
31599+};
31600+
31601+static void rv1126_rgb_enable(struct rockchip_rgb *rgb)
31602+{
31603+	regmap_write(rgb->grf, RV1126_GRF_IOFUNC_CON3,
31604+		     RV1126_LCDC_IO_BYPASS(rgb->data_sync_bypass));
31605+}
31606+
31607+static const struct rockchip_rgb_funcs rv1126_rgb_funcs = {
31608+	.enable = rv1126_rgb_enable,
31609+};
31610+
31611+static const struct of_device_id rockchip_rgb_dt_ids[] = {
31612+	{ .compatible = "rockchip,px30-rgb", .data = &px30_rgb_funcs },
31613+	{ .compatible = "rockchip,rk1808-rgb", .data = &rk1808_rgb_funcs },
31614+	{ .compatible = "rockchip,rk3066-rgb", },
31615+	{ .compatible = "rockchip,rk3128-rgb", },
31616+	{ .compatible = "rockchip,rk3288-rgb", .data = &rk3288_rgb_funcs },
31617+	{ .compatible = "rockchip,rk3308-rgb", },
31618+	{ .compatible = "rockchip,rk3368-rgb", },
31619+	{ .compatible = "rockchip,rk3568-rgb", .data = &rk3568_rgb_funcs },
31620+	{ .compatible = "rockchip,rk3588-rgb", },
31621+	{ .compatible = "rockchip,rv1108-rgb", },
31622+	{ .compatible = "rockchip,rv1126-rgb", .data = &rv1126_rgb_funcs},
31623+	{}
31624+};
31625+MODULE_DEVICE_TABLE(of, rockchip_rgb_dt_ids);
31626+
31627+struct platform_driver rockchip_rgb_driver = {
31628+	.probe = rockchip_rgb_probe,
31629+	.remove = rockchip_rgb_remove,
31630+	.driver = {
31631+		.name = "rockchip-rgb",
31632+		.of_match_table = of_match_ptr(rockchip_rgb_dt_ids),
31633+	},
31634+};
31635diff --git a/drivers/gpu/drm/rockchip/rockchip_rgb.h b/drivers/gpu/drm/rockchip/rockchip_rgb.h
31636index 27b963512..8973232e0 100644
31637--- a/drivers/gpu/drm/rockchip/rockchip_rgb.h
31638+++ b/drivers/gpu/drm/rockchip/rockchip_rgb.h
31639@@ -8,12 +8,14 @@
31640 #ifdef CONFIG_ROCKCHIP_RGB
31641 struct rockchip_rgb *rockchip_rgb_init(struct device *dev,
31642 				       struct drm_crtc *crtc,
31643-				       struct drm_device *drm_dev);
31644+				       struct drm_device *drm_dev,
31645+				       u32 port_id);
31646 void rockchip_rgb_fini(struct rockchip_rgb *rgb);
31647 #else
31648 static inline struct rockchip_rgb *rockchip_rgb_init(struct device *dev,
31649 						     struct drm_crtc *crtc,
31650-						     struct drm_device *drm_dev)
31651+						     struct drm_device *drm_dev,
31652+						     u32 port_id)
31653 {
31654 	return NULL;
31655 }
31656diff --git a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
31657index 39e1e1ebe..f33e7d1be 100644
31658--- a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
31659+++ b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
31660@@ -5,36 +5,36 @@
31661  */
31662 
31663 #include <linux/component.h>
31664-#include <linux/mod_devicetable.h>
31665-#include <linux/module.h>
31666+#include <linux/kernel.h>
31667 #include <linux/of.h>
31668 #include <linux/platform_device.h>
31669 
31670 #include <drm/drm_fourcc.h>
31671-#include <drm/drm_plane.h>
31672 #include <drm/drm_print.h>
31673 
31674 #include "rockchip_drm_vop.h"
31675 #include "rockchip_vop_reg.h"
31676-#include "rockchip_drm_drv.h"
31677 
31678-#define _VOP_REG(off, _mask, _shift, _write_mask, _relaxed) \
31679-		{ \
31680-		 .offset = off, \
31681+#define VOP_REG_VER_MASK(off, _mask, s, _write_mask, _major, \
31682+			 _begin_minor, _end_minor) \
31683+		{.offset = off, \
31684 		 .mask = _mask, \
31685-		 .shift = _shift, \
31686+		 .shift = s, \
31687 		 .write_mask = _write_mask, \
31688-		 .relaxed = _relaxed, \
31689-		}
31690+		 .major = _major, \
31691+		 .begin_minor = _begin_minor, \
31692+		 .end_minor = _end_minor,}
31693 
31694-#define VOP_REG(off, _mask, _shift) \
31695-		_VOP_REG(off, _mask, _shift, false, true)
31696+#define VOP_REG(off, _mask, s) \
31697+		VOP_REG_VER_MASK(off, _mask, s, false, 0, 0, -1)
31698 
31699-#define VOP_REG_SYNC(off, _mask, _shift) \
31700-		_VOP_REG(off, _mask, _shift, false, false)
31701+#define VOP_REG_MASK(off, _mask, s) \
31702+		VOP_REG_VER_MASK(off, _mask, s, true, 0, 0, -1)
31703+
31704+#define VOP_REG_VER(off, _mask, s, _major, _begin_minor, _end_minor) \
31705+		VOP_REG_VER_MASK(off, _mask, s, false, \
31706+				 _major, _begin_minor, _end_minor)
31707 
31708-#define VOP_REG_MASK_SYNC(off, _mask, _shift) \
31709-		_VOP_REG(off, _mask, _shift, true, false)
31710 
31711 static const uint32_t formats_win_full[] = {
31712 	DRM_FORMAT_XRGB8888,
31713@@ -50,15 +50,46 @@ static const uint32_t formats_win_full[] = {
31714 	DRM_FORMAT_NV24,
31715 };
31716 
31717-static const uint64_t format_modifiers_win_full[] = {
31718-	DRM_FORMAT_MOD_LINEAR,
31719-	DRM_FORMAT_MOD_INVALID,
31720+static const uint32_t formats_win_full_10bit[] = {
31721+	DRM_FORMAT_XRGB8888,
31722+	DRM_FORMAT_ARGB8888,
31723+	DRM_FORMAT_XBGR8888,
31724+	DRM_FORMAT_ABGR8888,
31725+	DRM_FORMAT_RGB888,
31726+	DRM_FORMAT_BGR888,
31727+	DRM_FORMAT_RGB565,
31728+	DRM_FORMAT_BGR565,
31729+	DRM_FORMAT_NV12,
31730+	DRM_FORMAT_NV16,
31731+	DRM_FORMAT_NV24,
31732+	DRM_FORMAT_NV15, /* yuv420_10bit linear mode, 2 plane, no padding */
31733+#ifdef CONFIG_NO_GKI
31734+	DRM_FORMAT_NV20, /* yuv422_10bit linear mode, 2 plane, no padding */
31735+	DRM_FORMAT_NV30, /* yuv444_10bit linear mode, 2 plane, no padding */
31736+#endif
31737 };
31738 
31739-static const uint64_t format_modifiers_win_full_afbc[] = {
31740-	ROCKCHIP_AFBC_MOD,
31741-	DRM_FORMAT_MOD_LINEAR,
31742-	DRM_FORMAT_MOD_INVALID,
31743+static const uint32_t formats_win_full_10bit_yuyv[] = {
31744+	DRM_FORMAT_XRGB8888,
31745+	DRM_FORMAT_ARGB8888,
31746+	DRM_FORMAT_XBGR8888,
31747+	DRM_FORMAT_ABGR8888,
31748+	DRM_FORMAT_RGB888,
31749+	DRM_FORMAT_BGR888,
31750+	DRM_FORMAT_RGB565,
31751+	DRM_FORMAT_BGR565,
31752+	DRM_FORMAT_NV12, /* yuv420_8bit linear mode, 2 plane */
31753+	DRM_FORMAT_NV16, /* yuv422_8bit linear mode, 2 plane */
31754+	DRM_FORMAT_NV24, /* yuv444_8bit linear mode, 2 plane */
31755+	DRM_FORMAT_NV15, /* yuv420_10bit linear mode, 2 plane, no padding */
31756+#ifdef CONFIG_NO_GKI
31757+	DRM_FORMAT_NV20, /* yuv422_10bit linear mode, 2 plane, no padding */
31758+	DRM_FORMAT_NV30, /* yuv444_10bit linear mode, 2 plane, no padding */
31759+#endif
31760+	DRM_FORMAT_YVYU, /* yuv422_8bit[YVYU] linear mode or non-linear mode */
31761+	DRM_FORMAT_VYUY, /* yuv422_8bit[VYUY] linear mode or non-linear mode */
31762+	DRM_FORMAT_YUYV, /* yuv422_8bit[YUYV] linear mode or non-linear mode */
31763+	DRM_FORMAT_UYVY, /* yuv422_8bit[UYVY] linear mode or non-linear mode */
31764 };
31765 
31766 static const uint32_t formats_win_lite[] = {
31767@@ -72,524 +103,571 @@ static const uint32_t formats_win_lite[] = {
31768 	DRM_FORMAT_BGR565,
31769 };
31770 
31771-static const uint64_t format_modifiers_win_lite[] = {
31772+static const uint64_t format_modifiers[] = {
31773 	DRM_FORMAT_MOD_LINEAR,
31774 	DRM_FORMAT_MOD_INVALID,
31775 };
31776 
31777-static const struct vop_scl_regs rk3036_win_scl = {
31778-	.scale_yrgb_x = VOP_REG(RK3036_WIN0_SCL_FACTOR_YRGB, 0xffff, 0x0),
31779-	.scale_yrgb_y = VOP_REG(RK3036_WIN0_SCL_FACTOR_YRGB, 0xffff, 16),
31780-	.scale_cbcr_x = VOP_REG(RK3036_WIN0_SCL_FACTOR_CBR, 0xffff, 0x0),
31781-	.scale_cbcr_y = VOP_REG(RK3036_WIN0_SCL_FACTOR_CBR, 0xffff, 16),
31782+static const uint64_t format_modifiers_afbc[] = {
31783+	DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_16x16),
31784+
31785+	DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_16x16 |
31786+				AFBC_FORMAT_MOD_SPARSE),
31787+
31788+	DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_16x16 |
31789+				AFBC_FORMAT_MOD_YTR),
31790+
31791+	DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_16x16 |
31792+				AFBC_FORMAT_MOD_CBR),
31793+
31794+	DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_16x16 |
31795+				AFBC_FORMAT_MOD_YTR |
31796+				AFBC_FORMAT_MOD_SPARSE),
31797+
31798+	DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_16x16 |
31799+				AFBC_FORMAT_MOD_CBR |
31800+				AFBC_FORMAT_MOD_SPARSE),
31801+
31802+	DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_16x16 |
31803+				AFBC_FORMAT_MOD_YTR |
31804+				AFBC_FORMAT_MOD_CBR),
31805+
31806+	DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_16x16 |
31807+				AFBC_FORMAT_MOD_YTR |
31808+				AFBC_FORMAT_MOD_CBR |
31809+				AFBC_FORMAT_MOD_SPARSE),
31810+
31811+	/* SPLIT mandates SPARSE, RGB modes mandate YTR */
31812+	DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_16x16 |
31813+				AFBC_FORMAT_MOD_YTR |
31814+				AFBC_FORMAT_MOD_SPARSE |
31815+				AFBC_FORMAT_MOD_SPLIT),
31816+
31817+	DRM_FORMAT_MOD_LINEAR,
31818+	DRM_FORMAT_MOD_INVALID,
31819 };
31820 
31821-static const struct vop_win_phy rk3036_win0_data = {
31822-	.scl = &rk3036_win_scl,
31823-	.data_formats = formats_win_full,
31824-	.nformats = ARRAY_SIZE(formats_win_full),
31825-	.format_modifiers = format_modifiers_win_full,
31826-	.enable = VOP_REG(RK3036_SYS_CTRL, 0x1, 0),
31827-	.format = VOP_REG(RK3036_SYS_CTRL, 0x7, 3),
31828-	.rb_swap = VOP_REG(RK3036_SYS_CTRL, 0x1, 15),
31829-	.act_info = VOP_REG(RK3036_WIN0_ACT_INFO, 0x1fff1fff, 0),
31830-	.dsp_info = VOP_REG(RK3036_WIN0_DSP_INFO, 0x0fff0fff, 0),
31831-	.dsp_st = VOP_REG(RK3036_WIN0_DSP_ST, 0x1fff1fff, 0),
31832-	.yrgb_mst = VOP_REG(RK3036_WIN0_YRGB_MST, 0xffffffff, 0),
31833-	.uv_mst = VOP_REG(RK3036_WIN0_CBR_MST, 0xffffffff, 0),
31834-	.yrgb_vir = VOP_REG(RK3036_WIN0_VIR, 0xffff, 0),
31835-	.uv_vir = VOP_REG(RK3036_WIN0_VIR, 0x1fff, 16),
31836+static const struct vop_scl_extension rk3288_win_full_scl_ext = {
31837+	.cbcr_vsd_mode = VOP_REG(RK3288_WIN0_CTRL1, 0x1, 31),
31838+	.cbcr_vsu_mode = VOP_REG(RK3288_WIN0_CTRL1, 0x1, 30),
31839+	.cbcr_hsd_mode = VOP_REG(RK3288_WIN0_CTRL1, 0x3, 28),
31840+	.cbcr_ver_scl_mode = VOP_REG(RK3288_WIN0_CTRL1, 0x3, 26),
31841+	.cbcr_hor_scl_mode = VOP_REG(RK3288_WIN0_CTRL1, 0x3, 24),
31842+	.yrgb_vsd_mode = VOP_REG(RK3288_WIN0_CTRL1, 0x1, 23),
31843+	.yrgb_vsu_mode = VOP_REG(RK3288_WIN0_CTRL1, 0x1, 22),
31844+	.yrgb_hsd_mode = VOP_REG(RK3288_WIN0_CTRL1, 0x3, 20),
31845+	.yrgb_ver_scl_mode = VOP_REG(RK3288_WIN0_CTRL1, 0x3, 18),
31846+	.yrgb_hor_scl_mode = VOP_REG(RK3288_WIN0_CTRL1, 0x3, 16),
31847+	.line_load_mode = VOP_REG(RK3288_WIN0_CTRL1, 0x1, 15),
31848+	.cbcr_axi_gather_num = VOP_REG(RK3288_WIN0_CTRL1, 0x7, 12),
31849+	.yrgb_axi_gather_num = VOP_REG(RK3288_WIN0_CTRL1, 0xf, 8),
31850+	.vsd_cbcr_gt2 = VOP_REG(RK3288_WIN0_CTRL1, 0x1, 7),
31851+	.vsd_cbcr_gt4 = VOP_REG(RK3288_WIN0_CTRL1, 0x1, 6),
31852+	.vsd_yrgb_gt2 = VOP_REG(RK3288_WIN0_CTRL1, 0x1, 5),
31853+	.vsd_yrgb_gt4 = VOP_REG(RK3288_WIN0_CTRL1, 0x1, 4),
31854+	.bic_coe_sel = VOP_REG(RK3288_WIN0_CTRL1, 0x3, 2),
31855+	.cbcr_axi_gather_en = VOP_REG(RK3288_WIN0_CTRL1, 0x1, 1),
31856+	.yrgb_axi_gather_en = VOP_REG(RK3288_WIN0_CTRL1, 0x1, 0),
31857+	.lb_mode = VOP_REG(RK3288_WIN0_CTRL0, 0x7, 5),
31858 };
31859 
31860-static const struct vop_win_phy rk3036_win1_data = {
31861+static const struct vop_scl_regs rk3288_win_full_scl = {
31862+	.ext = &rk3288_win_full_scl_ext,
31863+	.scale_yrgb_x = VOP_REG(RK3288_WIN0_SCL_FACTOR_YRGB, 0xffff, 0x0),
31864+	.scale_yrgb_y = VOP_REG(RK3288_WIN0_SCL_FACTOR_YRGB, 0xffff, 16),
31865+	.scale_cbcr_x = VOP_REG(RK3288_WIN0_SCL_FACTOR_CBR, 0xffff, 0x0),
31866+	.scale_cbcr_y = VOP_REG(RK3288_WIN0_SCL_FACTOR_CBR, 0xffff, 16),
31867+};
31868+
31869+static const struct vop_win_phy rk3288_win01_data = {
31870+	.scl = &rk3288_win_full_scl,
31871+	.data_formats = formats_win_full_10bit,
31872+	.nformats = ARRAY_SIZE(formats_win_full_10bit),
31873+	.enable = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 0),
31874+	.format = VOP_REG(RK3288_WIN0_CTRL0, 0x7, 1),
31875+	.fmt_10 = VOP_REG(RK3288_WIN0_CTRL0, 0x7, 4),
31876+	.csc_mode = VOP_REG_VER(RK3288_WIN0_CTRL0, 0x3, 10, 3, 2, -1),
31877+	.rb_swap = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 12),
31878+	.xmirror = VOP_REG_VER(RK3368_WIN0_CTRL0, 0x1, 21, 3, 2, -1),
31879+	.ymirror = VOP_REG_VER(RK3368_WIN0_CTRL0, 0x1, 22, 3, 2, -1),
31880+	.act_info = VOP_REG(RK3288_WIN0_ACT_INFO, 0x1fff1fff, 0),
31881+	.dsp_info = VOP_REG(RK3288_WIN0_DSP_INFO, 0x0fff0fff, 0),
31882+	.dsp_st = VOP_REG(RK3288_WIN0_DSP_ST, 0x1fff1fff, 0),
31883+	.yrgb_mst = VOP_REG(RK3288_WIN0_YRGB_MST, 0xffffffff, 0),
31884+	.uv_mst = VOP_REG(RK3288_WIN0_CBR_MST, 0xffffffff, 0),
31885+	.yrgb_vir = VOP_REG(RK3288_WIN0_VIR, 0x3fff, 0),
31886+	.uv_vir = VOP_REG(RK3288_WIN0_VIR, 0x3fff, 16),
31887+	.src_alpha_ctl = VOP_REG(RK3288_WIN0_SRC_ALPHA_CTRL, 0xffff, 0),
31888+	.global_alpha_val = VOP_REG(RK3288_WIN0_SRC_ALPHA_CTRL, 0xff, 16),
31889+	.dst_alpha_ctl = VOP_REG(RK3288_WIN0_DST_ALPHA_CTRL, 0xffffffff, 0),
31890+	.channel = VOP_REG_VER(RK3288_WIN0_CTRL2, 0xff, 0, 3, 8, 8),
31891+};
31892+
31893+static const struct vop_win_phy rk3288_win23_data = {
31894 	.data_formats = formats_win_lite,
31895 	.nformats = ARRAY_SIZE(formats_win_lite),
31896-	.format_modifiers = format_modifiers_win_lite,
31897-	.enable = VOP_REG(RK3036_SYS_CTRL, 0x1, 1),
31898-	.format = VOP_REG(RK3036_SYS_CTRL, 0x7, 6),
31899-	.rb_swap = VOP_REG(RK3036_SYS_CTRL, 0x1, 19),
31900-	.act_info = VOP_REG(RK3036_WIN1_ACT_INFO, 0x1fff1fff, 0),
31901-	.dsp_info = VOP_REG(RK3036_WIN1_DSP_INFO, 0x0fff0fff, 0),
31902-	.dsp_st = VOP_REG(RK3036_WIN1_DSP_ST, 0x1fff1fff, 0),
31903-	.yrgb_mst = VOP_REG(RK3036_WIN1_MST, 0xffffffff, 0),
31904-	.yrgb_vir = VOP_REG(RK3036_WIN1_VIR, 0xffff, 0),
31905+	.gate = VOP_REG(RK3288_WIN2_CTRL0, 0x1, 0),
31906+	.enable = VOP_REG(RK3288_WIN2_CTRL0, 0x1, 4),
31907+	.format = VOP_REG(RK3288_WIN2_CTRL0, 0x7, 1),
31908+	.rb_swap = VOP_REG(RK3288_WIN2_CTRL0, 0x1, 12),
31909+	.dsp_info = VOP_REG(RK3288_WIN2_DSP_INFO0, 0x0fff0fff, 0),
31910+	.dsp_st = VOP_REG(RK3288_WIN2_DSP_ST0, 0x1fff1fff, 0),
31911+	.yrgb_mst = VOP_REG(RK3288_WIN2_MST0, 0xffffffff, 0),
31912+	.yrgb_vir = VOP_REG(RK3288_WIN2_VIR0_1, 0x1fff, 0),
31913+	.src_alpha_ctl = VOP_REG(RK3288_WIN2_SRC_ALPHA_CTRL, 0xffff, 0),
31914+	.global_alpha_val = VOP_REG(RK3288_WIN2_SRC_ALPHA_CTRL, 0xff, 16),
31915+	.dst_alpha_ctl = VOP_REG(RK3288_WIN2_DST_ALPHA_CTRL, 0xffffffff, 0),
31916 };
31917 
31918-static const struct vop_win_data rk3036_vop_win_data[] = {
31919-	{ .base = 0x00, .phy = &rk3036_win0_data,
31920-	  .type = DRM_PLANE_TYPE_PRIMARY },
31921-	{ .base = 0x00, .phy = &rk3036_win1_data,
31922-	  .type = DRM_PLANE_TYPE_CURSOR },
31923+static const struct vop_win_phy rk3288_area1_data = {
31924+	.enable = VOP_REG(RK3288_WIN2_CTRL0, 0x1, 5),
31925+	.dsp_info = VOP_REG(RK3288_WIN2_DSP_INFO1, 0x0fff0fff, 0),
31926+	.dsp_st = VOP_REG(RK3288_WIN2_DSP_ST1, 0x1fff1fff, 0),
31927+	.yrgb_mst = VOP_REG(RK3288_WIN2_MST1, 0xffffffff, 0),
31928+	.yrgb_vir = VOP_REG(RK3288_WIN2_VIR0_1, 0x1fff, 16),
31929 };
31930 
31931-static const int rk3036_vop_intrs[] = {
31932-	DSP_HOLD_VALID_INTR,
31933-	FS_INTR,
31934-	LINE_FLAG_INTR,
31935-	BUS_ERROR_INTR,
31936+static const struct vop_win_phy rk3288_area2_data = {
31937+	.enable = VOP_REG(RK3288_WIN2_CTRL0, 0x1, 6),
31938+	.dsp_info = VOP_REG(RK3288_WIN2_DSP_INFO2, 0x0fff0fff, 0),
31939+	.dsp_st = VOP_REG(RK3288_WIN2_DSP_ST2, 0x1fff1fff, 0),
31940+	.yrgb_mst = VOP_REG(RK3288_WIN2_MST2, 0xffffffff, 0),
31941+	.yrgb_vir = VOP_REG(RK3288_WIN2_VIR2_3, 0x1fff, 0),
31942 };
31943 
31944-static const struct vop_intr rk3036_intr = {
31945-	.intrs = rk3036_vop_intrs,
31946-	.nintrs = ARRAY_SIZE(rk3036_vop_intrs),
31947-	.line_flag_num[0] = VOP_REG(RK3036_INT_STATUS, 0xfff, 12),
31948-	.status = VOP_REG_SYNC(RK3036_INT_STATUS, 0xf, 0),
31949-	.enable = VOP_REG_SYNC(RK3036_INT_STATUS, 0xf, 4),
31950-	.clear = VOP_REG_SYNC(RK3036_INT_STATUS, 0xf, 8),
31951+static const struct vop_win_phy rk3288_area3_data = {
31952+	.enable = VOP_REG(RK3288_WIN2_CTRL0, 0x1, 7),
31953+	.dsp_info = VOP_REG(RK3288_WIN2_DSP_INFO3, 0x0fff0fff, 0),
31954+	.dsp_st = VOP_REG(RK3288_WIN2_DSP_ST3, 0x1fff1fff, 0),
31955+	.yrgb_mst = VOP_REG(RK3288_WIN2_MST3, 0xffffffff, 0),
31956+	.yrgb_vir = VOP_REG(RK3288_WIN2_VIR2_3, 0x1fff, 16),
31957 };
31958 
31959-static const struct vop_modeset rk3036_modeset = {
31960-	.htotal_pw = VOP_REG(RK3036_DSP_HTOTAL_HS_END, 0x1fff1fff, 0),
31961-	.hact_st_end = VOP_REG(RK3036_DSP_HACT_ST_END, 0x1fff1fff, 0),
31962-	.vtotal_pw = VOP_REG(RK3036_DSP_VTOTAL_VS_END, 0x1fff1fff, 0),
31963-	.vact_st_end = VOP_REG(RK3036_DSP_VACT_ST_END, 0x1fff1fff, 0),
31964+static const struct vop_win_phy *rk3288_area_data[] = {
31965+	&rk3288_area1_data,
31966+	&rk3288_area2_data,
31967+	&rk3288_area3_data
31968 };
31969 
31970-static const struct vop_output rk3036_output = {
31971-	.pin_pol = VOP_REG(RK3036_DSP_CTRL0, 0xf, 4),
31972+static const struct vop_ctrl rk3288_ctrl_data = {
31973+	.version = VOP_REG(RK3288_VERSION_INFO, 0xffff, 16),
31974+	.standby = VOP_REG(RK3288_SYS_CTRL, 0x1, 22),
31975+	.dma_stop = VOP_REG(RK3288_SYS_CTRL, 0x1, 21),
31976+	.axi_outstanding_max_num = VOP_REG(RK3288_SYS_CTRL1, 0x1f, 13),
31977+	.axi_max_outstanding_en = VOP_REG(RK3288_SYS_CTRL1, 0x1, 12),
31978+	.reg_done_frm = VOP_REG_VER(RK3288_SYS_CTRL1, 0x1, 24, 3, 5, -1),
31979+	.htotal_pw = VOP_REG(RK3288_DSP_HTOTAL_HS_END, 0x1fff1fff, 0),
31980+	.hact_st_end = VOP_REG(RK3288_DSP_HACT_ST_END, 0x1fff1fff, 0),
31981+	.vtotal_pw = VOP_REG(RK3288_DSP_VTOTAL_VS_END, 0x1fff1fff, 0),
31982+	.vact_st_end = VOP_REG(RK3288_DSP_VACT_ST_END, 0x1fff1fff, 0),
31983+	.vact_st_end_f1 = VOP_REG(RK3288_DSP_VACT_ST_END_F1, 0x1fff1fff, 0),
31984+	.vs_st_end_f1 = VOP_REG(RK3288_DSP_VS_ST_END_F1, 0x1fff1fff, 0),
31985+	.hpost_st_end = VOP_REG(RK3288_POST_DSP_HACT_INFO, 0x1fff1fff, 0),
31986+	.vpost_st_end = VOP_REG(RK3288_POST_DSP_VACT_INFO, 0x1fff1fff, 0),
31987+	.vpost_st_end_f1 = VOP_REG(RK3288_POST_DSP_VACT_INFO_F1, 0x1fff1fff, 0),
31988+	.post_scl_factor = VOP_REG(RK3288_POST_SCL_FACTOR_YRGB, 0xffffffff, 0),
31989+	.post_scl_ctrl = VOP_REG(RK3288_POST_SCL_CTRL, 0x3, 0),
31990+
31991+	.dsp_interlace = VOP_REG(RK3288_DSP_CTRL0, 0x1, 10),
31992+	.auto_gate_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 23),
31993+	.dsp_layer_sel = VOP_REG(RK3288_DSP_CTRL1, 0xff, 8),
31994+	.post_lb_mode = VOP_REG_VER(RK3288_SYS_CTRL, 0x1, 18, 3, 2, -1),
31995+	.global_regdone_en = VOP_REG_VER(RK3288_SYS_CTRL, 0x1, 11, 3, 2, -1),
31996+	.overlay_mode = VOP_REG_VER(RK3288_SYS_CTRL, 0x1, 16, 3, 2, -1),
31997+	.core_dclk_div = VOP_REG_VER(RK3366_DSP_CTRL0, 0x1, 4, 3, 4, -1),
31998+	.p2i_en = VOP_REG_VER(RK3366_DSP_CTRL0, 0x1, 5, 3, 4, -1),
31999+	.dclk_ddr = VOP_REG_VER(RK3288_DSP_CTRL0, 0x1, 8, 3, 1, -1),
32000+	.dp_en = VOP_REG_VER(RK3399_SYS_CTRL, 0x1, 11, 3, 5, -1),
32001+	.hdmi_dclk_out_en = VOP_REG_VER(RK3288_SYS_CTRL, 0x1, 11, 3, 1, 1),
32002+	.rgb_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 12),
32003+	.hdmi_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 13),
32004+	.edp_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 14),
32005+	.mipi_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 15),
32006+	.mipi_dual_channel_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 3),
32007+	.data01_swap = VOP_REG_VER(RK3288_SYS_CTRL, 0x1, 17, 3, 5, -1),
32008+	.dclk_pol = VOP_REG_VER(RK3288_DSP_CTRL0, 0x1, 7, 3, 0, 1),
32009+	.pin_pol = VOP_REG_VER(RK3288_DSP_CTRL0, 0x7, 4, 3, 0, 1),
32010+	.dp_dclk_pol = VOP_REG_VER(RK3399_DSP_CTRL1, 0x1, 19, 3, 5, -1),
32011+	.dp_pin_pol = VOP_REG_VER(RK3399_DSP_CTRL1, 0x7, 16, 3, 5, -1),
32012+	.rgb_dclk_pol = VOP_REG_VER(RK3368_DSP_CTRL1, 0x1, 19, 3, 2, -1),
32013+	.rgb_pin_pol = VOP_REG_VER(RK3368_DSP_CTRL1, 0x7, 16, 3, 2, -1),
32014+	.tve_dclk_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 24),
32015+	.tve_dclk_pol = VOP_REG(RK3288_SYS_CTRL, 0x1, 25),
32016+	.tve_sw_mode = VOP_REG(RK3288_SYS_CTRL, 0x1, 26),
32017+	.sw_uv_offset_en  = VOP_REG(RK3288_SYS_CTRL, 0x1, 27),
32018+	.sw_genlock   = VOP_REG(RK3288_SYS_CTRL, 0x1, 28),
32019+	.hdmi_dclk_pol = VOP_REG_VER(RK3368_DSP_CTRL1, 0x1, 23, 3, 2, -1),
32020+	.hdmi_pin_pol = VOP_REG_VER(RK3368_DSP_CTRL1, 0x7, 20, 3, 2, -1),
32021+	.edp_dclk_pol = VOP_REG_VER(RK3368_DSP_CTRL1, 0x1, 27, 3, 2, -1),
32022+	.edp_pin_pol = VOP_REG_VER(RK3368_DSP_CTRL1, 0x7, 24, 3, 2, -1),
32023+	.mipi_dclk_pol = VOP_REG_VER(RK3368_DSP_CTRL1, 0x1, 31, 3, 2, -1),
32024+	.mipi_pin_pol = VOP_REG_VER(RK3368_DSP_CTRL1, 0x7, 28, 3, 2, -1),
32025+
32026+	.dither_down_sel = VOP_REG(RK3288_DSP_CTRL1, 0x1, 4),
32027+	.dither_down_mode = VOP_REG(RK3288_DSP_CTRL1, 0x1, 3),
32028+	.dither_down_en = VOP_REG(RK3288_DSP_CTRL1, 0x1, 2),
32029+	.pre_dither_down_en = VOP_REG(RK3288_DSP_CTRL1, 0x1, 1),
32030+	.dither_up_en = VOP_REG(RK3288_DSP_CTRL1, 0x1, 6),
32031+
32032+	.dsp_out_yuv = VOP_REG_VER(RK3399_POST_SCL_CTRL, 0x1, 2, 3, 5, -1),
32033+	.dsp_data_swap = VOP_REG(RK3288_DSP_CTRL0, 0x1f, 12),
32034+	.dsp_ccir656_avg = VOP_REG(RK3288_DSP_CTRL0, 0x1, 20),
32035+	.dsp_blank = VOP_REG(RK3288_DSP_CTRL0, 0x3, 18),
32036+	.update_gamma_lut = VOP_REG_VER(RK3288_DSP_CTRL1, 0x1, 7, 3, 5, -1),
32037+	.lut_buffer_index = VOP_REG_VER(RK3399_DBG_POST_REG1, 0x1, 1, 3, 5, -1),
32038+	.dsp_lut_en = VOP_REG(RK3288_DSP_CTRL1, 0x1, 0),
32039+	.out_mode = VOP_REG(RK3288_DSP_CTRL0, 0xf, 0),
32040+
32041+	.afbdc_rstn = VOP_REG_VER(RK3399_AFBCD0_CTRL, 0x1, 3, 3, 5, -1),
32042+	.afbdc_en = VOP_REG_VER(RK3399_AFBCD0_CTRL, 0x1, 0, 3, 5, -1),
32043+	.afbdc_sel = VOP_REG_VER(RK3399_AFBCD0_CTRL, 0x3, 1, 3, 5, -1),
32044+	.afbdc_format = VOP_REG_VER(RK3399_AFBCD0_CTRL, 0x1f, 16, 3, 5, -1),
32045+	.afbdc_hreg_block_split = VOP_REG_VER(RK3399_AFBCD0_CTRL,
32046+					      0x1, 21, 3, 5, -1),
32047+	.afbdc_hdr_ptr = VOP_REG_VER(RK3399_AFBCD0_HDR_PTR, 0xffffffff,
32048+				     0, 3, 5, -1),
32049+	.afbdc_pic_size = VOP_REG_VER(RK3399_AFBCD0_PIC_SIZE, 0xffffffff,
32050+				      0, 3, 5, -1),
32051+	.bcsh_brightness = VOP_REG(RK3288_BCSH_BCS, 0xff, 0),
32052+	.bcsh_contrast = VOP_REG(RK3288_BCSH_BCS, 0x1ff, 8),
32053+	.bcsh_sat_con = VOP_REG(RK3288_BCSH_BCS, 0x3ff, 20),
32054+	.bcsh_out_mode = VOP_REG(RK3288_BCSH_BCS, 0x3, 30),
32055+	.bcsh_sin_hue = VOP_REG(RK3288_BCSH_H, 0x1ff, 0),
32056+	.bcsh_cos_hue = VOP_REG(RK3288_BCSH_H, 0x1ff, 16),
32057+	.bcsh_r2y_csc_mode = VOP_REG_VER(RK3368_BCSH_CTRL, 0x1, 6, 3, 1, -1),
32058+	.bcsh_r2y_en = VOP_REG_VER(RK3368_BCSH_CTRL, 0x1, 4, 3, 1, -1),
32059+	.bcsh_y2r_csc_mode = VOP_REG_VER(RK3368_BCSH_CTRL, 0x3, 2, 3, 1, -1),
32060+	.bcsh_y2r_en = VOP_REG_VER(RK3368_BCSH_CTRL, 0x1, 0, 3, 1, -1),
32061+	.bcsh_color_bar = VOP_REG(RK3288_BCSH_COLOR_BAR, 0xffffff, 8),
32062+	.bcsh_en = VOP_REG(RK3288_BCSH_COLOR_BAR, 0x1, 0),
32063+
32064+	.xmirror = VOP_REG(RK3288_DSP_CTRL0, 0x1, 22),
32065+	.ymirror = VOP_REG(RK3288_DSP_CTRL0, 0x1, 23),
32066+
32067+	.dsp_background = VOP_REG(RK3288_DSP_BG, 0xffffffff, 0),
32068+
32069+	.cfg_done = VOP_REG(RK3288_REG_CFG_DONE, 0x1, 0),
32070 };
32071 
32072-static const struct vop_common rk3036_common = {
32073-	.standby = VOP_REG_SYNC(RK3036_SYS_CTRL, 0x1, 30),
32074-	.out_mode = VOP_REG(RK3036_DSP_CTRL0, 0xf, 0),
32075-	.dsp_blank = VOP_REG(RK3036_DSP_CTRL1, 0x1, 24),
32076-	.dither_down_sel = VOP_REG(RK3036_DSP_CTRL0, 0x1, 27),
32077-	.dither_down_en = VOP_REG(RK3036_DSP_CTRL0, 0x1, 11),
32078-	.dither_down_mode = VOP_REG(RK3036_DSP_CTRL0, 0x1, 10),
32079-	.cfg_done = VOP_REG_SYNC(RK3036_REG_CFG_DONE, 0x1, 0),
32080+/*
32081+ * Note: rk3288 has a dedicated 'cursor' window; however, that window requires
32082+ * special support to get alpha blending working. For now, just use overlay
32083+ * window 3 for the drm cursor.
32084+ *
32085+ */
32086+static const struct vop_win_data rk3288_vop_win_data[] = {
32087+	{ .base = 0x00, .phy = &rk3288_win01_data,
32088+	  .type = DRM_PLANE_TYPE_PRIMARY },
32089+	{ .base = 0x40, .phy = &rk3288_win01_data,
32090+	  .type = DRM_PLANE_TYPE_OVERLAY },
32091+	{ .base = 0x00, .phy = &rk3288_win23_data,
32092+	  .type = DRM_PLANE_TYPE_OVERLAY,
32093+	  .area = rk3288_area_data,
32094+	  .area_size = ARRAY_SIZE(rk3288_area_data), },
32095+	{ .base = 0x50, .phy = &rk3288_win23_data,
32096+	  .type = DRM_PLANE_TYPE_CURSOR,
32097+	  .area = rk3288_area_data,
32098+	  .area_size = ARRAY_SIZE(rk3288_area_data), },
32099 };
32100 
32101-static const struct vop_data rk3036_vop = {
32102-	.intr = &rk3036_intr,
32103-	.common = &rk3036_common,
32104-	.modeset = &rk3036_modeset,
32105-	.output = &rk3036_output,
32106-	.win = rk3036_vop_win_data,
32107-	.win_size = ARRAY_SIZE(rk3036_vop_win_data),
32108+static const int rk3288_vop_intrs[] = {
32109+	DSP_HOLD_VALID_INTR,
32110+	FS_INTR,
32111+	LINE_FLAG_INTR,
32112+	BUS_ERROR_INTR,
32113 };
32114 
32115-static const struct vop_win_phy rk3126_win1_data = {
32116-	.data_formats = formats_win_lite,
32117-	.nformats = ARRAY_SIZE(formats_win_lite),
32118-	.format_modifiers = format_modifiers_win_lite,
32119-	.enable = VOP_REG(RK3036_SYS_CTRL, 0x1, 1),
32120-	.format = VOP_REG(RK3036_SYS_CTRL, 0x7, 6),
32121-	.rb_swap = VOP_REG(RK3036_SYS_CTRL, 0x1, 19),
32122-	.dsp_info = VOP_REG(RK3126_WIN1_DSP_INFO, 0x0fff0fff, 0),
32123-	.dsp_st = VOP_REG(RK3126_WIN1_DSP_ST, 0x1fff1fff, 0),
32124-	.yrgb_mst = VOP_REG(RK3126_WIN1_MST, 0xffffffff, 0),
32125-	.yrgb_vir = VOP_REG(RK3036_WIN1_VIR, 0xffff, 0),
32126+static const struct vop_intr rk3288_vop_intr = {
32127+	.intrs = rk3288_vop_intrs,
32128+	.nintrs = ARRAY_SIZE(rk3288_vop_intrs),
32129+	.line_flag_num[0] = VOP_REG(RK3288_INTR_CTRL0, 0x1fff, 12),
32130+	.status = VOP_REG(RK3288_INTR_CTRL0, 0xf, 0),
32131+	.enable = VOP_REG(RK3288_INTR_CTRL0, 0xf, 4),
32132+	.clear = VOP_REG(RK3288_INTR_CTRL0, 0xf, 8),
32133 };
32134 
32135-static const struct vop_win_data rk3126_vop_win_data[] = {
32136-	{ .base = 0x00, .phy = &rk3036_win0_data,
32137-	  .type = DRM_PLANE_TYPE_PRIMARY },
32138-	{ .base = 0x00, .phy = &rk3126_win1_data,
32139-	  .type = DRM_PLANE_TYPE_CURSOR },
32140+static const struct vop_grf_ctrl rk3288_vop_big_grf_ctrl = {
32141+	.grf_dclk_inv = VOP_REG(RK3288_GRF_SOC_CON15, 0x1, 13),
32142 };
32143 
32144-static const struct vop_data rk3126_vop = {
32145-	.intr = &rk3036_intr,
32146-	.common = &rk3036_common,
32147-	.modeset = &rk3036_modeset,
32148-	.output = &rk3036_output,
32149-	.win = rk3126_vop_win_data,
32150-	.win_size = ARRAY_SIZE(rk3126_vop_win_data),
32151+static const struct vop_grf_ctrl rk3288_vop_lit_grf_ctrl = {
32152+	.grf_dclk_inv = VOP_REG(RK3288_GRF_SOC_CON15, 0x1, 15),
32153+};
32154+
32155+static const struct vop_data rk3288_vop_big = {
32156+	.soc_id = 0x3288,
32157+	.vop_id = 0,
32158+	.version = VOP_VERSION(3, 0),
32159+	.feature = VOP_FEATURE_OUTPUT_10BIT | VOP_FEATURE_ALPHA_SCALE | VOP_FEATURE_OVERSCAN,
32160+	.max_input = {4096, 8192},
32161+	.max_output = {3840, 2160},
32162+	.intr = &rk3288_vop_intr,
32163+	.grf_ctrl = &rk3288_vop_big_grf_ctrl,
32164+	.ctrl = &rk3288_ctrl_data,
32165+	.win = rk3288_vop_win_data,
32166+	.win_size = ARRAY_SIZE(rk3288_vop_win_data),
32167+};
32168+
32169+static const struct vop_data rk3288_vop_lit = {
32170+	.soc_id = 0x3288,
32171+	.vop_id = 1,
32172+	.version = VOP_VERSION(3, 0),
32173+	.feature = VOP_FEATURE_OUTPUT_10BIT | VOP_FEATURE_ALPHA_SCALE | VOP_FEATURE_OVERSCAN,
32174+	.max_input = {4096, 8192},
32175+	.max_output = {2560, 1600},
32176+	.intr = &rk3288_vop_intr,
32177+	.grf_ctrl = &rk3288_vop_lit_grf_ctrl,
32178+	.ctrl = &rk3288_ctrl_data,
32179+	.win = rk3288_vop_win_data,
32180+	.win_size = ARRAY_SIZE(rk3288_vop_win_data),
32181 };
32182 
32183-static const int px30_vop_intrs[] = {
32184+static const int rk3368_vop_intrs[] = {
32185 	FS_INTR,
32186-	0, 0,
32187+	FS_NEW_INTR,
32188+	ADDR_SAME_INTR,
32189 	LINE_FLAG_INTR,
32190-	0,
32191+	LINE_FLAG1_INTR,
32192 	BUS_ERROR_INTR,
32193-	0, 0,
32194+	WIN0_EMPTY_INTR,
32195+	WIN1_EMPTY_INTR,
32196+	WIN2_EMPTY_INTR,
32197+	WIN3_EMPTY_INTR,
32198+	HWC_EMPTY_INTR,
32199+	POST_BUF_EMPTY_INTR,
32200+	FS_FIELD_INTR,
32201 	DSP_HOLD_VALID_INTR,
32202 };
32203 
32204-static const struct vop_intr px30_intr = {
32205-	.intrs = px30_vop_intrs,
32206-	.nintrs = ARRAY_SIZE(px30_vop_intrs),
32207-	.line_flag_num[0] = VOP_REG(PX30_LINE_FLAG, 0xfff, 0),
32208-	.status = VOP_REG_MASK_SYNC(PX30_INTR_STATUS, 0xffff, 0),
32209-	.enable = VOP_REG_MASK_SYNC(PX30_INTR_EN, 0xffff, 0),
32210-	.clear = VOP_REG_MASK_SYNC(PX30_INTR_CLEAR, 0xffff, 0),
32211+static const struct vop_intr rk3368_vop_intr = {
32212+	.intrs = rk3368_vop_intrs,
32213+	.nintrs = ARRAY_SIZE(rk3368_vop_intrs),
32214+	.line_flag_num[0] = VOP_REG(RK3368_LINE_FLAG, 0xffff, 0),
32215+	.line_flag_num[1] = VOP_REG(RK3368_LINE_FLAG, 0xffff, 16),
32216+	.status = VOP_REG_MASK(RK3368_INTR_STATUS, 0x3fff, 0),
32217+	.enable = VOP_REG_MASK(RK3368_INTR_EN, 0x3fff, 0),
32218+	.clear = VOP_REG_MASK(RK3368_INTR_CLEAR, 0x3fff, 0),
32219 };
32220 
32221-static const struct vop_common px30_common = {
32222-	.standby = VOP_REG_SYNC(PX30_SYS_CTRL2, 0x1, 1),
32223-	.out_mode = VOP_REG(PX30_DSP_CTRL2, 0xf, 16),
32224-	.dsp_blank = VOP_REG(PX30_DSP_CTRL2, 0x1, 14),
32225-	.dither_down_en = VOP_REG(PX30_DSP_CTRL2, 0x1, 8),
32226-	.dither_down_sel = VOP_REG(PX30_DSP_CTRL2, 0x1, 7),
32227-	.dither_down_mode = VOP_REG(PX30_DSP_CTRL2, 0x1, 6),
32228-	.cfg_done = VOP_REG_SYNC(PX30_REG_CFG_DONE, 0x1, 0),
32229+static const struct vop_win_phy rk3368_win23_data = {
32230+	.data_formats = formats_win_lite,
32231+	.nformats = ARRAY_SIZE(formats_win_lite),
32232+	.gate = VOP_REG(RK3368_WIN2_CTRL0, 0x1, 0),
32233+	.enable = VOP_REG(RK3368_WIN2_CTRL0, 0x1, 4),
32234+	.format = VOP_REG(RK3368_WIN2_CTRL0, 0x3, 5),
32235+	.ymirror = VOP_REG(RK3368_WIN2_CTRL1, 0x1, 15),
32236+	.rb_swap = VOP_REG(RK3368_WIN2_CTRL0, 0x1, 20),
32237+	.dsp_info = VOP_REG(RK3368_WIN2_DSP_INFO0, 0x0fff0fff, 0),
32238+	.dsp_st = VOP_REG(RK3368_WIN2_DSP_ST0, 0x1fff1fff, 0),
32239+	.yrgb_mst = VOP_REG(RK3368_WIN2_MST0, 0xffffffff, 0),
32240+	.yrgb_vir = VOP_REG(RK3368_WIN2_VIR0_1, 0x1fff, 0),
32241+	.src_alpha_ctl = VOP_REG(RK3368_WIN2_SRC_ALPHA_CTRL, 0xffff, 0),
32242+	.global_alpha_val = VOP_REG(RK3368_WIN2_SRC_ALPHA_CTRL, 0xff, 16),
32243+	.dst_alpha_ctl = VOP_REG(RK3368_WIN2_DST_ALPHA_CTRL, 0xffffffff, 0),
32244 };
32245 
32246-static const struct vop_modeset px30_modeset = {
32247-	.htotal_pw = VOP_REG(PX30_DSP_HTOTAL_HS_END, 0x0fff0fff, 0),
32248-	.hact_st_end = VOP_REG(PX30_DSP_HACT_ST_END, 0x0fff0fff, 0),
32249-	.vtotal_pw = VOP_REG(PX30_DSP_VTOTAL_VS_END, 0x0fff0fff, 0),
32250-	.vact_st_end = VOP_REG(PX30_DSP_VACT_ST_END, 0x0fff0fff, 0),
32251+static const struct vop_win_phy rk3368_area1_data = {
32252+	.enable = VOP_REG(RK3368_WIN2_CTRL0, 0x1, 8),
32253+	.format = VOP_REG(RK3368_WIN2_CTRL0, 0x3, 9),
32254+	.rb_swap = VOP_REG(RK3368_WIN2_CTRL0, 0x1, 23),
32255+	.dsp_info = VOP_REG(RK3368_WIN2_DSP_INFO1, 0x0fff0fff, 0),
32256+	.dsp_st = VOP_REG(RK3368_WIN2_DSP_ST1, 0x1fff1fff, 0),
32257+	.yrgb_mst = VOP_REG(RK3368_WIN2_MST1, 0xffffffff, 0),
32258+	.yrgb_vir = VOP_REG(RK3368_WIN2_VIR0_1, 0x1fff, 16),
32259 };
32260 
32261-static const struct vop_output px30_output = {
32262-	.rgb_dclk_pol = VOP_REG(PX30_DSP_CTRL0, 0x1, 1),
32263-	.rgb_pin_pol = VOP_REG(PX30_DSP_CTRL0, 0x7, 2),
32264-	.rgb_en = VOP_REG(PX30_DSP_CTRL0, 0x1, 0),
32265-	.mipi_dclk_pol = VOP_REG(PX30_DSP_CTRL0, 0x1, 25),
32266-	.mipi_pin_pol = VOP_REG(PX30_DSP_CTRL0, 0x7, 26),
32267-	.mipi_en = VOP_REG(PX30_DSP_CTRL0, 0x1, 24),
32268+static const struct vop_win_phy rk3368_area2_data = {
32269+	.enable = VOP_REG(RK3368_WIN2_CTRL0, 0x1, 12),
32270+	.format = VOP_REG(RK3368_WIN2_CTRL0, 0x3, 13),
32271+	.rb_swap = VOP_REG(RK3368_WIN2_CTRL0, 0x1, 26),
32272+	.dsp_info = VOP_REG(RK3368_WIN2_DSP_INFO2, 0x0fff0fff, 0),
32273+	.dsp_st = VOP_REG(RK3368_WIN2_DSP_ST2, 0x1fff1fff, 0),
32274+	.yrgb_mst = VOP_REG(RK3368_WIN2_MST2, 0xffffffff, 0),
32275+	.yrgb_vir = VOP_REG(RK3368_WIN2_VIR2_3, 0x1fff, 0),
32276 };
32277 
32278-static const struct vop_scl_regs px30_win_scl = {
32279-	.scale_yrgb_x = VOP_REG(PX30_WIN0_SCL_FACTOR_YRGB, 0xffff, 0x0),
32280-	.scale_yrgb_y = VOP_REG(PX30_WIN0_SCL_FACTOR_YRGB, 0xffff, 16),
32281-	.scale_cbcr_x = VOP_REG(PX30_WIN0_SCL_FACTOR_CBR, 0xffff, 0x0),
32282-	.scale_cbcr_y = VOP_REG(PX30_WIN0_SCL_FACTOR_CBR, 0xffff, 16),
32283+static const struct vop_win_phy rk3368_area3_data = {
32284+	.enable = VOP_REG(RK3368_WIN2_CTRL0, 0x1, 16),
32285+	.format = VOP_REG(RK3368_WIN2_CTRL0, 0x3, 17),
32286+	.rb_swap = VOP_REG(RK3368_WIN2_CTRL0, 0x1, 29),
32287+	.dsp_info = VOP_REG(RK3368_WIN2_DSP_INFO3, 0x0fff0fff, 0),
32288+	.dsp_st = VOP_REG(RK3368_WIN2_DSP_ST3, 0x1fff1fff, 0),
32289+	.yrgb_mst = VOP_REG(RK3368_WIN2_MST3, 0xffffffff, 0),
32290+	.yrgb_vir = VOP_REG(RK3368_WIN2_VIR2_3, 0x1fff, 16),
32291 };
32292 
32293-static const struct vop_win_phy px30_win0_data = {
32294-	.scl = &px30_win_scl,
32295-	.data_formats = formats_win_full,
32296-	.nformats = ARRAY_SIZE(formats_win_full),
32297-	.format_modifiers = format_modifiers_win_full,
32298-	.enable = VOP_REG(PX30_WIN0_CTRL0, 0x1, 0),
32299-	.format = VOP_REG(PX30_WIN0_CTRL0, 0x7, 1),
32300-	.rb_swap = VOP_REG(PX30_WIN0_CTRL0, 0x1, 12),
32301-	.act_info = VOP_REG(PX30_WIN0_ACT_INFO, 0xffffffff, 0),
32302-	.dsp_info = VOP_REG(PX30_WIN0_DSP_INFO, 0xffffffff, 0),
32303-	.dsp_st = VOP_REG(PX30_WIN0_DSP_ST, 0xffffffff, 0),
32304-	.yrgb_mst = VOP_REG(PX30_WIN0_YRGB_MST0, 0xffffffff, 0),
32305-	.uv_mst = VOP_REG(PX30_WIN0_CBR_MST0, 0xffffffff, 0),
32306-	.yrgb_vir = VOP_REG(PX30_WIN0_VIR, 0x1fff, 0),
32307-	.uv_vir = VOP_REG(PX30_WIN0_VIR, 0x1fff, 16),
32308-	.alpha_pre_mul = VOP_REG(PX30_WIN0_ALPHA_CTRL, 0x1, 2),
32309-	.alpha_mode = VOP_REG(PX30_WIN0_ALPHA_CTRL, 0x1, 1),
32310-	.alpha_en = VOP_REG(PX30_WIN0_ALPHA_CTRL, 0x1, 0),
32311-};
32312-
32313-static const struct vop_win_phy px30_win1_data = {
32314-	.data_formats = formats_win_lite,
32315-	.nformats = ARRAY_SIZE(formats_win_lite),
32316-	.format_modifiers = format_modifiers_win_lite,
32317-	.enable = VOP_REG(PX30_WIN1_CTRL0, 0x1, 0),
32318-	.format = VOP_REG(PX30_WIN1_CTRL0, 0x7, 4),
32319-	.rb_swap = VOP_REG(PX30_WIN1_CTRL0, 0x1, 12),
32320-	.dsp_info = VOP_REG(PX30_WIN1_DSP_INFO, 0xffffffff, 0),
32321-	.dsp_st = VOP_REG(PX30_WIN1_DSP_ST, 0xffffffff, 0),
32322-	.yrgb_mst = VOP_REG(PX30_WIN1_MST, 0xffffffff, 0),
32323-	.yrgb_vir = VOP_REG(PX30_WIN1_VIR, 0x1fff, 0),
32324-	.alpha_pre_mul = VOP_REG(PX30_WIN1_ALPHA_CTRL, 0x1, 2),
32325-	.alpha_mode = VOP_REG(PX30_WIN1_ALPHA_CTRL, 0x1, 1),
32326-	.alpha_en = VOP_REG(PX30_WIN1_ALPHA_CTRL, 0x1, 0),
32327-};
32328-
32329-static const struct vop_win_phy px30_win2_data = {
32330-	.data_formats = formats_win_lite,
32331-	.nformats = ARRAY_SIZE(formats_win_lite),
32332-	.format_modifiers = format_modifiers_win_lite,
32333-	.gate = VOP_REG(PX30_WIN2_CTRL0, 0x1, 4),
32334-	.enable = VOP_REG(PX30_WIN2_CTRL0, 0x1, 0),
32335-	.format = VOP_REG(PX30_WIN2_CTRL0, 0x3, 5),
32336-	.rb_swap = VOP_REG(PX30_WIN2_CTRL0, 0x1, 20),
32337-	.dsp_info = VOP_REG(PX30_WIN2_DSP_INFO0, 0x0fff0fff, 0),
32338-	.dsp_st = VOP_REG(PX30_WIN2_DSP_ST0, 0x1fff1fff, 0),
32339-	.yrgb_mst = VOP_REG(PX30_WIN2_MST0, 0xffffffff, 0),
32340-	.yrgb_vir = VOP_REG(PX30_WIN2_VIR0_1, 0x1fff, 0),
32341-	.alpha_pre_mul = VOP_REG(PX30_WIN2_ALPHA_CTRL, 0x1, 2),
32342-	.alpha_mode = VOP_REG(PX30_WIN2_ALPHA_CTRL, 0x1, 1),
32343-	.alpha_en = VOP_REG(PX30_WIN2_ALPHA_CTRL, 0x1, 0),
32344+static const struct vop_win_phy *rk3368_area_data[] = {
32345+	&rk3368_area1_data,
32346+	&rk3368_area2_data,
32347+	&rk3368_area3_data
32348 };
32349 
32350-static const struct vop_win_data px30_vop_big_win_data[] = {
32351-	{ .base = 0x00, .phy = &px30_win0_data,
32352+static const struct vop_win_data rk3368_vop_win_data[] = {
32353+	{ .base = 0x00, .phy = &rk3288_win01_data,
32354 	  .type = DRM_PLANE_TYPE_PRIMARY },
32355-	{ .base = 0x00, .phy = &px30_win1_data,
32356+	{ .base = 0x40, .phy = &rk3288_win01_data,
32357 	  .type = DRM_PLANE_TYPE_OVERLAY },
32358-	{ .base = 0x00, .phy = &px30_win2_data,
32359-	  .type = DRM_PLANE_TYPE_CURSOR },
32360+	{ .base = 0x00, .phy = &rk3368_win23_data,
32361+	  .type = DRM_PLANE_TYPE_OVERLAY,
32362+	  .area = rk3368_area_data,
32363+	  .area_size = ARRAY_SIZE(rk3368_area_data), },
32364+	{ .base = 0x50, .phy = &rk3368_win23_data,
32365+	  .type = DRM_PLANE_TYPE_CURSOR,
32366+	  .area = rk3368_area_data,
32367+	  .area_size = ARRAY_SIZE(rk3368_area_data), },
32368 };
32369 
32370-static const struct vop_data px30_vop_big = {
32371-	.intr = &px30_intr,
32372-	.feature = VOP_FEATURE_INTERNAL_RGB,
32373-	.common = &px30_common,
32374-	.modeset = &px30_modeset,
32375-	.output = &px30_output,
32376-	.win = px30_vop_big_win_data,
32377-	.win_size = ARRAY_SIZE(px30_vop_big_win_data),
32378+static const struct vop_data rk3368_vop = {
32379+	.soc_id = 0x3368,
32380+	.vop_id = 0,
32381+	.version = VOP_VERSION(3, 2),
32382+	.feature = VOP_FEATURE_ALPHA_SCALE | VOP_FEATURE_OVERSCAN,
32383+	.max_input = {4096, 8192},
32384+	.max_output = {4096, 2160},
32385+	.intr = &rk3368_vop_intr,
32386+	.ctrl = &rk3288_ctrl_data,
32387+	.win = rk3368_vop_win_data,
32388+	.win_size = ARRAY_SIZE(rk3368_vop_win_data),
32389 };
32390 
32391-static const struct vop_win_data px30_vop_lit_win_data[] = {
32392-	{ .base = 0x00, .phy = &px30_win1_data,
32393-	  .type = DRM_PLANE_TYPE_PRIMARY },
32394+static const struct vop_intr rk3366_vop_intr = {
32395+	.intrs = rk3368_vop_intrs,
32396+	.nintrs = ARRAY_SIZE(rk3368_vop_intrs),
32397+	.line_flag_num[0] = VOP_REG(RK3366_LINE_FLAG, 0xffff, 0),
32398+	.line_flag_num[1] = VOP_REG(RK3366_LINE_FLAG, 0xffff, 16),
32399+	.status = VOP_REG_MASK(RK3366_INTR_STATUS0, 0xffff, 0),
32400+	.enable = VOP_REG_MASK(RK3366_INTR_EN0, 0xffff, 0),
32401+	.clear = VOP_REG_MASK(RK3366_INTR_CLEAR0, 0xffff, 0),
32402 };
32403 
32404-static const struct vop_data px30_vop_lit = {
32405-	.intr = &px30_intr,
32406-	.feature = VOP_FEATURE_INTERNAL_RGB,
32407-	.common = &px30_common,
32408-	.modeset = &px30_modeset,
32409-	.output = &px30_output,
32410-	.win = px30_vop_lit_win_data,
32411-	.win_size = ARRAY_SIZE(px30_vop_lit_win_data),
32412+static const struct vop_grf_ctrl rk3368_vop_grf_ctrl = {
32413+	.grf_dclk_inv = VOP_REG(RK3368_GRF_SOC_CON6, 0x1, 5),
32414 };
32415 
32416-static const struct vop_scl_regs rk3066_win_scl = {
32417-	.scale_yrgb_x = VOP_REG(RK3066_WIN0_SCL_FACTOR_YRGB, 0xffff, 0x0),
32418-	.scale_yrgb_y = VOP_REG(RK3066_WIN0_SCL_FACTOR_YRGB, 0xffff, 16),
32419-	.scale_cbcr_x = VOP_REG(RK3066_WIN0_SCL_FACTOR_CBR, 0xffff, 0x0),
32420-	.scale_cbcr_y = VOP_REG(RK3066_WIN0_SCL_FACTOR_CBR, 0xffff, 16),
32421+static const struct vop_data rk3366_vop = {
32422+	.soc_id = 0x3366,
32423+	.vop_id = 0,
32424+	.version = VOP_VERSION(3, 4),
32425+	.feature = VOP_FEATURE_ALPHA_SCALE | VOP_FEATURE_OVERSCAN,
32426+	.max_input = {4096, 8192},
32427+	.max_output = {4096, 2160},
32428+	.intr = &rk3366_vop_intr,
32429+	.grf_ctrl = &rk3368_vop_grf_ctrl,
32430+	.ctrl = &rk3288_ctrl_data,
32431+	.win = rk3368_vop_win_data,
32432+	.win_size = ARRAY_SIZE(rk3368_vop_win_data),
32433 };
32434 
32435-static const struct vop_win_phy rk3066_win0_data = {
32436-	.scl = &rk3066_win_scl,
32437-	.data_formats = formats_win_full,
32438-	.nformats = ARRAY_SIZE(formats_win_full),
32439-	.format_modifiers = format_modifiers_win_full,
32440-	.enable = VOP_REG(RK3066_SYS_CTRL1, 0x1, 0),
32441-	.format = VOP_REG(RK3066_SYS_CTRL1, 0x7, 4),
32442-	.rb_swap = VOP_REG(RK3066_SYS_CTRL1, 0x1, 19),
32443-	.act_info = VOP_REG(RK3066_WIN0_ACT_INFO, 0x1fff1fff, 0),
32444-	.dsp_info = VOP_REG(RK3066_WIN0_DSP_INFO, 0x0fff0fff, 0),
32445-	.dsp_st = VOP_REG(RK3066_WIN0_DSP_ST, 0x1fff1fff, 0),
32446-	.yrgb_mst = VOP_REG(RK3066_WIN0_YRGB_MST0, 0xffffffff, 0),
32447-	.uv_mst = VOP_REG(RK3066_WIN0_CBR_MST0, 0xffffffff, 0),
32448-	.yrgb_vir = VOP_REG(RK3066_WIN0_VIR, 0xffff, 0),
32449-	.uv_vir = VOP_REG(RK3066_WIN0_VIR, 0x1fff, 16),
32450+static const uint32_t vop_csc_y2r_bt601[] = {
32451+	0x00000400, 0x0400059c, 0xfd25fea0, 0x07170400,
32452+	0x00000000, 0xfff4cab4, 0x00087932, 0xfff1d4f2,
32453 };
32454 
32455-static const struct vop_win_phy rk3066_win1_data = {
32456-	.data_formats = formats_win_full,
32457-	.nformats = ARRAY_SIZE(formats_win_full),
32458-	.format_modifiers = format_modifiers_win_full,
32459-	.enable = VOP_REG(RK3066_SYS_CTRL1, 0x1, 1),
32460-	.format = VOP_REG(RK3066_SYS_CTRL1, 0x7, 7),
32461-	.rb_swap = VOP_REG(RK3066_SYS_CTRL1, 0x1, 23),
32462-	.act_info = VOP_REG(RK3066_WIN1_ACT_INFO, 0x1fff1fff, 0),
32463-	.dsp_info = VOP_REG(RK3066_WIN1_DSP_INFO, 0x0fff0fff, 0),
32464-	.dsp_st = VOP_REG(RK3066_WIN1_DSP_ST, 0x1fff1fff, 0),
32465-	.yrgb_mst = VOP_REG(RK3066_WIN1_YRGB_MST, 0xffffffff, 0),
32466-	.uv_mst = VOP_REG(RK3066_WIN1_CBR_MST, 0xffffffff, 0),
32467-	.yrgb_vir = VOP_REG(RK3066_WIN1_VIR, 0xffff, 0),
32468-	.uv_vir = VOP_REG(RK3066_WIN1_VIR, 0x1fff, 16),
32469+static const uint32_t vop_csc_y2r_bt601_12_235[] = {
32470+	0x000004a8, 0x04a80662, 0xfcbffe6f, 0x081204a8,
32471+	0x00000000, 0xfff2134e, 0x00087b58, 0xffeeb4b0,
32472 };
32473 
32474-static const struct vop_win_phy rk3066_win2_data = {
32475-	.data_formats = formats_win_lite,
32476-	.nformats = ARRAY_SIZE(formats_win_lite),
32477-	.format_modifiers = format_modifiers_win_lite,
32478-	.enable = VOP_REG(RK3066_SYS_CTRL1, 0x1, 2),
32479-	.format = VOP_REG(RK3066_SYS_CTRL1, 0x7, 10),
32480-	.rb_swap = VOP_REG(RK3066_SYS_CTRL1, 0x1, 27),
32481-	.dsp_info = VOP_REG(RK3066_WIN2_DSP_INFO, 0x0fff0fff, 0),
32482-	.dsp_st = VOP_REG(RK3066_WIN2_DSP_ST, 0x1fff1fff, 0),
32483-	.yrgb_mst = VOP_REG(RK3066_WIN2_MST, 0xffffffff, 0),
32484-	.yrgb_vir = VOP_REG(RK3066_WIN2_VIR, 0xffff, 0),
32485+static const uint32_t vop_csc_r2y_bt601[] = {
32486+	0x02590132, 0xff530075, 0x0200fead, 0xfe530200,
32487+	0x0000ffad, 0x00000200, 0x00080200, 0x00080200,
32488 };
32489 
32490-static const struct vop_modeset rk3066_modeset = {
32491-	.htotal_pw = VOP_REG(RK3066_DSP_HTOTAL_HS_END, 0x1fff1fff, 0),
32492-	.hact_st_end = VOP_REG(RK3066_DSP_HACT_ST_END, 0x1fff1fff, 0),
32493-	.vtotal_pw = VOP_REG(RK3066_DSP_VTOTAL_VS_END, 0x1fff1fff, 0),
32494-	.vact_st_end = VOP_REG(RK3066_DSP_VACT_ST_END, 0x1fff1fff, 0),
32495+static const uint32_t vop_csc_r2y_bt601_12_235[] = {
32496+	0x02040107, 0xff680064, 0x01c2fed6, 0xfe8701c2,
32497+	0x0000ffb7, 0x00010200, 0x00080200, 0x00080200,
32498 };
32499 
32500-static const struct vop_output rk3066_output = {
32501-	.pin_pol = VOP_REG(RK3066_DSP_CTRL0, 0x7, 4),
32502+static const uint32_t vop_csc_y2r_bt709[] = {
32503+	0x000004a8, 0x04a8072c, 0xfddeff26, 0x087304a8,
32504+	0x00000000, 0xfff08077, 0x0004cfed, 0xffedf1b8,
32505 };
32506 
32507-static const struct vop_common rk3066_common = {
32508-	.standby = VOP_REG(RK3066_SYS_CTRL0, 0x1, 1),
32509-	.out_mode = VOP_REG(RK3066_DSP_CTRL0, 0xf, 0),
32510-	.cfg_done = VOP_REG(RK3066_REG_CFG_DONE, 0x1, 0),
32511-	.dither_down_en = VOP_REG(RK3066_DSP_CTRL0, 0x1, 11),
32512-	.dither_down_mode = VOP_REG(RK3066_DSP_CTRL0, 0x1, 10),
32513-	.dsp_blank = VOP_REG(RK3066_DSP_CTRL1, 0x1, 24),
32514-	.dither_up = VOP_REG(RK3066_DSP_CTRL0, 0x1, 9),
32515-	.dsp_lut_en = VOP_REG(RK3066_SYS_CTRL1, 0x1, 31),
32516-	.data_blank = VOP_REG(RK3066_DSP_CTRL1, 0x1, 25),
32517+static const uint32_t vop_csc_r2y_bt709[] = {
32518+	0x027500bb, 0xff99003f, 0x01c2fea5, 0xfe6801c2,
32519+	0x0000ffd7, 0x00010200, 0x00080200, 0x00080200,
32520 };
32521 
32522-static const struct vop_win_data rk3066_vop_win_data[] = {
32523-	{ .base = 0x00, .phy = &rk3066_win0_data,
32524-	  .type = DRM_PLANE_TYPE_PRIMARY },
32525-	{ .base = 0x00, .phy = &rk3066_win1_data,
32526-	  .type = DRM_PLANE_TYPE_OVERLAY },
32527-	{ .base = 0x00, .phy = &rk3066_win2_data,
32528-	  .type = DRM_PLANE_TYPE_CURSOR },
32529+static const uint32_t vop_csc_y2r_bt2020[] = {
32530+	0x000004a8, 0x04a806b6, 0xfd66ff40, 0x089004a8,
32531+	0x00000000, 0xfff16bfc, 0x00058ae9, 0xffedb828,
32532 };
32533 
32534-static const int rk3066_vop_intrs[] = {
32535-	/*
32536-	 * hs_start interrupt fires at frame-start, so serves
32537-	 * the same purpose as dsp_hold in the driver.
32538-	 */
32539-	DSP_HOLD_VALID_INTR,
32540-	FS_INTR,
32541-	LINE_FLAG_INTR,
32542-	BUS_ERROR_INTR,
32543+static const uint32_t vop_csc_r2y_bt2020[] = {
32544+	0x025300e6, 0xff830034, 0x01c1febd, 0xfe6401c1,
32545+	0x0000ffdc, 0x00010200, 0x00080200, 0x00080200,
32546 };
32547 
32548-static const struct vop_intr rk3066_intr = {
32549-	.intrs = rk3066_vop_intrs,
32550-	.nintrs = ARRAY_SIZE(rk3066_vop_intrs),
32551-	.line_flag_num[0] = VOP_REG(RK3066_INT_STATUS, 0xfff, 12),
32552-	.status = VOP_REG(RK3066_INT_STATUS, 0xf, 0),
32553-	.enable = VOP_REG(RK3066_INT_STATUS, 0xf, 4),
32554-	.clear = VOP_REG(RK3066_INT_STATUS, 0xf, 8),
32555+static const uint32_t vop_csc_r2r_bt709_to_bt2020[] = {
32556+	0xfda606a4, 0xff80ffb5, 0xfff80488, 0xff99ffed,
32557+	0x0000047a, 0x00000200, 0x00000200, 0x00000200,
32558 };
32559 
32560-static const struct vop_data rk3066_vop = {
32561-	.version = VOP_VERSION(2, 1),
32562-	.intr = &rk3066_intr,
32563-	.common = &rk3066_common,
32564-	.modeset = &rk3066_modeset,
32565-	.output = &rk3066_output,
32566-	.win = rk3066_vop_win_data,
32567-	.win_size = ARRAY_SIZE(rk3066_vop_win_data),
32568+static const uint32_t vop_csc_r2r_bt2020_to_bt709[] = {
32569+	0x01510282, 0x0047002c, 0x000c03ae, 0x005a0011,
32570+	0x00000394, 0x00000200, 0x00000200, 0x00000200,
32571 };
32572 
32573-static const struct vop_scl_regs rk3188_win_scl = {
32574-	.scale_yrgb_x = VOP_REG(RK3188_WIN0_SCL_FACTOR_YRGB, 0xffff, 0x0),
32575-	.scale_yrgb_y = VOP_REG(RK3188_WIN0_SCL_FACTOR_YRGB, 0xffff, 16),
32576-	.scale_cbcr_x = VOP_REG(RK3188_WIN0_SCL_FACTOR_CBR, 0xffff, 0x0),
32577-	.scale_cbcr_y = VOP_REG(RK3188_WIN0_SCL_FACTOR_CBR, 0xffff, 16),
32578-};
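+/*
+ * Coefficient sets for the per-window YUV2YUV colour-space converters
+ * collected below in rk3399_csc_table: y2r_* = YUV->RGB, r2y_* = RGB->YUV,
+ * r2r_* = RGB gamut remapping between BT.709 and BT.2020.
+ */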
32579+static const struct vop_csc_table rk3399_csc_table = {
32580+	.y2r_bt601		= vop_csc_y2r_bt601,
32581+	.y2r_bt601_12_235	= vop_csc_y2r_bt601_12_235,
32582+	.r2y_bt601		= vop_csc_r2y_bt601,
32583+	.r2y_bt601_12_235	= vop_csc_r2y_bt601_12_235,
32584 
32585-static const struct vop_win_phy rk3188_win0_data = {
32586-	.scl = &rk3188_win_scl,
32587-	.data_formats = formats_win_full,
32588-	.nformats = ARRAY_SIZE(formats_win_full),
32589-	.format_modifiers = format_modifiers_win_full,
32590-	.enable = VOP_REG(RK3188_SYS_CTRL, 0x1, 0),
32591-	.format = VOP_REG(RK3188_SYS_CTRL, 0x7, 3),
32592-	.rb_swap = VOP_REG(RK3188_SYS_CTRL, 0x1, 15),
32593-	.act_info = VOP_REG(RK3188_WIN0_ACT_INFO, 0x1fff1fff, 0),
32594-	.dsp_info = VOP_REG(RK3188_WIN0_DSP_INFO, 0x0fff0fff, 0),
32595-	.dsp_st = VOP_REG(RK3188_WIN0_DSP_ST, 0x1fff1fff, 0),
32596-	.yrgb_mst = VOP_REG(RK3188_WIN0_YRGB_MST0, 0xffffffff, 0),
32597-	.uv_mst = VOP_REG(RK3188_WIN0_CBR_MST0, 0xffffffff, 0),
32598-	.yrgb_vir = VOP_REG(RK3188_WIN_VIR, 0x1fff, 0),
32599-};
32600-
32601-static const struct vop_win_phy rk3188_win1_data = {
32602-	.data_formats = formats_win_lite,
32603-	.nformats = ARRAY_SIZE(formats_win_lite),
32604-	.format_modifiers = format_modifiers_win_lite,
32605-	.enable = VOP_REG(RK3188_SYS_CTRL, 0x1, 1),
32606-	.format = VOP_REG(RK3188_SYS_CTRL, 0x7, 6),
32607-	.rb_swap = VOP_REG(RK3188_SYS_CTRL, 0x1, 19),
32608-	/* no act_info on window1 */
32609-	.dsp_info = VOP_REG(RK3188_WIN1_DSP_INFO, 0x07ff07ff, 0),
32610-	.dsp_st = VOP_REG(RK3188_WIN1_DSP_ST, 0x0fff0fff, 0),
32611-	.yrgb_mst = VOP_REG(RK3188_WIN1_MST, 0xffffffff, 0),
32612-	.yrgb_vir = VOP_REG(RK3188_WIN_VIR, 0x1fff, 16),
32613-};
32614-
32615-static const struct vop_modeset rk3188_modeset = {
32616-	.htotal_pw = VOP_REG(RK3188_DSP_HTOTAL_HS_END, 0x0fff0fff, 0),
32617-	.hact_st_end = VOP_REG(RK3188_DSP_HACT_ST_END, 0x0fff0fff, 0),
32618-	.vtotal_pw = VOP_REG(RK3188_DSP_VTOTAL_VS_END, 0x0fff0fff, 0),
32619-	.vact_st_end = VOP_REG(RK3188_DSP_VACT_ST_END, 0x0fff0fff, 0),
32620-};
32621-
32622-static const struct vop_output rk3188_output = {
32623-	.pin_pol = VOP_REG(RK3188_DSP_CTRL0, 0xf, 4),
32624-};
32625-
32626-static const struct vop_common rk3188_common = {
32627-	.gate_en = VOP_REG(RK3188_SYS_CTRL, 0x1, 31),
32628-	.standby = VOP_REG(RK3188_SYS_CTRL, 0x1, 30),
32629-	.out_mode = VOP_REG(RK3188_DSP_CTRL0, 0xf, 0),
32630-	.cfg_done = VOP_REG(RK3188_REG_CFG_DONE, 0x1, 0),
32631-	.dither_down_sel = VOP_REG(RK3188_DSP_CTRL0, 0x1, 27),
32632-	.dither_down_en = VOP_REG(RK3188_DSP_CTRL0, 0x1, 11),
32633-	.dither_down_mode = VOP_REG(RK3188_DSP_CTRL0, 0x1, 10),
32634-	.dsp_blank = VOP_REG(RK3188_DSP_CTRL1, 0x1, 24),
32635-	.dither_up = VOP_REG(RK3188_DSP_CTRL0, 0x1, 9),
32636-	.dsp_lut_en = VOP_REG(RK3188_SYS_CTRL, 0x1, 28),
32637-	.data_blank = VOP_REG(RK3188_DSP_CTRL1, 0x1, 25),
32638-};
32639-
32640-static const struct vop_win_data rk3188_vop_win_data[] = {
32641-	{ .base = 0x00, .phy = &rk3188_win0_data,
32642-	  .type = DRM_PLANE_TYPE_PRIMARY },
32643-	{ .base = 0x00, .phy = &rk3188_win1_data,
32644-	  .type = DRM_PLANE_TYPE_CURSOR },
32645-};
32646+	.y2r_bt709		= vop_csc_y2r_bt709,
32647+	.r2y_bt709		= vop_csc_r2y_bt709,
32648 
32649-static const int rk3188_vop_intrs[] = {
32650-	/*
32651-	 * hs_start interrupt fires at frame-start, so serves
32652-	 * the same purpose as dsp_hold in the driver.
32653-	 */
32654-	DSP_HOLD_VALID_INTR,
32655-	FS_INTR,
32656-	LINE_FLAG_INTR,
32657-	BUS_ERROR_INTR,
32658+	.y2r_bt2020		= vop_csc_y2r_bt2020,
32659+	.r2y_bt2020		= vop_csc_r2y_bt2020,
32660+
32661+	.r2r_bt709_to_bt2020	= vop_csc_r2r_bt709_to_bt2020,
32662+	.r2r_bt2020_to_bt709	= vop_csc_r2r_bt2020_to_bt709,
32663 };
32664 
32665-static const struct vop_intr rk3188_vop_intr = {
32666-	.intrs = rk3188_vop_intrs,
32667-	.nintrs = ARRAY_SIZE(rk3188_vop_intrs),
32668-	.line_flag_num[0] = VOP_REG(RK3188_INT_STATUS, 0xfff, 12),
32669-	.status = VOP_REG(RK3188_INT_STATUS, 0xf, 0),
32670-	.enable = VOP_REG(RK3188_INT_STATUS, 0xf, 4),
32671-	.clear = VOP_REG(RK3188_INT_STATUS, 0xf, 8),
32672+static const struct vop_csc rk3399_win0_csc = {
32673+	.r2r_en = VOP_REG(RK3399_YUV2YUV_WIN, 0x1, 0),
32674+	.y2r_en = VOP_REG(RK3399_YUV2YUV_WIN, 0x1, 1),
32675+	.r2y_en = VOP_REG(RK3399_YUV2YUV_WIN, 0x1, 2),
32676+	.y2r_offset = RK3399_WIN0_YUV2YUV_Y2R,
32677+	.r2r_offset = RK3399_WIN0_YUV2YUV_3X3,
32678+	.r2y_offset = RK3399_WIN0_YUV2YUV_R2Y,
32679 };
32680 
32681-static const struct vop_data rk3188_vop = {
32682-	.intr = &rk3188_vop_intr,
32683-	.common = &rk3188_common,
32684-	.modeset = &rk3188_modeset,
32685-	.output = &rk3188_output,
32686-	.win = rk3188_vop_win_data,
32687-	.win_size = ARRAY_SIZE(rk3188_vop_win_data),
32688-	.feature = VOP_FEATURE_INTERNAL_RGB,
32689+static const struct vop_csc rk3399_win1_csc = {
32690+	.r2r_en = VOP_REG(RK3399_YUV2YUV_WIN, 0x1, 8),
32691+	.y2r_en = VOP_REG(RK3399_YUV2YUV_WIN, 0x1, 9),
32692+	.r2y_en = VOP_REG(RK3399_YUV2YUV_WIN, 0x1, 10),
32693+	.y2r_offset = RK3399_WIN1_YUV2YUV_Y2R,
32694+	.r2r_offset = RK3399_WIN1_YUV2YUV_3X3,
32695+	.r2y_offset = RK3399_WIN1_YUV2YUV_R2Y,
32696 };
32697 
32698-static const struct vop_scl_extension rk3288_win_full_scl_ext = {
32699-	.cbcr_vsd_mode = VOP_REG(RK3288_WIN0_CTRL1, 0x1, 31),
32700-	.cbcr_vsu_mode = VOP_REG(RK3288_WIN0_CTRL1, 0x1, 30),
32701-	.cbcr_hsd_mode = VOP_REG(RK3288_WIN0_CTRL1, 0x3, 28),
32702-	.cbcr_ver_scl_mode = VOP_REG(RK3288_WIN0_CTRL1, 0x3, 26),
32703-	.cbcr_hor_scl_mode = VOP_REG(RK3288_WIN0_CTRL1, 0x3, 24),
32704-	.yrgb_vsd_mode = VOP_REG(RK3288_WIN0_CTRL1, 0x1, 23),
32705-	.yrgb_vsu_mode = VOP_REG(RK3288_WIN0_CTRL1, 0x1, 22),
32706-	.yrgb_hsd_mode = VOP_REG(RK3288_WIN0_CTRL1, 0x3, 20),
32707-	.yrgb_ver_scl_mode = VOP_REG(RK3288_WIN0_CTRL1, 0x3, 18),
32708-	.yrgb_hor_scl_mode = VOP_REG(RK3288_WIN0_CTRL1, 0x3, 16),
32709-	.line_load_mode = VOP_REG(RK3288_WIN0_CTRL1, 0x1, 15),
32710-	.cbcr_axi_gather_num = VOP_REG(RK3288_WIN0_CTRL1, 0x7, 12),
32711-	.yrgb_axi_gather_num = VOP_REG(RK3288_WIN0_CTRL1, 0xf, 8),
32712-	.vsd_cbcr_gt2 = VOP_REG(RK3288_WIN0_CTRL1, 0x1, 7),
32713-	.vsd_cbcr_gt4 = VOP_REG(RK3288_WIN0_CTRL1, 0x1, 6),
32714-	.vsd_yrgb_gt2 = VOP_REG(RK3288_WIN0_CTRL1, 0x1, 5),
32715-	.vsd_yrgb_gt4 = VOP_REG(RK3288_WIN0_CTRL1, 0x1, 4),
32716-	.bic_coe_sel = VOP_REG(RK3288_WIN0_CTRL1, 0x3, 2),
32717-	.cbcr_axi_gather_en = VOP_REG(RK3288_WIN0_CTRL1, 0x1, 1),
32718-	.yrgb_axi_gather_en = VOP_REG(RK3288_WIN0_CTRL1, 0x1, 0),
32719-	.lb_mode = VOP_REG(RK3288_WIN0_CTRL0, 0x7, 5),
32720+static const struct vop_csc rk3399_win2_csc = {
32721+	.r2r_en = VOP_REG(RK3399_YUV2YUV_WIN, 0x1, 16),
32722+	.r2y_en = VOP_REG(RK3399_YUV2YUV_WIN, 0x1, 18),
32723+	.r2r_offset = RK3399_WIN2_YUV2YUV_3X3,
32724+	.csc_mode = VOP_REG(RK3399_YUV2YUV_WIN, 0x3, 22),
32725 };
32726 
32727-static const struct vop_scl_regs rk3288_win_full_scl = {
32728-	.ext = &rk3288_win_full_scl_ext,
32729-	.scale_yrgb_x = VOP_REG(RK3288_WIN0_SCL_FACTOR_YRGB, 0xffff, 0x0),
32730-	.scale_yrgb_y = VOP_REG(RK3288_WIN0_SCL_FACTOR_YRGB, 0xffff, 16),
32731-	.scale_cbcr_x = VOP_REG(RK3288_WIN0_SCL_FACTOR_CBR, 0xffff, 0x0),
32732-	.scale_cbcr_y = VOP_REG(RK3288_WIN0_SCL_FACTOR_CBR, 0xffff, 16),
32733+static const struct vop_csc rk3399_win3_csc = {
32734+	.r2r_en = VOP_REG(RK3399_YUV2YUV_WIN, 0x1, 24),
32735+	.r2y_en = VOP_REG(RK3399_YUV2YUV_WIN, 0x1, 26),
32736+	.r2r_offset = RK3399_WIN3_YUV2YUV_3X3,
32737+	.csc_mode = VOP_REG(RK3399_YUV2YUV_WIN, 0x3, 30),
32738 };
32739 
32740-static const struct vop_win_phy rk3288_win01_data = {
32741+static const struct vop_win_phy rk3399_win01_data = {
32742 	.scl = &rk3288_win_full_scl,
32743-	.data_formats = formats_win_full,
32744-	.nformats = ARRAY_SIZE(formats_win_full),
32745-	.format_modifiers = format_modifiers_win_full,
32746+	.data_formats = formats_win_full_10bit_yuyv,
32747+	.nformats = ARRAY_SIZE(formats_win_full_10bit_yuyv),
32748 	.enable = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 0),
32749 	.format = VOP_REG(RK3288_WIN0_CTRL0, 0x7, 1),
32750+	.fmt_10 = VOP_REG(RK3288_WIN0_CTRL0, 0x7, 4),
32751+	.fmt_yuyv = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 17),
32752+	.csc_mode = VOP_REG_VER(RK3288_WIN0_CTRL0, 0x3, 10, 3, 2, -1),
32753 	.rb_swap = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 12),
32754+	.xmirror = VOP_REG_VER(RK3368_WIN0_CTRL0, 0x1, 21, 3, 2, -1),
32755+	.ymirror = VOP_REG_VER(RK3368_WIN0_CTRL0, 0x1, 22, 3, 2, -1),
32756 	.act_info = VOP_REG(RK3288_WIN0_ACT_INFO, 0x1fff1fff, 0),
32757 	.dsp_info = VOP_REG(RK3288_WIN0_DSP_INFO, 0x0fff0fff, 0),
32758 	.dsp_st = VOP_REG(RK3288_WIN0_DSP_ST, 0x1fff1fff, 0),
32759@@ -597,466 +675,1196 @@ static const struct vop_win_phy rk3288_win01_data = {
32760 	.uv_mst = VOP_REG(RK3288_WIN0_CBR_MST, 0xffffffff, 0),
32761 	.yrgb_vir = VOP_REG(RK3288_WIN0_VIR, 0x3fff, 0),
32762 	.uv_vir = VOP_REG(RK3288_WIN0_VIR, 0x3fff, 16),
32763-	.src_alpha_ctl = VOP_REG(RK3288_WIN0_SRC_ALPHA_CTRL, 0xff, 0),
32764-	.dst_alpha_ctl = VOP_REG(RK3288_WIN0_DST_ALPHA_CTRL, 0xff, 0),
32765-	.channel = VOP_REG(RK3288_WIN0_CTRL2, 0xff, 0),
32766+	.src_alpha_ctl = VOP_REG(RK3288_WIN0_SRC_ALPHA_CTRL, 0xffff, 0),
32767+	.global_alpha_val = VOP_REG(RK3288_WIN0_SRC_ALPHA_CTRL, 0xff, 16),
32768+	.dst_alpha_ctl = VOP_REG(RK3288_WIN0_DST_ALPHA_CTRL, 0xffffffff, 0),
32769+	.channel = VOP_REG_VER(RK3288_WIN0_CTRL2, 0xff, 0, 3, 8, 8),
32770 };
32771 
32772-static const struct vop_win_phy rk3288_win23_data = {
32773-	.data_formats = formats_win_lite,
32774-	.nformats = ARRAY_SIZE(formats_win_lite),
32775-	.format_modifiers = format_modifiers_win_lite,
32776-	.enable = VOP_REG(RK3288_WIN2_CTRL0, 0x1, 4),
32777-	.gate = VOP_REG(RK3288_WIN2_CTRL0, 0x1, 0),
32778-	.format = VOP_REG(RK3288_WIN2_CTRL0, 0x7, 1),
32779-	.rb_swap = VOP_REG(RK3288_WIN2_CTRL0, 0x1, 12),
32780-	.dsp_info = VOP_REG(RK3288_WIN2_DSP_INFO0, 0x0fff0fff, 0),
32781-	.dsp_st = VOP_REG(RK3288_WIN2_DSP_ST0, 0x1fff1fff, 0),
32782-	.yrgb_mst = VOP_REG(RK3288_WIN2_MST0, 0xffffffff, 0),
32783-	.yrgb_vir = VOP_REG(RK3288_WIN2_VIR0_1, 0x1fff, 0),
32784-	.src_alpha_ctl = VOP_REG(RK3288_WIN2_SRC_ALPHA_CTRL, 0xff, 0),
32785-	.dst_alpha_ctl = VOP_REG(RK3288_WIN2_DST_ALPHA_CTRL, 0xff, 0),
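+/*
+ * Each vop_win_data entry describes one hardware window exposed as a DRM
+ * plane: 'base' is the register offset of this window instance, 'phy' the
+ * shared register layout, 'csc' the per-window colour-space-conversion
+ * controls.
+ */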
32786+static const struct vop_win_data rk3399_vop_win_data[] = {
32787+	{ .base = 0x00, .phy = &rk3399_win01_data, .csc = &rk3399_win0_csc,
32788+	  .format_modifiers = format_modifiers_afbc,
32789+	  .type = DRM_PLANE_TYPE_PRIMARY,
32790+	  .feature = WIN_FEATURE_AFBDC },
32791+	{ .base = 0x40, .phy = &rk3399_win01_data, .csc = &rk3399_win1_csc,
32792+	  .format_modifiers = format_modifiers_afbc,
32793+	  .type = DRM_PLANE_TYPE_OVERLAY,
32794+	  .feature = WIN_FEATURE_AFBDC },
32795+	{ .base = 0x00, .phy = &rk3368_win23_data, .csc = &rk3399_win2_csc,
32796+	  .format_modifiers = format_modifiers_afbc,
32797+	  .type = DRM_PLANE_TYPE_OVERLAY,
32798+	  .feature = WIN_FEATURE_AFBDC,
32799+	  .area = rk3368_area_data,
32800+	  .area_size = ARRAY_SIZE(rk3368_area_data), },
32801+	{ .base = 0x50, .phy = &rk3368_win23_data, .csc = &rk3399_win3_csc,
32802+	  .format_modifiers = format_modifiers_afbc,
32803+	  .type = DRM_PLANE_TYPE_CURSOR,
32804+	  .feature = WIN_FEATURE_AFBDC,
32805+	  .area = rk3368_area_data,
32806+	  .area_size = ARRAY_SIZE(rk3368_area_data), },
32807 };
32808 
32809-static const struct vop_modeset rk3288_modeset = {
32810-	.htotal_pw = VOP_REG(RK3288_DSP_HTOTAL_HS_END, 0x1fff1fff, 0),
32811-	.hact_st_end = VOP_REG(RK3288_DSP_HACT_ST_END, 0x1fff1fff, 0),
32812-	.vtotal_pw = VOP_REG(RK3288_DSP_VTOTAL_VS_END, 0x1fff1fff, 0),
32813-	.vact_st_end = VOP_REG(RK3288_DSP_VACT_ST_END, 0x1fff1fff, 0),
32814-	.hpost_st_end = VOP_REG(RK3288_POST_DSP_HACT_INFO, 0x1fff1fff, 0),
32815-	.vpost_st_end = VOP_REG(RK3288_POST_DSP_VACT_INFO, 0x1fff1fff, 0),
32816+static const struct vop_data rk3399_vop_big = {
32817+	.soc_id = 0x3399,
32818+	.vop_id = 0,
32819+	.version = VOP_VERSION(3, 5),
32820+	.csc_table = &rk3399_csc_table,
32821+	.feature = VOP_FEATURE_OUTPUT_10BIT | VOP_FEATURE_ALPHA_SCALE | VOP_FEATURE_OVERSCAN,
32822+	.max_input = {4096, 8192},
32823+	.max_output = {4096, 2160},
32824+	.intr = &rk3366_vop_intr,
32825+	.ctrl = &rk3288_ctrl_data,
32826+	.win = rk3399_vop_win_data,
32827+	.win_size = ARRAY_SIZE(rk3399_vop_win_data),
32828 };
32829 
32830-static const struct vop_output rk3288_output = {
32831-	.pin_pol = VOP_REG(RK3288_DSP_CTRL0, 0xf, 4),
32832-	.rgb_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 12),
32833-	.hdmi_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 13),
32834-	.edp_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 14),
32835-	.mipi_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 15),
32836+static const struct vop_win_data rk3399_vop_lit_win_data[] = {
32837+	{ .base = 0x00, .phy = &rk3399_win01_data, .csc = &rk3399_win0_csc,
32838+	  .format_modifiers = format_modifiers,
32839+	  .type = DRM_PLANE_TYPE_OVERLAY,
32840+	  .feature = WIN_FEATURE_AFBDC },
32841+	{ .phy = NULL },
32842+	{ .base = 0x00, .phy = &rk3368_win23_data, .csc = &rk3399_win2_csc,
32843+	  .format_modifiers = format_modifiers,
32844+	  .type = DRM_PLANE_TYPE_PRIMARY,
32845+	  .feature = WIN_FEATURE_AFBDC,
32846+	  .area = rk3368_area_data,
32847+	  .area_size = ARRAY_SIZE(rk3368_area_data), },
32848+	{ .phy = NULL },
32849 };
32850 
32851-static const struct vop_common rk3288_common = {
32852-	.standby = VOP_REG_SYNC(RK3288_SYS_CTRL, 0x1, 22),
32853-	.gate_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 23),
32854-	.mmu_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 20),
32855-	.dither_down_sel = VOP_REG(RK3288_DSP_CTRL1, 0x1, 4),
32856-	.dither_down_mode = VOP_REG(RK3288_DSP_CTRL1, 0x1, 3),
32857-	.dither_down_en = VOP_REG(RK3288_DSP_CTRL1, 0x1, 2),
32858-	.pre_dither_down = VOP_REG(RK3288_DSP_CTRL1, 0x1, 1),
32859-	.dither_up = VOP_REG(RK3288_DSP_CTRL1, 0x1, 6),
32860-	.dsp_lut_en = VOP_REG(RK3288_DSP_CTRL1, 0x1, 0),
32861-	.data_blank = VOP_REG(RK3288_DSP_CTRL0, 0x1, 19),
32862-	.dsp_blank = VOP_REG(RK3288_DSP_CTRL0, 0x3, 18),
32863-	.out_mode = VOP_REG(RK3288_DSP_CTRL0, 0xf, 0),
32864-	.cfg_done = VOP_REG_SYNC(RK3288_REG_CFG_DONE, 0x1, 0),
32865+
32866+static const struct vop_data rk3399_vop_lit = {
32867+	.soc_id = 0x3399,
32868+	.vop_id = 1,
32869+	.version = VOP_VERSION(3, 6),
32870+	.feature = VOP_FEATURE_ALPHA_SCALE | VOP_FEATURE_OVERSCAN,
32871+	.csc_table = &rk3399_csc_table,
32872+	.max_input = {4096, 8192},
32873+	.max_output = {2560, 1600},
32874+	.intr = &rk3366_vop_intr,
32875+	.ctrl = &rk3288_ctrl_data,
32876+	.win = rk3399_vop_lit_win_data,
32877+	.win_size = ARRAY_SIZE(rk3399_vop_lit_win_data),
32878 };
32879 
32880-/*
32881- * Note: rk3288 has a dedicated 'cursor' window, however, that window requires
32882- * special support to get alpha blending working.  For now, just use overlay
32883- * window 3 for the drm cursor.
32884- *
32885- */
32886-static const struct vop_win_data rk3288_vop_win_data[] = {
32887+static const struct vop_win_data rk322x_vop_win_data[] = {
32888 	{ .base = 0x00, .phy = &rk3288_win01_data,
32889 	  .type = DRM_PLANE_TYPE_PRIMARY },
32890 	{ .base = 0x40, .phy = &rk3288_win01_data,
32891-	  .type = DRM_PLANE_TYPE_OVERLAY },
32892-	{ .base = 0x00, .phy = &rk3288_win23_data,
32893-	  .type = DRM_PLANE_TYPE_OVERLAY },
32894-	{ .base = 0x50, .phy = &rk3288_win23_data,
32895 	  .type = DRM_PLANE_TYPE_CURSOR },
32896 };
32897 
32898-static const int rk3288_vop_intrs[] = {
32899-	DSP_HOLD_VALID_INTR,
32900-	FS_INTR,
32901-	LINE_FLAG_INTR,
32902-	BUS_ERROR_INTR,
32903-};
32904-
32905-static const struct vop_intr rk3288_vop_intr = {
32906-	.intrs = rk3288_vop_intrs,
32907-	.nintrs = ARRAY_SIZE(rk3288_vop_intrs),
32908-	.line_flag_num[0] = VOP_REG(RK3288_INTR_CTRL0, 0x1fff, 12),
32909-	.status = VOP_REG(RK3288_INTR_CTRL0, 0xf, 0),
32910-	.enable = VOP_REG(RK3288_INTR_CTRL0, 0xf, 4),
32911-	.clear = VOP_REG(RK3288_INTR_CTRL0, 0xf, 8),
32912+static const struct vop_data rk3228_vop = {
32913+	.soc_id = 0x3228,
32914+	.vop_id = 0,
32915+	.version = VOP_VERSION(3, 7),
32916+	.feature = VOP_FEATURE_OUTPUT_10BIT | VOP_FEATURE_ALPHA_SCALE | VOP_FEATURE_OVERSCAN,
32917+	.max_input = {4096, 8192},
32918+	.max_output = {4096, 2160},
32919+	.intr = &rk3366_vop_intr,
32920+	.ctrl = &rk3288_ctrl_data,
32921+	.win = rk322x_vop_win_data,
32922+	.win_size = ARRAY_SIZE(rk322x_vop_win_data),
32923 };
32924 
32925-static const struct vop_data rk3288_vop = {
32926-	.version = VOP_VERSION(3, 1),
32927-	.feature = VOP_FEATURE_OUTPUT_RGB10,
32928-	.intr = &rk3288_vop_intr,
32929-	.common = &rk3288_common,
32930-	.modeset = &rk3288_modeset,
32931-	.output = &rk3288_output,
32932-	.win = rk3288_vop_win_data,
32933-	.win_size = ARRAY_SIZE(rk3288_vop_win_data),
32934-	.lut_size = 1024,
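+/*
+ * Piecewise-linear curve tables for the SDR2HDR/HDR2SDR blocks: BT.1886 EOTF
+ * and ST 2084 (PQ) OETF segments for the SDR->HDR path, and EETF/OETF and
+ * saturation curves for the HDR->SDR path (see rk3328_hdr_table below).
+ */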
32935+static const u32 sdr2hdr_bt1886eotf_yn_for_hlg_hdr[65] = {
32936+	0,
32937+	1,	7,	17,	35,
32938+	60,	92,	134,	184,
32939+	244,	315,	396,	487,
32940+	591,	706,	833,	915,
32941+	1129,	1392,	1717,	2118,
32942+	2352,	2612,	2900,	3221,
32943+	3577,	3972,	4411,	4899,
32944+	5441,	6042,	6710,	7452,
32945+	7853,	8276,	8721,	9191,
32946+	9685,	10207,	10756,	11335,
32947+	11945,	12588,	13266,	13980,
32948+	14732,	15525,	16361,	17241,
32949+	17699,	18169,	18652,	19147,
32950+	19656,	20178,	20714,	21264,
32951+	21829,	22408,	23004,	23615,
32952+	24242,	24886,	25547,	26214,
32953+};
32954+
32955+static const u32 sdr2hdr_bt1886eotf_yn_for_bt2020[65] = {
32956+	0,
32957+	1820,   3640,   5498,   7674,
32958+	10256,  13253,  16678,  20539,
32959+	24847,  29609,  34833,  40527,
32960+	46699,  53354,  60499,  68141,
32961+	76285,  84937,  94103,  103787,
32962+	108825, 113995, 119296, 124731,
32963+	130299, 136001, 141837, 147808,
32964+	153915, 160158, 166538, 173055,
32965+	176365, 179709, 183089, 186502,
32966+	189951, 193434, 196952, 200505,
32967+	204093, 207715, 211373, 215066,
32968+	218795, 222558, 226357, 230191,
32969+	232121, 234060, 236008, 237965,
32970+	239931, 241906, 243889, 245882,
32971+	247883, 249894, 251913, 253941,
32972+	255978, 258024, 260079, 262143,
32973+};
32974+
32975+static u32 sdr2hdr_bt1886eotf_yn_for_hdr[65] = {
32976+	/* dst_range 425int */
32977+	0,
32978+	5,     21,    49,     91,
32979+	150,   225,   320,   434,
32980+	569,   726,   905,   1108,
32981+	1336,  1588,  1866,  2171,
32982+	2502,  2862,  3250,  3667,
32983+	3887,  4114,  4349,  4591,
32984+	4841,  5099,  5364,  5638,
32985+	5920,  6209,  6507,  6812,
32986+	6968,  7126,  7287,  7449,
32987+	7613,  7779,  7948,  8118,
32988+	8291,  8466,  8643,  8822,
32989+	9003,  9187,  9372,  9560,
32990+	9655,  9750,  9846,  9942,
32991+	10039, 10136, 10234, 10333,
32992+	10432, 10531, 10631, 10732,
32993+	10833, 10935, 11038, 11141,
32994+};
32995+
32996+static const u32 sdr2hdr_st2084oetf_yn_for_hlg_hdr[65] = {
32997+	0,
32998+	668,	910,	1217,	1600,
32999+	2068,	2384,	2627,	3282,
33000+	3710,	4033,	4879,	5416,
33001+	5815,	6135,	6401,	6631,
33002+	6833,	7176,	7462,	7707,
33003+	7921,	8113,	8285,	8442,
33004+	8586,	8843,	9068,	9268,
33005+	9447,	9760,	10027,	10259,
33006+	10465,	10650,	10817,	10971,
33007+	11243,	11480,	11689,	11877,
33008+	12047,	12202,	12345,	12477,
33009+	12601,	12716,	12926,	13115,
33010+	13285,	13441,	13583,	13716,
33011+	13839,	13953,	14163,	14350,
33012+	14519,	14673,	14945,	15180,
33013+	15570,	15887,	16153,	16383,
33014+};
33015+
33016+static const u32 sdr2hdr_st2084oetf_yn_for_bt2020[65] = {
33017+	0,
33018+	0,     0,     1,     2,
33019+	4,     6,     9,     18,
33020+	27,    36,    72,    108,
33021+	144,   180,   216,   252,
33022+	288,   360,   432,   504,
33023+	576,   648,   720,   792,
33024+	864,   1008,  1152,  1296,
33025+	1444,  1706,  1945,  2166,
33026+	2372,  2566,  2750,  2924,
33027+	3251,  3553,  3834,  4099,
33028+	4350,  4588,  4816,  5035,
33029+	5245,  5447,  5832,  6194,
33030+	6536,  6862,  7173,  7471,
33031+	7758,  8035,  8560,  9055,
33032+	9523,  9968,  10800, 11569,
33033+	12963, 14210, 15347, 16383,
33034+};
33035+
33036+static u32 sdr2hdr_st2084oetf_yn_for_hdr[65] = {
33037+	0,
33038+	281,   418,   610,   871,
33039+	1217,  1464,  1662,  2218,
33040+	2599,  2896,  3699,  4228,
33041+	4628,  4953,  5227,  5466,
33042+	5676,  6038,  6341,  6602,
33043+	6833,  7039,  7226,  7396,
33044+	7554,  7835,  8082,  8302,
33045+	8501,  8848,  9145,  9405,
33046+	9635,  9842,  10031, 10204,
33047+	10512, 10779, 11017, 11230,
33048+	11423, 11599, 11762, 11913,
33049+	12054, 12185, 12426, 12641,
33050+	12835, 13013, 13177, 13328,
33051+	13469, 13600, 13840, 14055,
33052+	14248, 14425, 14737, 15006,
33053+	15453, 15816, 16121, 16383,
33054+};
33055+
33056+static const u32 sdr2hdr_st2084oetf_dxn_pow2[64] = {
33057+	0,  0,  1,  2,
33058+	3,  3,  3,  5,
33059+	5,  5,  7,  7,
33060+	7,  7,  7,  7,
33061+	7,  8,  8,  8,
33062+	8,  8,  8,  8,
33063+	8,  9,  9,  9,
33064+	9,  10, 10, 10,
33065+	10, 10, 10, 10,
33066+	11, 11, 11, 11,
33067+	11, 11, 11, 11,
33068+	11, 11, 12, 12,
33069+	12, 12, 12, 12,
33070+	12, 12, 13, 13,
33071+	13, 13, 14, 14,
33072+	15, 15, 15, 15,
33073+};
33074+
33075+static const u32 sdr2hdr_st2084oetf_dxn[64] = {
33076+	1,     1,     2,     4,
33077+	8,     8,     8,     32,
33078+	32,    32,    128,   128,
33079+	128,   128,   128,   128,
33080+	128,   256,   256,   256,
33081+	256,   256,   256,   256,
33082+	256,   512,   512,   512,
33083+	512,   1024,  1024,  1024,
33084+	1024,  1024,  1024,  1024,
33085+	2048,  2048,  2048,  2048,
33086+	2048,  2048,  2048,  2048,
33087+	2048,  2048,  4096,  4096,
33088+	4096,  4096,  4096,  4096,
33089+	4096,  4096,  8192,  8192,
33090+	8192,  8192,  16384, 16384,
33091+	32768, 32768, 32768, 32768,
33092+};
33093+
33094+static const u32 sdr2hdr_st2084oetf_xn[63] = {
33095+	1,      2,      4,      8,
33096+	16,     24,     32,     64,
33097+	96,     128,    256,    384,
33098+	512,    640,    768,    896,
33099+	1024,   1280,   1536,   1792,
33100+	2048,   2304,   2560,   2816,
33101+	3072,   3584,   4096,   4608,
33102+	5120,   6144,   7168,   8192,
33103+	9216,   10240,  11264,  12288,
33104+	14336,  16384,  18432,  20480,
33105+	22528,  24576,  26624,  28672,
33106+	30720,  32768,  36864,  40960,
33107+	45056,  49152,  53248,  57344,
33108+	61440,  65536,  73728,  81920,
33109+	90112,  98304,  114688, 131072,
33110+	163840, 196608, 229376,
33111+};
33112+
33113+static u32 hdr2sdr_eetf_yn[33] = {
33114+	1716,
33115+	1880,	2067,	2277,	2508,
33116+	2758,	3026,	3310,	3609,
33117+	3921,	4246,	4581,	4925,
33118+	5279,	5640,	6007,	6380,
33119+	6758,	7140,	7526,	7914,
33120+	8304,	8694,	9074,	9438,
33121+	9779,	10093,	10373,	10615,
33122+	10812,	10960,	11053,	11084,
33123+};
33124+
33125+static u32 hdr2sdr_bt1886oetf_yn[33] = {
33126+	0,
33127+	0,	0,	0,	0,
33128+	0,	0,	0,	314,
33129+	746,	1323,	2093,	2657,
33130+	3120,	3519,	3874,	4196,
33131+	4492,	5024,	5498,	5928,
33132+	6323,	7034,	7666,	8239,
33133+	8766,	9716,	10560,	11325,
33134+	12029,	13296,	14422,	16383,
33135 };
33136 
33137-static const int rk3368_vop_intrs[] = {
33138-	FS_INTR,
33139-	0, 0,
33140-	LINE_FLAG_INTR,
33141+static const u32 hdr2sdr_sat_yn[9] = {
33142 	0,
33143-	BUS_ERROR_INTR,
33144-	0, 0, 0, 0, 0, 0, 0,
33145-	DSP_HOLD_VALID_INTR,
33146+	1792, 3584, 3472, 2778,
33147+	2083, 1389, 694,  0,
33148+};
33149+
33150+static const struct vop_hdr_table rk3328_hdr_table = {
33151+	.hdr2sdr_eetf_oetf_y0_offset = RK3328_HDR2SDR_EETF_OETF_Y0,
33152+	.hdr2sdr_eetf_oetf_y1_offset = RK3328_HDR2SDR_EETF_OETF_Y1,
33153+	.hdr2sdr_eetf_yn	= hdr2sdr_eetf_yn,
33154+	.hdr2sdr_bt1886oetf_yn	= hdr2sdr_bt1886oetf_yn,
33155+	.hdr2sdr_sat_y0_offset = RK3328_HDR2DR_SAT_Y0,
33156+	.hdr2sdr_sat_y1_offset = RK3328_HDR2DR_SAT_Y1,
33157+	.hdr2sdr_sat_yn = hdr2sdr_sat_yn,
33158+
33159+	.hdr2sdr_src_range_min = 494,
33160+	.hdr2sdr_src_range_max = 12642,
33161+	.hdr2sdr_normfaceetf = 1327,
33162+	.hdr2sdr_dst_range_min = 4,
33163+	.hdr2sdr_dst_range_max = 3276,
33164+	.hdr2sdr_normfacgamma = 5120,
33165+
33166+	.sdr2hdr_eotf_oetf_y0_offset = RK3328_SDR2HDR_EOTF_OETF_Y0,
33167+	.sdr2hdr_eotf_oetf_y1_offset = RK3328_SDR2HDR_EOTF_OETF_Y1,
33168+	.sdr2hdr_bt1886eotf_yn_for_hlg_hdr = sdr2hdr_bt1886eotf_yn_for_hlg_hdr,
33169+	.sdr2hdr_bt1886eotf_yn_for_bt2020 = sdr2hdr_bt1886eotf_yn_for_bt2020,
33170+	.sdr2hdr_bt1886eotf_yn_for_hdr = sdr2hdr_bt1886eotf_yn_for_hdr,
33171+	.sdr2hdr_st2084oetf_yn_for_hlg_hdr = sdr2hdr_st2084oetf_yn_for_hlg_hdr,
33172+	.sdr2hdr_st2084oetf_yn_for_bt2020 = sdr2hdr_st2084oetf_yn_for_bt2020,
33173+	.sdr2hdr_st2084oetf_yn_for_hdr = sdr2hdr_st2084oetf_yn_for_hdr,
33174+	.sdr2hdr_oetf_dx_dxpow1_offset = RK3328_SDR2HDR_OETF_DX_DXPOW1,
33175+	.sdr2hdr_oetf_xn1_offset = RK3328_SDR2HDR_OETF_XN1,
33176+	.sdr2hdr_st2084oetf_dxn_pow2 = sdr2hdr_st2084oetf_dxn_pow2,
33177+	.sdr2hdr_st2084oetf_dxn = sdr2hdr_st2084oetf_dxn,
33178+	.sdr2hdr_st2084oetf_xn = sdr2hdr_st2084oetf_xn,
33179+};
33180+
33181+static const struct vop_ctrl rk3328_ctrl_data = {
33182+	.standby = VOP_REG(RK3328_SYS_CTRL, 0x1, 22),
33183+	.dma_stop = VOP_REG(RK3328_SYS_CTRL, 0x1, 21),
33184+	.axi_outstanding_max_num = VOP_REG(RK3328_SYS_CTRL1, 0x1f, 13),
33185+	.axi_max_outstanding_en = VOP_REG(RK3328_SYS_CTRL1, 0x1, 12),
33186+	.reg_done_frm = VOP_REG(RK3328_SYS_CTRL1, 0x1, 24),
33187+	.auto_gate_en = VOP_REG(RK3328_SYS_CTRL, 0x1, 23),
33188+	.htotal_pw = VOP_REG(RK3328_DSP_HTOTAL_HS_END, 0x1fff1fff, 0),
33189+	.hact_st_end = VOP_REG(RK3328_DSP_HACT_ST_END, 0x1fff1fff, 0),
33190+	.vtotal_pw = VOP_REG(RK3328_DSP_VTOTAL_VS_END, 0x1fff1fff, 0),
33191+	.vact_st_end = VOP_REG(RK3328_DSP_VACT_ST_END, 0x1fff1fff, 0),
33192+	.vact_st_end_f1 = VOP_REG(RK3328_DSP_VACT_ST_END_F1, 0x1fff1fff, 0),
33193+	.vs_st_end_f1 = VOP_REG(RK3328_DSP_VS_ST_END_F1, 0x1fff1fff, 0),
33194+	.hpost_st_end = VOP_REG(RK3328_POST_DSP_HACT_INFO, 0x1fff1fff, 0),
33195+	.vpost_st_end = VOP_REG(RK3328_POST_DSP_VACT_INFO, 0x1fff1fff, 0),
33196+	.vpost_st_end_f1 = VOP_REG(RK3328_POST_DSP_VACT_INFO_F1, 0x1fff1fff, 0),
33197+	.post_scl_factor = VOP_REG(RK3328_POST_SCL_FACTOR_YRGB, 0xffffffff, 0),
33198+	.post_scl_ctrl = VOP_REG(RK3328_POST_SCL_CTRL, 0x3, 0),
33199+	.dsp_out_yuv = VOP_REG(RK3328_POST_SCL_CTRL, 0x1, 2),
33200+	.dsp_interlace = VOP_REG(RK3328_DSP_CTRL0, 0x1, 10),
33201+	.dsp_layer_sel = VOP_REG(RK3328_DSP_CTRL1, 0xff, 8),
33202+	.post_lb_mode = VOP_REG(RK3328_SYS_CTRL, 0x1, 18),
33203+	.global_regdone_en = VOP_REG(RK3328_SYS_CTRL, 0x1, 11),
33204+	.overlay_mode = VOP_REG(RK3328_SYS_CTRL, 0x1, 16),
33205+	.core_dclk_div = VOP_REG(RK3328_DSP_CTRL0, 0x1, 4),
33206+	.dclk_ddr = VOP_REG(RK3328_DSP_CTRL0, 0x1, 8),
33207+	.p2i_en = VOP_REG(RK3328_DSP_CTRL0, 0x1, 5),
33208+	.rgb_en = VOP_REG(RK3328_SYS_CTRL, 0x1, 12),
33209+	.hdmi_en = VOP_REG(RK3328_SYS_CTRL, 0x1, 13),
33210+	.edp_en = VOP_REG(RK3328_SYS_CTRL, 0x1, 14),
33211+	.mipi_en = VOP_REG(RK3328_SYS_CTRL, 0x1, 15),
33212+	.tve_dclk_en = VOP_REG(RK3328_SYS_CTRL, 0x1, 24),
33213+	.tve_dclk_pol = VOP_REG(RK3328_SYS_CTRL, 0x1, 25),
33214+	.tve_sw_mode = VOP_REG(RK3328_SYS_CTRL, 0x1, 26),
33215+	.sw_uv_offset_en  = VOP_REG(RK3328_SYS_CTRL, 0x1, 27),
33216+	.sw_genlock   = VOP_REG(RK3328_SYS_CTRL, 0x1, 28),
33217+	.sw_dac_sel = VOP_REG(RK3328_SYS_CTRL, 0x1, 29),
33218+	.rgb_pin_pol = VOP_REG(RK3328_DSP_CTRL1, 0x7, 16),
33219+	.hdmi_pin_pol = VOP_REG(RK3328_DSP_CTRL1, 0x7, 20),
33220+	.edp_pin_pol = VOP_REG(RK3328_DSP_CTRL1, 0x7, 24),
33221+	.mipi_pin_pol = VOP_REG(RK3328_DSP_CTRL1, 0x7, 28),
33222+	.rgb_dclk_pol = VOP_REG(RK3328_DSP_CTRL1, 0x1, 19),
33223+	.hdmi_dclk_pol = VOP_REG(RK3328_DSP_CTRL1, 0x1, 23),
33224+	.edp_dclk_pol = VOP_REG(RK3328_DSP_CTRL1, 0x1, 27),
33225+	.mipi_dclk_pol = VOP_REG(RK3328_DSP_CTRL1, 0x1, 31),
33226+
33227+	.dither_down_sel = VOP_REG(RK3328_DSP_CTRL1, 0x1, 4),
33228+	.dither_down_mode = VOP_REG(RK3328_DSP_CTRL1, 0x1, 3),
33229+	.dither_down_en = VOP_REG(RK3328_DSP_CTRL1, 0x1, 2),
33230+	.pre_dither_down_en = VOP_REG(RK3328_DSP_CTRL1, 0x1, 1),
33231+	.dither_up_en = VOP_REG(RK3328_DSP_CTRL1, 0x1, 6),
33232+
33233+	.dsp_data_swap = VOP_REG(RK3328_DSP_CTRL0, 0x1f, 12),
33234+	.dsp_ccir656_avg = VOP_REG(RK3328_DSP_CTRL0, 0x1, 20),
33235+	.dsp_blank = VOP_REG(RK3328_DSP_CTRL0, 0x3, 18),
33236+	.dsp_lut_en = VOP_REG(RK3328_DSP_CTRL1, 0x1, 0),
33237+	.out_mode = VOP_REG(RK3328_DSP_CTRL0, 0xf, 0),
33238+
33239+	.xmirror = VOP_REG(RK3328_DSP_CTRL0, 0x1, 22),
33240+	.ymirror = VOP_REG(RK3328_DSP_CTRL0, 0x1, 23),
33241+
33242+	.dsp_background = VOP_REG(RK3328_DSP_BG, 0xffffffff, 0),
33243+
33244+	.alpha_hard_calc = VOP_REG(RK3328_SYS_CTRL1, 0x1, 27),
33245+	.level2_overlay_en = VOP_REG(RK3328_SYS_CTRL1, 0x1, 28),
33246+
33247+	.hdr2sdr_en = VOP_REG(RK3328_HDR2DR_CTRL, 0x1, 0),
33248+	.hdr2sdr_en_win0_csc = VOP_REG(RK3328_SDR2HDR_CTRL, 0x1, 9),
33249+	.hdr2sdr_src_min = VOP_REG(RK3328_HDR2DR_SRC_RANGE, 0x3fff, 0),
33250+	.hdr2sdr_src_max = VOP_REG(RK3328_HDR2DR_SRC_RANGE, 0x3fff, 16),
33251+	.hdr2sdr_normfaceetf = VOP_REG(RK3328_HDR2DR_NORMFACEETF, 0x7ff, 0),
33252+	.hdr2sdr_dst_min = VOP_REG(RK3328_HDR2DR_DST_RANGE, 0x3fff, 0),
33253+	.hdr2sdr_dst_max = VOP_REG(RK3328_HDR2DR_DST_RANGE, 0x3fff, 16),
33254+	.hdr2sdr_normfacgamma = VOP_REG(RK3328_HDR2DR_NORMFACGAMMA, 0xffff, 0),
33255+
33256+	.bt1886eotf_pre_conv_en = VOP_REG(RK3328_SDR2HDR_CTRL, 0x1, 0),
33257+	.rgb2rgb_pre_conv_en = VOP_REG(RK3328_SDR2HDR_CTRL, 0x1, 1),
33258+	.rgb2rgb_pre_conv_mode = VOP_REG(RK3328_SDR2HDR_CTRL, 0x1, 2),
33259+	.st2084oetf_pre_conv_en = VOP_REG(RK3328_SDR2HDR_CTRL, 0x1, 3),
33260+	.bt1886eotf_post_conv_en = VOP_REG(RK3328_SDR2HDR_CTRL, 0x1, 4),
33261+	.rgb2rgb_post_conv_en = VOP_REG(RK3328_SDR2HDR_CTRL, 0x1, 5),
33262+	.rgb2rgb_post_conv_mode = VOP_REG(RK3328_SDR2HDR_CTRL, 0x1, 6),
33263+	.st2084oetf_post_conv_en = VOP_REG(RK3328_SDR2HDR_CTRL, 0x1, 7),
33264+	.win_csc_mode_sel = VOP_REG(RK3328_SDR2HDR_CTRL, 0x1, 31),
33265+
33266+	.bcsh_brightness = VOP_REG(RK3328_BCSH_BCS, 0xff, 0),
33267+	.bcsh_contrast = VOP_REG(RK3328_BCSH_BCS, 0x1ff, 8),
33268+	.bcsh_sat_con = VOP_REG(RK3328_BCSH_BCS, 0x3ff, 20),
33269+	.bcsh_out_mode = VOP_REG(RK3328_BCSH_BCS, 0x3, 30),
33270+	.bcsh_sin_hue = VOP_REG(RK3328_BCSH_H, 0x1ff, 0),
33271+	.bcsh_cos_hue = VOP_REG(RK3328_BCSH_H, 0x1ff, 16),
33272+	.bcsh_r2y_csc_mode = VOP_REG(RK3328_BCSH_CTRL, 0x3, 6),
33273+	.bcsh_r2y_en = VOP_REG(RK3328_BCSH_CTRL, 0x1, 4),
33274+	.bcsh_y2r_csc_mode = VOP_REG(RK3328_BCSH_CTRL, 0x3, 2),
33275+	.bcsh_y2r_en = VOP_REG(RK3328_BCSH_CTRL, 0x1, 0),
33276+	.bcsh_color_bar = VOP_REG(RK3328_BCSH_COLOR_BAR, 0xffffff, 8),
33277+	.bcsh_en = VOP_REG(RK3328_BCSH_COLOR_BAR, 0x1, 0),
33278+
33279+	.cfg_done = VOP_REG(RK3328_REG_CFG_DONE, 0x1, 0),
33280 };
33281 
33282-static const struct vop_intr rk3368_vop_intr = {
33283+static const struct vop_intr rk3328_vop_intr = {
33284 	.intrs = rk3368_vop_intrs,
33285 	.nintrs = ARRAY_SIZE(rk3368_vop_intrs),
33286-	.line_flag_num[0] = VOP_REG(RK3368_LINE_FLAG, 0xffff, 0),
33287-	.line_flag_num[1] = VOP_REG(RK3368_LINE_FLAG, 0xffff, 16),
33288-	.status = VOP_REG_MASK_SYNC(RK3368_INTR_STATUS, 0x3fff, 0),
33289-	.enable = VOP_REG_MASK_SYNC(RK3368_INTR_EN, 0x3fff, 0),
33290-	.clear = VOP_REG_MASK_SYNC(RK3368_INTR_CLEAR, 0x3fff, 0),
33291+	.line_flag_num[0] = VOP_REG(RK3328_LINE_FLAG, 0xffff, 0),
33292+	.line_flag_num[1] = VOP_REG(RK3328_LINE_FLAG, 0xffff, 16),
33293+	.status = VOP_REG_MASK(RK3328_INTR_STATUS0, 0xffff, 0),
33294+	.enable = VOP_REG_MASK(RK3328_INTR_EN0, 0xffff, 0),
33295+	.clear = VOP_REG_MASK(RK3328_INTR_CLEAR0, 0xffff, 0),
33296 };
33297 
33298-static const struct vop_win_phy rk3368_win01_data = {
33299-	.scl = &rk3288_win_full_scl,
33300-	.data_formats = formats_win_full,
33301-	.nformats = ARRAY_SIZE(formats_win_full),
33302-	.format_modifiers = format_modifiers_win_full,
33303-	.enable = VOP_REG(RK3368_WIN0_CTRL0, 0x1, 0),
33304-	.format = VOP_REG(RK3368_WIN0_CTRL0, 0x7, 1),
33305-	.rb_swap = VOP_REG(RK3368_WIN0_CTRL0, 0x1, 12),
33306-	.x_mir_en = VOP_REG(RK3368_WIN0_CTRL0, 0x1, 21),
33307-	.y_mir_en = VOP_REG(RK3368_WIN0_CTRL0, 0x1, 22),
33308-	.act_info = VOP_REG(RK3368_WIN0_ACT_INFO, 0x1fff1fff, 0),
33309-	.dsp_info = VOP_REG(RK3368_WIN0_DSP_INFO, 0x0fff0fff, 0),
33310-	.dsp_st = VOP_REG(RK3368_WIN0_DSP_ST, 0x1fff1fff, 0),
33311-	.yrgb_mst = VOP_REG(RK3368_WIN0_YRGB_MST, 0xffffffff, 0),
33312-	.uv_mst = VOP_REG(RK3368_WIN0_CBR_MST, 0xffffffff, 0),
33313-	.yrgb_vir = VOP_REG(RK3368_WIN0_VIR, 0x3fff, 0),
33314-	.uv_vir = VOP_REG(RK3368_WIN0_VIR, 0x3fff, 16),
33315-	.src_alpha_ctl = VOP_REG(RK3368_WIN0_SRC_ALPHA_CTRL, 0xff, 0),
33316-	.dst_alpha_ctl = VOP_REG(RK3368_WIN0_DST_ALPHA_CTRL, 0xff, 0),
33317-	.channel = VOP_REG(RK3368_WIN0_CTRL2, 0xff, 0),
33318+static const struct vop_csc rk3328_win0_csc = {
33319+	.r2y_en = VOP_REG(RK3328_SDR2HDR_CTRL, 0x1, 8),
33320+	.r2r_en = VOP_REG(RK3328_SDR2HDR_CTRL, 0x1, 5),
33321+	.y2r_en = VOP_REG(RK3328_SDR2HDR_CTRL, 0x1, 9),
33322 };
33323 
33324-static const struct vop_win_phy rk3368_win23_data = {
33325-	.data_formats = formats_win_lite,
33326-	.nformats = ARRAY_SIZE(formats_win_lite),
33327-	.format_modifiers = format_modifiers_win_lite,
33328-	.gate = VOP_REG(RK3368_WIN2_CTRL0, 0x1, 0),
33329-	.enable = VOP_REG(RK3368_WIN2_CTRL0, 0x1, 4),
33330-	.format = VOP_REG(RK3368_WIN2_CTRL0, 0x3, 5),
33331-	.rb_swap = VOP_REG(RK3368_WIN2_CTRL0, 0x1, 20),
33332-	.y_mir_en = VOP_REG(RK3368_WIN2_CTRL1, 0x1, 15),
33333-	.dsp_info = VOP_REG(RK3368_WIN2_DSP_INFO0, 0x0fff0fff, 0),
33334-	.dsp_st = VOP_REG(RK3368_WIN2_DSP_ST0, 0x1fff1fff, 0),
33335-	.yrgb_mst = VOP_REG(RK3368_WIN2_MST0, 0xffffffff, 0),
33336-	.yrgb_vir = VOP_REG(RK3368_WIN2_VIR0_1, 0x1fff, 0),
33337-	.src_alpha_ctl = VOP_REG(RK3368_WIN2_SRC_ALPHA_CTRL, 0xff, 0),
33338-	.dst_alpha_ctl = VOP_REG(RK3368_WIN2_DST_ALPHA_CTRL, 0xff, 0),
33339+static const struct vop_csc rk3328_win1_csc = {
33340+	.r2y_en = VOP_REG(RK3328_SDR2HDR_CTRL, 0x1, 10),
33341+	.r2r_en = VOP_REG(RK3328_SDR2HDR_CTRL, 0x1, 1),
33342+	.y2r_en = VOP_REG(RK3328_SDR2HDR_CTRL, 0x1, 11),
33343 };
33344 
33345-static const struct vop_win_data rk3368_vop_win_data[] = {
33346-	{ .base = 0x00, .phy = &rk3368_win01_data,
33347-	  .type = DRM_PLANE_TYPE_PRIMARY },
33348-	{ .base = 0x40, .phy = &rk3368_win01_data,
33349-	  .type = DRM_PLANE_TYPE_OVERLAY },
33350-	{ .base = 0x00, .phy = &rk3368_win23_data,
33351-	  .type = DRM_PLANE_TYPE_OVERLAY },
33352-	{ .base = 0x50, .phy = &rk3368_win23_data,
33353-	  .type = DRM_PLANE_TYPE_CURSOR },
33354+static const struct vop_csc rk3328_win2_csc = {
33355+	.r2y_en = VOP_REG(RK3328_SDR2HDR_CTRL, 0x1, 12),
33356+	.r2r_en = VOP_REG(RK3328_SDR2HDR_CTRL, 0x1, 1),
33357+	.y2r_en = VOP_REG(RK3328_SDR2HDR_CTRL, 0x1, 13),
33358 };
33359 
33360-static const struct vop_output rk3368_output = {
33361-	.rgb_dclk_pol = VOP_REG(RK3368_DSP_CTRL1, 0x1, 19),
33362-	.hdmi_dclk_pol = VOP_REG(RK3368_DSP_CTRL1, 0x1, 23),
33363-	.edp_dclk_pol = VOP_REG(RK3368_DSP_CTRL1, 0x1, 27),
33364-	.mipi_dclk_pol = VOP_REG(RK3368_DSP_CTRL1, 0x1, 31),
33365-	.rgb_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0x7, 16),
33366-	.hdmi_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0x7, 20),
33367-	.edp_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0x7, 24),
33368-	.mipi_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0x7, 28),
33369-	.rgb_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 12),
33370-	.hdmi_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 13),
33371-	.edp_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 14),
33372-	.mipi_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 15),
33373+static const struct vop_win_data rk3328_vop_win_data[] = {
33374+	{ .base = 0xd0, .phy = &rk3288_win01_data,  .csc = &rk3328_win0_csc,
33375+	  .type = DRM_PLANE_TYPE_PRIMARY,
33376+	  .feature = WIN_FEATURE_HDR2SDR | WIN_FEATURE_SDR2HDR },
33377+	{ .base = 0x1d0, .phy = &rk3288_win01_data, .csc = &rk3328_win1_csc,
33378+	  .type = DRM_PLANE_TYPE_OVERLAY,
33379+	  .feature = WIN_FEATURE_SDR2HDR | WIN_FEATURE_PRE_OVERLAY },
33380+	{ .base = 0x2d0, .phy = &rk3288_win01_data, .csc = &rk3328_win2_csc,
33381+	  .type = DRM_PLANE_TYPE_CURSOR,
33382+	  .feature = WIN_FEATURE_SDR2HDR | WIN_FEATURE_PRE_OVERLAY },
33383 };
33384 
33385-static const struct vop_misc rk3368_misc = {
33386-	.global_regdone_en = VOP_REG(RK3368_SYS_CTRL, 0x1, 11),
33387+static const struct vop_data rk3328_vop = {
33388+	.soc_id = 0x3328,
33389+	.vop_id = 0,
33390+	.version = VOP_VERSION(3, 8),
33391+	.feature = VOP_FEATURE_OUTPUT_10BIT | VOP_FEATURE_HDR10 |
33392+			VOP_FEATURE_ALPHA_SCALE | VOP_FEATURE_OVERSCAN,
33393+	.hdr_table = &rk3328_hdr_table,
33394+	.max_input = {4096, 8192},
33395+	.max_output = {4096, 2160},
33396+	.intr = &rk3328_vop_intr,
33397+	.ctrl = &rk3328_ctrl_data,
33398+	.win = rk3328_vop_win_data,
33399+	.win_size = ARRAY_SIZE(rk3328_vop_win_data),
33400 };
33401 
33402-static const struct vop_data rk3368_vop = {
33403-	.version = VOP_VERSION(3, 2),
33404-	.intr = &rk3368_vop_intr,
33405-	.common = &rk3288_common,
33406-	.modeset = &rk3288_modeset,
33407-	.output = &rk3368_output,
33408-	.misc = &rk3368_misc,
33409-	.win = rk3368_vop_win_data,
33410-	.win_size = ARRAY_SIZE(rk3368_vop_win_data),
33411+static const struct vop_scl_regs rk3036_win0_scl = {
33412+	.scale_yrgb_x = VOP_REG(RK3036_WIN0_SCL_FACTOR_YRGB, 0xffff, 0x0),
33413+	.scale_yrgb_y = VOP_REG(RK3036_WIN0_SCL_FACTOR_YRGB, 0xffff, 16),
33414+	.scale_cbcr_x = VOP_REG(RK3036_WIN0_SCL_FACTOR_CBR, 0xffff, 0x0),
33415+	.scale_cbcr_y = VOP_REG(RK3036_WIN0_SCL_FACTOR_CBR, 0xffff, 16),
33416 };
33417 
33418-static const struct vop_intr rk3366_vop_intr = {
33419-	.intrs = rk3368_vop_intrs,
33420-	.nintrs = ARRAY_SIZE(rk3368_vop_intrs),
33421-	.line_flag_num[0] = VOP_REG(RK3366_LINE_FLAG, 0xffff, 0),
33422-	.line_flag_num[1] = VOP_REG(RK3366_LINE_FLAG, 0xffff, 16),
33423-	.status = VOP_REG_MASK_SYNC(RK3366_INTR_STATUS0, 0xffff, 0),
33424-	.enable = VOP_REG_MASK_SYNC(RK3366_INTR_EN0, 0xffff, 0),
33425-	.clear = VOP_REG_MASK_SYNC(RK3366_INTR_CLEAR0, 0xffff, 0),
33426+static const struct vop_scl_regs rk3036_win1_scl = {
33427+	.scale_yrgb_x = VOP_REG(RK3036_WIN1_SCL_FACTOR_YRGB, 0xffff, 0x0),
33428+	.scale_yrgb_y = VOP_REG(RK3036_WIN1_SCL_FACTOR_YRGB, 0xffff, 16),
33429 };
33430 
33431-static const struct vop_data rk3366_vop = {
33432-	.version = VOP_VERSION(3, 4),
33433-	.intr = &rk3366_vop_intr,
33434-	.common = &rk3288_common,
33435-	.modeset = &rk3288_modeset,
33436-	.output = &rk3368_output,
33437-	.misc = &rk3368_misc,
33438-	.win = rk3368_vop_win_data,
33439-	.win_size = ARRAY_SIZE(rk3368_vop_win_data),
33440+static const struct vop_win_phy rk3036_win0_data = {
33441+	.scl = &rk3036_win0_scl,
33442+	.data_formats = formats_win_full,
33443+	.nformats = ARRAY_SIZE(formats_win_full),
33444+	.enable = VOP_REG(RK3036_SYS_CTRL, 0x1, 0),
33445+	.format = VOP_REG(RK3036_SYS_CTRL, 0x7, 3),
33446+	.rb_swap = VOP_REG(RK3036_SYS_CTRL, 0x1, 15),
33447+	.act_info = VOP_REG(RK3036_WIN0_ACT_INFO, 0x1fff1fff, 0),
33448+	.dsp_info = VOP_REG(RK3036_WIN0_DSP_INFO, 0x0fff0fff, 0),
33449+	.dsp_st = VOP_REG(RK3036_WIN0_DSP_ST, 0x1fff1fff, 0),
33450+	.yrgb_mst = VOP_REG(RK3036_WIN0_YRGB_MST, 0xffffffff, 0),
33451+	.uv_mst = VOP_REG(RK3036_WIN0_CBR_MST, 0xffffffff, 0),
33452+	.yrgb_vir = VOP_REG(RK3036_WIN0_VIR, 0xffff, 0),
33453+	.uv_vir = VOP_REG(RK3036_WIN0_VIR, 0x1fff, 16),
33454+	.alpha_mode = VOP_REG(RK3036_DSP_CTRL0, 0x1, 18),
33455+	.alpha_en = VOP_REG(RK3036_ALPHA_CTRL, 0x1, 0),
33456+	.alpha_pre_mul = VOP_REG(RK3036_DSP_CTRL0, 0x1, 29),
33457 };
33458 
33459-static const struct vop_output rk3399_output = {
33460-	.dp_dclk_pol = VOP_REG(RK3399_DSP_CTRL1, 0x1, 19),
33461-	.rgb_dclk_pol = VOP_REG(RK3368_DSP_CTRL1, 0x1, 19),
33462-	.hdmi_dclk_pol = VOP_REG(RK3368_DSP_CTRL1, 0x1, 23),
33463-	.edp_dclk_pol = VOP_REG(RK3368_DSP_CTRL1, 0x1, 27),
33464-	.mipi_dclk_pol = VOP_REG(RK3368_DSP_CTRL1, 0x1, 31),
33465-	.dp_pin_pol = VOP_REG(RK3399_DSP_CTRL1, 0x7, 16),
33466-	.rgb_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0x7, 16),
33467-	.hdmi_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0x7, 20),
33468-	.edp_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0x7, 24),
33469-	.mipi_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0x7, 28),
33470-	.dp_en = VOP_REG(RK3399_SYS_CTRL, 0x1, 11),
33471-	.rgb_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 12),
33472-	.hdmi_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 13),
33473-	.edp_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 14),
33474-	.mipi_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 15),
33475-	.mipi_dual_channel_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 3),
33476+static const struct vop_win_phy rk3036_win1_data = {
33477+	.scl = &rk3036_win1_scl,
33478+	.data_formats = formats_win_lite,
33479+	.nformats = ARRAY_SIZE(formats_win_lite),
33480+	.enable = VOP_REG(RK3036_SYS_CTRL, 0x1, 1),
33481+	.format = VOP_REG(RK3036_SYS_CTRL, 0x7, 6),
33482+	.rb_swap = VOP_REG(RK3036_SYS_CTRL, 0x1, 19),
33483+	.act_info = VOP_REG(RK3036_WIN1_ACT_INFO, 0x1fff1fff, 0),
33484+	.dsp_info = VOP_REG(RK3036_WIN1_DSP_INFO, 0x0fff0fff, 0),
33485+	.dsp_st = VOP_REG(RK3036_WIN1_DSP_ST, 0x1fff1fff, 0),
33486+	.yrgb_mst = VOP_REG(RK3036_WIN1_MST, 0xffffffff, 0),
33487+	.yrgb_vir = VOP_REG(RK3036_WIN1_VIR, 0xffff, 0),
33488+	.alpha_mode = VOP_REG(RK3036_DSP_CTRL0, 0x1, 19),
33489+	.alpha_en = VOP_REG(RK3036_ALPHA_CTRL, 0x1, 1)
33490 };
33491 
33492-static const struct vop_yuv2yuv_phy rk3399_yuv2yuv_win01_data = {
33493-	.y2r_coefficients = {
33494-		VOP_REG(RK3399_WIN0_YUV2YUV_Y2R + 0, 0xffff, 0),
33495-		VOP_REG(RK3399_WIN0_YUV2YUV_Y2R + 0, 0xffff, 16),
33496-		VOP_REG(RK3399_WIN0_YUV2YUV_Y2R + 4, 0xffff, 0),
33497-		VOP_REG(RK3399_WIN0_YUV2YUV_Y2R + 4, 0xffff, 16),
33498-		VOP_REG(RK3399_WIN0_YUV2YUV_Y2R + 8, 0xffff, 0),
33499-		VOP_REG(RK3399_WIN0_YUV2YUV_Y2R + 8, 0xffff, 16),
33500-		VOP_REG(RK3399_WIN0_YUV2YUV_Y2R + 12, 0xffff, 0),
33501-		VOP_REG(RK3399_WIN0_YUV2YUV_Y2R + 12, 0xffff, 16),
33502-		VOP_REG(RK3399_WIN0_YUV2YUV_Y2R + 16, 0xffff, 0),
33503-		VOP_REG(RK3399_WIN0_YUV2YUV_Y2R + 20, 0xffffffff, 0),
33504-		VOP_REG(RK3399_WIN0_YUV2YUV_Y2R + 24, 0xffffffff, 0),
33505-		VOP_REG(RK3399_WIN0_YUV2YUV_Y2R + 28, 0xffffffff, 0),
33506-	},
33507+static const struct vop_win_data rk3036_vop_win_data[] = {
33508+	{ .base = 0x00, .phy = &rk3036_win0_data,
33509+	  .type = DRM_PLANE_TYPE_PRIMARY },
33510+	{ .base = 0x00, .phy = &rk3036_win1_data,
33511+	  .type = DRM_PLANE_TYPE_OVERLAY },
33512 };
33513 
33514-static const struct vop_yuv2yuv_phy rk3399_yuv2yuv_win23_data = { };
33515+static const int rk3036_vop_intrs[] = {
33516+	DSP_HOLD_VALID_INTR,
33517+	FS_INTR,
33518+	LINE_FLAG_INTR,
33519+	BUS_ERROR_INTR,
33520+};
33521 
33522-static const struct vop_win_yuv2yuv_data rk3399_vop_big_win_yuv2yuv_data[] = {
33523-	{ .base = 0x00, .phy = &rk3399_yuv2yuv_win01_data,
33524-	  .y2r_en = VOP_REG(RK3399_YUV2YUV_WIN, 0x1, 1) },
33525-	{ .base = 0x60, .phy = &rk3399_yuv2yuv_win01_data,
33526-	  .y2r_en = VOP_REG(RK3399_YUV2YUV_WIN, 0x1, 9) },
33527-	{ .base = 0xC0, .phy = &rk3399_yuv2yuv_win23_data },
33528-	{ .base = 0x120, .phy = &rk3399_yuv2yuv_win23_data },
33529+static const struct vop_intr rk3036_intr = {
33530+	.intrs = rk3036_vop_intrs,
33531+	.nintrs = ARRAY_SIZE(rk3036_vop_intrs),
33532+	.line_flag_num[0] = VOP_REG(RK3036_INT_STATUS, 0xfff, 12),
33533+	.status = VOP_REG(RK3036_INT_STATUS, 0xf, 0),
33534+	.enable = VOP_REG(RK3036_INT_STATUS, 0xf, 4),
33535+	.clear = VOP_REG(RK3036_INT_STATUS, 0xf, 8),
33536+};
33537+
33538+static const struct vop_ctrl rk3036_ctrl_data = {
33539+	.standby = VOP_REG(RK3036_SYS_CTRL, 0x1, 30),
33540+	.out_mode = VOP_REG(RK3036_DSP_CTRL0, 0xf, 0),
33541+	.dsp_blank = VOP_REG(RK3036_DSP_CTRL1, 0x1, 24),
33542+	.dclk_pol = VOP_REG(RK3036_DSP_CTRL0, 0x1, 7),
33543+	.pin_pol = VOP_REG(RK3036_DSP_CTRL0, 0x7, 4),
33544+	.dither_down_sel = VOP_REG(RK3036_DSP_CTRL0, 0x1, 27),
33545+	.dither_down_en = VOP_REG(RK3036_DSP_CTRL0, 0x1, 11),
33546+	.dither_down_mode = VOP_REG(RK3036_DSP_CTRL0, 0x1, 10),
33547+	.dither_up_en = VOP_REG(RK3036_DSP_CTRL0, 0x1, 9),
33548+	.dsp_layer_sel = VOP_REG(RK3036_DSP_CTRL0, 0x1, 8),
33549+	.htotal_pw = VOP_REG(RK3036_DSP_HTOTAL_HS_END, 0x1fff1fff, 0),
33550+	.hact_st_end = VOP_REG(RK3036_DSP_HACT_ST_END, 0x1fff1fff, 0),
33551+	.hdmi_en = VOP_REG(RK3036_AXI_BUS_CTRL, 0x1, 22),
33552+	.hdmi_dclk_pol = VOP_REG(RK3036_AXI_BUS_CTRL, 0x1, 23),
33553+	.hdmi_pin_pol = VOP_REG(RK3036_INT_SCALER, 0x7, 4),
33554+	.rgb_en = VOP_REG(RK3036_AXI_BUS_CTRL, 0x1, 24),
33555+	.rgb_dclk_pol = VOP_REG(RK3036_AXI_BUS_CTRL, 0x1, 25),
33556+	.lvds_en = VOP_REG(RK3036_AXI_BUS_CTRL, 0x1, 26),
33557+	.lvds_dclk_pol = VOP_REG(RK3036_AXI_BUS_CTRL, 0x1, 27),
33558+	.mipi_en = VOP_REG(RK3036_AXI_BUS_CTRL, 0x1, 28),
33559+	.mipi_dclk_pol = VOP_REG(RK3036_AXI_BUS_CTRL, 0x1, 29),
33560+	.vtotal_pw = VOP_REG(RK3036_DSP_VTOTAL_VS_END, 0x1fff1fff, 0),
33561+	.vact_st_end = VOP_REG(RK3036_DSP_VACT_ST_END, 0x1fff1fff, 0),
33562+	.cfg_done = VOP_REG(RK3036_REG_CFG_DONE, 0x1, 0),
33563+};
33564 
33565+static const struct vop_data rk3036_vop = {
33566+	.soc_id = 0x3036,
33567+	.vop_id = 0,
33568+	.version = VOP_VERSION(2, 2),
33569+	.max_input = {1920, 1080},
33570+	.max_output = {1920, 1080},
33571+	.ctrl = &rk3036_ctrl_data,
33572+	.intr = &rk3036_intr,
33573+	.win = rk3036_vop_win_data,
33574+	.win_size = ARRAY_SIZE(rk3036_vop_win_data),
33575 };
33576 
33577-static const struct vop_win_phy rk3399_win01_data = {
33578-	.scl = &rk3288_win_full_scl,
33579+static const struct vop_scl_regs rk3066_win_scl = {
33580+	.scale_yrgb_x = VOP_REG(RK3066_WIN0_SCL_FACTOR_YRGB, 0xffff, 0x0),
33581+	.scale_yrgb_y = VOP_REG(RK3066_WIN0_SCL_FACTOR_YRGB, 0xffff, 16),
33582+	.scale_cbcr_x = VOP_REG(RK3066_WIN0_SCL_FACTOR_CBR, 0xffff, 0x0),
33583+	.scale_cbcr_y = VOP_REG(RK3066_WIN0_SCL_FACTOR_CBR, 0xffff, 16),
33584+};
33585+
33586+static const struct vop_win_phy rk3066_win0_data = {
33587+	.scl = &rk3066_win_scl,
33588 	.data_formats = formats_win_full,
33589 	.nformats = ARRAY_SIZE(formats_win_full),
33590-	.format_modifiers = format_modifiers_win_full_afbc,
33591-	.enable = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 0),
33592-	.format = VOP_REG(RK3288_WIN0_CTRL0, 0x7, 1),
33593-	.rb_swap = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 12),
33594-	.x_mir_en = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 21),
33595-	.y_mir_en = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 22),
33596-	.act_info = VOP_REG(RK3288_WIN0_ACT_INFO, 0x1fff1fff, 0),
33597-	.dsp_info = VOP_REG(RK3288_WIN0_DSP_INFO, 0x0fff0fff, 0),
33598-	.dsp_st = VOP_REG(RK3288_WIN0_DSP_ST, 0x1fff1fff, 0),
33599-	.yrgb_mst = VOP_REG(RK3288_WIN0_YRGB_MST, 0xffffffff, 0),
33600-	.uv_mst = VOP_REG(RK3288_WIN0_CBR_MST, 0xffffffff, 0),
33601-	.yrgb_vir = VOP_REG(RK3288_WIN0_VIR, 0x3fff, 0),
33602-	.uv_vir = VOP_REG(RK3288_WIN0_VIR, 0x3fff, 16),
33603-	.src_alpha_ctl = VOP_REG(RK3288_WIN0_SRC_ALPHA_CTRL, 0xff, 0),
33604-	.dst_alpha_ctl = VOP_REG(RK3288_WIN0_DST_ALPHA_CTRL, 0xff, 0),
33605-	.channel = VOP_REG(RK3288_WIN0_CTRL2, 0xff, 0),
33606+	.enable = VOP_REG(RK3066_SYS_CTRL1, 0x1, 0),
33607+	.format = VOP_REG(RK3066_SYS_CTRL0, 0x7, 4),
33608+	.rb_swap = VOP_REG(RK3066_SYS_CTRL0, 0x1, 19),
33609+	.act_info = VOP_REG(RK3066_WIN0_ACT_INFO, 0x1fff1fff, 0),
33610+	.dsp_info = VOP_REG(RK3066_WIN0_DSP_INFO, 0x0fff0fff, 0),
33611+	.dsp_st = VOP_REG(RK3066_WIN0_DSP_ST, 0x1fff1fff, 0),
33612+	.yrgb_mst = VOP_REG(RK3066_WIN0_YRGB_MST0, 0xffffffff, 0),
33613+	.uv_mst = VOP_REG(RK3066_WIN0_CBR_MST0, 0xffffffff, 0),
33614+	.yrgb_vir = VOP_REG(RK3066_WIN0_VIR, 0xffff, 0),
33615+	.uv_vir = VOP_REG(RK3066_WIN0_VIR, 0x1fff, 16),
33616+	.alpha_mode = VOP_REG(RK3066_DSP_CTRL0, 0x1, 21),
33617+	.alpha_en = VOP_REG(RK3066_BLEND_CTRL, 0x1, 0)
33618 };
33619 
33620-/*
33621- * rk3399 vop big windows register layout is same as rk3288, but we
33622- * have a separate rk3399 win data array here so that we can advertise
33623- * AFBC on the primary plane.
33624- */
33625-static const struct vop_win_data rk3399_vop_win_data[] = {
33626-	{ .base = 0x00, .phy = &rk3399_win01_data,
33627+static const struct vop_win_phy rk3066_win1_data = {
33628+	.scl = &rk3066_win_scl,
33629+	.data_formats = formats_win_full,
33630+	.nformats = ARRAY_SIZE(formats_win_full),
33631+	.enable = VOP_REG(RK3066_SYS_CTRL1, 0x1, 1),
33632+	.format = VOP_REG(RK3066_SYS_CTRL0, 0x7, 7),
33633+	.rb_swap = VOP_REG(RK3066_SYS_CTRL0, 0x1, 23),
33634+	.act_info = VOP_REG(RK3066_WIN1_ACT_INFO, 0x1fff1fff, 0),
33635+	.dsp_info = VOP_REG(RK3066_WIN1_DSP_INFO, 0x0fff0fff, 0),
33636+	.dsp_st = VOP_REG(RK3066_WIN1_DSP_ST, 0x1fff1fff, 0),
33637+	.yrgb_mst = VOP_REG(RK3066_WIN1_YRGB_MST, 0xffffffff, 0),
33638+	.uv_mst = VOP_REG(RK3066_WIN1_CBR_MST, 0xffffffff, 0),
33639+	.yrgb_vir = VOP_REG(RK3066_WIN1_VIR, 0xffff, 0),
33640+	.uv_vir = VOP_REG(RK3066_WIN1_VIR, 0x1fff, 16),
33641+	.alpha_mode = VOP_REG(RK3066_DSP_CTRL0, 0x1, 22),
33642+	.alpha_en = VOP_REG(RK3066_BLEND_CTRL, 0x1, 1)
33643+};
33644+
33645+static const struct vop_win_phy rk3066_win2_data = {
33646+	.data_formats = formats_win_lite,
33647+	.nformats = ARRAY_SIZE(formats_win_lite),
33648+	.enable = VOP_REG(RK3066_SYS_CTRL1, 0x1, 2),
33649+	.format = VOP_REG(RK3066_SYS_CTRL0, 0x7, 10),
33650+	.rb_swap = VOP_REG(RK3066_SYS_CTRL0, 0x1, 27),
33651+	.dsp_info = VOP_REG(RK3066_WIN2_DSP_INFO, 0x0fff0fff, 0),
33652+	.dsp_st = VOP_REG(RK3066_WIN2_DSP_ST, 0x1fff1fff, 0),
33653+	.yrgb_mst = VOP_REG(RK3066_WIN2_MST, 0xffffffff, 0),
33654+	.yrgb_vir = VOP_REG(RK3066_WIN2_VIR, 0xffff, 0),
33655+	.alpha_mode = VOP_REG(RK3066_DSP_CTRL0, 0x1, 23),
33656+	.alpha_en = VOP_REG(RK3066_BLEND_CTRL, 0x1, 2)
33657+};
33658+
33659+static const struct vop_win_data rk3066_vop_win_data[] = {
33660+	{ .base = 0x00, .phy = &rk3066_win0_data,
33661 	  .type = DRM_PLANE_TYPE_PRIMARY },
33662-	{ .base = 0x40, .phy = &rk3368_win01_data,
33663-	  .type = DRM_PLANE_TYPE_OVERLAY },
33664-	{ .base = 0x00, .phy = &rk3368_win23_data,
33665+	{ .base = 0x00, .phy = &rk3066_win1_data,
33666 	  .type = DRM_PLANE_TYPE_OVERLAY },
33667-	{ .base = 0x50, .phy = &rk3368_win23_data,
33668+	{ .base = 0x00, .phy = &rk3066_win2_data,
33669 	  .type = DRM_PLANE_TYPE_CURSOR },
33670 };
33671 
33672-static const struct vop_afbc rk3399_vop_afbc = {
33673-	.rstn = VOP_REG(RK3399_AFBCD0_CTRL, 0x1, 3),
33674-	.enable = VOP_REG(RK3399_AFBCD0_CTRL, 0x1, 0),
33675-	.win_sel = VOP_REG(RK3399_AFBCD0_CTRL, 0x3, 1),
33676-	.format = VOP_REG(RK3399_AFBCD0_CTRL, 0x1f, 16),
33677-	.hreg_block_split = VOP_REG(RK3399_AFBCD0_CTRL, 0x1, 21),
33678-	.hdr_ptr = VOP_REG(RK3399_AFBCD0_HDR_PTR, 0xffffffff, 0),
33679-	.pic_size = VOP_REG(RK3399_AFBCD0_PIC_SIZE, 0xffffffff, 0),
33680+static const int rk3066_vop_intrs[] = {
33681+	0,
33682+	FS_INTR,
33683+	LINE_FLAG_INTR,
33684+	BUS_ERROR_INTR,
33685 };
33686 
33687-static const struct vop_data rk3399_vop_big = {
33688-	.version = VOP_VERSION(3, 5),
33689-	.feature = VOP_FEATURE_OUTPUT_RGB10,
33690-	.intr = &rk3366_vop_intr,
33691-	.common = &rk3288_common,
33692-	.modeset = &rk3288_modeset,
33693-	.output = &rk3399_output,
33694-	.afbc = &rk3399_vop_afbc,
33695-	.misc = &rk3368_misc,
33696-	.win = rk3399_vop_win_data,
33697-	.win_size = ARRAY_SIZE(rk3399_vop_win_data),
33698-	.win_yuv2yuv = rk3399_vop_big_win_yuv2yuv_data,
33699+static const struct vop_intr rk3066_intr = {
33700+	.intrs = rk3066_vop_intrs,
33701+	.nintrs = ARRAY_SIZE(rk3066_vop_intrs),
33702+	.line_flag_num[0] = VOP_REG(RK3066_INT_STATUS, 0xfff, 12),
33703+	.status = VOP_REG(RK3066_INT_STATUS, 0xf, 0),
33704+	.enable = VOP_REG(RK3066_INT_STATUS, 0xf, 4),
33705+	.clear = VOP_REG(RK3066_INT_STATUS, 0xf, 8),
33706 };
33707 
33708-static const struct vop_win_data rk3399_vop_lit_win_data[] = {
33709-	{ .base = 0x00, .phy = &rk3368_win01_data,
33710-	  .type = DRM_PLANE_TYPE_PRIMARY },
33711-	{ .base = 0x00, .phy = &rk3368_win23_data,
33712-	  .type = DRM_PLANE_TYPE_CURSOR},
33713+static const struct vop_ctrl rk3066_ctrl_data = {
33714+	.standby = VOP_REG(RK3066_SYS_CTRL0, 0x1, 1),
33715+	.out_mode = VOP_REG(RK3066_DSP_CTRL0, 0xf, 0),
33716+	.dsp_blank = VOP_REG(RK3066_DSP_CTRL1, 0x1, 24),
33717+	.dclk_pol = VOP_REG(RK3066_DSP_CTRL0, 0x1, 7),
33718+	.pin_pol = VOP_REG(RK3066_DSP_CTRL0, 0x7, 4),
33719+	.dsp_layer_sel = VOP_REG(RK3066_DSP_CTRL0, 0x1, 8),
33720+	.htotal_pw = VOP_REG(RK3066_DSP_HTOTAL_HS_END, 0x1fff1fff, 0),
33721+	.hact_st_end = VOP_REG(RK3066_DSP_HACT_ST_END, 0x1fff1fff, 0),
33722+	.vtotal_pw = VOP_REG(RK3066_DSP_VTOTAL_VS_END, 0x1fff1fff, 0),
33723+	.vact_st_end = VOP_REG(RK3066_DSP_VACT_ST_END, 0x1fff1fff, 0),
33724+	.cfg_done = VOP_REG(RK3066_REG_CFG_DONE, 0x1, 0),
33725 };
33726 
33727-static const struct vop_win_yuv2yuv_data rk3399_vop_lit_win_yuv2yuv_data[] = {
33728-	{ .base = 0x00, .phy = &rk3399_yuv2yuv_win01_data,
33729-	  .y2r_en = VOP_REG(RK3399_YUV2YUV_WIN, 0x1, 1)},
33730-	{ .base = 0x60, .phy = &rk3399_yuv2yuv_win23_data },
33731+static const struct vop_data rk3066_vop = {
33732+	.soc_id = 0x3066,
33733+	.vop_id = 0,
33734+	.version = VOP_VERSION(2, 1),
33735+	.max_input = {1920, 4096},
33736+	.max_output = {1920, 1080},
33737+	.ctrl = &rk3066_ctrl_data,
33738+	.intr = &rk3066_intr,
33739+	.win = rk3066_vop_win_data,
33740+	.win_size = ARRAY_SIZE(rk3066_vop_win_data),
33741 };
33742 
33743-static const struct vop_data rk3399_vop_lit = {
33744-	.version = VOP_VERSION(3, 6),
33745-	.intr = &rk3366_vop_intr,
33746-	.common = &rk3288_common,
33747-	.modeset = &rk3288_modeset,
33748-	.output = &rk3399_output,
33749-	.misc = &rk3368_misc,
33750-	.win = rk3399_vop_lit_win_data,
33751-	.win_size = ARRAY_SIZE(rk3399_vop_lit_win_data),
33752-	.win_yuv2yuv = rk3399_vop_lit_win_yuv2yuv_data,
33753+static const int rk3366_vop_lit_intrs[] = {
33754+	FS_INTR,
33755+	FS_NEW_INTR,
33756+	ADDR_SAME_INTR,
33757+	LINE_FLAG_INTR,
33758+	LINE_FLAG1_INTR,
33759+	BUS_ERROR_INTR,
33760+	WIN0_EMPTY_INTR,
33761+	WIN1_EMPTY_INTR,
33762+	DSP_HOLD_VALID_INTR,
33763+	DMA_FINISH_INTR,
33764+	WIN2_EMPTY_INTR,
33765+	POST_BUF_EMPTY_INTR
33766 };
33767 
33768-static const struct vop_win_data rk3228_vop_win_data[] = {
33769-	{ .base = 0x00, .phy = &rk3288_win01_data,
33770+static const struct vop_scl_regs rk3366_lit_win_scl = {
33771+	.scale_yrgb_x = VOP_REG(RK3366_LIT_WIN0_SCL_FACTOR_YRGB, 0xffff, 0x0),
33772+	.scale_yrgb_y = VOP_REG(RK3366_LIT_WIN0_SCL_FACTOR_YRGB, 0xffff, 16),
33773+	.scale_cbcr_x = VOP_REG(RK3366_LIT_WIN0_SCL_FACTOR_CBR, 0xffff, 0x0),
33774+	.scale_cbcr_y = VOP_REG(RK3366_LIT_WIN0_SCL_FACTOR_CBR, 0xffff, 16),
33775+};
33776+
33777+static const struct vop_win_phy rk3366_lit_win0_data = {
33778+	.scl = &rk3366_lit_win_scl,
33779+	.data_formats = formats_win_full,
33780+	.nformats = ARRAY_SIZE(formats_win_full),
33781+
33782+	.enable = VOP_REG(RK3366_LIT_WIN0_CTRL0, 0x1, 0),
33783+	.format = VOP_REG(RK3366_LIT_WIN0_CTRL0, 0x7, 1),
33784+	.rb_swap = VOP_REG(RK3366_LIT_WIN0_CTRL0, 0x1, 12),
33785+	.act_info = VOP_REG(RK3366_LIT_WIN0_ACT_INFO, 0xffffffff, 0),
33786+	.dsp_info = VOP_REG(RK3366_LIT_WIN0_DSP_INFO, 0xffffffff, 0),
33787+	.dsp_st = VOP_REG(RK3366_LIT_WIN0_DSP_ST, 0xffffffff, 0),
33788+	.yrgb_mst = VOP_REG(RK3366_LIT_WIN0_YRGB_MST0, 0xffffffff, 0),
33789+	.uv_mst = VOP_REG(RK3366_LIT_WIN0_CBR_MST0, 0xffffffff, 0),
33790+	.yrgb_vir = VOP_REG(RK3366_LIT_WIN0_VIR, 0x1fff, 0),
33791+	.uv_vir = VOP_REG(RK3366_LIT_WIN0_VIR, 0x1fff, 16),
33792+
33793+	.alpha_pre_mul = VOP_REG(RK3366_LIT_WIN0_ALPHA_CTRL, 0x1, 2),
33794+	.alpha_mode = VOP_REG(RK3366_LIT_WIN0_ALPHA_CTRL, 0x1, 1),
33795+	.alpha_en = VOP_REG(RK3366_LIT_WIN0_ALPHA_CTRL, 0x1, 0),
33796+	.global_alpha_val = VOP_REG(RK3366_LIT_WIN0_ALPHA_CTRL, 0xff, 4),
33797+	.key_color = VOP_REG(RK3366_LIT_WIN0_COLOR_KEY, 0xffffff, 0),
33798+	.key_en = VOP_REG(RK3366_LIT_WIN0_COLOR_KEY, 0x1, 24),
33799+};
33800+
33801+static const struct vop_win_phy rk3366_lit_win1_data = {
33802+	.data_formats = formats_win_lite,
33803+	.nformats = ARRAY_SIZE(formats_win_lite),
33804+
33805+	.enable = VOP_REG(RK3366_LIT_WIN1_CTRL0, 0x1, 0),
33806+	.format = VOP_REG(RK3366_LIT_WIN1_CTRL0, 0x7, 4),
33807+	.rb_swap = VOP_REG(RK3366_LIT_WIN1_CTRL0, 0x1, 12),
33808+	.dsp_info = VOP_REG(RK3366_LIT_WIN1_DSP_INFO, 0xffffffff, 0),
33809+	.dsp_st = VOP_REG(RK3366_LIT_WIN1_DSP_ST, 0xffffffff, 0),
33810+	.yrgb_mst = VOP_REG(RK3366_LIT_WIN1_MST, 0xffffffff, 0),
33811+	.yrgb_vir = VOP_REG(RK3366_LIT_WIN1_VIR, 0x1fff, 0),
33812+
33813+	.alpha_pre_mul = VOP_REG(RK3366_LIT_WIN1_ALPHA_CTRL, 0x1, 2),
33814+	.alpha_mode = VOP_REG(RK3366_LIT_WIN1_ALPHA_CTRL, 0x1, 1),
33815+	.alpha_en = VOP_REG(RK3366_LIT_WIN1_ALPHA_CTRL, 0x1, 0),
33816+	.global_alpha_val = VOP_REG(RK3366_LIT_WIN1_ALPHA_CTRL, 0xff, 4),
33817+	.key_color = VOP_REG(RK3366_LIT_WIN1_COLOR_KEY, 0xffffff, 0),
33818+	.key_en = VOP_REG(RK3366_LIT_WIN1_COLOR_KEY, 0x1, 24),
33819+};
33820+
33821+static const struct vop_win_data rk3366_vop_lit_win_data[] = {
33822+	{ .base = 0x00, .phy = &rk3366_lit_win0_data,
33823 	  .type = DRM_PLANE_TYPE_PRIMARY },
33824-	{ .base = 0x40, .phy = &rk3288_win01_data,
33825+	{ .base = 0x00, .phy = &rk3366_lit_win1_data,
33826 	  .type = DRM_PLANE_TYPE_CURSOR },
33827 };
33828 
33829-static const struct vop_data rk3228_vop = {
33830-	.version = VOP_VERSION(3, 7),
33831-	.feature = VOP_FEATURE_OUTPUT_RGB10,
33832-	.intr = &rk3366_vop_intr,
33833-	.common = &rk3288_common,
33834-	.modeset = &rk3288_modeset,
33835-	.output = &rk3399_output,
33836-	.misc = &rk3368_misc,
33837-	.win = rk3228_vop_win_data,
33838-	.win_size = ARRAY_SIZE(rk3228_vop_win_data),
33839+static const struct vop_intr rk3366_lit_intr = {
33840+	.intrs = rk3366_vop_lit_intrs,
33841+	.nintrs = ARRAY_SIZE(rk3366_vop_lit_intrs),
33842+	.line_flag_num[0] = VOP_REG(RK3366_LIT_LINE_FLAG, 0xfff, 0),
33843+	.line_flag_num[1] = VOP_REG(RK3366_LIT_LINE_FLAG, 0xfff, 16),
33844+	.status = VOP_REG_MASK(RK3366_LIT_INTR_STATUS, 0xffff, 0),
33845+	.enable = VOP_REG_MASK(RK3366_LIT_INTR_EN, 0xffff, 0),
33846+	.clear = VOP_REG_MASK(RK3366_LIT_INTR_CLEAR, 0xffff, 0),
33847 };
33848 
33849-static const struct vop_modeset rk3328_modeset = {
33850-	.htotal_pw = VOP_REG(RK3328_DSP_HTOTAL_HS_END, 0x1fff1fff, 0),
33851-	.hact_st_end = VOP_REG(RK3328_DSP_HACT_ST_END, 0x1fff1fff, 0),
33852-	.vtotal_pw = VOP_REG(RK3328_DSP_VTOTAL_VS_END, 0x1fff1fff, 0),
33853-	.vact_st_end = VOP_REG(RK3328_DSP_VACT_ST_END, 0x1fff1fff, 0),
33854-	.hpost_st_end = VOP_REG(RK3328_POST_DSP_HACT_INFO, 0x1fff1fff, 0),
33855-	.vpost_st_end = VOP_REG(RK3328_POST_DSP_VACT_INFO, 0x1fff1fff, 0),
33856+static const struct vop_win_phy rk3126_win1_data = {
33857+	.data_formats = formats_win_lite,
33858+	.nformats = ARRAY_SIZE(formats_win_lite),
33859+	.enable = VOP_REG(RK3036_SYS_CTRL, 0x1, 1),
33860+	.format = VOP_REG(RK3036_SYS_CTRL, 0x7, 6),
33861+	.rb_swap = VOP_REG(RK3036_SYS_CTRL, 0x1, 19),
33862+	.dsp_info = VOP_REG(RK3126_WIN1_DSP_INFO, 0x0fff0fff, 0),
33863+	.dsp_st = VOP_REG(RK3126_WIN1_DSP_ST, 0x1fff1fff, 0),
33864+	.yrgb_mst = VOP_REG(RK3126_WIN1_MST, 0xffffffff, 0),
33865+	.yrgb_vir = VOP_REG(RK3036_WIN1_VIR, 0xffff, 0),
33866+	.alpha_mode = VOP_REG(RK3036_DSP_CTRL0, 0x1, 19),
33867+	.alpha_en = VOP_REG(RK3036_ALPHA_CTRL, 0x1, 1),
33868+	.alpha_pre_mul = VOP_REG(RK3036_DSP_CTRL0, 0x1, 29),
33869 };
33870 
33871-static const struct vop_output rk3328_output = {
33872-	.rgb_dclk_pol = VOP_REG(RK3328_DSP_CTRL1, 0x1, 19),
33873-	.hdmi_dclk_pol = VOP_REG(RK3328_DSP_CTRL1, 0x1, 23),
33874-	.edp_dclk_pol = VOP_REG(RK3328_DSP_CTRL1, 0x1, 27),
33875-	.mipi_dclk_pol = VOP_REG(RK3328_DSP_CTRL1, 0x1, 31),
33876-	.rgb_en = VOP_REG(RK3328_SYS_CTRL, 0x1, 12),
33877-	.hdmi_en = VOP_REG(RK3328_SYS_CTRL, 0x1, 13),
33878-	.edp_en = VOP_REG(RK3328_SYS_CTRL, 0x1, 14),
33879-	.mipi_en = VOP_REG(RK3328_SYS_CTRL, 0x1, 15),
33880-	.rgb_pin_pol = VOP_REG(RK3328_DSP_CTRL1, 0x7, 16),
33881-	.hdmi_pin_pol = VOP_REG(RK3328_DSP_CTRL1, 0x7, 20),
33882-	.edp_pin_pol = VOP_REG(RK3328_DSP_CTRL1, 0x7, 24),
33883-	.mipi_pin_pol = VOP_REG(RK3328_DSP_CTRL1, 0x7, 28),
33884+static const struct vop_win_data rk3126_vop_win_data[] = {
33885+	{ .base = 0x00, .phy = &rk3036_win0_data,
33886+	  .type = DRM_PLANE_TYPE_OVERLAY },
33887+	{ .base = 0x00, .phy = &rk3126_win1_data,
33888+	  .type = DRM_PLANE_TYPE_PRIMARY },
33889 };
33890 
33891-static const struct vop_misc rk3328_misc = {
33892-	.global_regdone_en = VOP_REG(RK3328_SYS_CTRL, 0x1, 11),
33893+static const struct vop_data rk3126_vop = {
33894+	.soc_id = 0x3126,
33895+	.vop_id = 0,
33896+	.version = VOP_VERSION(2, 4),
33897+	.max_input = {1920, 8192},
33898+	.max_output = {1920, 1080},
33899+	.ctrl = &rk3036_ctrl_data,
33900+	.intr = &rk3036_intr,
33901+	.win = rk3126_vop_win_data,
33902+	.win_size = ARRAY_SIZE(rk3126_vop_win_data),
33903 };
33904 
33905-static const struct vop_common rk3328_common = {
33906-	.standby = VOP_REG_SYNC(RK3328_SYS_CTRL, 0x1, 22),
33907-	.dither_down_sel = VOP_REG(RK3328_DSP_CTRL1, 0x1, 4),
33908-	.dither_down_mode = VOP_REG(RK3328_DSP_CTRL1, 0x1, 3),
33909-	.dither_down_en = VOP_REG(RK3328_DSP_CTRL1, 0x1, 2),
33910-	.pre_dither_down = VOP_REG(RK3328_DSP_CTRL1, 0x1, 1),
33911-	.dither_up = VOP_REG(RK3328_DSP_CTRL1, 0x1, 6),
33912-	.dsp_blank = VOP_REG(RK3328_DSP_CTRL0, 0x3, 18),
33913-	.out_mode = VOP_REG(RK3328_DSP_CTRL0, 0xf, 0),
33914-	.cfg_done = VOP_REG_SYNC(RK3328_REG_CFG_DONE, 0x1, 0),
33915+/* The PX30 VOPB win2 is the same as the RK3368 win2, but the RK3368
33916+ * win2 register offset is 0xb0 while the PX30 offset is 0x190, so the
33917+ * PX30 VOPB win2 base is set to 0x190 - 0xb0 = 0xe0.
33918+ */
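+
+/*
+ * Sketch of how that base is consumed, assuming the usual VOP register
+ * helpers that write to vop->regs + win->base + reg->offset: the win2
+ * phy below reuses the RK3368_WIN2_* offsets, so RK3368_WIN2_CTRL0
+ * (0xb0) plus base 0xe0 resolves to the PX30 win2 CTRL0 at 0x190.
+ */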
33919+
33920+static const struct vop_ctrl px30_ctrl_data = {
33921+	.standby = VOP_REG(RK3366_LIT_SYS_CTRL2, 0x1, 1),
33922+	.axi_outstanding_max_num = VOP_REG(RK3366_LIT_SYS_CTRL1, 0x1f, 16),
33923+	.axi_max_outstanding_en = VOP_REG(RK3366_LIT_SYS_CTRL1, 0x1, 12),
33924+	.htotal_pw = VOP_REG(RK3366_LIT_DSP_HTOTAL_HS_END, 0x0fff0fff, 0),
33925+	.hact_st_end = VOP_REG(RK3366_LIT_DSP_HACT_ST_END, 0x0fff0fff, 0),
33926+	.vtotal_pw = VOP_REG(RK3366_LIT_DSP_VTOTAL_VS_END, 0x0fff0fff, 0),
33927+	.vact_st_end = VOP_REG(RK3366_LIT_DSP_VACT_ST_END, 0x0fff0fff, 0),
33928+	.vact_st_end_f1 = VOP_REG(RK3366_LIT_DSP_VACT_ST_END_F1, 0x0fff0fff, 0),
33929+	.vs_st_end_f1 = VOP_REG(RK3366_LIT_DSP_VS_ST_END_F1, 0x0fff0fff, 0),
33930+	.dsp_interlace = VOP_REG(RK3366_LIT_DSP_CTRL2, 0x1, 0),
33931+	.global_regdone_en = VOP_REG(RK3366_LIT_SYS_CTRL2, 0x1, 13),
33932+	.auto_gate_en = VOP_REG(RK3366_LIT_SYS_CTRL2, 0x1, 0),
33933+	.dsp_layer_sel = VOP_REG(RK3366_LIT_DSP_CTRL2, 0xff, 22),
33934+	.overlay_mode = VOP_REG(RK3366_LIT_DSP_CTRL2, 0x1, 4),
33935+	.core_dclk_div = VOP_REG(RK3366_LIT_DSP_CTRL0, 0x1, 13),
33936+	.dclk_ddr = VOP_REG(RK3366_LIT_DSP_CTRL0, 0x1, 14),
33937+	.rgb_en = VOP_REG(RK3366_LIT_DSP_CTRL0, 0x1, 0),
33938+	.rgb_pin_pol = VOP_REG(RK3366_LIT_DSP_CTRL0, 0x7, 2),
33939+	.hdmi_en = VOP_REG(RK3366_LIT_DSP_CTRL0, 0x1, 8),
33940+	.hdmi_pin_pol = VOP_REG(RK3366_LIT_DSP_CTRL0, 0x7, 10),
33941+	.lvds_en = VOP_REG(RK3366_LIT_DSP_CTRL0, 0x1, 16),
33942+	.lvds_pin_pol = VOP_REG(RK3366_LIT_DSP_CTRL0, 0x7, 18),
33943+	.mipi_en = VOP_REG(RK3366_LIT_DSP_CTRL0, 0x1, 24),
33944+	.mipi_pin_pol = VOP_REG(RK3366_LIT_DSP_CTRL0, 0x7, 26),
33945+	.mipi_dclk_pol = VOP_REG(RK3366_LIT_DSP_CTRL0, 0x1, 25),
33946+	.lvds_dclk_pol = VOP_REG(RK3366_LIT_DSP_CTRL0, 0x1, 17),
33947+	.hdmi_dclk_pol = VOP_REG(RK3366_LIT_DSP_CTRL0, 0x1, 9),
33948+	.rgb_dclk_pol = VOP_REG(RK3366_LIT_DSP_CTRL0, 0x1, 1),
33949+	.dither_down_en = VOP_REG(RK3366_LIT_DSP_CTRL2, 0x1, 8),
33950+	.dither_down_sel = VOP_REG(RK3366_LIT_DSP_CTRL2, 0x1, 7),
33951+	.dither_down_mode = VOP_REG(RK3366_LIT_DSP_CTRL2, 0x1, 6),
33952+	.dither_up_en = VOP_REG(RK3366_LIT_DSP_CTRL2, 0x1, 2),
33953+	.dsp_data_swap = VOP_REG(RK3366_LIT_DSP_CTRL2, 0x1f, 9),
33954+	.dsp_ccir656_avg = VOP_REG(RK3366_LIT_SYS_CTRL2, 0x1, 5),
33955+	.dsp_black = VOP_REG(RK3366_LIT_DSP_CTRL2, 0x1, 15),
33956+	.dsp_blank = VOP_REG(RK3366_LIT_DSP_CTRL2, 0x1, 14),
33957+	.dsp_outzero = VOP_REG(RK3366_LIT_SYS_CTRL2, 0x1, 3),
33958+	.dsp_lut_en = VOP_REG(RK3366_LIT_DSP_CTRL2, 0x1, 5),
33959+	.out_mode = VOP_REG(RK3366_LIT_DSP_CTRL2, 0xf, 16),
33960+	.dsp_background = VOP_REG(RK3366_LIT_DSP_BG, 0x00ffffff, 0),
33961+	.cfg_done = VOP_REG(RK3366_LIT_REG_CFG_DONE, 0x1, 0),
33962+
33963+	.bcsh_en = VOP_REG(RK3366_LIT_BCSH_CTRL, 0x1, 0),
33964+	.bcsh_r2y_csc_mode = VOP_REG(RK3366_LIT_BCSH_CTRL, 0x1, 1),
33965+	.bcsh_out_mode = VOP_REG(RK3366_LIT_BCSH_CTRL, 0x3, 2),
33966+	.bcsh_y2r_csc_mode = VOP_REG(RK3366_LIT_BCSH_CTRL, 0x3, 4),
33967+	.bcsh_y2r_en = VOP_REG(RK3366_LIT_BCSH_CTRL, 0x1, 6),
33968+	.bcsh_r2y_en = VOP_REG(RK3366_LIT_BCSH_CTRL, 0x1, 7),
33969+	.bcsh_color_bar = VOP_REG(RK3366_LIT_BCSH_COL_BAR, 0xffffff, 0),
33970+	.bcsh_brightness = VOP_REG(RK3366_LIT_BCSH_BCS, 0xff, 0),
33971+	.bcsh_contrast = VOP_REG(RK3366_LIT_BCSH_BCS, 0x1ff, 8),
33972+	.bcsh_sat_con = VOP_REG(RK3366_LIT_BCSH_BCS, 0x3ff, 20),
33973+	.bcsh_sin_hue = VOP_REG(RK3366_LIT_BCSH_H, 0x1ff, 0),
33974+	.bcsh_cos_hue = VOP_REG(RK3366_LIT_BCSH_H, 0x1ff, 16),
33975+
33976+	.afbdc_en = VOP_REG(PX30_AFBCD0_CTRL, 0x1, 0),
33977+	.afbdc_format = VOP_REG(PX30_AFBCD0_CTRL, 0x1f, 4),
33978+	.afbdc_pic_vir_width = VOP_REG(PX30_AFBCD0_CTRL, 0xffff, 16),
33979+	.afbdc_hdr_ptr = VOP_REG(PX30_AFBCD0_HDR_PTR, 0xffffffff, 0),
33980+	.afbdc_pic_size = VOP_REG(PX30_AFBCD0_PIC_SIZE, 0xffffffff, 0),
33981+	.afbdc_pic_offset = VOP_REG(PX30_AFBCD0_PIC_OFFSET, 0xffffffff, 0),
33982+	.afbdc_axi_ctrl = VOP_REG(PX30_AFBCD0_AXI_CTRL, 0xffffffff, 0),
33983+
33984+	.mcu_pix_total = VOP_REG(RK3366_LIT_MCU_CTRL, 0x3f, 0),
33985+	.mcu_cs_pst = VOP_REG(RK3366_LIT_MCU_CTRL, 0xf, 6),
33986+	.mcu_cs_pend = VOP_REG(RK3366_LIT_MCU_CTRL, 0x3f, 10),
33987+	.mcu_rw_pst = VOP_REG(RK3366_LIT_MCU_CTRL, 0xf, 16),
33988+	.mcu_rw_pend = VOP_REG(RK3366_LIT_MCU_CTRL, 0x3f, 20),
33989+	.mcu_clk_sel = VOP_REG(RK3366_LIT_MCU_CTRL, 0x1, 26),
33990+	.mcu_hold_mode = VOP_REG(RK3366_LIT_MCU_CTRL, 0x1, 27),
33991+	.mcu_frame_st = VOP_REG(RK3366_LIT_MCU_CTRL, 0x1, 28),
33992+	.mcu_rs = VOP_REG(RK3366_LIT_MCU_CTRL, 0x1, 29),
33993+	.mcu_bypass = VOP_REG(RK3366_LIT_MCU_CTRL, 0x1, 30),
33994+	.mcu_type = VOP_REG(RK3366_LIT_MCU_CTRL, 0x1, 31),
33995+	.mcu_rw_bypass_port = VOP_REG(RK3366_LIT_MCU_RW_BYPASS_PORT,
33996+				      0xffffffff, 0),
33997+};
33998+
33999+static const struct vop_win_phy px30_win23_data = {
34000+	.data_formats = formats_win_lite,
34001+	.nformats = ARRAY_SIZE(formats_win_lite),
34002+	.gate = VOP_REG(RK3368_WIN2_CTRL0, 0x1, 0),
34003+	.enable = VOP_REG(RK3368_WIN2_CTRL0, 0x1, 4),
34004+	.format = VOP_REG(RK3368_WIN2_CTRL0, 0x3, 5),
34005+	.rb_swap = VOP_REG(RK3368_WIN2_CTRL0, 0x1, 20),
34006+	.dsp_info = VOP_REG(RK3368_WIN2_DSP_INFO0, 0x0fff0fff, 0),
34007+	.dsp_st = VOP_REG(RK3368_WIN2_DSP_ST0, 0x1fff1fff, 0),
34008+	.yrgb_mst = VOP_REG(RK3368_WIN2_MST0, 0xffffffff, 0),
34009+	.yrgb_vir = VOP_REG(RK3368_WIN2_VIR0_1, 0x1fff, 0),
34010+	.alpha_pre_mul = VOP_REG(RK3368_WIN2_SRC_ALPHA_CTRL, 0x1, 2),
34011+	.alpha_mode = VOP_REG(RK3368_WIN2_SRC_ALPHA_CTRL, 0x1, 1),
34012+	.alpha_en = VOP_REG(RK3368_WIN2_SRC_ALPHA_CTRL, 0x1, 0),
34013+	.global_alpha_val = VOP_REG(RK3368_WIN2_SRC_ALPHA_CTRL, 0xff, 4),
34014+	.channel = VOP_REG(RK3368_WIN2_CTRL1, 0xf, 8),
34015 };
34016 
34017-static const struct vop_intr rk3328_vop_intr = {
34018-	.intrs = rk3368_vop_intrs,
34019-	.nintrs = ARRAY_SIZE(rk3368_vop_intrs),
34020-	.line_flag_num[0] = VOP_REG(RK3328_LINE_FLAG, 0xffff, 0),
34021-	.line_flag_num[1] = VOP_REG(RK3328_LINE_FLAG, 0xffff, 16),
34022-	.status = VOP_REG_MASK_SYNC(RK3328_INTR_STATUS0, 0xffff, 0),
34023-	.enable = VOP_REG_MASK_SYNC(RK3328_INTR_EN0, 0xffff, 0),
34024-	.clear = VOP_REG_MASK_SYNC(RK3328_INTR_CLEAR0, 0xffff, 0),
34025+static const struct vop_win_data px30_vop_big_win_data[] = {
34026+	{ .base = 0x00, .phy = &rk3366_lit_win0_data,
34027+	  .type = DRM_PLANE_TYPE_OVERLAY },
34028+	{ .base = 0x00, .phy = &rk3366_lit_win1_data,
34029+	  .type = DRM_PLANE_TYPE_PRIMARY,
34030+	  .feature = WIN_FEATURE_AFBDC },
34031+	{ .base = 0xe0, .phy = &px30_win23_data,
34032+	  .type = DRM_PLANE_TYPE_CURSOR,
34033+	  .area = rk3368_area_data,
34034+	  .area_size = ARRAY_SIZE(rk3368_area_data), },
34035 };
34036 
34037-static const struct vop_win_data rk3328_vop_win_data[] = {
34038-	{ .base = 0xd0, .phy = &rk3368_win01_data,
34039+static const struct vop_win_data px30_vop_lit_win_data[] = {
34040+	{ .phy = NULL },
34041+	{ .base = 0x00, .phy = &rk3366_lit_win1_data,
34042 	  .type = DRM_PLANE_TYPE_PRIMARY },
34043-	{ .base = 0x1d0, .phy = &rk3368_win01_data,
34044+	{ .phy = NULL },
34045+};
34046+
34047+static const struct vop_grf_ctrl px30_grf_ctrl = {
34048+	.grf_dclk_inv = VOP_REG(PX30_GRF_PD_VO_CON1, 0x1, 4),
34049+};
34050+
34051+static const struct vop_data px30_vop_lit = {
34052+	.soc_id = 0x3326,
34053+	.vop_id = 1,
34054+	.version = VOP_VERSION(2, 5),
34055+	.max_input = {1920, 8192},
34056+	.max_output = {1920, 1080},
34057+	.ctrl = &px30_ctrl_data,
34058+	.intr = &rk3366_lit_intr,
34059+	.grf_ctrl = &px30_grf_ctrl,
34060+	.win = px30_vop_lit_win_data,
34061+	.win_size = ARRAY_SIZE(px30_vop_lit_win_data),
34062+};
34063+
34064+static const struct vop_data px30_vop_big = {
34065+	.soc_id = 0x3326,
34066+	.vop_id = 0,
34067+	.version = VOP_VERSION(2, 6),
34068+	.max_input = {1920, 8192},
34069+	.max_output = {1920, 1080},
34070+	.ctrl = &px30_ctrl_data,
34071+	.intr = &rk3366_lit_intr,
34072+	.grf_ctrl = &px30_grf_ctrl,
34073+	.win = px30_vop_big_win_data,
34074+	.win_size = ARRAY_SIZE(px30_vop_big_win_data),
34075+};
34076+
34077+static const struct vop_ctrl rk3308_ctrl_data = {
34078+	.standby = VOP_REG(RK3366_LIT_SYS_CTRL2, 0x1, 1),
34079+	.axi_outstanding_max_num = VOP_REG(RK3366_LIT_SYS_CTRL1, 0x1f, 16),
34080+	.axi_max_outstanding_en = VOP_REG(RK3366_LIT_SYS_CTRL1, 0x1, 12),
34081+	.htotal_pw = VOP_REG(RK3366_LIT_DSP_HTOTAL_HS_END, 0x0fff0fff, 0),
34082+	.hact_st_end = VOP_REG(RK3366_LIT_DSP_HACT_ST_END, 0x0fff0fff, 0),
34083+	.vtotal_pw = VOP_REG(RK3366_LIT_DSP_VTOTAL_VS_END, 0x0fff0fff, 0),
34084+	.vact_st_end = VOP_REG(RK3366_LIT_DSP_VACT_ST_END, 0x0fff0fff, 0),
34085+	.vact_st_end_f1 = VOP_REG(RK3366_LIT_DSP_VACT_ST_END_F1, 0x0fff0fff, 0),
34086+	.vs_st_end_f1 = VOP_REG(RK3366_LIT_DSP_VS_ST_END_F1, 0x0fff0fff, 0),
34087+	.global_regdone_en = VOP_REG(RK3366_LIT_SYS_CTRL2, 0x1, 13),
34088+	.auto_gate_en = VOP_REG(RK3366_LIT_SYS_CTRL2, 0x1, 0),
34089+	.dsp_layer_sel = VOP_REG(RK3366_LIT_DSP_CTRL2, 0x1, 3),
34090+	.overlay_mode = VOP_REG(RK3366_LIT_DSP_CTRL2, 0x1, 4),
34091+	.dclk_ddr = VOP_REG(RK3366_LIT_DSP_CTRL0, 0x1, 14),
34092+	.rgb_en = VOP_REG(RK3366_LIT_DSP_CTRL0, 0x1, 0),
34093+	.rgb_pin_pol = VOP_REG(RK3366_LIT_DSP_CTRL0, 0x7, 2),
34094+	.rgb_dclk_pol = VOP_REG(RK3366_LIT_DSP_CTRL0, 0x1, 1),
34095+	.dither_down_en = VOP_REG(RK3366_LIT_DSP_CTRL2, 0x1, 8),
34096+	.dither_down_sel = VOP_REG(RK3366_LIT_DSP_CTRL2, 0x1, 7),
34097+	.dither_down_mode = VOP_REG(RK3366_LIT_DSP_CTRL2, 0x1, 6),
34098+	.dither_up_en = VOP_REG(RK3366_LIT_DSP_CTRL2, 0x1, 2),
34099+	.dsp_data_swap = VOP_REG(RK3366_LIT_DSP_CTRL2, 0x1f, 9),
34100+	.dsp_ccir656_avg = VOP_REG(RK3366_LIT_SYS_CTRL2, 0x1, 5),
34101+	.dsp_black = VOP_REG(RK3366_LIT_DSP_CTRL2, 0x1, 15),
34102+	.dsp_blank = VOP_REG(RK3366_LIT_DSP_CTRL2, 0x1, 14),
34103+	.dsp_outzero = VOP_REG(RK3366_LIT_SYS_CTRL2, 0x1, 3),
34104+	.dsp_lut_en = VOP_REG(RK3366_LIT_DSP_CTRL2, 0x1, 5),
34105+	.out_mode = VOP_REG(RK3366_LIT_DSP_CTRL2, 0xf, 16),
34106+	.dsp_background = VOP_REG(RK3366_LIT_DSP_BG, 0x00ffffff, 0),
34107+	.cfg_done = VOP_REG(RK3366_LIT_REG_CFG_DONE, 0x1, 0),
34108+
34109+	.bcsh_en = VOP_REG(RK3366_LIT_BCSH_CTRL, 0x1, 0),
34110+	.bcsh_r2y_csc_mode = VOP_REG(RK3366_LIT_BCSH_CTRL, 0x1, 1),
34111+	.bcsh_out_mode = VOP_REG(RK3366_LIT_BCSH_CTRL, 0x3, 2),
34112+	.bcsh_y2r_csc_mode = VOP_REG(RK3366_LIT_BCSH_CTRL, 0x3, 4),
34113+	.bcsh_y2r_en = VOP_REG(RK3366_LIT_BCSH_CTRL, 0x1, 6),
34114+	.bcsh_r2y_en = VOP_REG(RK3366_LIT_BCSH_CTRL, 0x1, 7),
34115+	.bcsh_color_bar = VOP_REG(RK3366_LIT_BCSH_COL_BAR, 0xffffff, 0),
34116+	.bcsh_brightness = VOP_REG(RK3366_LIT_BCSH_BCS, 0x3f, 0),
34117+	.bcsh_contrast = VOP_REG(RK3366_LIT_BCSH_BCS, 0xff, 8),
34118+	.bcsh_sat_con = VOP_REG(RK3366_LIT_BCSH_BCS, 0x1ff, 16),
34119+	.bcsh_sin_hue = VOP_REG(RK3366_LIT_BCSH_H, 0xff, 0),
34120+	.bcsh_cos_hue = VOP_REG(RK3366_LIT_BCSH_H, 0xff, 8),
34121+
34122+	.mcu_pix_total = VOP_REG(RK3366_LIT_MCU_CTRL, 0x3f, 0),
34123+	.mcu_cs_pst = VOP_REG(RK3366_LIT_MCU_CTRL, 0xf, 6),
34124+	.mcu_cs_pend = VOP_REG(RK3366_LIT_MCU_CTRL, 0x3f, 10),
34125+	.mcu_rw_pst = VOP_REG(RK3366_LIT_MCU_CTRL, 0xf, 16),
34126+	.mcu_rw_pend = VOP_REG(RK3366_LIT_MCU_CTRL, 0x3f, 20),
34127+	.mcu_clk_sel = VOP_REG(RK3366_LIT_MCU_CTRL, 0x1, 26),
34128+	.mcu_hold_mode = VOP_REG(RK3366_LIT_MCU_CTRL, 0x1, 27),
34129+	.mcu_frame_st = VOP_REG(RK3366_LIT_MCU_CTRL, 0x1, 28),
34130+	.mcu_rs = VOP_REG(RK3366_LIT_MCU_CTRL, 0x1, 29),
34131+	.mcu_bypass = VOP_REG(RK3366_LIT_MCU_CTRL, 0x1, 30),
34132+	.mcu_type = VOP_REG(RK3366_LIT_MCU_CTRL, 0x1, 31),
34133+	.mcu_rw_bypass_port = VOP_REG(RK3366_LIT_MCU_RW_BYPASS_PORT,
34134+				      0xffffffff, 0),
34135+};
34136+
34137+static const int rk3308_vop_intrs[] = {
34138+	FS_INTR,
34139+	FS_NEW_INTR,
34140+	ADDR_SAME_INTR,
34141+	LINE_FLAG_INTR,
34142+	LINE_FLAG1_INTR,
34143+	BUS_ERROR_INTR,
34144+	0,
34145+	0,
34146+	DSP_HOLD_VALID_INTR,
34147+	DMA_FINISH_INTR,
34148+	0,
34149+	POST_BUF_EMPTY_INTR
34150+};
34151+
34152+static const struct vop_intr rk3308_vop_intr = {
34153+	.intrs = rk3308_vop_intrs,
34154+	.nintrs = ARRAY_SIZE(rk3308_vop_intrs),
34155+	.line_flag_num[0] = VOP_REG(RK3366_LIT_LINE_FLAG, 0xfff, 0),
34156+	.line_flag_num[1] = VOP_REG(RK3366_LIT_LINE_FLAG, 0xfff, 16),
34157+	.status = VOP_REG_MASK(RK3366_LIT_INTR_STATUS, 0xffff, 0),
34158+	.enable = VOP_REG_MASK(RK3366_LIT_INTR_EN, 0xffff, 0),
34159+	.clear = VOP_REG_MASK(RK3366_LIT_INTR_CLEAR, 0xffff, 0),
34160+};
34161+
34162+static const struct vop_data rk3308_vop = {
34163+	.soc_id = 0x3308,
34164+	.vop_id = 0,
34165+	.version = VOP_VERSION(2, 7),
34166+	.max_input = {1920, 8192},
34167+	.max_output = {1920, 1080},
34168+	.ctrl = &rk3308_ctrl_data,
34169+	.intr = &rk3308_vop_intr,
34170+	.win = rk3366_vop_lit_win_data,
34171+	.win_size = ARRAY_SIZE(rk3366_vop_lit_win_data),
34172+};
34173+
34174+static const struct vop_ctrl rv1126_ctrl_data = {
34175+	.standby = VOP_REG(RK3366_LIT_SYS_CTRL2, 0x1, 1),
34176+	.axi_outstanding_max_num = VOP_REG(RK3366_LIT_SYS_CTRL1, 0x1f, 16),
34177+	.axi_max_outstanding_en = VOP_REG(RK3366_LIT_SYS_CTRL1, 0x1, 12),
34178+	.htotal_pw = VOP_REG(RK3366_LIT_DSP_HTOTAL_HS_END, 0x0fff0fff, 0),
34179+	.hact_st_end = VOP_REG(RK3366_LIT_DSP_HACT_ST_END, 0x0fff0fff, 0),
34180+	.vtotal_pw = VOP_REG(RK3366_LIT_DSP_VTOTAL_VS_END, 0x0fff0fff, 0),
34181+	.vact_st_end = VOP_REG(RK3366_LIT_DSP_VACT_ST_END, 0x0fff0fff, 0),
34182+	.vact_st_end_f1 = VOP_REG(RK3366_LIT_DSP_VACT_ST_END_F1, 0x0fff0fff, 0),
34183+	.vs_st_end_f1 = VOP_REG(RK3366_LIT_DSP_VS_ST_END_F1, 0x0fff0fff, 0),
34184+	.dsp_interlace = VOP_REG(RK3366_LIT_DSP_CTRL2, 0x1, 0),
34185+	.global_regdone_en = VOP_REG(RK3366_LIT_SYS_CTRL2, 0x1, 13),
34186+	.auto_gate_en = VOP_REG(RK3366_LIT_SYS_CTRL2, 0x1, 0),
34187+	.dsp_layer_sel = VOP_REG(RK3366_LIT_DSP_CTRL2, 0xff, 22),
34188+	.overlay_mode = VOP_REG(RK3366_LIT_DSP_CTRL2, 0x1, 4),
34189+	.core_dclk_div = VOP_REG(RK3366_LIT_DSP_CTRL0, 0x1, 13),
34190+	.dclk_ddr = VOP_REG(RK3366_LIT_DSP_CTRL0, 0x1, 14),
34191+	.rgb_en = VOP_REG(RK3366_LIT_DSP_CTRL0, 0x1, 0),
34192+	.rgb_pin_pol = VOP_REG(RK3366_LIT_DSP_CTRL0, 0x7, 2),
34193+	.hdmi_en = VOP_REG(RK3366_LIT_DSP_CTRL0, 0x1, 8),
34194+	.hdmi_pin_pol = VOP_REG(RK3366_LIT_DSP_CTRL0, 0x7, 10),
34195+	.lvds_en = VOP_REG(RK3366_LIT_DSP_CTRL0, 0x1, 16),
34196+	.lvds_pin_pol = VOP_REG(RK3366_LIT_DSP_CTRL0, 0x7, 18),
34197+	.mipi_en = VOP_REG(RK3366_LIT_DSP_CTRL0, 0x1, 24),
34198+	.mipi_pin_pol = VOP_REG(RK3366_LIT_DSP_CTRL0, 0x7, 26),
34199+	.mipi_dclk_pol = VOP_REG(RK3366_LIT_DSP_CTRL0, 0x1, 25),
34200+	.lvds_dclk_pol = VOP_REG(RK3366_LIT_DSP_CTRL0, 0x1, 17),
34201+	.hdmi_dclk_pol = VOP_REG(RK3366_LIT_DSP_CTRL0, 0x1, 9),
34202+	.rgb_dclk_pol = VOP_REG(RK3366_LIT_DSP_CTRL0, 0x1, 1),
34203+	.dither_down_en = VOP_REG(RK3366_LIT_DSP_CTRL2, 0x1, 8),
34204+	.dither_down_sel = VOP_REG(RK3366_LIT_DSP_CTRL2, 0x1, 7),
34205+	.dither_down_mode = VOP_REG(RK3366_LIT_DSP_CTRL2, 0x1, 6),
34206+	.dither_up_en = VOP_REG(RK3366_LIT_DSP_CTRL2, 0x1, 2),
34207+	.dsp_data_swap = VOP_REG(RK3366_LIT_DSP_CTRL2, 0x1f, 9),
34208+	.yuv_clip = VOP_REG(RK3366_LIT_SYS_CTRL2, 0x1, 4),
34209+	.dsp_ccir656_avg = VOP_REG(RK3366_LIT_SYS_CTRL2, 0x1, 5),
34210+	.dsp_black = VOP_REG(RK3366_LIT_DSP_CTRL2, 0x1, 15),
34211+	.dsp_blank = VOP_REG(RK3366_LIT_DSP_CTRL2, 0x1, 14),
34212+	.dsp_outzero = VOP_REG(RK3366_LIT_SYS_CTRL2, 0x1, 3),
34213+	.dsp_lut_en = VOP_REG(RK3366_LIT_DSP_CTRL2, 0x1, 5),
34214+	.out_mode = VOP_REG(RK3366_LIT_DSP_CTRL2, 0xf, 16),
34215+	.dsp_background = VOP_REG(RK3366_LIT_DSP_BG, 0x00ffffff, 0),
34216+	.cfg_done = VOP_REG(RK3366_LIT_REG_CFG_DONE, 0x1, 0),
34217+
34218+	.bcsh_en = VOP_REG(RK3366_LIT_BCSH_CTRL, 0x1, 0),
34219+	.bcsh_r2y_csc_mode = VOP_REG(RK3366_LIT_BCSH_CTRL, 0x1, 1),
34220+	.bcsh_out_mode = VOP_REG(RK3366_LIT_BCSH_CTRL, 0x3, 2),
34221+	.bcsh_y2r_csc_mode = VOP_REG(RK3366_LIT_BCSH_CTRL, 0x3, 4),
34222+	.bcsh_y2r_en = VOP_REG(RK3366_LIT_BCSH_CTRL, 0x1, 6),
34223+	.bcsh_r2y_en = VOP_REG(RK3366_LIT_BCSH_CTRL, 0x1, 7),
34224+	.bcsh_color_bar = VOP_REG(RK3366_LIT_BCSH_COL_BAR, 0xffffff, 0),
34225+	.bcsh_brightness = VOP_REG(RK3366_LIT_BCSH_BCS, 0xff, 0),
34226+	.bcsh_contrast = VOP_REG(RK3366_LIT_BCSH_BCS, 0x1ff, 8),
34227+	.bcsh_sat_con = VOP_REG(RK3366_LIT_BCSH_BCS, 0x3ff, 20),
34228+	.bcsh_sin_hue = VOP_REG(RK3366_LIT_BCSH_H, 0x1ff, 0),
34229+	.bcsh_cos_hue = VOP_REG(RK3366_LIT_BCSH_H, 0x1ff, 16),
34230+
34231+	.mcu_pix_total = VOP_REG(RK3366_LIT_MCU_CTRL, 0x3f, 0),
34232+	.mcu_cs_pst = VOP_REG(RK3366_LIT_MCU_CTRL, 0xf, 6),
34233+	.mcu_cs_pend = VOP_REG(RK3366_LIT_MCU_CTRL, 0x3f, 10),
34234+	.mcu_rw_pst = VOP_REG(RK3366_LIT_MCU_CTRL, 0xf, 16),
34235+	.mcu_rw_pend = VOP_REG(RK3366_LIT_MCU_CTRL, 0x3f, 20),
34236+	.mcu_clk_sel = VOP_REG(RK3366_LIT_MCU_CTRL, 0x1, 26),
34237+	.mcu_hold_mode = VOP_REG(RK3366_LIT_MCU_CTRL, 0x1, 27),
34238+	.mcu_frame_st = VOP_REG(RK3366_LIT_MCU_CTRL, 0x1, 28),
34239+	.mcu_rs = VOP_REG(RK3366_LIT_MCU_CTRL, 0x1, 29),
34240+	.mcu_bypass = VOP_REG(RK3366_LIT_MCU_CTRL, 0x1, 30),
34241+	.mcu_type = VOP_REG(RK3366_LIT_MCU_CTRL, 0x1, 31),
34242+	.mcu_rw_bypass_port = VOP_REG(RK3366_LIT_MCU_RW_BYPASS_PORT,
34243+				      0xffffffff, 0),
34244+	.bt1120_yc_swap = VOP_REG(RK3366_LIT_DSP_CTRL0, 0x1, 30),
34245+	.bt1120_en = VOP_REG(RK3366_LIT_DSP_CTRL0, 0x1, 31),
34246+};
34247+
34248+static const struct vop_win_data rv1126_vop_win_data[] = {
34249+	{ .base = 0x00, .phy = &rk3366_lit_win0_data,
34250 	  .type = DRM_PLANE_TYPE_OVERLAY },
34251-	{ .base = 0x2d0, .phy = &rk3368_win01_data,
34252-	  .type = DRM_PLANE_TYPE_CURSOR },
34253+	{ .phy = NULL },
34254+	{ .base = 0xe0, .phy = &px30_win23_data,
34255+	  .type = DRM_PLANE_TYPE_PRIMARY,
34256+	  .area = rk3368_area_data,
34257+	  .area_size = ARRAY_SIZE(rk3368_area_data), },
34258 };
34259 
34260-static const struct vop_data rk3328_vop = {
34261-	.version = VOP_VERSION(3, 8),
34262-	.feature = VOP_FEATURE_OUTPUT_RGB10,
34263-	.intr = &rk3328_vop_intr,
34264-	.common = &rk3328_common,
34265-	.modeset = &rk3328_modeset,
34266-	.output = &rk3328_output,
34267-	.misc = &rk3328_misc,
34268-	.win = rk3328_vop_win_data,
34269-	.win_size = ARRAY_SIZE(rk3328_vop_win_data),
34270+static const struct vop_grf_ctrl rv1126_grf_ctrl = {
34271+	.grf_dclk_inv = VOP_REG(RV1126_GRF_IOFUNC_CON3, 0x1, 2),
34272+};
34273+
34274+static const struct vop_data rv1126_vop = {
34275+	.soc_id = 0x1126,
34276+	.vop_id = 0,
34277+	.version = VOP_VERSION(2, 0xb),
34278+	.max_input = {1920, 1920},
34279+	.max_output = {1920, 1080},
34280+	.ctrl = &rv1126_ctrl_data,
34281+	.intr = &rk3366_lit_intr,
34282+	.grf_ctrl = &rv1126_grf_ctrl,
34283+	.win = rv1126_vop_win_data,
34284+	.win_size = ARRAY_SIZE(rv1126_vop_win_data),
34285 };
34286 
34287 static const struct of_device_id vop_driver_dt_match[] = {
34288 	{ .compatible = "rockchip,rk3036-vop",
34289 	  .data = &rk3036_vop },
34290+	{ .compatible = "rockchip,rk3066-vop",
34291+	  .data = &rk3066_vop },
34292 	{ .compatible = "rockchip,rk3126-vop",
34293 	  .data = &rk3126_vop },
34294-	{ .compatible = "rockchip,px30-vop-big",
34295-	  .data = &px30_vop_big },
34296 	{ .compatible = "rockchip,px30-vop-lit",
34297 	  .data = &px30_vop_lit },
34298-	{ .compatible = "rockchip,rk3066-vop",
34299-	  .data = &rk3066_vop },
34300-	{ .compatible = "rockchip,rk3188-vop",
34301-	  .data = &rk3188_vop },
34302-	{ .compatible = "rockchip,rk3288-vop",
34303-	  .data = &rk3288_vop },
34304+	{ .compatible = "rockchip,px30-vop-big",
34305+	  .data = &px30_vop_big },
34306+	{ .compatible = "rockchip,rk3308-vop",
34307+	  .data = &rk3308_vop },
34308+	{ .compatible = "rockchip,rv1126-vop",
34309+	  .data = &rv1126_vop },
34310+	{ .compatible = "rockchip,rk3288-vop-big",
34311+	  .data = &rk3288_vop_big },
34312+	{ .compatible = "rockchip,rk3288-vop-lit",
34313+	  .data = &rk3288_vop_lit },
34314 	{ .compatible = "rockchip,rk3368-vop",
34315 	  .data = &rk3368_vop },
34316 	{ .compatible = "rockchip,rk3366-vop",
34317diff --git a/drivers/gpu/drm/rockchip/rockchip_vop_reg.h b/drivers/gpu/drm/rockchip/rockchip_vop_reg.h
34318index 6e9fa5815..ffd49a862 100644
34319--- a/drivers/gpu/drm/rockchip/rockchip_vop_reg.h
34320+++ b/drivers/gpu/drm/rockchip/rockchip_vop_reg.h
34321@@ -113,6 +113,11 @@
34322 #define RK3288_DSP_VACT_ST_END			0x0194
34323 #define RK3288_DSP_VS_ST_END_F1			0x0198
34324 #define RK3288_DSP_VACT_ST_END_F1		0x019c
34325+
34326+#define RK3288_BCSH_COLOR_BAR			0x01b0
34327+#define RK3288_BCSH_BCS				0x01b4
34328+#define RK3288_BCSH_H				0x01b8
34329+#define RK3288_GRF_SOC_CON15			0x03a4
34330 /* register definition end */
34331 
34332 /* rk3368 register definition */
34333@@ -300,6 +305,7 @@
34334 #define RK3368_CABC_GAMMA_LUT_ADDR		0x1800
34335 #define RK3368_MCU_BYPASS_WPORT			0x2200
34336 #define RK3368_MCU_BYPASS_RPORT			0x2300
34337+#define RK3368_GRF_SOC_CON6			0x0418
34338 /* rk3368 register definition end */
34339 
34340 #define RK3366_REG_CFG_DONE			0x0000
34341@@ -628,6 +634,7 @@
34342 #define RK3399_YUV2YUV_WIN			0x02c0
34343 #define RK3399_YUV2YUV_POST			0x02c4
34344 #define RK3399_AUTO_GATING_EN			0x02cc
34345+#define RK3399_DBG_POST_REG1			0x036c
34346 #define RK3399_WIN0_CSC_COE			0x03a0
34347 #define RK3399_WIN1_CSC_COE			0x03c0
34348 #define RK3399_WIN2_CSC_COE			0x03e0
34349@@ -798,6 +805,21 @@
34350 #define RK3328_DBG_POST_RESERVED		0x000006ec
34351 #define RK3328_DBG_DATAO			0x000006f0
34352 #define RK3328_DBG_DATAO_2			0x000006f4
34353+#define RK3328_SDR2HDR_CTRL			0x00000700
34354+#define RK3328_SDR2HDR_EOTF_OETF_Y0		0x00000704
34355+#define RK3328_SDR2HDR_EOTF_OETF_Y1		0x00000710
34356+#define RK3328_SDR2HDR_OETF_DX_DXPOW1		0x00000810
34357+#define RK3328_SDR2HDR_OETF_XN1			0x00000910
34358+
34359+#define RK3328_HDR2DR_CTRL			0x00000a10
34360+#define RK3328_HDR2DR_SRC_RANGE			0x00000a14
34361+#define RK3328_HDR2DR_NORMFACEETF		0x00000a18
34362+#define RK3328_HDR2DR_DST_RANGE			0x00000a20
34363+#define RK3328_HDR2DR_NORMFACGAMMA		0x00000a24
34364+#define RK3328_HDR2SDR_EETF_OETF_Y0		0x00000a28
34365+#define RK3328_HDR2DR_SAT_Y0			0x00000a2c
34366+#define RK3328_HDR2SDR_EETF_OETF_Y1		0x00000a30
34367+#define RK3328_HDR2DR_SAT_Y1			0x00000ab0
34368 
34369 /* sdr to hdr */
34370 #define RK3328_SDR2HDR_CTRL			0x00000700
34371@@ -830,6 +852,7 @@
34372 #define RK3036_SYS_CTRL			0x00
34373 #define RK3036_DSP_CTRL0		0x04
34374 #define RK3036_DSP_CTRL1		0x08
34375+#define RK3036_INT_SCALER		0x0c
34376 #define RK3036_INT_STATUS		0x10
34377 #define RK3036_ALPHA_CTRL		0x14
34378 #define RK3036_WIN0_COLOR_KEY		0x18
34379@@ -870,112 +893,6 @@
34380 #define RK3036_HWC_LUT_ADDR		0x800
34381 /* rk3036 register definition end */
34382 
34383-/* rk3126 register definition */
34384-#define RK3126_WIN1_MST			0x4c
34385-#define RK3126_WIN1_DSP_INFO		0x50
34386-#define RK3126_WIN1_DSP_ST		0x54
34387-/* rk3126 register definition end */
34388-
34389-/* px30 register definition */
34390-#define PX30_REG_CFG_DONE			0x00000
34391-#define PX30_VERSION				0x00004
34392-#define PX30_DSP_BG				0x00008
34393-#define PX30_MCU_CTRL				0x0000c
34394-#define PX30_SYS_CTRL0				0x00010
34395-#define PX30_SYS_CTRL1				0x00014
34396-#define PX30_SYS_CTRL2				0x00018
34397-#define PX30_DSP_CTRL0				0x00020
34398-#define PX30_DSP_CTRL2				0x00028
34399-#define PX30_VOP_STATUS				0x0002c
34400-#define PX30_LINE_FLAG				0x00030
34401-#define PX30_INTR_EN				0x00034
34402-#define PX30_INTR_CLEAR				0x00038
34403-#define PX30_INTR_STATUS			0x0003c
34404-#define PX30_WIN0_CTRL0				0x00050
34405-#define PX30_WIN0_CTRL1				0x00054
34406-#define PX30_WIN0_COLOR_KEY			0x00058
34407-#define PX30_WIN0_VIR				0x0005c
34408-#define PX30_WIN0_YRGB_MST0			0x00060
34409-#define PX30_WIN0_CBR_MST0			0x00064
34410-#define PX30_WIN0_ACT_INFO			0x00068
34411-#define PX30_WIN0_DSP_INFO			0x0006c
34412-#define PX30_WIN0_DSP_ST			0x00070
34413-#define PX30_WIN0_SCL_FACTOR_YRGB		0x00074
34414-#define PX30_WIN0_SCL_FACTOR_CBR		0x00078
34415-#define PX30_WIN0_SCL_OFFSET			0x0007c
34416-#define PX30_WIN0_ALPHA_CTRL			0x00080
34417-#define PX30_WIN1_CTRL0				0x00090
34418-#define PX30_WIN1_CTRL1				0x00094
34419-#define PX30_WIN1_VIR				0x00098
34420-#define PX30_WIN1_MST				0x000a0
34421-#define PX30_WIN1_DSP_INFO			0x000a4
34422-#define PX30_WIN1_DSP_ST			0x000a8
34423-#define PX30_WIN1_COLOR_KEY			0x000ac
34424-#define PX30_WIN1_ALPHA_CTRL			0x000bc
34425-#define PX30_HWC_CTRL0				0x000e0
34426-#define PX30_HWC_CTRL1				0x000e4
34427-#define PX30_HWC_MST				0x000e8
34428-#define PX30_HWC_DSP_ST				0x000ec
34429-#define PX30_HWC_ALPHA_CTRL			0x000f0
34430-#define PX30_DSP_HTOTAL_HS_END			0x00100
34431-#define PX30_DSP_HACT_ST_END			0x00104
34432-#define PX30_DSP_VTOTAL_VS_END			0x00108
34433-#define PX30_DSP_VACT_ST_END			0x0010c
34434-#define PX30_DSP_VS_ST_END_F1			0x00110
34435-#define PX30_DSP_VACT_ST_END_F1			0x00114
34436-#define PX30_BCSH_CTRL				0x00160
34437-#define PX30_BCSH_COL_BAR			0x00164
34438-#define PX30_BCSH_BCS				0x00168
34439-#define PX30_BCSH_H				0x0016c
34440-#define PX30_FRC_LOWER01_0			0x00170
34441-#define PX30_FRC_LOWER01_1			0x00174
34442-#define PX30_FRC_LOWER10_0			0x00178
34443-#define PX30_FRC_LOWER10_1			0x0017c
34444-#define PX30_FRC_LOWER11_0			0x00180
34445-#define PX30_FRC_LOWER11_1			0x00184
34446-#define PX30_MCU_RW_BYPASS_PORT			0x0018c
34447-#define PX30_WIN2_CTRL0				0x00190
34448-#define PX30_WIN2_CTRL1				0x00194
34449-#define PX30_WIN2_VIR0_1			0x00198
34450-#define PX30_WIN2_VIR2_3			0x0019c
34451-#define PX30_WIN2_MST0				0x001a0
34452-#define PX30_WIN2_DSP_INFO0			0x001a4
34453-#define PX30_WIN2_DSP_ST0			0x001a8
34454-#define PX30_WIN2_COLOR_KEY			0x001ac
34455-#define PX30_WIN2_ALPHA_CTRL			0x001bc
34456-#define PX30_BLANKING_VALUE			0x001f4
34457-#define PX30_FLAG_REG_FRM_VALID			0x001f8
34458-#define PX30_FLAG_REG				0x001fc
34459-#define PX30_HWC_LUT_ADDR			0x00600
34460-#define PX30_GAMMA_LUT_ADDR			0x00a00
34461-/* px30 register definition end */
34462-
34463-/* rk3188 register definition */
34464-#define RK3188_SYS_CTRL			0x00
34465-#define RK3188_DSP_CTRL0		0x04
34466-#define RK3188_DSP_CTRL1		0x08
34467-#define RK3188_INT_STATUS		0x10
34468-#define RK3188_WIN0_YRGB_MST0		0x20
34469-#define RK3188_WIN0_CBR_MST0		0x24
34470-#define RK3188_WIN0_YRGB_MST1		0x28
34471-#define RK3188_WIN0_CBR_MST1		0x2c
34472-#define RK3188_WIN_VIR			0x30
34473-#define RK3188_WIN0_ACT_INFO		0x34
34474-#define RK3188_WIN0_DSP_INFO		0x38
34475-#define RK3188_WIN0_DSP_ST		0x3c
34476-#define RK3188_WIN0_SCL_FACTOR_YRGB	0x40
34477-#define RK3188_WIN0_SCL_FACTOR_CBR	0x44
34478-#define RK3188_WIN1_MST			0x4c
34479-#define RK3188_WIN1_DSP_INFO		0x50
34480-#define RK3188_WIN1_DSP_ST		0x54
34481-#define RK3188_DSP_HTOTAL_HS_END	0x6c
34482-#define RK3188_DSP_HACT_ST_END		0x70
34483-#define RK3188_DSP_VTOTAL_VS_END	0x74
34484-#define RK3188_DSP_VACT_ST_END		0x78
34485-#define RK3188_REG_CFG_DONE		0x90
34486-/* rk3188 register definition end */
34487-
34488-/* rk3066 register definition */
34489 #define RK3066_SYS_CTRL0		0x00
34490 #define RK3066_SYS_CTRL1		0x04
34491 #define RK3066_DSP_CTRL0		0x08
34492@@ -1026,6 +943,691 @@
34493 #define RK3066_MCU_BYPASS_RPORT		0x200
34494 #define RK3066_WIN2_LUT_ADDR		0x400
34495 #define RK3066_DSP_LUT_ADDR		0x800
34496-/* rk3066 register definition end */
34497+
34498+/* rk3366 register definition */
34499+#define RK3366_LIT_REG_CFG_DONE			0x00000
34500+#define RK3366_LIT_VERSION			0x00004
34501+#define RK3366_LIT_DSP_BG			0x00008
34502+#define RK3366_LIT_MCU_CTRL			0x0000c
34503+#define RK3366_LIT_SYS_CTRL0			0x00010
34504+#define RK3366_LIT_SYS_CTRL1			0x00014
34505+#define RK3366_LIT_SYS_CTRL2			0x00018
34506+#define RK3366_LIT_DSP_CTRL0			0x00020
34507+#define RK3366_LIT_DSP_CTRL2			0x00028
34508+#define RK3366_LIT_VOP_STATUS			0x0002c
34509+#define RK3366_LIT_LINE_FLAG			0x00030
34510+#define RK3366_LIT_INTR_EN			0x00034
34511+#define RK3366_LIT_INTR_CLEAR			0x00038
34512+#define RK3366_LIT_INTR_STATUS			0x0003c
34513+#define RK3366_LIT_WIN0_CTRL0			0x00050
34514+#define RK3366_LIT_WIN0_CTRL1			0x00054
34515+#define RK3366_LIT_WIN0_COLOR_KEY		0x00058
34516+#define RK3366_LIT_WIN0_VIR			0x0005c
34517+#define RK3366_LIT_WIN0_YRGB_MST0		0x00060
34518+#define RK3366_LIT_WIN0_CBR_MST0		0x00064
34519+#define RK3366_LIT_WIN0_ACT_INFO		0x00068
34520+#define RK3366_LIT_WIN0_DSP_INFO		0x0006c
34521+#define RK3366_LIT_WIN0_DSP_ST			0x00070
34522+#define RK3366_LIT_WIN0_SCL_FACTOR_YRGB		0x00074
34523+#define RK3366_LIT_WIN0_SCL_FACTOR_CBR		0x00078
34524+#define RK3366_LIT_WIN0_SCL_OFFSET		0x0007c
34525+#define RK3366_LIT_WIN0_ALPHA_CTRL		0x00080
34526+#define RK3366_LIT_WIN1_CTRL0			0x00090
34527+#define RK3366_LIT_WIN1_CTRL1			0x00094
34528+#define RK3366_LIT_WIN1_VIR			0x00098
34529+#define RK3366_LIT_WIN1_MST			0x000a0
34530+#define RK3366_LIT_WIN1_DSP_INFO		0x000a4
34531+#define RK3366_LIT_WIN1_DSP_ST			0x000a8
34532+#define RK3366_LIT_WIN1_COLOR_KEY		0x000ac
34533+#define RK3366_LIT_WIN1_ALPHA_CTRL		0x000bc
34534+#define RK3366_LIT_HWC_CTRL0			0x000e0
34535+#define RK3366_LIT_HWC_CTRL1			0x000e4
34536+#define RK3366_LIT_HWC_MST			0x000e8
34537+#define RK3366_LIT_HWC_DSP_ST			0x000ec
34538+#define RK3366_LIT_HWC_ALPHA_CTRL		0x000f0
34539+#define RK3366_LIT_DSP_HTOTAL_HS_END		0x00100
34540+#define RK3366_LIT_DSP_HACT_ST_END		0x00104
34541+#define RK3366_LIT_DSP_VTOTAL_VS_END		0x00108
34542+#define RK3366_LIT_DSP_VACT_ST_END		0x0010c
34543+#define RK3366_LIT_DSP_VS_ST_END_F1		0x00110
34544+#define RK3366_LIT_DSP_VACT_ST_END_F1		0x00114
34545+#define RK3366_LIT_BCSH_CTRL			0x00160
34546+#define RK3366_LIT_BCSH_COL_BAR			0x00164
34547+#define RK3366_LIT_BCSH_BCS			0x00168
34548+#define RK3366_LIT_BCSH_H			0x0016c
34549+#define RK3366_LIT_FRC_LOWER01_0		0x00170
34550+#define RK3366_LIT_FRC_LOWER01_1		0x00174
34551+#define RK3366_LIT_FRC_LOWER10_0		0x00178
34552+#define RK3366_LIT_FRC_LOWER10_1		0x0017c
34553+#define RK3366_LIT_FRC_LOWER11_0		0x00180
34554+#define RK3366_LIT_FRC_LOWER11_1		0x00184
34555+#define RK3366_LIT_MCU_RW_BYPASS_PORT		0x0018c
34556+#define RK3366_LIT_DBG_REG_000			0x00190
34557+#define RK3366_LIT_BLANKING_VALUE		0x001f4
34558+#define RK3366_LIT_FLAG_REG_FRM_VALID		0x001f8
34559+#define RK3366_LIT_FLAG_REG			0x001fc
34560+#define RK3366_LIT_HWC_LUT_ADDR			0x00600
34561+#define RK3366_LIT_GAMMA_LUT_ADDR		0x00a00
34562+/* rk3366 register definition end */
34563+
34564+/* rk3126 register definition */
34565+#define RK3126_WIN1_MST				0x0004c
34566+#define RK3126_WIN1_DSP_INFO			0x00050
34567+#define RK3126_WIN1_DSP_ST			0x00054
34568+/* rk3126 register definition end */
34569+
34570+/* px30 register definition */
34571+#define PX30_CABC_CTRL0				0x00200
34572+#define PX30_CABC_CTRL1				0x00204
34573+#define PX30_CABC_CTRL2				0x00208
34574+#define PX30_CABC_CTRL3				0x0020c
34575+#define PX30_CABC_GAUSS_LINE0_0			0x00210
34576+#define PX30_CABC_GAUSS_LINE0_1			0x00214
34577+#define PX30_CABC_GAUSS_LINE1_0			0x00218
34578+#define PX30_CABC_GAUSS_LINE1_1			0x0021c
34579+#define PX30_CABC_GAUSS_LINE2_0			0x00220
34580+#define PX30_CABC_GAUSS_LINE2_1			0x00224
34581+#define PX30_AFBCD0_CTRL			0x00240
34582+#define PX30_AFBCD0_HDR_PTR			0x00244
34583+#define PX30_AFBCD0_PIC_SIZE			0x00248
34584+#define PX30_AFBCD0_PIC_OFFSET			0x0024c
34585+#define PX30_AFBCD0_AXI_CTRL			0x00250
34586+#define PX30_GRF_PD_VO_CON1			0x00438
34587+/* px30 register definition end */
34588+
34589+#define RV1126_GRF_IOFUNC_CON3			0x1026c
34590+
34591+/* rk3568 vop registers definition */
34592+
34593+#define RK3568_GRF_VO_CON1			0x0364
34594+/* System registers definition */
34595+#define RK3568_REG_CFG_DONE			0x000
34596+#define RK3568_VOP2_WB_CFG_DONE			BIT(14)
34597+#define RK3568_VOP2_GLB_CFG_DONE_EN		BIT(15)
34598+#define RK3568_VERSION_INFO			0x004
34599+#define RK3568_SYS_AUTO_GATING_CTRL		0x008
34600+#define RK3568_SYS_AXI_LUT_CTRL			0x024
34601+#define RK3568_DSP_IF_EN			0x028
34602+#define RK3568_DSP_IF_CTRL			0x02c
34603+#define RK3568_DSP_IF_POL			0x030
34604+#define RK3568_SYS_PD_CTRL			0x034
34605+#define RK3568_WB_CTRL				0x40
34606+#define RK3568_WB_XSCAL_FACTOR			0x44
34607+#define RK3568_WB_YRGB_MST			0x48
34608+#define RK3568_WB_CBR_MST			0x4C
34609+#define RK3568_OTP_WIN_EN			0x50
34610+#define RK3568_LUT_PORT_SEL			0x58
34611+#define RK3568_SYS_STATUS0			0x60
34612+#define RK3568_SYS_STATUS1			0x64
34613+#define RK3568_SYS_STATUS2			0x68
34614+#define RK3568_SYS_STATUS3			0x6C
34615+#define RK3568_VP0_LINE_FLAG			0x70
34616+#define RK3568_VP1_LINE_FLAG			0x74
34617+#define RK3568_VP2_LINE_FLAG			0x78
34618+#define RK3588_VP3_LINE_FLAG			0x7C
34619+#define RK3568_SYS0_INT_EN			0x80
34620+#define RK3568_SYS0_INT_CLR			0x84
34621+#define RK3568_SYS0_INT_STATUS			0x88
34622+#define RK3568_SYS1_INT_EN			0x90
34623+#define RK3568_SYS1_INT_CLR			0x94
34624+#define RK3568_SYS1_INT_STATUS			0x98
34625+#define RK3568_VP0_INT_EN			0xA0
34626+#define RK3568_VP0_INT_CLR			0xA4
34627+#define RK3568_VP0_INT_STATUS			0xA8
34628+#define RK3568_VP0_INT_RAW_STATUS		0xAC
34629+#define RK3568_VP1_INT_EN			0xB0
34630+#define RK3568_VP1_INT_CLR			0xB4
34631+#define RK3568_VP1_INT_STATUS			0xB8
34632+#define RK3568_VP1_INT_RAW_STATUS		0xBC
34633+#define RK3568_VP2_INT_EN			0xC0
34634+#define RK3568_VP2_INT_CLR			0xC4
34635+#define RK3568_VP2_INT_STATUS			0xC8
34636+#define RK3568_VP2_INT_RAW_STATUS		0xCC
34637+#define RK3588_VP3_INT_EN			0xD0
34638+#define RK3588_VP3_INT_CLR			0xD4
34639+#define RK3588_VP3_INT_STATUS			0xD8
34640+
34641+#define RK3588_DSC_8K_SYS_CTRL			0x200
34642+#define RK3588_DSC_8K_RST			0x204
34643+#define RK3588_DSC_8K_CFG_DONE			0x208
34644+#define RK3588_DSC_8K_INIT_DLY			0x20C
34645+#define RK3588_DSC_8K_HTOTAL_HS_END		0x210
34646+#define RK3588_DSC_8K_HACT_ST_END		0x214
34647+#define RK3588_DSC_8K_VTOTAL_VS_END		0x218
34648+#define RK3588_DSC_8K_VACT_ST_END		0x21C
34649+#define RK3588_DSC_8K_STATUS			0x220
34650+#define RK3588_DSC_4K_SYS_CTRL			0x230
34651+#define RK3588_DSC_4K_RST			0x234
34652+#define RK3588_DSC_4K_CFG_DONE			0x238
34653+#define RK3588_DSC_4K_INIT_DLY			0x23C
34654+#define RK3588_DSC_4K_HTOTAL_HS_END		0x240
34655+#define RK3588_DSC_4K_HACT_ST_END		0x244
34656+#define RK3588_DSC_4K_VTOTAL_VS_END		0x248
34657+#define RK3588_DSC_4K_VACT_ST_END		0x24C
34658+#define RK3588_DSC_4K_STATUS			0x250
34659+
34660+/* Video Port registers definition */
34661+#define RK3568_VP0_DSP_CTRL				0xC00
34662+#define RK3568_VP0_DUAL_CHANNEL_CTRL			0xC04
34663+#define RK3568_VP0_COLOR_BAR_CTRL			0xC08
34664+#define RK3568_VP0_CLK_CTRL				0xC0C
34665+#define RK3568_VP0_3D_LUT_CTRL				0xC10
34666+#define RK3568_VP0_3D_LUT_MST				0xC20
34667+#define RK3568_VP0_DSP_BG				0xC2C
34668+#define RK3568_VP0_PRE_SCAN_HTIMING			0xC30
34669+#define RK3568_VP0_POST_DSP_HACT_INFO			0xC34
34670+#define RK3568_VP0_POST_DSP_VACT_INFO			0xC38
34671+#define RK3568_VP0_POST_SCL_FACTOR_YRGB			0xC3C
34672+#define RK3568_VP0_POST_SCL_CTRL			0xC40
34673+#define RK3568_VP0_POST_DSP_VACT_INFO_F1		0xC44
34674+#define RK3568_VP0_DSP_HTOTAL_HS_END			0xC48
34675+#define RK3568_VP0_DSP_HACT_ST_END			0xC4C
34676+#define RK3568_VP0_DSP_VTOTAL_VS_END			0xC50
34677+#define RK3568_VP0_DSP_VACT_ST_END			0xC54
34678+#define RK3568_VP0_DSP_VS_ST_END_F1			0xC58
34679+#define RK3568_VP0_DSP_VACT_ST_END_F1			0xC5C
34680+#define RK3568_VP0_BCSH_CTRL				0xC60
34681+#define RK3568_VP0_BCSH_BCS				0xC64
34682+#define RK3568_VP0_BCSH_H				0xC68
34683+#define RK3568_VP0_BCSH_COLOR_BAR			0xC6C
34684+
34685+#define RK3568_VP1_DSP_CTRL				0xD00
34686+#define RK3568_VP1_DUAL_CHANNEL_CTRL			0xD04
34687+#define RK3568_VP1_COLOR_BAR_CTRL			0xD08
34688+#define RK3568_VP1_CLK_CTRL				0xD0C
34689+#define RK3588_VP1_3D_LUT_CTRL				0xD10
34690+#define RK3588_VP1_3D_LUT_MST				0xD20
34691+#define RK3568_VP1_DSP_BG				0xD2C
34692+#define RK3568_VP1_PRE_SCAN_HTIMING			0xD30
34693+#define RK3568_VP1_POST_DSP_HACT_INFO			0xD34
34694+#define RK3568_VP1_POST_DSP_VACT_INFO			0xD38
34695+#define RK3568_VP1_POST_SCL_FACTOR_YRGB			0xD3C
34696+#define RK3568_VP1_POST_SCL_CTRL			0xD40
34697+#define RK3568_VP1_DSP_HACT_INFO			0xD34
34698+#define RK3568_VP1_DSP_VACT_INFO			0xD38
34699+#define RK3568_VP1_POST_DSP_VACT_INFO_F1		0xD44
34700+#define RK3568_VP1_DSP_HTOTAL_HS_END			0xD48
34701+#define RK3568_VP1_DSP_HACT_ST_END			0xD4C
34702+#define RK3568_VP1_DSP_VTOTAL_VS_END			0xD50
34703+#define RK3568_VP1_DSP_VACT_ST_END			0xD54
34704+#define RK3568_VP1_DSP_VS_ST_END_F1			0xD58
34705+#define RK3568_VP1_DSP_VACT_ST_END_F1			0xD5C
34706+#define RK3568_VP1_BCSH_CTRL				0xD60
34707+#define RK3568_VP1_BCSH_BCS				0xD64
34708+#define RK3568_VP1_BCSH_H				0xD68
34709+#define RK3568_VP1_BCSH_COLOR_BAR			0xD6C
34710+
34711+#define RK3568_VP2_DSP_CTRL				0xE00
34712+#define RK3568_VP2_DUAL_CHANNEL_CTRL			0xE04
34713+#define RK3568_VP2_COLOR_BAR_CTRL			0xE08
34714+#define RK3568_VP2_CLK_CTRL				0xE0C
34715+#define RK3588_VP2_3D_LUT_CTRL				0xE10
34716+#define RK3588_VP2_3D_LUT_MST				0xE20
34717+#define RK3568_VP2_DSP_BG				0xE2C
34718+#define RK3568_VP2_PRE_SCAN_HTIMING			0xE30
34719+#define RK3568_VP2_POST_DSP_HACT_INFO			0xE34
34720+#define RK3568_VP2_POST_DSP_VACT_INFO			0xE38
34721+#define RK3568_VP2_POST_SCL_FACTOR_YRGB			0xE3C
34722+#define RK3568_VP2_POST_SCL_CTRL			0xE40
34723+#define RK3568_VP2_DSP_HACT_INFO			0xE34
34724+#define RK3568_VP2_DSP_VACT_INFO			0xE38
34725+#define RK3568_VP2_POST_DSP_VACT_INFO_F1		0xE44
34726+#define RK3568_VP2_DSP_HTOTAL_HS_END			0xE48
34727+#define RK3568_VP2_DSP_HACT_ST_END			0xE4C
34728+#define RK3568_VP2_DSP_VTOTAL_VS_END			0xE50
34729+#define RK3568_VP2_DSP_VACT_ST_END			0xE54
34730+#define RK3568_VP2_DSP_VS_ST_END_F1			0xE58
34731+#define RK3568_VP2_DSP_VACT_ST_END_F1			0xE5C
34732+#define RK3568_VP2_BCSH_CTRL				0xE60
34733+#define RK3568_VP2_BCSH_BCS				0xE64
34734+#define RK3568_VP2_BCSH_H				0xE68
34735+#define RK3568_VP2_BCSH_COLOR_BAR			0xE6C
34736+
34737+#define RK3588_VP3_DSP_CTRL				0xF00
34738+#define RK3588_VP3_DUAL_CHANNEL_CTRL			0xF04
34739+#define RK3588_VP3_COLOR_BAR_CTRL			0xF08
34740+#define RK3568_VP3_CLK_CTRL				0xF0C
34741+#define RK3588_VP3_DSP_BG				0xF2C
34742+#define RK3588_VP3_PRE_SCAN_HTIMING			0xF30
34743+#define RK3588_VP3_POST_DSP_HACT_INFO			0xF34
34744+#define RK3588_VP3_POST_DSP_VACT_INFO			0xF38
34745+#define RK3588_VP3_POST_SCL_FACTOR_YRGB			0xF3C
34746+#define RK3588_VP3_POST_SCL_CTRL			0xF40
34747+#define RK3588_VP3_DSP_HACT_INFO			0xF34
34748+#define RK3588_VP3_DSP_VACT_INFO			0xF38
34749+#define RK3588_VP3_POST_DSP_VACT_INFO_F1		0xF44
34750+#define RK3588_VP3_DSP_HTOTAL_HS_END			0xF48
34751+#define RK3588_VP3_DSP_HACT_ST_END			0xF4C
34752+#define RK3588_VP3_DSP_VTOTAL_VS_END			0xF50
34753+#define RK3588_VP3_DSP_VACT_ST_END			0xF54
34754+#define RK3588_VP3_DSP_VS_ST_END_F1			0xF58
34755+#define RK3588_VP3_DSP_VACT_ST_END_F1			0xF5C
34756+#define RK3588_VP3_BCSH_CTRL				0xF60
34757+#define RK3588_VP3_BCSH_BCS				0xF64
34758+#define RK3588_VP3_BCSH_H				0xF68
34759+#define RK3588_VP3_BCSH_COLOR_BAR			0xF6C
34760+
34761+/* Overlay registers definition */
34762+#define RK3568_OVL_CTRL				0x600
34763+#define RK3568_OVL_LAYER_SEL			0x604
34764+#define RK3568_OVL_PORT_SEL			0x608
34765+#define RK3568_CLUSTER0_MIX_SRC_COLOR_CTRL	0x610
34766+#define RK3568_CLUSTER0_MIX_DST_COLOR_CTRL	0x614
34767+#define RK3568_CLUSTER0_MIX_SRC_ALPHA_CTRL	0x618
34768+#define RK3568_CLUSTER0_MIX_DST_ALPHA_CTRL	0x61C
34769+#define RK3568_MIX0_SRC_COLOR_CTRL		0x650
34770+#define RK3568_MIX0_DST_COLOR_CTRL		0x654
34771+#define RK3568_MIX0_SRC_ALPHA_CTRL		0x658
34772+#define RK3568_MIX0_DST_ALPHA_CTRL		0x65C
34773+#define RK3568_HDR0_SRC_COLOR_CTRL		0x6C0
34774+#define RK3568_HDR0_DST_COLOR_CTRL		0x6C4
34775+#define RK3568_HDR0_SRC_ALPHA_CTRL		0x6C8
34776+#define RK3568_HDR0_DST_ALPHA_CTRL		0x6CC
34777+#define RK3568_HDR1_SRC_COLOR_CTRL		0x6D0
34778+#define RK3568_HDR1_DST_COLOR_CTRL		0x6D4
34779+#define RK3568_HDR1_SRC_ALPHA_CTRL		0x6D8
34780+#define RK3568_HDR1_DST_ALPHA_CTRL		0x6DC
34781+#define RK3568_VP0_BG_MIX_CTRL			0x6E0
34782+#define RK3568_VP1_BG_MIX_CTRL			0x6E4
34783+#define RK3568_VP2_BG_MIX_CTRL			0x6E8
34784+#define RK3588_VP3_BG_MIX_CTRL			0x6EC
34785+#define RK3568_CLUSTER_DLY_NUM			0x6F0
34786+#define RK3568_CLUSTER_DLY_NUM1			0x6F4
34787+#define RK3568_SMART_DLY_NUM			0x6F8
34788+
34789+/* Cluster0 register definition */
34790+#define RK3568_CLUSTER0_WIN0_CTRL0		0x1000
34791+#define RK3568_CLUSTER0_WIN0_CTRL1		0x1004
34792+#define RK3568_CLUSTER0_WIN0_CTRL2		0x1008
34793+#define RK3568_CLUSTER0_WIN0_YRGB_MST		0x1010
34794+#define RK3568_CLUSTER0_WIN0_CBR_MST		0x1014
34795+#define RK3568_CLUSTER0_WIN0_VIR		0x1018
34796+#define RK3568_CLUSTER0_WIN0_ACT_INFO		0x1020
34797+#define RK3568_CLUSTER0_WIN0_DSP_INFO		0x1024
34798+#define RK3568_CLUSTER0_WIN0_DSP_ST		0x1028
34799+#define RK3568_CLUSTER0_WIN0_SCL_FACTOR_YRGB	0x1030
34800+#define RK3568_CLUSTER0_WIN0_AFBCD_TRANSFORM_OFFSET	0x103C
34801+#define RK3568_CLUSTER0_WIN0_AFBCD_OUTPUT_CTRL	0x1050
34802+#define RK3568_CLUSTER0_WIN0_AFBCD_ROTATE_MODE	0x1054
34803+#define RK3568_CLUSTER0_WIN0_AFBCD_HDR_PTR	0x1058
34804+#define RK3568_CLUSTER0_WIN0_AFBCD_VIR_WIDTH	0x105C
34805+#define RK3568_CLUSTER0_WIN0_AFBCD_PIC_SIZE	0x1060
34806+#define RK3568_CLUSTER0_WIN0_AFBCD_PIC_OFFSET	0x1064
34807+#define RK3568_CLUSTER0_WIN0_AFBCD_DSP_OFFSET	0x1068
34808+#define RK3568_CLUSTER0_WIN0_AFBCD_CTRL		0x106C
34809+
34810+#define RK3568_CLUSTER0_WIN1_CTRL0		0x1080
34811+#define RK3568_CLUSTER0_WIN1_CTRL1		0x1084
34812+#define RK3568_CLUSTER0_WIN1_YRGB_MST		0x1090
34813+#define RK3568_CLUSTER0_WIN1_CBR_MST		0x1094
34814+#define RK3568_CLUSTER0_WIN1_VIR		0x1098
34815+#define RK3568_CLUSTER0_WIN1_ACT_INFO		0x10A0
34816+#define RK3568_CLUSTER0_WIN1_DSP_INFO		0x10A4
34817+#define RK3568_CLUSTER0_WIN1_DSP_ST		0x10A8
34818+#define RK3568_CLUSTER0_WIN1_SCL_FACTOR_YRGB	0x10B0
34819+#define RK3568_CLUSTER0_WIN1_AFBCD_OUTPUT_CTRL	0x10D0
34820+#define RK3568_CLUSTER0_WIN1_AFBCD_ROTATE_MODE	0x10D4
34821+#define RK3568_CLUSTER0_WIN1_AFBCD_HDR_PTR	0x10D8
34822+#define RK3568_CLUSTER0_WIN1_AFBCD_VIR_WIDTH	0x10DC
34823+#define RK3568_CLUSTER0_WIN1_AFBCD_PIC_SIZE	0x10E0
34824+#define RK3568_CLUSTER0_WIN1_AFBCD_PIC_OFFSET	0x10E4
34825+#define RK3568_CLUSTER0_WIN1_AFBCD_DSP_OFFSET	0x10E8
34826+#define RK3568_CLUSTER0_WIN1_AFBCD_CTRL		0x10EC
34827+
34828+#define RK3568_CLUSTER0_CTRL			0x1100
34829+
34830+#define RK3568_CLUSTER1_WIN0_CTRL0		0x1200
34831+#define RK3568_CLUSTER1_WIN0_CTRL1		0x1204
34832+#define RK3568_CLUSTER1_WIN0_CTRL2		0x1208
34833+#define RK3568_CLUSTER1_WIN0_YRGB_MST		0x1210
34834+#define RK3568_CLUSTER1_WIN0_CBR_MST		0x1214
34835+#define RK3568_CLUSTER1_WIN0_VIR		0x1218
34836+#define RK3568_CLUSTER1_WIN0_ACT_INFO		0x1220
34837+#define RK3568_CLUSTER1_WIN0_DSP_INFO		0x1224
34838+#define RK3568_CLUSTER1_WIN0_DSP_ST		0x1228
34839+#define RK3568_CLUSTER1_WIN0_SCL_FACTOR_YRGB	0x1230
34840+#define RK3568_CLUSTER1_WIN0_AFBCD_TRANSFORM_OFFSET	0x123C
34841+#define RK3568_CLUSTER1_WIN0_AFBCD_OUTPUT_CTRL	0x1250
34842+#define RK3568_CLUSTER1_WIN0_AFBCD_ROTATE_MODE	0x1254
34843+#define RK3568_CLUSTER1_WIN0_AFBCD_HDR_PTR	0x1258
34844+#define RK3568_CLUSTER1_WIN0_AFBCD_VIR_WIDTH	0x125C
34845+#define RK3568_CLUSTER1_WIN0_AFBCD_PIC_SIZE	0x1260
34846+#define RK3568_CLUSTER1_WIN0_AFBCD_PIC_OFFSET	0x1264
34847+#define RK3568_CLUSTER1_WIN0_AFBCD_DSP_OFFSET	0x1268
34848+#define RK3568_CLUSTER1_WIN0_AFBCD_CTRL		0x126C
34849+
34850+#define RK3568_CLUSTER1_WIN1_CTRL0		0x1280
34851+#define RK3568_CLUSTER1_WIN1_CTRL1		0x1284
34852+#define RK3568_CLUSTER1_WIN1_YRGB_MST		0x1290
34853+#define RK3568_CLUSTER1_WIN1_CBR_MST		0x1294
34854+#define RK3568_CLUSTER1_WIN1_VIR		0x1298
34855+#define RK3568_CLUSTER1_WIN1_ACT_INFO		0x12A0
34856+#define RK3568_CLUSTER1_WIN1_DSP_INFO		0x12A4
34857+#define RK3568_CLUSTER1_WIN1_DSP_ST		0x12A8
34858+#define RK3568_CLUSTER1_WIN1_SCL_FACTOR_YRGB	0x12B0
34859+#define RK3568_CLUSTER1_WIN1_AFBCD_OUTPUT_CTRL	0x12D0
34860+#define RK3568_CLUSTER1_WIN1_AFBCD_ROTATE_MODE	0x12D4
34861+#define RK3568_CLUSTER1_WIN1_AFBCD_HDR_PTR	0x12D8
34862+#define RK3568_CLUSTER1_WIN1_AFBCD_VIR_WIDTH	0x12DC
34863+#define RK3568_CLUSTER1_WIN1_AFBCD_PIC_SIZE	0x12E0
34864+#define RK3568_CLUSTER1_WIN1_AFBCD_PIC_OFFSET	0x12E4
34865+#define RK3568_CLUSTER1_WIN1_AFBCD_DSP_OFFSET	0x12E8
34866+#define RK3568_CLUSTER1_WIN1_AFBCD_CTRL		0x12EC
34867+
34868+#define RK3568_CLUSTER1_CTRL			0x1300
34869+
34870+#define RK3588_CLUSTER2_WIN0_CTRL0		0x1400
34871+#define RK3588_CLUSTER2_WIN0_CTRL1		0x1404
34872+#define RK3588_CLUSTER2_WIN0_CTRL2		0x1408
34873+#define RK3588_CLUSTER2_WIN0_YRGB_MST		0x1410
34874+#define RK3588_CLUSTER2_WIN0_CBR_MST		0x1414
34875+#define RK3588_CLUSTER2_WIN0_VIR		0x1418
34876+#define RK3588_CLUSTER2_WIN0_ACT_INFO		0x1420
34877+#define RK3588_CLUSTER2_WIN0_DSP_INFO		0x1424
34878+#define RK3588_CLUSTER2_WIN0_DSP_ST		0x1428
34879+#define RK3588_CLUSTER2_WIN0_SCL_FACTOR_YRGB	0x1430
34880+#define RK3588_CLUSTER2_WIN0_AFBCD_TRANSFORM_OFFSET	0x143C
34881+#define RK3588_CLUSTER2_WIN0_AFBCD_OUTPUT_CTRL	0x1450
34882+#define RK3588_CLUSTER2_WIN0_AFBCD_ROTATE_MODE	0x1454
34883+#define RK3588_CLUSTER2_WIN0_AFBCD_HDR_PTR	0x1458
34884+#define RK3588_CLUSTER2_WIN0_AFBCD_VIR_WIDTH	0x145C
34885+#define RK3588_CLUSTER2_WIN0_AFBCD_PIC_SIZE	0x1460
34886+#define RK3588_CLUSTER2_WIN0_AFBCD_PIC_OFFSET	0x1464
34887+#define RK3588_CLUSTER2_WIN0_AFBCD_DSP_OFFSET	0x1468
34888+#define RK3588_CLUSTER2_WIN0_AFBCD_CTRL		0x146C
34889+
34890+#define RK3588_CLUSTER2_WIN1_CTRL0		0x1480
34891+#define RK3588_CLUSTER2_WIN1_CTRL1		0x1484
34892+#define RK3588_CLUSTER2_WIN1_YRGB_MST		0x1490
34893+#define RK3588_CLUSTER2_WIN1_CBR_MST		0x1494
34894+#define RK3588_CLUSTER2_WIN1_VIR		0x1498
34895+#define RK3588_CLUSTER2_WIN1_ACT_INFO		0x14A0
34896+#define RK3588_CLUSTER2_WIN1_DSP_INFO		0x14A4
34897+#define RK3588_CLUSTER2_WIN1_DSP_ST		0x14A8
34898+#define RK3588_CLUSTER2_WIN1_SCL_FACTOR_YRGB	0x14B0
34899+#define RK3588_CLUSTER2_WIN1_AFBCD_OUTPUT_CTRL	0x14D0
34900+#define RK3588_CLUSTER2_WIN1_AFBCD_ROTATE_MODE	0x14D4
34901+#define RK3588_CLUSTER2_WIN1_AFBCD_HDR_PTR	0x14D8
34902+#define RK3588_CLUSTER2_WIN1_AFBCD_VIR_WIDTH	0x14DC
34903+#define RK3588_CLUSTER2_WIN1_AFBCD_PIC_SIZE	0x14E0
34904+#define RK3588_CLUSTER2_WIN1_AFBCD_PIC_OFFSET	0x14E4
34905+#define RK3588_CLUSTER2_WIN1_AFBCD_DSP_OFFSET	0x14E8
34906+#define RK3588_CLUSTER2_WIN1_AFBCD_CTRL		0x14EC
34907+
34908+#define RK3588_CLUSTER2_CTRL			0x1500
34909+
34910+#define RK3588_CLUSTER3_WIN0_CTRL0		0x1600
34911+#define RK3588_CLUSTER3_WIN0_CTRL1		0x1604
34912+#define RK3588_CLUSTER3_WIN0_CTRL2		0x1608
34913+#define RK3588_CLUSTER3_WIN0_YRGB_MST		0x1610
34914+#define RK3588_CLUSTER3_WIN0_CBR_MST		0x1614
34915+#define RK3588_CLUSTER3_WIN0_VIR		0x1618
34916+#define RK3588_CLUSTER3_WIN0_ACT_INFO		0x1620
34917+#define RK3588_CLUSTER3_WIN0_DSP_INFO		0x1624
34918+#define RK3588_CLUSTER3_WIN0_DSP_ST		0x1628
34919+#define RK3588_CLUSTER3_WIN0_SCL_FACTOR_YRGB	0x1630
34920+#define RK3588_CLUSTER3_WIN0_AFBCD_TRANSFORM_OFFSET	0x163C
34921+#define RK3588_CLUSTER3_WIN0_AFBCD_OUTPUT_CTRL	0x1650
34922+#define RK3588_CLUSTER3_WIN0_AFBCD_ROTATE_MODE	0x1654
34923+#define RK3588_CLUSTER3_WIN0_AFBCD_HDR_PTR	0x1658
34924+#define RK3588_CLUSTER3_WIN0_AFBCD_VIR_WIDTH	0x165C
34925+#define RK3588_CLUSTER3_WIN0_AFBCD_PIC_SIZE	0x1660
34926+#define RK3588_CLUSTER3_WIN0_AFBCD_PIC_OFFSET	0x1664
34927+#define RK3588_CLUSTER3_WIN0_AFBCD_DSP_OFFSET	0x1668
34928+#define RK3588_CLUSTER3_WIN0_AFBCD_CTRL		0x166C
34929+
34930+#define RK3588_CLUSTER3_WIN1_CTRL0		0x1680
34931+#define RK3588_CLUSTER3_WIN1_CTRL1		0x1684
34932+#define RK3588_CLUSTER3_WIN1_YRGB_MST		0x1690
34933+#define RK3588_CLUSTER3_WIN1_CBR_MST		0x1694
34934+#define RK3588_CLUSTER3_WIN1_VIR		0x1698
34935+#define RK3588_CLUSTER3_WIN1_ACT_INFO		0x16A0
34936+#define RK3588_CLUSTER3_WIN1_DSP_INFO		0x16A4
34937+#define RK3588_CLUSTER3_WIN1_DSP_ST		0x16A8
34938+#define RK3588_CLUSTER3_WIN1_SCL_FACTOR_YRGB	0x16B0
34939+#define RK3588_CLUSTER3_WIN1_AFBCD_OUTPUT_CTRL	0x16D0
34940+#define RK3588_CLUSTER3_WIN1_AFBCD_ROTATE_MODE	0x16D4
34941+#define RK3588_CLUSTER3_WIN1_AFBCD_HDR_PTR	0x16D8
34942+#define RK3588_CLUSTER3_WIN1_AFBCD_VIR_WIDTH	0x16DC
34943+#define RK3588_CLUSTER3_WIN1_AFBCD_PIC_SIZE	0x16E0
34944+#define RK3588_CLUSTER3_WIN1_AFBCD_PIC_OFFSET	0x16E4
34945+#define RK3588_CLUSTER3_WIN1_AFBCD_DSP_OFFSET	0x16E8
34946+#define RK3588_CLUSTER3_WIN1_AFBCD_CTRL		0x16EC
34947+
34948+#define RK3588_CLUSTER3_CTRL			0x1700
34949+
34950+/* Esmart register definition */
34951+#define RK3568_ESMART0_CTRL0			0x1800
34952+#define RK3568_ESMART0_CTRL1			0x1804
34953+#define RK3568_ESMART0_AXI_CTRL			0x1808
34954+#define RK3568_ESMART0_REGION0_CTRL		0x1810
34955+#define RK3568_ESMART0_REGION0_YRGB_MST		0x1814
34956+#define RK3568_ESMART0_REGION0_CBR_MST		0x1818
34957+#define RK3568_ESMART0_REGION0_VIR		0x181C
34958+#define RK3568_ESMART0_REGION0_ACT_INFO		0x1820
34959+#define RK3568_ESMART0_REGION0_DSP_INFO		0x1824
34960+#define RK3568_ESMART0_REGION0_DSP_ST		0x1828
34961+#define RK3568_ESMART0_REGION0_SCL_CTRL		0x1830
34962+#define RK3568_ESMART0_REGION0_SCL_FACTOR_YRGB	0x1834
34963+#define RK3568_ESMART0_REGION0_SCL_FACTOR_CBR	0x1838
34964+#define RK3568_ESMART0_REGION0_SCL_OFFSET	0x183C
34965+#define RK3568_ESMART0_REGION1_CTRL		0x1840
34966+#define RK3568_ESMART0_REGION1_YRGB_MST		0x1844
34967+#define RK3568_ESMART0_REGION1_CBR_MST		0x1848
34968+#define RK3568_ESMART0_REGION1_VIR		0x184C
34969+#define RK3568_ESMART0_REGION1_ACT_INFO		0x1850
34970+#define RK3568_ESMART0_REGION1_DSP_INFO		0x1854
34971+#define RK3568_ESMART0_REGION1_DSP_ST		0x1858
34972+#define RK3568_ESMART0_REGION1_SCL_CTRL		0x1860
34973+#define RK3568_ESMART0_REGION1_SCL_FACTOR_YRGB	0x1864
34974+#define RK3568_ESMART0_REGION1_SCL_FACTOR_CBR	0x1868
34975+#define RK3568_ESMART0_REGION1_SCL_OFFSET	0x186C
34976+#define RK3568_ESMART0_REGION2_CTRL		0x1870
34977+#define RK3568_ESMART0_REGION2_YRGB_MST		0x1874
34978+#define RK3568_ESMART0_REGION2_CBR_MST		0x1878
34979+#define RK3568_ESMART0_REGION2_VIR		0x187C
34980+#define RK3568_ESMART0_REGION2_ACT_INFO		0x1880
34981+#define RK3568_ESMART0_REGION2_DSP_INFO		0x1884
34982+#define RK3568_ESMART0_REGION2_DSP_ST		0x1888
34983+#define RK3568_ESMART0_REGION2_SCL_CTRL		0x1890
34984+#define RK3568_ESMART0_REGION2_SCL_FACTOR_YRGB	0x1894
34985+#define RK3568_ESMART0_REGION2_SCL_FACTOR_CBR	0x1898
34986+#define RK3568_ESMART0_REGION2_SCL_OFFSET	0x189C
34987+#define RK3568_ESMART0_REGION3_CTRL		0x18A0
34988+#define RK3568_ESMART0_REGION3_YRGB_MST		0x18A4
34989+#define RK3568_ESMART0_REGION3_CBR_MST		0x18A8
34990+#define RK3568_ESMART0_REGION3_VIR		0x18AC
34991+#define RK3568_ESMART0_REGION3_ACT_INFO		0x18B0
34992+#define RK3568_ESMART0_REGION3_DSP_INFO		0x18B4
34993+#define RK3568_ESMART0_REGION3_DSP_ST		0x18B8
34994+#define RK3568_ESMART0_REGION3_SCL_CTRL		0x18C0
34995+#define RK3568_ESMART0_REGION3_SCL_FACTOR_YRGB	0x18C4
34996+#define RK3568_ESMART0_REGION3_SCL_FACTOR_CBR	0x18C8
34997+#define RK3568_ESMART0_REGION3_SCL_OFFSET	0x18CC
34998+#define RK3568_ESMART0_COLOR_KEY_CTRL		0x18D0
34999+
35000+#define RK3568_ESMART1_CTRL0			0x1A00
35001+#define RK3568_ESMART1_CTRL1			0x1A04
35002+#define RK3568_ESMART1_REGION0_CTRL		0x1A10
35003+#define RK3568_ESMART1_REGION0_YRGB_MST		0x1A14
35004+#define RK3568_ESMART1_REGION0_CBR_MST		0x1A18
35005+#define RK3568_ESMART1_REGION0_VIR		0x1A1C
35006+#define RK3568_ESMART1_REGION0_ACT_INFO		0x1A20
35007+#define RK3568_ESMART1_REGION0_DSP_INFO		0x1A24
35008+#define RK3568_ESMART1_REGION0_DSP_ST		0x1A28
35009+#define RK3568_ESMART1_REGION0_SCL_CTRL		0x1A30
35010+#define RK3568_ESMART1_REGION0_SCL_FACTOR_YRGB	0x1A34
35011+#define RK3568_ESMART1_REGION0_SCL_FACTOR_CBR	0x1A38
35012+#define RK3568_ESMART1_REGION0_SCL_OFFSET	0x1A3C
35013+#define RK3568_ESMART1_REGION1_CTRL		0x1A40
35014+#define RK3568_ESMART1_REGION1_YRGB_MST		0x1A44
35015+#define RK3568_ESMART1_REGION1_CBR_MST		0x1A48
35016+#define RK3568_ESMART1_REGION1_VIR		0x1A4C
35017+#define RK3568_ESMART1_REGION1_ACT_INFO		0x1A50
35018+#define RK3568_ESMART1_REGION1_DSP_INFO		0x1A54
35019+#define RK3568_ESMART1_REGION1_DSP_ST		0x1A58
35020+#define RK3568_ESMART1_REGION1_SCL_CTRL		0x1A60
35021+#define RK3568_ESMART1_REGION1_SCL_FACTOR_YRGB	0x1A64
35022+#define RK3568_ESMART1_REGION1_SCL_FACTOR_CBR	0x1A68
35023+#define RK3568_ESMART1_REGION1_SCL_OFFSET	0x1A6C
35024+#define RK3568_ESMART1_REGION2_CTRL		0x1A70
35025+#define RK3568_ESMART1_REGION2_YRGB_MST		0x1A74
35026+#define RK3568_ESMART1_REGION2_CBR_MST		0x1A78
35027+#define RK3568_ESMART1_REGION2_VIR		0x1A7C
35028+#define RK3568_ESMART1_REGION2_ACT_INFO		0x1A80
35029+#define RK3568_ESMART1_REGION2_DSP_INFO		0x1A84
35030+#define RK3568_ESMART1_REGION2_DSP_ST		0x1A88
35031+#define RK3568_ESMART1_REGION2_SCL_CTRL		0x1A90
35032+#define RK3568_ESMART1_REGION2_SCL_FACTOR_YRGB	0x1A94
35033+#define RK3568_ESMART1_REGION2_SCL_FACTOR_CBR	0x1A98
35034+#define RK3568_ESMART1_REGION2_SCL_OFFSET	0x1A9C
35035+#define RK3568_ESMART1_REGION3_CTRL		0x1AA0
35036+#define RK3568_ESMART1_REGION3_YRGB_MST		0x1AA4
35037+#define RK3568_ESMART1_REGION3_CBR_MST		0x1AA8
35038+#define RK3568_ESMART1_REGION3_VIR		0x1AAC
35039+#define RK3568_ESMART1_REGION3_ACT_INFO		0x1AB0
35040+#define RK3568_ESMART1_REGION3_DSP_INFO		0x1AB4
35041+#define RK3568_ESMART1_REGION3_DSP_ST		0x1AB8
35042+#define RK3568_ESMART1_REGION3_SCL_CTRL		0x1AC0
35043+#define RK3568_ESMART1_REGION3_SCL_FACTOR_YRGB	0x1AC4
35044+#define RK3568_ESMART1_REGION3_SCL_FACTOR_CBR	0x1AC8
35045+#define RK3568_ESMART1_REGION3_SCL_OFFSET	0x1ACC
35046+
35047+#define RK3568_SMART0_CTRL0			0x1C00
35048+#define RK3568_SMART0_CTRL1			0x1C04
35049+#define RK3568_SMART0_REGION0_CTRL		0x1C10
35050+#define RK3568_SMART0_REGION0_YRGB_MST		0x1C14
35051+#define RK3568_SMART0_REGION0_CBR_MST		0x1C18
35052+#define RK3568_SMART0_REGION0_VIR		0x1C1C
35053+#define RK3568_SMART0_REGION0_ACT_INFO		0x1C20
35054+#define RK3568_SMART0_REGION0_DSP_INFO		0x1C24
35055+#define RK3568_SMART0_REGION0_DSP_ST		0x1C28
35056+#define RK3568_SMART0_REGION0_SCL_CTRL		0x1C30
35057+#define RK3568_SMART0_REGION0_SCL_FACTOR_YRGB	0x1C34
35058+#define RK3568_SMART0_REGION0_SCL_FACTOR_CBR	0x1C38
35059+#define RK3568_SMART0_REGION0_SCL_OFFSET	0x1C3C
35060+#define RK3568_SMART0_REGION1_CTRL		0x1C40
35061+#define RK3568_SMART0_REGION1_YRGB_MST		0x1C44
35062+#define RK3568_SMART0_REGION1_CBR_MST		0x1C48
35063+#define RK3568_SMART0_REGION1_VIR		0x1C4C
35064+#define RK3568_SMART0_REGION1_ACT_INFO		0x1C50
35065+#define RK3568_SMART0_REGION1_DSP_INFO		0x1C54
35066+#define RK3568_SMART0_REGION1_DSP_ST		0x1C58
35067+#define RK3568_SMART0_REGION1_SCL_CTRL		0x1C60
35068+#define RK3568_SMART0_REGION1_SCL_FACTOR_YRGB	0x1C64
35069+#define RK3568_SMART0_REGION1_SCL_FACTOR_CBR	0x1C68
35070+#define RK3568_SMART0_REGION1_SCL_OFFSET	0x1C6C
35071+#define RK3568_SMART0_REGION2_CTRL		0x1C70
35072+#define RK3568_SMART0_REGION2_YRGB_MST		0x1C74
35073+#define RK3568_SMART0_REGION2_CBR_MST		0x1C78
35074+#define RK3568_SMART0_REGION2_VIR		0x1C7C
35075+#define RK3568_SMART0_REGION2_ACT_INFO		0x1C80
35076+#define RK3568_SMART0_REGION2_DSP_INFO		0x1C84
35077+#define RK3568_SMART0_REGION2_DSP_ST		0x1C88
35078+#define RK3568_SMART0_REGION2_SCL_CTRL		0x1C90
35079+#define RK3568_SMART0_REGION2_SCL_FACTOR_YRGB	0x1C94
35080+#define RK3568_SMART0_REGION2_SCL_FACTOR_CBR	0x1C98
35081+#define RK3568_SMART0_REGION2_SCL_OFFSET	0x1C9C
35082+#define RK3568_SMART0_REGION3_CTRL		0x1CA0
35083+#define RK3568_SMART0_REGION3_YRGB_MST		0x1CA4
35084+#define RK3568_SMART0_REGION3_CBR_MST		0x1CA8
35085+#define RK3568_SMART0_REGION3_VIR		0x1CAC
35086+#define RK3568_SMART0_REGION3_ACT_INFO		0x1CB0
35087+#define RK3568_SMART0_REGION3_DSP_INFO		0x1CB4
35088+#define RK3568_SMART0_REGION3_DSP_ST		0x1CB8
35089+#define RK3568_SMART0_REGION3_SCL_CTRL		0x1CC0
35090+#define RK3568_SMART0_REGION3_SCL_FACTOR_YRGB	0x1CC4
35091+#define RK3568_SMART0_REGION3_SCL_FACTOR_CBR	0x1CC8
35092+#define RK3568_SMART0_REGION3_SCL_OFFSET	0x1CCC
35093+
35094+#define RK3568_SMART1_CTRL0			0x1E00
35095+#define RK3568_SMART1_CTRL1			0x1E04
35096+#define RK3568_SMART1_REGION0_CTRL		0x1E10
35097+#define RK3568_SMART1_REGION0_YRGB_MST		0x1E14
35098+#define RK3568_SMART1_REGION0_CBR_MST		0x1E18
35099+#define RK3568_SMART1_REGION0_VIR		0x1E1C
35100+#define RK3568_SMART1_REGION0_ACT_INFO		0x1E20
35101+#define RK3568_SMART1_REGION0_DSP_INFO		0x1E24
35102+#define RK3568_SMART1_REGION0_DSP_ST		0x1E28
35103+#define RK3568_SMART1_REGION0_SCL_CTRL		0x1E30
35104+#define RK3568_SMART1_REGION0_SCL_FACTOR_YRGB	0x1E34
35105+#define RK3568_SMART1_REGION0_SCL_FACTOR_CBR	0x1E38
35106+#define RK3568_SMART1_REGION0_SCL_OFFSET	0x1E3C
35107+#define RK3568_SMART1_REGION1_CTRL		0x1E40
35108+#define RK3568_SMART1_REGION1_YRGB_MST		0x1E44
35109+#define RK3568_SMART1_REGION1_CBR_MST		0x1E48
35110+#define RK3568_SMART1_REGION1_VIR		0x1E4C
35111+#define RK3568_SMART1_REGION1_ACT_INFO		0x1E50
35112+#define RK3568_SMART1_REGION1_DSP_INFO		0x1E54
35113+#define RK3568_SMART1_REGION1_DSP_ST		0x1E58
35114+#define RK3568_SMART1_REGION1_SCL_CTRL		0x1E60
35115+#define RK3568_SMART1_REGION1_SCL_FACTOR_YRGB	0x1E64
35116+#define RK3568_SMART1_REGION1_SCL_FACTOR_CBR	0x1E68
35117+#define RK3568_SMART1_REGION1_SCL_OFFSET	0x1E6C
35118+#define RK3568_SMART1_REGION2_CTRL		0x1E70
35119+#define RK3568_SMART1_REGION2_YRGB_MST		0x1E74
35120+#define RK3568_SMART1_REGION2_CBR_MST		0x1E78
35121+#define RK3568_SMART1_REGION2_VIR		0x1E7C
35122+#define RK3568_SMART1_REGION2_ACT_INFO		0x1E80
35123+#define RK3568_SMART1_REGION2_DSP_INFO		0x1E84
35124+#define RK3568_SMART1_REGION2_DSP_ST		0x1E88
35125+#define RK3568_SMART1_REGION2_SCL_CTRL		0x1E90
35126+#define RK3568_SMART1_REGION2_SCL_FACTOR_YRGB	0x1E94
35127+#define RK3568_SMART1_REGION2_SCL_FACTOR_CBR	0x1E98
35128+#define RK3568_SMART1_REGION2_SCL_OFFSET	0x1E9C
35129+#define RK3568_SMART1_REGION3_CTRL		0x1EA0
35130+#define RK3568_SMART1_REGION3_YRGB_MST		0x1EA4
35131+#define RK3568_SMART1_REGION3_CBR_MST		0x1EA8
35132+#define RK3568_SMART1_REGION3_VIR		0x1EAC
35133+#define RK3568_SMART1_REGION3_ACT_INFO		0x1EB0
35134+#define RK3568_SMART1_REGION3_DSP_INFO		0x1EB4
35135+#define RK3568_SMART1_REGION3_DSP_ST		0x1EB8
35136+#define RK3568_SMART1_REGION3_SCL_CTRL		0x1EC0
35137+#define RK3568_SMART1_REGION3_SCL_FACTOR_YRGB	0x1EC4
35138+#define RK3568_SMART1_REGION3_SCL_FACTOR_CBR	0x1EC8
35139+#define RK3568_SMART1_REGION3_SCL_OFFSET	0x1ECC
35140+
35141+/* HDR register definition */
35142+#define RK3568_HDR_LUT_CTRL				0x2000
35143+#define RK3568_HDR_LUT_MST				0x2004
35144+#define RK3568_SDR2HDR_CTRL				0x2010
35145+/* for HDR10 controller1 */
35146+#define RK3568_SDR2HDR_CTRL1				0x2018
35147+#define RK3568_HDR2SDR_CTRL1				0x201C
35148+#define RK3568_HDR2SDR_CTRL				0x2020
35149+#define RK3568_HDR2SDR_SRC_RANGE			0x2024
35150+#define RK3568_HDR2SDR_NORMFACEETF			0x2028
35151+#define RK3568_HDR2SDR_DST_RANGE			0x202C
35152+#define RK3568_HDR2SDR_NORMFACCGAMMA			0x2030
35153+#define RK3568_HDR_EETF_OETF_Y0				0x203C
35154+#define RK3568_HDR_SAT_Y0				0x20C0
35155+#define RK3568_HDR_EOTF_OETF_Y0				0x20F0
35156+#define RK3568_HDR_OETF_DX_POW1				0x2200
35157+#define RK3568_HDR_OETF_XN1				0x2300
35158+
35159+/* DSC register definition */
35160+#define RK3588_DSC_8K_PPS0_3				0x4000
35161+#define RK3588_DSC_8K_CTRL0				0x40A0
35162+#define RK3588_DSC_8K_CTRL1				0x40A4
35163+#define RK3588_DSC_8K_STS0				0x40A8
35164+#define RK3588_DSC_8K_ERS				0x40C4
35165+
35166+#define RK3588_DSC_4K_PPS0_3				0x4100
35167+#define RK3588_DSC_4K_CTRL0				0x41A0
35168+#define RK3588_DSC_4K_CTRL1				0x41A4
35169+#define RK3588_DSC_4K_STS0				0x41A8
35170+#define RK3588_DSC_4K_ERS				0x41C4
35171+
35172+#define RK3588_GRF_SOC_CON1				0x0304
35173+#define RK3588_GRF_VOP_CON2				0x08
35174+#define RK3588_GRF_VO1_CON0				0x00
35175+
35176+
35177+#define RK3588_PMU_PWR_GATE_CON1			0x150
35178+#define RK3588_PMU_SUBMEM_PWR_GATE_CON1			0x1B4
35179+#define RK3588_PMU_SUBMEM_PWR_GATE_CON2			0x1B8
35180+#define RK3588_PMU_SUBMEM_PWR_GATE_STATUS		0x1BC
35181+#define RK3588_PMU_BISR_CON3				0x20C
35182+#define RK3588_PMU_BISR_STATUS5				0x294
35183 
35184 #endif /* _ROCKCHIP_VOP_REG_H */
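The Esmart and Smart window blocks defined above follow a regular layout: each window occupies a 0x200 stride starting at RK3568_ESMART0_CTRL0 (0x1800, then 0x1A00/0x1C00/0x1E00), and each of its four regions a 0x30 stride starting 0x10 into the window, with the same per-register offsets (CTRL, YRGB_MST, CBR_MST, ...) repeating in every region. A minimal sketch of computing these addresses generically; the helper macros are illustrative only and are not part of the header:

/* Illustrative helpers, not defined in rockchip_vop_reg.h. */
#define RK3568_ESMART_WIN_STRIDE	0x200
#define RK3568_ESMART_REGION_STRIDE	0x30

#define RK3568_ESMART_REG(win, region, off) \
	(RK3568_ESMART0_CTRL0 + (win) * RK3568_ESMART_WIN_STRIDE + \
	 0x10 + (region) * RK3568_ESMART_REGION_STRIDE + (off))

/* Example: RK3568_ESMART_REG(1, 2, 0x04) == 0x1A74 == RK3568_ESMART1_REGION2_YRGB_MST;
 * window indices 2 and 3 land on the SMART0 (0x1C00) and SMART1 (0x1E00) blocks.
 */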
35185diff --git a/drivers/i2c/busses/i2c-rk3x.c b/drivers/i2c/busses/i2c-rk3x.c
35186index 13c14eb17..17766c207 100644
35187--- a/drivers/i2c/busses/i2c-rk3x.c
35188+++ b/drivers/i2c/busses/i2c-rk3x.c
35189@@ -6,6 +6,7 @@
35190  * based on the patches by Rockchip Inc.
35191  */
35192 
35193+#include <linux/acpi.h>
35194 #include <linux/kernel.h>
35195 #include <linux/module.h>
35196 #include <linux/i2c.h>
35197@@ -23,6 +24,9 @@
35198 #include <linux/mfd/syscon.h>
35199 #include <linux/regmap.h>
35200 #include <linux/math64.h>
35201+#include <linux/reboot.h>
35202+#include <linux/delay.h>
35203+#include <linux/soc/rockchip/rockchip_thunderboot_service.h>
35204 
35205 
35206 /* Register Map */
35207@@ -35,6 +39,7 @@
35208 #define REG_IEN        0x18 /* interrupt enable */
35209 #define REG_IPD        0x1c /* interrupt pending */
35210 #define REG_FCNT       0x20 /* finished count */
35211+#define REG_CON1       0x228 /* control register1 */
35212 
35213 /* Data buffer offsets */
35214 #define TXBUFFER_BASE 0x100
35215@@ -62,6 +67,15 @@ enum {
35216 #define REG_CON_STA_CFG(cfg) ((cfg) << 12)
35217 #define REG_CON_STO_CFG(cfg) ((cfg) << 14)
35218 
35219+enum {
35220+	RK_I2C_VERSION0 = 0,
35221+	RK_I2C_VERSION1,
35222+	RK_I2C_VERSION5 = 5,
35223+};
35224+
35225+#define REG_CON_VERSION GENMASK_ULL(24, 16)
35226+#define REG_CON_VERSION_SHIFT 16
35227+
35228 /* REG_MRXADDR bits */
35229 #define REG_MRXADDR_VALID(x) BIT(24 + (x)) /* [x*8+7:x*8] of MRX[R]ADDR valid */
35230 
35231@@ -73,14 +87,21 @@ enum {
35232 #define REG_INT_START     BIT(4) /* START condition generated */
35233 #define REG_INT_STOP      BIT(5) /* STOP condition generated */
35234 #define REG_INT_NAKRCV    BIT(6) /* NACK received */
35235-#define REG_INT_ALL       0x7f
35236+#define REG_INT_ALL       0xff
35237+
35238+/* Disable all i2c irqs */
35239+#define IEN_ALL_DISABLE   0
35240+
35241+#define REG_CON1_AUTO_STOP BIT(0)
35242+#define REG_CON1_TRANSFER_AUTO_STOP BIT(1)
35243+#define REG_CON1_NACK_AUTO_STOP BIT(2)
35244 
35245 /* Constants */
35246 #define WAIT_TIMEOUT      1000 /* ms */
35247 #define DEFAULT_SCL_RATE  (100 * 1000) /* Hz */
35248 
35249 /**
35250- * struct i2c_spec_values - I2C specification values for various modes
35251+ * struct i2c_spec_values:
35252  * @min_hold_start_ns: min hold time (repeated) START condition
35253  * @min_low_ns: min LOW period of the SCL clock
35254  * @min_high_ns: min HIGH period of the SCL clock
35255@@ -136,7 +157,7 @@ static const struct i2c_spec_values fast_mode_plus_spec = {
35256 };
35257 
35258 /**
35259- * struct rk3x_i2c_calced_timings - calculated V1 timings
35260+ * struct rk3x_i2c_calced_timings:
35261  * @div_low: Divider output for low
35262  * @div_high: Divider output for high
35263  * @tuning: Used to adjust setup/hold data time,
35264@@ -152,14 +173,13 @@ struct rk3x_i2c_calced_timings {
35265 
35266 enum rk3x_i2c_state {
35267 	STATE_IDLE,
35268-	STATE_START,
35269 	STATE_READ,
35270 	STATE_WRITE,
35271 	STATE_STOP
35272 };
35273 
35274 /**
35275- * struct rk3x_i2c_soc_data - SOC-specific data
35276+ * struct rk3x_i2c_soc_data:
35277  * @grf_offset: offset inside the grf regmap for setting the i2c type
35278  * @calc_timings: Callback function for i2c timing information calculated
35279  */
35280@@ -189,6 +209,9 @@ struct rk3x_i2c_soc_data {
35281  * @state: state of i2c transfer
35282  * @processed: byte length which has been send or received
35283  * @error: error code for i2c transfer
35284+ * @i2c_restart_nb: notifier to make sure the i2c transfer is finished before restart
35285+ * @system_restarting: true if system is restarting
35286+ * @tb_cl: client for rockchip thunder boot service
35287  */
35288 struct rk3x_i2c {
35289 	struct i2c_adapter adap;
35290@@ -200,6 +223,7 @@ struct rk3x_i2c {
35291 	struct clk *clk;
35292 	struct clk *pclk;
35293 	struct notifier_block clk_rate_nb;
35294+	bool autostop_supported;
35295 
35296 	/* Settings */
35297 	struct i2c_timings t;
35298@@ -219,8 +243,22 @@ struct rk3x_i2c {
35299 	enum rk3x_i2c_state state;
35300 	unsigned int processed;
35301 	int error;
35302+	unsigned int suspended:1;
35303+
35304+	struct notifier_block i2c_restart_nb;
35305+	bool system_restarting;
35306+	struct rk_tb_client tb_cl;
35307 };
35308 
35309+static void rk3x_i2c_prepare_read(struct rk3x_i2c *i2c);
35310+static int rk3x_i2c_fill_transmit_buf(struct rk3x_i2c *i2c, bool sendend);
35311+
35312+static inline void rk3x_i2c_wake_up(struct rk3x_i2c *i2c)
35313+{
35314+	if (!i2c->system_restarting)
35315+		wake_up(&i2c->wait);
35316+}
35317+
35318 static inline void i2c_writel(struct rk3x_i2c *i2c, u32 value,
35319 			      unsigned int offset)
35320 {
35321@@ -238,15 +276,75 @@ static inline void rk3x_i2c_clean_ipd(struct rk3x_i2c *i2c)
35322 	i2c_writel(i2c, REG_INT_ALL, REG_IPD);
35323 }
35324 
35325+static inline void rk3x_i2c_disable_irq(struct rk3x_i2c *i2c)
35326+{
35327+	i2c_writel(i2c, IEN_ALL_DISABLE, REG_IEN);
35328+}
35329+
35330+static inline void rk3x_i2c_disable(struct rk3x_i2c *i2c)
35331+{
35332+	u32 val = i2c_readl(i2c, REG_CON) & REG_CON_TUNING_MASK;
35333+
35334+	i2c_writel(i2c, val, REG_CON);
35335+}
35336+
35337+static bool rk3x_i2c_auto_stop(struct rk3x_i2c *i2c)
35338+{
35339+	unsigned int len, con1 = 0;
35340+
35341+	if (!i2c->autostop_supported)
35342+		return false;
35343+
35344+	if (!(i2c->msg->flags & I2C_M_IGNORE_NAK))
35345+		con1 = REG_CON1_NACK_AUTO_STOP | REG_CON1_AUTO_STOP;
35346+
35347+	if (!i2c->is_last_msg)
35348+		goto out;
35349+
35350+	len = i2c->msg->len - i2c->processed;
35351+
35352+	if (len > 32)
35353+		goto out;
35354+
35355+	i2c->state = STATE_STOP;
35356+
35357+	con1 |= REG_CON1_TRANSFER_AUTO_STOP | REG_CON1_AUTO_STOP;
35358+	i2c_writel(i2c, con1, REG_CON1);
35359+	if (con1 & REG_CON1_NACK_AUTO_STOP)
35360+		i2c_writel(i2c, REG_INT_STOP, REG_IEN);
35361+	else
35362+		i2c_writel(i2c, REG_INT_STOP | REG_INT_NAKRCV, REG_IEN);
35363+
35364+	return true;
35365+
35366+out:
35367+	i2c_writel(i2c, con1, REG_CON1);
35368+	return false;
35369+}
35370+
35371 /**
35372- * rk3x_i2c_start - Generate a START condition, which triggers a REG_INT_START interrupt.
35373- * @i2c: target controller data
35374+ * Generate a START condition, which triggers a REG_INT_START interrupt.
35375  */
35376 static void rk3x_i2c_start(struct rk3x_i2c *i2c)
35377 {
35378 	u32 val = i2c_readl(i2c, REG_CON) & REG_CON_TUNING_MASK;
35379+	bool auto_stop = rk3x_i2c_auto_stop(i2c);
35380+	int length = 0;
35381 
35382-	i2c_writel(i2c, REG_INT_START, REG_IEN);
35383+	/* enable appropriate interrupts */
35384+	if (i2c->mode == REG_CON_MOD_TX) {
35385+		if (!auto_stop) {
35386+			i2c_writel(i2c, REG_INT_MBTF | REG_INT_NAKRCV, REG_IEN);
35387+			i2c->state = STATE_WRITE;
35388+		}
35389+		length = rk3x_i2c_fill_transmit_buf(i2c, false);
35390+	} else {
35391+		/* in any other case, we are going to be reading. */
35392+		if (!auto_stop) {
35393+			i2c_writel(i2c, REG_INT_MBRF | REG_INT_NAKRCV, REG_IEN);
35394+			i2c->state = STATE_READ;
35395+		}
35396+	}
35397 
35398 	/* enable adapter with correct mode, send START condition */
35399 	val |= REG_CON_EN | REG_CON_MOD(i2c->mode) | REG_CON_START;
35400@@ -256,11 +354,17 @@ static void rk3x_i2c_start(struct rk3x_i2c *i2c)
35401 		val |= REG_CON_ACTACK;
35402 
35403 	i2c_writel(i2c, val, REG_CON);
35404+
35405+	/* enable transition */
35406+	if (i2c->mode == REG_CON_MOD_TX)
35407+		i2c_writel(i2c, length, REG_MTXCNT);
35408+	else
35409+		rk3x_i2c_prepare_read(i2c);
35410 }
35411 
35412 /**
35413- * rk3x_i2c_stop - Generate a STOP condition, which triggers a REG_INT_STOP interrupt.
35414- * @i2c: target controller data
35415+ * Generate a STOP condition, which triggers a REG_INT_STOP interrupt.
35416+ *
35417  * @error: Error code to return in rk3x_i2c_xfer
35418  */
35419 static void rk3x_i2c_stop(struct rk3x_i2c *i2c, int error)
35420@@ -279,6 +383,7 @@ static void rk3x_i2c_stop(struct rk3x_i2c *i2c, int error)
35421 
35422 		ctrl = i2c_readl(i2c, REG_CON);
35423 		ctrl |= REG_CON_STOP;
35424+		ctrl &= ~REG_CON_START;
35425 		i2c_writel(i2c, ctrl, REG_CON);
35426 	} else {
35427 		/* Signal rk3x_i2c_xfer to start the next message. */
35428@@ -294,13 +399,12 @@ static void rk3x_i2c_stop(struct rk3x_i2c *i2c, int error)
35429 		i2c_writel(i2c, ctrl, REG_CON);
35430 
35431 		/* signal that we are finished with the current msg */
35432-		wake_up(&i2c->wait);
35433+		rk3x_i2c_wake_up(i2c);
35434 	}
35435 }
35436 
35437 /**
35438- * rk3x_i2c_prepare_read - Setup a read according to i2c->msg
35439- * @i2c: target controller data
35440+ * Setup a read according to i2c->msg
35441  */
35442 static void rk3x_i2c_prepare_read(struct rk3x_i2c *i2c)
35443 {
35444@@ -324,6 +428,8 @@ static void rk3x_i2c_prepare_read(struct rk3x_i2c *i2c)
35445 	if (i2c->processed != 0) {
35446 		con &= ~REG_CON_MOD_MASK;
35447 		con |= REG_CON_MOD(REG_CON_MOD_RX);
35448+		if (con & REG_CON_START)
35449+			con &= ~REG_CON_START;
35450 	}
35451 
35452 	i2c_writel(i2c, con, REG_CON);
35453@@ -331,10 +437,9 @@ static void rk3x_i2c_prepare_read(struct rk3x_i2c *i2c)
35454 }
35455 
35456 /**
35457- * rk3x_i2c_fill_transmit_buf - Fill the transmit buffer with data from i2c->msg
35458- * @i2c: target controller data
35459+ * Fill the transmit buffer with data from i2c->msg
35460  */
35461-static void rk3x_i2c_fill_transmit_buf(struct rk3x_i2c *i2c)
35462+static int rk3x_i2c_fill_transmit_buf(struct rk3x_i2c *i2c, bool sendend)
35463 {
35464 	unsigned int i, j;
35465 	u32 cnt = 0;
35466@@ -362,45 +467,20 @@ static void rk3x_i2c_fill_transmit_buf(struct rk3x_i2c *i2c)
35467 			break;
35468 	}
35469 
35470-	i2c_writel(i2c, cnt, REG_MTXCNT);
35471+	if (sendend)
35472+		i2c_writel(i2c, cnt, REG_MTXCNT);
35473+
35474+	return cnt;
35475 }
35476 
35477 
35478 /* IRQ handlers for individual states */
35479 
35480-static void rk3x_i2c_handle_start(struct rk3x_i2c *i2c, unsigned int ipd)
35481-{
35482-	if (!(ipd & REG_INT_START)) {
35483-		rk3x_i2c_stop(i2c, -EIO);
35484-		dev_warn(i2c->dev, "unexpected irq in START: 0x%x\n", ipd);
35485-		rk3x_i2c_clean_ipd(i2c);
35486-		return;
35487-	}
35488-
35489-	/* ack interrupt */
35490-	i2c_writel(i2c, REG_INT_START, REG_IPD);
35491-
35492-	/* disable start bit */
35493-	i2c_writel(i2c, i2c_readl(i2c, REG_CON) & ~REG_CON_START, REG_CON);
35494-
35495-	/* enable appropriate interrupts and transition */
35496-	if (i2c->mode == REG_CON_MOD_TX) {
35497-		i2c_writel(i2c, REG_INT_MBTF | REG_INT_NAKRCV, REG_IEN);
35498-		i2c->state = STATE_WRITE;
35499-		rk3x_i2c_fill_transmit_buf(i2c);
35500-	} else {
35501-		/* in any other case, we are going to be reading. */
35502-		i2c_writel(i2c, REG_INT_MBRF | REG_INT_NAKRCV, REG_IEN);
35503-		i2c->state = STATE_READ;
35504-		rk3x_i2c_prepare_read(i2c);
35505-	}
35506-}
35507-
35508 static void rk3x_i2c_handle_write(struct rk3x_i2c *i2c, unsigned int ipd)
35509 {
35510 	if (!(ipd & REG_INT_MBTF)) {
35511 		rk3x_i2c_stop(i2c, -EIO);
35512-		dev_err(i2c->dev, "unexpected irq in WRITE: 0x%x\n", ipd);
35513+		dev_warn_ratelimited(i2c->dev, "unexpected irq in WRITE: 0x%x\n", ipd);
35514 		rk3x_i2c_clean_ipd(i2c);
35515 		return;
35516 	}
35517@@ -408,27 +488,21 @@ static void rk3x_i2c_handle_write(struct rk3x_i2c *i2c, unsigned int ipd)
35518 	/* ack interrupt */
35519 	i2c_writel(i2c, REG_INT_MBTF, REG_IPD);
35520 
35521+	rk3x_i2c_auto_stop(i2c);
35522 	/* are we finished? */
35523 	if (i2c->processed == i2c->msg->len)
35524 		rk3x_i2c_stop(i2c, i2c->error);
35525 	else
35526-		rk3x_i2c_fill_transmit_buf(i2c);
35527+		rk3x_i2c_fill_transmit_buf(i2c, true);
35528 }
35529 
35530-static void rk3x_i2c_handle_read(struct rk3x_i2c *i2c, unsigned int ipd)
35531+static void rk3x_i2c_read(struct rk3x_i2c *i2c)
35532 {
35533 	unsigned int i;
35534 	unsigned int len = i2c->msg->len - i2c->processed;
35535 	u32 val;
35536 	u8 byte;
35537 
35538-	/* we only care for MBRF here. */
35539-	if (!(ipd & REG_INT_MBRF))
35540-		return;
35541-
35542-	/* ack interrupt (read also produces a spurious START flag, clear it too) */
35543-	i2c_writel(i2c, REG_INT_MBRF | REG_INT_START, REG_IPD);
35544-
35545 	/* Can only handle a maximum of 32 bytes at a time */
35546 	if (len > 32)
35547 		len = 32;
35548@@ -441,7 +515,21 @@ static void rk3x_i2c_handle_read(struct rk3x_i2c *i2c, unsigned int ipd)
35549 		byte = (val >> ((i % 4) * 8)) & 0xff;
35550 		i2c->msg->buf[i2c->processed++] = byte;
35551 	}
35552+}
35553 
35554+static void rk3x_i2c_handle_read(struct rk3x_i2c *i2c, unsigned int ipd)
35555+{
35556+	/* we only care for MBRF here. */
35557+	if (!(ipd & REG_INT_MBRF))
35558+		return;
35559+
35560+	/* ack interrupt (read also produces a spurious START flag, clear it too) */
35561+	i2c_writel(i2c, REG_INT_MBRF | REG_INT_START, REG_IPD);
35562+
35563+	/* read the data from receive buffer */
35564+	rk3x_i2c_read(i2c);
35565+
35566+	rk3x_i2c_auto_stop(i2c);
35567 	/* are we finished? */
35568 	if (i2c->processed == i2c->msg->len)
35569 		rk3x_i2c_stop(i2c, i2c->error);
35570@@ -455,24 +543,36 @@ static void rk3x_i2c_handle_stop(struct rk3x_i2c *i2c, unsigned int ipd)
35571 
35572 	if (!(ipd & REG_INT_STOP)) {
35573 		rk3x_i2c_stop(i2c, -EIO);
35574-		dev_err(i2c->dev, "unexpected irq in STOP: 0x%x\n", ipd);
35575+		dev_warn_ratelimited(i2c->dev, "unexpected irq in STOP: 0x%x\n", ipd);
35576 		rk3x_i2c_clean_ipd(i2c);
35577 		return;
35578 	}
35579 
35580+	if (i2c->autostop_supported && !i2c->error) {
35581+		if (i2c->mode != REG_CON_MOD_TX && i2c->msg) {
35582+			if ((i2c->msg->len - i2c->processed) > 0)
35583+				rk3x_i2c_read(i2c);
35584+		}
35585+
35586+		i2c->processed = 0;
35587+		i2c->msg = NULL;
35588+	}
35589+
35590 	/* ack interrupt */
35591 	i2c_writel(i2c, REG_INT_STOP, REG_IPD);
35592 
35593 	/* disable STOP bit */
35594 	con = i2c_readl(i2c, REG_CON);
35595 	con &= ~REG_CON_STOP;
35596+	if (i2c->autostop_supported)
35597+		con &= ~REG_CON_START;
35598 	i2c_writel(i2c, con, REG_CON);
35599 
35600 	i2c->busy = false;
35601 	i2c->state = STATE_IDLE;
35602 
35603 	/* signal rk3x_i2c_xfer that we are finished */
35604-	wake_up(&i2c->wait);
35605+	rk3x_i2c_wake_up(i2c);
35606 }
35607 
35608 static irqreturn_t rk3x_i2c_irq(int irqno, void *dev_id)
35609@@ -484,7 +584,9 @@ static irqreturn_t rk3x_i2c_irq(int irqno, void *dev_id)
35610 
35611 	ipd = i2c_readl(i2c, REG_IPD);
35612 	if (i2c->state == STATE_IDLE) {
35613-		dev_warn(i2c->dev, "irq in STATE_IDLE, ipd = 0x%x\n", ipd);
35614+		dev_warn_ratelimited(i2c->dev,
35615+				     "irq in STATE_IDLE, ipd = 0x%x\n",
35616+				     ipd);
35617 		rk3x_i2c_clean_ipd(i2c);
35618 		goto out;
35619 	}
35620@@ -504,8 +606,15 @@ static irqreturn_t rk3x_i2c_irq(int irqno, void *dev_id)
35621 
35622 		ipd &= ~REG_INT_NAKRCV;
35623 
35624-		if (!(i2c->msg->flags & I2C_M_IGNORE_NAK))
35625-			rk3x_i2c_stop(i2c, -ENXIO);
35626+		if (!(i2c->msg->flags & I2C_M_IGNORE_NAK)) {
35627+			if (i2c->autostop_supported) {
35628+				i2c->error = -ENXIO;
35629+				i2c->state = STATE_STOP;
35630+			} else {
35631+				rk3x_i2c_stop(i2c, -ENXIO);
35632+				goto out;
35633+			}
35634+		}
35635 	}
35636 
35637 	/* is there anything left to handle? */
35638@@ -513,9 +622,6 @@ static irqreturn_t rk3x_i2c_irq(int irqno, void *dev_id)
35639 		goto out;
35640 
35641 	switch (i2c->state) {
35642-	case STATE_START:
35643-		rk3x_i2c_handle_start(i2c, ipd);
35644-		break;
35645 	case STATE_WRITE:
35646 		rk3x_i2c_handle_write(i2c, ipd);
35647 		break;
35648@@ -535,10 +641,11 @@ static irqreturn_t rk3x_i2c_irq(int irqno, void *dev_id)
35649 }
35650 
35651 /**
35652- * rk3x_i2c_get_spec - Get timing values of I2C specification
35653+ * Get timing values of I2C specification
35654+ *
35655  * @speed: Desired SCL frequency
35656  *
35657- * Return: Matched i2c_spec_values.
35658+ * Returns: Matched i2c spec values.
35659  */
35660 static const struct i2c_spec_values *rk3x_i2c_get_spec(unsigned int speed)
35661 {
35662@@ -551,12 +658,13 @@ static const struct i2c_spec_values *rk3x_i2c_get_spec(unsigned int speed)
35663 }
35664 
35665 /**
35666- * rk3x_i2c_v0_calc_timings - Calculate divider values for desired SCL frequency
35667+ * Calculate divider values for desired SCL frequency
35668+ *
35669  * @clk_rate: I2C input clock rate
35670  * @t: Known I2C timing information
35671  * @t_calc: Caculated rk3x private timings that would be written into regs
35672  *
35673- * Return: %0 on success, -%EINVAL if the goal SCL rate is too slow. In that case
35674+ * Returns: 0 on success, -EINVAL if the goal SCL rate is too slow. In that case
35675  * a best-effort divider value is returned in divs. If the target rate is
35676  * too high, we silently use the highest possible rate.
35677  */
35678@@ -711,12 +819,13 @@ static int rk3x_i2c_v0_calc_timings(unsigned long clk_rate,
35679 }
35680 
35681 /**
35682- * rk3x_i2c_v1_calc_timings - Calculate timing values for desired SCL frequency
35683+ * Calculate timing values for desired SCL frequency
35684+ *
35685  * @clk_rate: I2C input clock rate
35686  * @t: Known I2C timing information
35687  * @t_calc: Caculated rk3x private timings that would be written into regs
35688  *
35689- * Return: %0 on success, -%EINVAL if the goal SCL rate is too slow. In that case
35690+ * Returns: 0 on success, -EINVAL if the goal SCL rate is too slow. In that case
35691  * a best-effort divider value is returned in divs. If the target rate is
35692  * too high, we silently use the highest possible rate.
35693  * The following formulas are v1's method to calculate timings.
35694@@ -960,14 +1069,14 @@ static int rk3x_i2c_clk_notifier_cb(struct notifier_block *nb, unsigned long
35695 }
35696 
35697 /**
35698- * rk3x_i2c_setup - Setup I2C registers for an I2C operation specified by msgs, num.
35699- * @i2c: target controller data
35700- * @msgs: I2C msgs to process
35701- * @num: Number of msgs
35702+ * Setup I2C registers for an I2C operation specified by msgs, num.
35703  *
35704  * Must be called with i2c->lock held.
35705  *
35706- * Return: Number of I2C msgs processed or negative in case of error
35707+ * @msgs: I2C msgs to process
35708+ * @num: Number of msgs
35709+ *
35710+ * Returns: Number of I2C msgs processed or negative in case of error
35711  */
35712 static int rk3x_i2c_setup(struct rk3x_i2c *i2c, struct i2c_msg *msgs, int num)
35713 {
35714@@ -1032,18 +1141,19 @@ static int rk3x_i2c_setup(struct rk3x_i2c *i2c, struct i2c_msg *msgs, int num)
35715 
35716 	i2c->addr = msgs[0].addr;
35717 	i2c->busy = true;
35718-	i2c->state = STATE_START;
35719 	i2c->processed = 0;
35720 	i2c->error = 0;
35721 
35722 	rk3x_i2c_clean_ipd(i2c);
35723+	if (i2c->autostop_supported)
35724+		i2c_writel(i2c, 0, REG_CON1);
35725 
35726 	return ret;
35727 }
35728 
35729-static int rk3x_i2c_wait_xfer_poll(struct rk3x_i2c *i2c)
35730+static int rk3x_i2c_wait_xfer_poll(struct rk3x_i2c *i2c, unsigned long xfer_time)
35731 {
35732-	ktime_t timeout = ktime_add_ms(ktime_get(), WAIT_TIMEOUT);
35733+	ktime_t timeout = ktime_add_ms(ktime_get(), xfer_time);
35734 
35735 	while (READ_ONCE(i2c->busy) &&
35736 	       ktime_compare(ktime_get(), timeout) < 0) {
35737@@ -1063,6 +1173,9 @@ static int rk3x_i2c_xfer_common(struct i2c_adapter *adap,
35738 	int ret = 0;
35739 	int i;
35740 
35741+	if (i2c->suspended)
35742+		return -EACCES;
35743+
35744 	spin_lock_irqsave(&i2c->lock, flags);
35745 
35746 	clk_enable(i2c->clk);
35747@@ -1075,25 +1188,39 @@ static int rk3x_i2c_xfer_common(struct i2c_adapter *adap,
35748 	 * rk3x_i2c_setup()).
35749 	 */
35750 	for (i = 0; i < num; i += ret) {
35751-		ret = rk3x_i2c_setup(i2c, msgs + i, num - i);
35752+		unsigned long xfer_time = 100;
35753+		int len;
35754 
35755+		ret = rk3x_i2c_setup(i2c, msgs + i, num - i);
35756 		if (ret < 0) {
35757 			dev_err(i2c->dev, "rk3x_i2c_setup() failed\n");
35758 			break;
35759 		}
35760 
35761+		/*
35762+		 * Transfer time in msec = total bits / transfer rate + interval time
35763+		 * Total bits = 9 bits per byte (including the ACK bit) + start & stop bits
35764+		 */
35765+		if (ret == 2)
35766+			len = msgs[i + 1].len;
35767+		else
35768+			len = msgs[i].len;
35769+		xfer_time += len / 64;
35770+		xfer_time += DIV_ROUND_CLOSEST(((len * 9) + 2) * MSEC_PER_SEC,
35771+					       i2c->t.bus_freq_hz);
35772+
35773 		if (i + ret >= num)
35774 			i2c->is_last_msg = true;
35775 
35776-		spin_unlock_irqrestore(&i2c->lock, flags);
35777-
35778 		rk3x_i2c_start(i2c);
35779 
35780+		spin_unlock_irqrestore(&i2c->lock, flags);
35781+
35782 		if (!polling) {
35783 			timeout = wait_event_timeout(i2c->wait, !i2c->busy,
35784-						     msecs_to_jiffies(WAIT_TIMEOUT));
35785+						     msecs_to_jiffies(xfer_time));
35786 		} else {
35787-			timeout = rk3x_i2c_wait_xfer_poll(i2c);
35788+			timeout = rk3x_i2c_wait_xfer_poll(i2c, xfer_time);
35789 		}
35790 
35791 		spin_lock_irqsave(&i2c->lock, flags);
35792@@ -1103,7 +1230,7 @@ static int rk3x_i2c_xfer_common(struct i2c_adapter *adap,
35793 				i2c_readl(i2c, REG_IPD), i2c->state);
35794 
35795 			/* Force a STOP condition without interrupt */
35796-			i2c_writel(i2c, 0, REG_IEN);
35797+			rk3x_i2c_disable_irq(i2c);
35798 			val = i2c_readl(i2c, REG_CON) & REG_CON_TUNING_MASK;
35799 			val |= REG_CON_EN | REG_CON_STOP;
35800 			i2c_writel(i2c, val, REG_CON);
35801@@ -1120,6 +1247,9 @@ static int rk3x_i2c_xfer_common(struct i2c_adapter *adap,
35802 		}
35803 	}
35804 
35805+	rk3x_i2c_disable_irq(i2c);
35806+	rk3x_i2c_disable(i2c);
35807+
35808 	clk_disable(i2c->pclk);
35809 	clk_disable(i2c->clk);
35810 
35811@@ -1140,12 +1270,120 @@ static int rk3x_i2c_xfer_polling(struct i2c_adapter *adap,
35812 	return rk3x_i2c_xfer_common(adap, msgs, num, true);
35813 }
35814 
35815-static __maybe_unused int rk3x_i2c_resume(struct device *dev)
35816+static int rk3x_i2c_restart_notify(struct notifier_block *this,
35817+				   unsigned long mode, void *cmd)
35818+{
35819+	struct rk3x_i2c *i2c = container_of(this, struct rk3x_i2c,
35820+					    i2c_restart_nb);
35821+	int tmo = WAIT_TIMEOUT * USEC_PER_MSEC;
35822+	u32 val;
35823+
35824+	if (i2c->state != STATE_IDLE) {
35825+		i2c->system_restarting = true;
35826+		/* complete the unfinished job */
35827+		while (tmo-- && i2c->busy) {
35828+			udelay(1);
35829+			rk3x_i2c_irq(0, i2c);
35830+		}
35831+	}
35832+
35833+	if (tmo <= 0) {
35834+		dev_err(i2c->dev, "restart timeout, ipd: 0x%02x, state: %d\n",
35835+			i2c_readl(i2c, REG_IPD), i2c->state);
35836+
35837+		/* Force a STOP condition without interrupt */
35838+		i2c_writel(i2c, 0, REG_IEN);
35839+		val = i2c_readl(i2c, REG_CON) & REG_CON_TUNING_MASK;
35840+		val |= REG_CON_EN | REG_CON_STOP;
35841+		i2c_writel(i2c, val, REG_CON);
35842+
35843+		udelay(10);
35844+		i2c->state = STATE_IDLE;
35845+	}
35846+
35847+	return NOTIFY_DONE;
35848+}
35849+
35850+static unsigned int rk3x_i2c_get_version(struct rk3x_i2c *i2c)
35851+{
35852+	unsigned int version;
35853+
35854+	clk_enable(i2c->pclk);
35855+	version = i2c_readl(i2c, REG_CON) & REG_CON_VERSION;
35856+	clk_disable(i2c->pclk);
35857+	version >>= REG_CON_VERSION_SHIFT;
35858+
35859+	return version;
35860+}
35861+
35862+static int rk3x_i2c_of_get_bus_id(struct device *dev, struct rk3x_i2c *priv)
35863+{
35864+	int bus_id = -1;
35865+
35866+	if (IS_ENABLED(CONFIG_OF) && dev->of_node)
35867+		bus_id = of_alias_get_id(dev->of_node, "i2c");
35868+
35869+	return bus_id;
35870+}
35871+
35872+#ifdef CONFIG_ACPI
35873+static int rk3x_i2c_acpi_get_bus_id(struct device *dev, struct rk3x_i2c *priv)
35874+{
35875+	struct acpi_device *adev;
35876+	unsigned long bus_id = -1;
35877+	const char *uid;
35878+	int ret;
35879+
35880+	adev = ACPI_COMPANION(dev);
35881+	if (!adev)
35882+		return -ENXIO;
35883+
35884+	uid = acpi_device_uid(adev);
35885+	if (!uid || !(*uid)) {
35886+		dev_err(dev, "Cannot retrieve UID\n");
35887+		return -ENODEV;
35888+	}
35889+
35890+	ret = kstrtoul(uid, 0, &bus_id);
35891+
35892+	return !ret ? bus_id : -ERANGE;
35893+}
35894+#else
35895+static int rk3x_i2c_acpi_get_bus_id(struct device *dev, struct rk3x_i2c *priv)
35896+{
35897+	return -ENOENT;
35898+}
35899+#endif /* CONFIG_ACPI */
35900+
35901+static __maybe_unused int rk3x_i2c_suspend_noirq(struct device *dev)
35902+{
35903+	struct rk3x_i2c *i2c = dev_get_drvdata(dev);
35904+
35905+	/*
35906+	 * This is only needed to ensure that there is no activity on
35907+	 * the I2C bus: if any driver tries to use the bus right now,
35908+	 * it may run into an i2c timeout.
35909+	 *
35910+	 * So forbid access to the I2C device using the i2c->suspended flag.
35911+	 */
35912+	i2c_lock_bus(&i2c->adap, I2C_LOCK_ROOT_ADAPTER);
35913+	i2c->suspended = 1;
35914+	i2c_unlock_bus(&i2c->adap, I2C_LOCK_ROOT_ADAPTER);
35915+
35916+	return 0;
35917+}
35918+
35919+static __maybe_unused int rk3x_i2c_resume_noirq(struct device *dev)
35920 {
35921 	struct rk3x_i2c *i2c = dev_get_drvdata(dev);
35922 
35923 	rk3x_i2c_adapt_div(i2c, clk_get_rate(i2c->clk));
35924 
35925+	/* Allow access to I2C bus */
35926+	i2c_lock_bus(&i2c->adap, I2C_LOCK_ROOT_ADAPTER);
35927+	i2c->suspended = 0;
35928+	i2c_unlock_bus(&i2c->adap, I2C_LOCK_ROOT_ADAPTER);
35929+
35930 	return 0;
35931 }
35932 
35933@@ -1161,7 +1399,12 @@ static const struct i2c_algorithm rk3x_i2c_algorithm = {
35934 };
35935 
35936 static const struct rk3x_i2c_soc_data rv1108_soc_data = {
35937-	.grf_offset = -1,
35938+	.grf_offset = 0x408,
35939+	.calc_timings = rk3x_i2c_v1_calc_timings,
35940+};
35941+
35942+static const struct rk3x_i2c_soc_data rv1126_soc_data = {
35943+	.grf_offset = 0x118,
35944 	.calc_timings = rk3x_i2c_v1_calc_timings,
35945 };
35946 
35947@@ -1195,6 +1438,10 @@ static const struct of_device_id rk3x_i2c_match[] = {
35948 		.compatible = "rockchip,rv1108-i2c",
35949 		.data = &rv1108_soc_data
35950 	},
35951+	{
35952+		.compatible = "rockchip,rv1126-i2c",
35953+		.data = &rv1126_soc_data
35954+	},
35955 	{
35956 		.compatible = "rockchip,rk3066-i2c",
35957 		.data = &rk3066_soc_data
35958@@ -1219,13 +1466,19 @@ static const struct of_device_id rk3x_i2c_match[] = {
35959 };
35960 MODULE_DEVICE_TABLE(of, rk3x_i2c_match);
35961 
35962+static void rk3x_i2c_tb_cb(void *data)
35963+{
35964+	unsigned int irq = (unsigned long)data;
35965+
35966+	enable_irq(irq);
35967+}
35968+
35969 static int rk3x_i2c_probe(struct platform_device *pdev)
35970 {
35971+	struct fwnode_handle *fw = dev_fwnode(&pdev->dev);
35972 	struct device_node *np = pdev->dev.of_node;
35973-	const struct of_device_id *match;
35974 	struct rk3x_i2c *i2c;
35975 	int ret = 0;
35976-	int bus_nr;
35977 	u32 value;
35978 	int irq;
35979 	unsigned long clk_rate;
35980@@ -1234,8 +1487,16 @@ static int rk3x_i2c_probe(struct platform_device *pdev)
35981 	if (!i2c)
35982 		return -ENOMEM;
35983 
35984-	match = of_match_node(rk3x_i2c_match, np);
35985-	i2c->soc_data = match->data;
35986+	i2c->soc_data = (struct rk3x_i2c_soc_data *)device_get_match_data(&pdev->dev);
35987+
35988+	ret = rk3x_i2c_acpi_get_bus_id(&pdev->dev, i2c);
35989+	if (ret < 0) {
35990+		ret = rk3x_i2c_of_get_bus_id(&pdev->dev, i2c);
35991+		if (ret < 0)
35992+			return ret;
35993+	}
35994+
35995+	i2c->adap.nr = ret;
35996 
35997 	/* use common interface to get I2C timing properties */
35998 	i2c_parse_fw_timings(&pdev->dev, &i2c->t, true);
35999@@ -1244,22 +1505,28 @@ static int rk3x_i2c_probe(struct platform_device *pdev)
36000 	i2c->adap.owner = THIS_MODULE;
36001 	i2c->adap.algo = &rk3x_i2c_algorithm;
36002 	i2c->adap.retries = 3;
36003-	i2c->adap.dev.of_node = np;
36004+	i2c->adap.dev.of_node = pdev->dev.of_node;
36005 	i2c->adap.algo_data = i2c;
36006 	i2c->adap.dev.parent = &pdev->dev;
36007+	i2c->adap.dev.fwnode = fw;
36008 
36009 	i2c->dev = &pdev->dev;
36010 
36011 	spin_lock_init(&i2c->lock);
36012 	init_waitqueue_head(&i2c->wait);
36013 
36014+	i2c->i2c_restart_nb.notifier_call = rk3x_i2c_restart_notify;
36015+	i2c->i2c_restart_nb.priority = 128;
36016+	ret = register_pre_restart_handler(&i2c->i2c_restart_nb);
36017+	if (ret) {
36018+		dev_err(&pdev->dev, "failed to setup i2c restart handler.\n");
36019+		return ret;
36020+	}
36021+
36022 	i2c->regs = devm_platform_ioremap_resource(pdev, 0);
36023 	if (IS_ERR(i2c->regs))
36024 		return PTR_ERR(i2c->regs);
36025 
36026-	/* Try to set the I2C adapter number from dt */
36027-	bus_nr = of_alias_get_id(np, "i2c");
36028-
36029 	/*
36030 	 * Switch to new interface if the SoC also offers the old one.
36031 	 * The control bit is located in the GRF register space.
36032@@ -1268,24 +1535,27 @@ static int rk3x_i2c_probe(struct platform_device *pdev)
36033 		struct regmap *grf;
36034 
36035 		grf = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
36036-		if (IS_ERR(grf)) {
36037-			dev_err(&pdev->dev,
36038-				"rk3x-i2c needs 'rockchip,grf' property\n");
36039-			return PTR_ERR(grf);
36040-		}
36041-
36042-		if (bus_nr < 0) {
36043-			dev_err(&pdev->dev, "rk3x-i2c needs i2cX alias");
36044-			return -EINVAL;
36045-		}
36046-
36047-		/* 27+i: write mask, 11+i: value */
36048-		value = BIT(27 + bus_nr) | BIT(11 + bus_nr);
36049-
36050-		ret = regmap_write(grf, i2c->soc_data->grf_offset, value);
36051-		if (ret != 0) {
36052-			dev_err(i2c->dev, "Could not write to GRF: %d\n", ret);
36053-			return ret;
36054+		if (!IS_ERR(grf)) {
36055+			int bus_nr = i2c->adap.nr;
36056+
36057+			if (i2c->soc_data == &rv1108_soc_data && bus_nr == 2)
36058+				/* rv1108 i2c2 set grf offset-0x408, bit-10 */
36059+				value = BIT(26) | BIT(10);
36060+			else if (i2c->soc_data == &rv1126_soc_data &&
36061+				 bus_nr == 2)
36062+				/* rv1126 i2c2 set pmugrf offset-0x118, bit-4 */
36063+				value = BIT(20) | BIT(4);
36064+			else
36065+				/* rk3xxx 27+i: write mask, 11+i: value */
36066+				value = BIT(27 + bus_nr) | BIT(11 + bus_nr);
36067+
36068+			ret = regmap_write(grf, i2c->soc_data->grf_offset,
36069+					   value);
36070+			if (ret != 0) {
36071+				dev_err(i2c->dev, "Could not write to GRF: %d\n",
36072+					ret);
36073+				return ret;
36074+			}
36075 		}
36076 	}
36077 
36078@@ -1294,6 +1564,13 @@ static int rk3x_i2c_probe(struct platform_device *pdev)
36079 	if (irq < 0)
36080 		return irq;
36081 
36082+	if (IS_ENABLED(CONFIG_ROCKCHIP_THUNDER_BOOT_SERVICE) &&
36083+	    device_property_read_bool(&pdev->dev, "rockchip,amp-shared")) {
36084+		i2c->tb_cl.data = (void *)(unsigned long)irq;
36085+		i2c->tb_cl.cb = rk3x_i2c_tb_cb;
36086+		irq_set_status_flags(irq, IRQ_NOAUTOEN);
36087+	}
36088+
36089 	ret = devm_request_irq(&pdev->dev, irq, rk3x_i2c_irq,
36090 			       0, dev_name(&pdev->dev), i2c);
36091 	if (ret < 0) {
36092@@ -1301,24 +1578,29 @@ static int rk3x_i2c_probe(struct platform_device *pdev)
36093 		return ret;
36094 	}
36095 
36096+	if (IS_ENABLED(CONFIG_ROCKCHIP_THUNDER_BOOT_SERVICE) && i2c->tb_cl.cb)
36097+		rk_tb_client_register_cb(&i2c->tb_cl);
36098+
36099 	platform_set_drvdata(pdev, i2c);
36100 
36101-	if (i2c->soc_data->calc_timings == rk3x_i2c_v0_calc_timings) {
36102-		/* Only one clock to use for bus clock and peripheral clock */
36103-		i2c->clk = devm_clk_get(&pdev->dev, NULL);
36104-		i2c->pclk = i2c->clk;
36105-	} else {
36106-		i2c->clk = devm_clk_get(&pdev->dev, "i2c");
36107-		i2c->pclk = devm_clk_get(&pdev->dev, "pclk");
36108-	}
36109+	if (!has_acpi_companion(&pdev->dev)) {
36110+		if (i2c->soc_data->calc_timings == rk3x_i2c_v0_calc_timings) {
36111+			/* Only one clock to use for bus clock and peripheral clock */
36112+			i2c->clk = devm_clk_get(&pdev->dev, NULL);
36113+			i2c->pclk = i2c->clk;
36114+		} else {
36115+			i2c->clk = devm_clk_get(&pdev->dev, "i2c");
36116+			i2c->pclk = devm_clk_get(&pdev->dev, "pclk");
36117+		}
36118 
36119-	if (IS_ERR(i2c->clk))
36120-		return dev_err_probe(&pdev->dev, PTR_ERR(i2c->clk),
36121-				     "Can't get bus clk\n");
36122+		if (IS_ERR(i2c->clk))
36123+			return dev_err_probe(&pdev->dev, PTR_ERR(i2c->clk),
36124+					     "Can't get bus clk\n");
36125 
36126-	if (IS_ERR(i2c->pclk))
36127-		return dev_err_probe(&pdev->dev, PTR_ERR(i2c->pclk),
36128-				     "Can't get periph clk\n");
36129+		if (IS_ERR(i2c->pclk))
36130+			return dev_err_probe(&pdev->dev, PTR_ERR(i2c->pclk),
36131+					     "Can't get periph clk\n");
36132+	}
36133 
36134 	ret = clk_prepare(i2c->clk);
36135 	if (ret < 0) {
36136@@ -1331,17 +1613,25 @@ static int rk3x_i2c_probe(struct platform_device *pdev)
36137 		goto err_clk;
36138 	}
36139 
36140-	i2c->clk_rate_nb.notifier_call = rk3x_i2c_clk_notifier_cb;
36141-	ret = clk_notifier_register(i2c->clk, &i2c->clk_rate_nb);
36142-	if (ret != 0) {
36143-		dev_err(&pdev->dev, "Unable to register clock notifier\n");
36144-		goto err_pclk;
36145+	if (i2c->clk) {
36146+		i2c->clk_rate_nb.notifier_call = rk3x_i2c_clk_notifier_cb;
36147+		ret = clk_notifier_register(i2c->clk, &i2c->clk_rate_nb);
36148+		if (ret != 0) {
36149+			dev_err(&pdev->dev, "Unable to register clock notifier\n");
36150+			goto err_pclk;
36151+		}
36152 	}
36153 
36154 	clk_rate = clk_get_rate(i2c->clk);
36155+	if (!clk_rate)
36156+		device_property_read_u32(&pdev->dev, "i2c,clk-rate", (u32 *)&clk_rate);
36157+
36158 	rk3x_i2c_adapt_div(i2c, clk_rate);
36159 
36160-	ret = i2c_add_adapter(&i2c->adap);
36161+	if (rk3x_i2c_get_version(i2c) >= RK_I2C_VERSION5)
36162+		i2c->autostop_supported = true;
36163+
36164+	ret = i2c_add_numbered_adapter(&i2c->adap);
36165 	if (ret < 0)
36166 		goto err_clk_notifier;
36167 
36168@@ -1363,13 +1653,17 @@ static int rk3x_i2c_remove(struct platform_device *pdev)
36169 	i2c_del_adapter(&i2c->adap);
36170 
36171 	clk_notifier_unregister(i2c->clk, &i2c->clk_rate_nb);
36172+	unregister_pre_restart_handler(&i2c->i2c_restart_nb);
36173 	clk_unprepare(i2c->pclk);
36174 	clk_unprepare(i2c->clk);
36175 
36176 	return 0;
36177 }
36178 
36179-static SIMPLE_DEV_PM_OPS(rk3x_i2c_pm_ops, NULL, rk3x_i2c_resume);
36180+static const struct dev_pm_ops rk3x_i2c_pm_ops = {
36181+	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(rk3x_i2c_suspend_noirq,
36182+				      rk3x_i2c_resume_noirq)
36183+};
36184 
36185 static struct platform_driver rk3x_i2c_driver = {
36186 	.probe   = rk3x_i2c_probe,
36187@@ -1381,7 +1675,25 @@ static struct platform_driver rk3x_i2c_driver = {
36188 	},
36189 };
36190 
36191+#ifdef CONFIG_ROCKCHIP_THUNDER_BOOT
36192+static int __init rk3x_i2c_driver_init(void)
36193+{
36194+	return platform_driver_register(&rk3x_i2c_driver);
36195+}
36196+#ifdef CONFIG_INITCALL_ASYNC
36197+subsys_initcall_sync(rk3x_i2c_driver_init);
36198+#else
36199+subsys_initcall(rk3x_i2c_driver_init);
36200+#endif
36201+
36202+static void __exit rk3x_i2c_driver_exit(void)
36203+{
36204+	platform_driver_unregister(&rk3x_i2c_driver);
36205+}
36206+module_exit(rk3x_i2c_driver_exit);
36207+#else
36208 module_platform_driver(rk3x_i2c_driver);
36209+#endif
36210 
36211 MODULE_DESCRIPTION("Rockchip RK3xxx I2C Bus driver");
36212 MODULE_AUTHOR("Max Schwarz <max.schwarz@online.de>");
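The per-message timeout computed in rk3x_i2c_xfer_common() above replaces the fixed WAIT_TIMEOUT with a value that scales with the message: a 100 ms base, a small FIFO-refill allowance (len / 64), and the time needed for 9 bits per byte plus start/stop bits at the configured SCL rate. A standalone restatement of that arithmetic for sanity-checking expected timeouts; this is a userspace sketch, not part of the driver:

#include <stdio.h>

/* Mirrors the xfer_time computation above, with DIV_ROUND_CLOSEST written out. */
static unsigned long rk3x_xfer_time_ms(unsigned long len, unsigned long bus_freq_hz)
{
	return 100 + len / 64 +
	       ((len * 9 + 2) * 1000 + bus_freq_hz / 2) / bus_freq_hz;
}

int main(void)
{
	/* A 32-byte message at 100 kHz waits ~103 ms; 1000 bytes at 400 kHz, ~138 ms. */
	printf("%lu %lu\n", rk3x_xfer_time_ms(32, 100000),
	       rk3x_xfer_time_ms(1000, 400000));
	return 0;
}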
36213diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
36214index 34fecf97a..d041b7fa5 100644
36215--- a/drivers/i2c/i2c-core-base.c
36216+++ b/drivers/i2c/i2c-core-base.c
36217@@ -61,6 +61,7 @@
36218 static DEFINE_MUTEX(core_lock);
36219 static DEFINE_IDR(i2c_adapter_idr);
36220 
36221+static int i2c_check_addr_ex(struct i2c_adapter *adapter, int addr);
36222 static int i2c_detect(struct i2c_adapter *adapter, struct i2c_driver *driver);
36223 
36224 static DEFINE_STATIC_KEY_FALSE(i2c_trace_msg_key);
36225@@ -808,7 +809,8 @@ static void i2c_adapter_unlock_bus(struct i2c_adapter *adapter,
36226 
36227 static void i2c_dev_set_name(struct i2c_adapter *adap,
36228 			     struct i2c_client *client,
36229-			     struct i2c_board_info const *info)
36230+			     struct i2c_board_info const *info,
36231+			     int status)
36232 {
36233 	struct acpi_device *adev = ACPI_COMPANION(&client->dev);
36234 
36235@@ -822,8 +824,12 @@ static void i2c_dev_set_name(struct i2c_adapter *adap,
36236 		return;
36237 	}
36238 
36239-	dev_set_name(&client->dev, "%d-%04x", i2c_adapter_id(adap),
36240-		     i2c_encode_flags_to_addr(client));
36241+	if (status == 0)
36242+		dev_set_name(&client->dev, "%d-%04x", i2c_adapter_id(adap),
36243+			i2c_encode_flags_to_addr(client));
36244+	else
36245+		dev_set_name(&client->dev, "%d-%04x-%01x", i2c_adapter_id(adap),
36246+			i2c_encode_flags_to_addr(client), status);
36247 }
36248 
36249 int i2c_dev_irq_from_resources(const struct resource *resources,
36250@@ -899,9 +905,11 @@ i2c_new_client_device(struct i2c_adapter *adap, struct i2c_board_info const *inf
36251 	}
36252 
36253 	/* Check for address business */
36254-	status = i2c_check_addr_busy(adap, i2c_encode_flags_to_addr(client));
36255+	status = i2c_check_addr_ex(adap, i2c_encode_flags_to_addr(client));
36256 	if (status)
36257-		goto out_err;
36258+		dev_err(&adap->dev,
36259+			"%d i2c clients have been registered at 0x%02x\n",
36260+			status, client->addr);
36261 
36262 	client->dev.parent = &client->adapter->dev;
36263 	client->dev.bus = &i2c_bus_type;
36264@@ -909,7 +917,7 @@ i2c_new_client_device(struct i2c_adapter *adap, struct i2c_board_info const *inf
36265 	client->dev.of_node = of_node_get(info->of_node);
36266 	client->dev.fwnode = info->fwnode;
36267 
36268-	i2c_dev_set_name(adap, client, info);
36269+	i2c_dev_set_name(adap, client, info, status);
36270 
36271 	if (info->properties) {
36272 		status = device_add_properties(&client->dev, info->properties);
36273@@ -935,10 +943,6 @@ i2c_new_client_device(struct i2c_adapter *adap, struct i2c_board_info const *inf
36274 		device_remove_properties(&client->dev);
36275 out_err_put_of_node:
36276 	of_node_put(info->of_node);
36277-out_err:
36278-	dev_err(&adap->dev,
36279-		"Failed to register i2c client %s at 0x%02x (%d)\n",
36280-		client->name, client->addr, status);
36281 out_err_silent:
36282 	kfree(client);
36283 	return ERR_PTR(status);
36284@@ -1838,6 +1842,33 @@ EXPORT_SYMBOL(i2c_del_driver);
36285 
36286 /* ------------------------------------------------------------------------- */
36287 
36288+struct i2c_addr_cnt {
36289+	int addr;
36290+	int cnt;
36291+};
36292+
36293+static int __i2c_check_addr_ex(struct device *dev, void *addrp)
36294+{
36295+	struct i2c_client *client = i2c_verify_client(dev);
36296+	struct i2c_addr_cnt *addrinfo = (struct i2c_addr_cnt *)addrp;
36297+	int addr = addrinfo->addr;
36298+
36299+	if (client && client->addr == addr)
36300+		addrinfo->cnt++;
36301+
36302+	return 0;
36303+}
36304+
36305+static int i2c_check_addr_ex(struct i2c_adapter *adapter, int addr)
36306+{
36307+	struct i2c_addr_cnt addrinfo;
36308+
36309+	addrinfo.addr = addr;
36310+	addrinfo.cnt = 0;
36311+	device_for_each_child(&adapter->dev, &addrinfo, __i2c_check_addr_ex);
36312+	return addrinfo.cnt;
36313+}
36314+
36315 struct i2c_cmd_arg {
36316 	unsigned	cmd;
36317 	void		*arg;
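With the core change above, registering a second client at an already-used address is no longer a hard failure: i2c_check_addr_ex() counts the clients already present and i2c_dev_set_name() appends that count so the device names stay unique. A small userspace sketch of the resulting names, reusing the format strings from the patch:

#include <stdio.h>

static void print_client_name(int adap_id, unsigned int addr, int status)
{
	if (status == 0)
		printf("%d-%04x\n", adap_id, addr);		/* first client */
	else
		printf("%d-%04x-%01x\n", adap_id, addr, status);	/* duplicates */
}

int main(void)
{
	print_client_name(1, 0x50, 0);	/* "1-0050" */
	print_client_name(1, 0x50, 1);	/* "1-0050-1" */
	return 0;
}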
36318diff --git a/drivers/iio/adc/rockchip_saradc.c b/drivers/iio/adc/rockchip_saradc.c
36319index 12584f163..03cc336bf 100644
36320--- a/drivers/iio/adc/rockchip_saradc.c
36321+++ b/drivers/iio/adc/rockchip_saradc.c
36322@@ -35,12 +35,31 @@
36323 #define SARADC_DLY_PU_SOC_MASK		0x3f
36324 
36325 #define SARADC_TIMEOUT			msecs_to_jiffies(100)
36326-#define SARADC_MAX_CHANNELS		6
36327+#define SARADC_MAX_CHANNELS		8
36328+
36329+/* v2 registers */
36330+#define SARADC2_CONV_CON		0x0
36331+#define SARADC_T_PD_SOC			0x4
36332+#define SARADC_T_DAS_SOC		0xc
36333+#define SARADC2_END_INT_EN		0x104
36334+#define SARADC2_ST_CON			0x108
36335+#define SARADC2_STATUS			0x10c
36336+#define SARADC2_END_INT_ST		0x110
36337+#define SARADC2_DATA_BASE		0x120
36338+
36339+#define SARADC2_EN_END_INT		BIT(0)
36340+#define SARADC2_START			BIT(4)
36341+#define SARADC2_SINGLE_MODE		BIT(5)
36342+
36343+struct rockchip_saradc;
36344 
36345 struct rockchip_saradc_data {
36346 	const struct iio_chan_spec	*channels;
36347 	int				num_channels;
36348 	unsigned long			clk_rate;
36349+	void (*start)(struct rockchip_saradc *info, int chn);
36350+	int (*read)(struct rockchip_saradc *info);
36351+	void (*power_down)(struct rockchip_saradc *info);
36352 };
36353 
36354 struct rockchip_saradc {
36355@@ -49,33 +68,99 @@ struct rockchip_saradc {
36356 	struct clk		*clk;
36357 	struct completion	completion;
36358 	struct regulator	*vref;
36359+	int			uv_vref;
36360 	struct reset_control	*reset;
36361 	const struct rockchip_saradc_data *data;
36362 	u16			last_val;
36363 	const struct iio_chan_spec *last_chan;
36364+	bool			suspended;
36365+#ifdef CONFIG_ROCKCHIP_SARADC_TEST_CHN
36366+	bool			test;
36367+	u32			chn;
36368+	spinlock_t		lock;
36369+	struct workqueue_struct *wq;
36370+	struct delayed_work	work;
36371+#endif
36372 };
36373 
36374-static void rockchip_saradc_power_down(struct rockchip_saradc *info)
36375+static void rockchip_saradc_reset_controller(struct reset_control *reset);
36376+
36377+static void rockchip_saradc_start_v1(struct rockchip_saradc *info,
36378+					int chn)
36379+{
36380+	/* 8 clock periods as delay between power up and start cmd */
36381+	writel_relaxed(8, info->regs + SARADC_DLY_PU_SOC);
36382+	/* Select the channel to be used and trigger conversion */
36383+	writel(SARADC_CTRL_POWER_CTRL | (chn & SARADC_CTRL_CHN_MASK) |
36384+	       SARADC_CTRL_IRQ_ENABLE, info->regs + SARADC_CTRL);
36385+}
36386+
36387+static void rockchip_saradc_start_v2(struct rockchip_saradc *info,
36388+					int chn)
36389+{
36390+	int val;
36391+
36392+	/* If any other channel is read in the meantime, channel 1 returns
36393+	 * wrong data; assert the controller reset as a workaround.
36394+	 */
36395+	if (info->reset)
36396+		rockchip_saradc_reset_controller(info->reset);
36397+
36398+	writel_relaxed(0xc, info->regs + SARADC_T_DAS_SOC);
36399+	writel_relaxed(0x20, info->regs + SARADC_T_PD_SOC);
36400+	val = SARADC2_EN_END_INT << 16 | SARADC2_EN_END_INT;
36401+	writel_relaxed(val, info->regs + SARADC2_END_INT_EN);
36402+	val = SARADC2_START | SARADC2_SINGLE_MODE | chn;
36403+	writel(val << 16 | val, info->regs + SARADC2_CONV_CON);
36404+}
36405+
36406+static void rockchip_saradc_start(struct rockchip_saradc *info,
36407+					int chn)
36408+{
36409+	info->data->start(info, chn);
36410+}
36411+
36412+static int rockchip_saradc_read_v1(struct rockchip_saradc *info)
36413+{
36414+	return readl_relaxed(info->regs + SARADC_DATA);
36415+}
36416+
36417+static int rockchip_saradc_read_v2(struct rockchip_saradc *info)
36418+{
36419+	int offset;
36420+
36421+	/* Clear irq */
36422+	writel_relaxed(0x1, info->regs + SARADC2_END_INT_ST);
36423+
36424+	offset = SARADC2_DATA_BASE + info->last_chan->channel * 0x4;
36425+
36426+	return readl_relaxed(info->regs + offset);
36427+}
36428+
36429+static int rockchip_saradc_read(struct rockchip_saradc *info)
36430+{
36431+	return info->data->read(info);
36432+}
36433+
36434+static void rockchip_saradc_power_down_v1(struct rockchip_saradc *info)
36435 {
36436-	/* Clear irq & power down adc */
36437 	writel_relaxed(0, info->regs + SARADC_CTRL);
36438 }
36439 
36440+static void rockchip_saradc_power_down(struct rockchip_saradc *info)
36441+{
36442+	if (info->data->power_down)
36443+		info->data->power_down(info);
36444+}
36445+
36446 static int rockchip_saradc_conversion(struct rockchip_saradc *info,
36447 				   struct iio_chan_spec const *chan)
36448 {
36449 	reinit_completion(&info->completion);
36450 
36451-	/* 8 clock periods as delay between power up and start cmd */
36452-	writel_relaxed(8, info->regs + SARADC_DLY_PU_SOC);
36453-
36454+	/* prevent the ISR from seeing a NULL last_chan */
36455 	info->last_chan = chan;
36456-
36457-	/* Select the channel to be used and trigger conversion */
36458-	writel(SARADC_CTRL_POWER_CTRL
36459-			| (chan->channel & SARADC_CTRL_CHN_MASK)
36460-			| SARADC_CTRL_IRQ_ENABLE,
36461-		   info->regs + SARADC_CTRL);
36462+	rockchip_saradc_start(info, chan->channel);
36463 
36464 	if (!wait_for_completion_timeout(&info->completion, SARADC_TIMEOUT))
36465 		return -ETIMEDOUT;
36466@@ -90,10 +175,19 @@ static int rockchip_saradc_read_raw(struct iio_dev *indio_dev,
36467 	struct rockchip_saradc *info = iio_priv(indio_dev);
36468 	int ret;
36469 
36470+#ifdef CONFIG_ROCKCHIP_SARADC_TEST_CHN
36471+	if (info->test)
36472+		return 0;
36473+#endif
36474 	switch (mask) {
36475 	case IIO_CHAN_INFO_RAW:
36476 		mutex_lock(&indio_dev->mlock);
36477 
36478+		if (info->suspended) {
36479+			mutex_unlock(&indio_dev->mlock);
36480+			return -EBUSY;
36481+		}
36482+
36483 		ret = rockchip_saradc_conversion(info, chan);
36484 		if (ret) {
36485 			rockchip_saradc_power_down(info);
36486@@ -105,13 +199,11 @@ static int rockchip_saradc_read_raw(struct iio_dev *indio_dev,
36487 		mutex_unlock(&indio_dev->mlock);
36488 		return IIO_VAL_INT;
36489 	case IIO_CHAN_INFO_SCALE:
36490-		ret = regulator_get_voltage(info->vref);
36491-		if (ret < 0) {
36492-			dev_err(&indio_dev->dev, "failed to get voltage\n");
36493-			return ret;
36494-		}
36495+		/* uv_vref < 0 means vref is a dummy regulator with no known voltage */
36496+		if (info->uv_vref < 0)
36497+			return info->uv_vref;
36498 
36499-		*val = ret / 1000;
36500+		*val = info->uv_vref / 1000;
36501 		*val2 = chan->scan_type.realbits;
36502 		return IIO_VAL_FRACTIONAL_LOG2;
36503 	default:
36504@@ -122,15 +214,25 @@ static int rockchip_saradc_read_raw(struct iio_dev *indio_dev,
36505 static irqreturn_t rockchip_saradc_isr(int irq, void *dev_id)
36506 {
36507 	struct rockchip_saradc *info = dev_id;
36508+#ifdef CONFIG_ROCKCHIP_SARADC_TEST_CHN
36509+	unsigned long flags;
36510+#endif
36511 
36512 	/* Read value */
36513-	info->last_val = readl_relaxed(info->regs + SARADC_DATA);
36514+	info->last_val = rockchip_saradc_read(info);
36515 	info->last_val &= GENMASK(info->last_chan->scan_type.realbits - 1, 0);
36516 
36517 	rockchip_saradc_power_down(info);
36518 
36519 	complete(&info->completion);
36520-
36521+#ifdef CONFIG_ROCKCHIP_SARADC_TEST_CHN
36522+	spin_lock_irqsave(&info->lock, flags);
36523+	if (info->test) {
36524+		pr_info("chn[%d] val = %d\n", info->chn, info->last_val);
36525+		mod_delayed_work(info->wq, &info->work, msecs_to_jiffies(100));
36526+	}
36527+	spin_unlock_irqrestore(&info->lock, flags);
36528+#endif
36529 	return IRQ_HANDLED;
36530 }
36531 
36532@@ -164,6 +266,9 @@ static const struct rockchip_saradc_data saradc_data = {
36533 	.channels = rockchip_saradc_iio_channels,
36534 	.num_channels = ARRAY_SIZE(rockchip_saradc_iio_channels),
36535 	.clk_rate = 1000000,
36536+	.start = rockchip_saradc_start_v1,
36537+	.read = rockchip_saradc_read_v1,
36538+	.power_down = rockchip_saradc_power_down_v1,
36539 };
36540 
36541 static const struct iio_chan_spec rockchip_rk3066_tsadc_iio_channels[] = {
36542@@ -175,6 +280,9 @@ static const struct rockchip_saradc_data rk3066_tsadc_data = {
36543 	.channels = rockchip_rk3066_tsadc_iio_channels,
36544 	.num_channels = ARRAY_SIZE(rockchip_rk3066_tsadc_iio_channels),
36545 	.clk_rate = 50000,
36546+	.start = rockchip_saradc_start_v1,
36547+	.read = rockchip_saradc_read_v1,
36548+	.power_down = rockchip_saradc_power_down_v1,
36549 };
36550 
36551 static const struct iio_chan_spec rockchip_rk3399_saradc_iio_channels[] = {
36552@@ -190,6 +298,48 @@ static const struct rockchip_saradc_data rk3399_saradc_data = {
36553 	.channels = rockchip_rk3399_saradc_iio_channels,
36554 	.num_channels = ARRAY_SIZE(rockchip_rk3399_saradc_iio_channels),
36555 	.clk_rate = 1000000,
36556+	.start = rockchip_saradc_start_v1,
36557+	.read = rockchip_saradc_read_v1,
36558+	.power_down = rockchip_saradc_power_down_v1,
36559+};
36560+
36561+static const struct iio_chan_spec rockchip_rk3568_saradc_iio_channels[] = {
36562+	SARADC_CHANNEL(0, "adc0", 10),
36563+	SARADC_CHANNEL(1, "adc1", 10),
36564+	SARADC_CHANNEL(2, "adc2", 10),
36565+	SARADC_CHANNEL(3, "adc3", 10),
36566+	SARADC_CHANNEL(4, "adc4", 10),
36567+	SARADC_CHANNEL(5, "adc5", 10),
36568+	SARADC_CHANNEL(6, "adc6", 10),
36569+	SARADC_CHANNEL(7, "adc7", 10),
36570+};
36571+
36572+static const struct rockchip_saradc_data rk3568_saradc_data = {
36573+	.channels = rockchip_rk3568_saradc_iio_channels,
36574+	.num_channels = ARRAY_SIZE(rockchip_rk3568_saradc_iio_channels),
36575+	.clk_rate = 1000000,
36576+	.start = rockchip_saradc_start_v1,
36577+	.read = rockchip_saradc_read_v1,
36578+	.power_down = rockchip_saradc_power_down_v1,
36579+};
36580+
36581+static const struct iio_chan_spec rockchip_rk3588_saradc_iio_channels[] = {
36582+	SARADC_CHANNEL(0, "adc0", 12),
36583+	SARADC_CHANNEL(1, "adc1", 12),
36584+	SARADC_CHANNEL(2, "adc2", 12),
36585+	SARADC_CHANNEL(3, "adc3", 12),
36586+	SARADC_CHANNEL(4, "adc4", 12),
36587+	SARADC_CHANNEL(5, "adc5", 12),
36588+	SARADC_CHANNEL(6, "adc6", 12),
36589+	SARADC_CHANNEL(7, "adc7", 12),
36590+};
36591+
36592+static const struct rockchip_saradc_data rk3588_saradc_data = {
36593+	.channels = rockchip_rk3588_saradc_iio_channels,
36594+	.num_channels = ARRAY_SIZE(rockchip_rk3588_saradc_iio_channels),
36595+	.clk_rate = 1000000,
36596+	.start = rockchip_saradc_start_v2,
36597+	.read = rockchip_saradc_read_v2,
36598 };
36599 
36600 static const struct of_device_id rockchip_saradc_match[] = {
36601@@ -202,6 +352,12 @@ static const struct of_device_id rockchip_saradc_match[] = {
36602 	}, {
36603 		.compatible = "rockchip,rk3399-saradc",
36604 		.data = &rk3399_saradc_data,
36605+	}, {
36606+		.compatible = "rockchip,rk3568-saradc",
36607+		.data = &rk3568_saradc_data,
36608+	}, {
36609+		.compatible = "rockchip,rk3588-saradc",
36610+		.data = &rk3588_saradc_data,
36611 	},
36612 	{},
36613 };
36614@@ -278,6 +434,75 @@ static irqreturn_t rockchip_saradc_trigger_handler(int irq, void *p)
36615 	return IRQ_HANDLED;
36616 }
36617 
36618+#ifdef CONFIG_ROCKCHIP_SARADC_TEST_CHN
36619+static ssize_t saradc_test_chn_store(struct device *dev,
36620+			struct device_attribute *attr,
36621+			const char *buf, size_t size)
36622+{
36623+	u32 val = 0;
36624+	int err;
36625+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
36626+	struct rockchip_saradc *info = iio_priv(indio_dev);
36627+	unsigned long flags;
36628+
36629+	err = kstrtou32(buf, 10, &val);
36630+	if (err)
36631+		return err;
36632+
36633+	spin_lock_irqsave(&info->lock, flags);
36634+
36635+	if (val > SARADC_CTRL_CHN_MASK && info->test) {
36636+		info->test = false;
36637+		spin_unlock_irqrestore(&info->lock, flags);
36638+		cancel_delayed_work_sync(&info->work);
36639+		return size;
36640+	}
36641+
36642+	if (!info->test && val < SARADC_CTRL_CHN_MASK) {
36643+		info->test = true;
36644+		info->chn = val;
36645+		mod_delayed_work(info->wq, &info->work, msecs_to_jiffies(100));
36646+	}
36647+
36648+	spin_unlock_irqrestore(&info->lock, flags);
36649+
36650+	return size;
36651+}
36652+
36653+static DEVICE_ATTR_WO(saradc_test_chn);
36654+
36655+static struct attribute *saradc_attrs[] = {
36656+	&dev_attr_saradc_test_chn.attr,
36657+	NULL
36658+};
36659+
36660+static const struct attribute_group rockchip_saradc_attr_group = {
36661+	.attrs = saradc_attrs,
36662+};
36663+
36664+static void rockchip_saradc_remove_sysgroup(void *data)
36665+{
36666+	struct platform_device *pdev = data;
36667+
36668+	sysfs_remove_group(&pdev->dev.kobj, &rockchip_saradc_attr_group);
36669+}
36670+
36671+static void rockchip_saradc_destroy_wq(void *data)
36672+{
36673+	struct rockchip_saradc *info = data;
36674+
36675+	destroy_workqueue(info->wq);
36676+}
36677+
36678+static void rockchip_saradc_test_work(struct work_struct *work)
36679+{
36680+	struct rockchip_saradc *info = container_of(work,
36681+					struct rockchip_saradc, work.work);
36682+
36683+	rockchip_saradc_start(info, info->chn);
36684+}
36685+#endif
36686+
36687 static int rockchip_saradc_probe(struct platform_device *pdev)
36688 {
36689 	struct rockchip_saradc *info = NULL;
36690@@ -390,6 +615,13 @@ static int rockchip_saradc_probe(struct platform_device *pdev)
36691 		return ret;
36692 	}
36693 
36694+	info->uv_vref = regulator_get_voltage(info->vref);
36695+	if (info->uv_vref < 0) {
36696+		dev_err(&pdev->dev, "failed to get voltage\n");
36697+		ret = info->uv_vref;
36698+		return ret;
36699+	}
36700+
36701 	ret = clk_prepare_enable(info->pclk);
36702 	if (ret < 0) {
36703 		dev_err(&pdev->dev, "failed to enable pclk\n");
36704@@ -430,6 +662,30 @@ static int rockchip_saradc_probe(struct platform_device *pdev)
36705 	if (ret)
36706 		return ret;
36707 
36708+#ifdef CONFIG_ROCKCHIP_SARADC_TEST_CHN
36709+	info->wq = create_singlethread_workqueue("adc_wq");
36710+	INIT_DELAYED_WORK(&info->work, rockchip_saradc_test_work);
36711+	spin_lock_init(&info->lock);
36712+	ret = sysfs_create_group(&pdev->dev.kobj, &rockchip_saradc_attr_group);
36713+	if (ret)
36714+		return ret;
36715+
36716+	ret = devm_add_action_or_reset(&pdev->dev,
36717+				       rockchip_saradc_remove_sysgroup, pdev);
36718+	if (ret) {
36719+		dev_err(&pdev->dev, "failed to register devm action, %d\n",
36720+			ret);
36721+		return ret;
36722+	}
36723+
36724+	ret = devm_add_action_or_reset(&pdev->dev,
36725+				       rockchip_saradc_destroy_wq, info);
36726+	if (ret) {
36727+		dev_err(&pdev->dev, "failed to register destroy_wq, %d\n",
36728+			ret);
36729+		return ret;
36730+	}
36731+#endif
36732 	return devm_iio_device_register(&pdev->dev, indio_dev);
36733 }
36734 
36735@@ -439,10 +695,16 @@ static int rockchip_saradc_suspend(struct device *dev)
36736 	struct iio_dev *indio_dev = dev_get_drvdata(dev);
36737 	struct rockchip_saradc *info = iio_priv(indio_dev);
36738 
36739+	/* Avoid reading saradc when suspending */
36740+	mutex_lock(&indio_dev->mlock);
36741+
36742 	clk_disable_unprepare(info->clk);
36743 	clk_disable_unprepare(info->pclk);
36744 	regulator_disable(info->vref);
36745 
36746+	info->suspended = true;
36747+	mutex_unlock(&indio_dev->mlock);
36748+
36749 	return 0;
36750 }
36751 
36752@@ -464,6 +726,8 @@ static int rockchip_saradc_resume(struct device *dev)
36753 	if (ret)
36754 		clk_disable_unprepare(info->pclk);
36755 
36756+	info->suspended = false;
36757+
36758 	return ret;
36759 }
36760 #endif
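
Note on the scale reported above: with IIO_VAL_FRACTIONAL_LOG2, *val = uv_vref / 1000 (millivolts) and *val2 = realbits, so the effective scale is (vref in mV) / 2^realbits. A minimal user-space sketch of that conversion (the helper name and the sample values are made up for illustration, not part of the driver):

#include <stdio.h>

/*
 * Convert a raw SARADC sample to millivolts under the convention above:
 * scale_mV = (uv_vref / 1000) / 2^realbits.
 */
static double saradc_raw_to_mv(int raw, int uv_vref, int realbits)
{
	double scale_mv = (uv_vref / 1000.0) / (double)(1 << realbits);

	return raw * scale_mv;
}

int main(void)
{
	/* e.g. a 1.8 V vref and a 10-bit rk3568 channel, mid-scale sample */
	printf("%.2f mV\n", saradc_raw_to_mv(512, 1800000, 10));	/* 900.00 mV */
	return 0;
}
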
36761diff --git a/drivers/input/Makefile b/drivers/input/Makefile
36762index e35650930..175acb2f6 100644
36763--- a/drivers/input/Makefile
36764+++ b/drivers/input/Makefile
36765@@ -24,6 +24,7 @@ obj-$(CONFIG_INPUT_MOUSE)	+= mouse/
36766 obj-$(CONFIG_INPUT_JOYSTICK)	+= joystick/
36767 obj-$(CONFIG_INPUT_TABLET)	+= tablet/
36768 obj-$(CONFIG_INPUT_TOUCHSCREEN)	+= touchscreen/
36769+obj-$(CONFIG_SENSOR_DEVICE)	+= sensors/
36770 obj-$(CONFIG_INPUT_MISC)	+= misc/
36771 
36772 obj-$(CONFIG_INPUT_APMPOWER)	+= apm-power.o
36773diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
36774index 362e8a019..1f3e8132a 100644
36775--- a/drivers/input/misc/Kconfig
36776+++ b/drivers/input/misc/Kconfig
36777@@ -578,7 +578,7 @@ config INPUT_PWM_VIBRA
36778 
36779 config INPUT_RK805_PWRKEY
36780 	tristate "Rockchip RK805 PMIC power key support"
36781-	depends on MFD_RK808
36782+	depends on MFD_RK806 || MFD_RK808
36783 	help
36784 	  Select this option to enable power key driver for RK805.
36785 
36786diff --git a/drivers/input/misc/rk805-pwrkey.c b/drivers/input/misc/rk805-pwrkey.c
36787index 76873aa00..769821763 100644
36788--- a/drivers/input/misc/rk805-pwrkey.c
36789+++ b/drivers/input/misc/rk805-pwrkey.c
36790@@ -13,6 +13,7 @@
36791 #include <linux/interrupt.h>
36792 #include <linux/kernel.h>
36793 #include <linux/module.h>
36794+#include <linux/of.h>
36795 #include <linux/platform_device.h>
36796 
36797 static irqreturn_t pwrkey_fall_irq(int irq, void *_pwr)
36798@@ -39,8 +40,15 @@ static int rk805_pwrkey_probe(struct platform_device *pdev)
36799 {
36800 	struct input_dev *pwr;
36801 	int fall_irq, rise_irq;
36802+	struct device_node *np;
36803 	int err;
36804 
36805+	np = of_get_child_by_name(pdev->dev.parent->of_node, "pwrkey");
36806+	if (np && !of_device_is_available(np)) {
36807+		dev_info(&pdev->dev, "device is disabled\n");
36808+		return -EINVAL;
36809+	}
36810+
36811 	pwr = devm_input_allocate_device(&pdev->dev);
36812 	if (!pwr) {
36813 		dev_err(&pdev->dev, "Can't allocate power button\n");
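
The probe change above skips registration when the PMIC's optional "pwrkey" child node exists but is marked disabled. A small sketch of the same check factored into a helper (the helper name is hypothetical; of_get_child_by_name() and of_device_is_available() are the kernel APIs used in the hunk, and of_node_put() is added here to drop the reference the lookup takes):

#include <linux/of.h>
#include <linux/platform_device.h>

/*
 * Return true if the optional child node 'name' under the parent PMIC
 * exists but carries status = "disabled" in the devicetree.
 */
static bool example_pmic_child_disabled(struct platform_device *pdev,
					const char *name)
{
	struct device_node *np;
	bool disabled;

	np = of_get_child_by_name(pdev->dev.parent->of_node, name);
	disabled = np && !of_device_is_available(np);
	of_node_put(np);	/* safe on NULL */

	return disabled;
}

A sub-device probe could then bail out early when example_pmic_child_disabled(pdev, "pwrkey") returns true, as the hunk above does.
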
36814diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
36815index 04878caf6..fe1256b01 100644
36816--- a/drivers/iommu/Kconfig
36817+++ b/drivers/iommu/Kconfig
36818@@ -160,7 +160,8 @@ config OMAP_IOMMU_DEBUG
36819 	  Say N unless you know you need this.
36820 
36821 config ROCKCHIP_IOMMU
36822-	bool "Rockchip IOMMU Support"
36823+	tristate "Rockchip IOMMU Support"
36824+	depends on ARM || ARM64
36825 	depends on ARCH_ROCKCHIP || COMPILE_TEST
36826 	select IOMMU_API
36827 	select ARM_DMA_USE_IOMMU
36828diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
36829index d1539b739..dc5d98b1d 100644
36830--- a/drivers/iommu/dma-iommu.c
36831+++ b/drivers/iommu/dma-iommu.c
36832@@ -372,6 +372,52 @@ static int iommu_dma_deferred_attach(struct device *dev,
36833 	return 0;
36834 }
36835 
36836+/*
36837+ * Should be called prior to using the DMA APIs.
36838+ */
36839+int iommu_dma_reserve_iova(struct device *dev, dma_addr_t base,
36840+			   u64 size)
36841+{
36842+	struct iommu_domain *domain;
36843+	struct iommu_dma_cookie *cookie;
36844+	struct iova_domain *iovad;
36845+	unsigned long pfn_lo, pfn_hi;
36846+
36847+	domain = iommu_get_domain_for_dev(dev);
36848+	if (!domain || !domain->iova_cookie)
36849+		return -EINVAL;
36850+
36851+	cookie = domain->iova_cookie;
36852+	iovad = &cookie->iovad;
36853+
36854+	/* iova will be freed automatically by put_iova_domain() */
36855+	pfn_lo = iova_pfn(iovad, base);
36856+	pfn_hi = iova_pfn(iovad, base + size - 1);
36857+	if (!reserve_iova(iovad, pfn_lo, pfn_hi))
36858+		return -EINVAL;
36859+
36860+	return 0;
36861+}
36862+EXPORT_SYMBOL(iommu_dma_reserve_iova);
36863+
36864+/*
36865+ * Should be called prior to using the DMA APIs.
36866+ */
36867+int iommu_dma_enable_best_fit_algo(struct device *dev)
36868+{
36869+	struct iommu_domain *domain;
36870+	struct iova_domain *iovad;
36871+
36872+	domain = iommu_get_domain_for_dev(dev);
36873+	if (!domain || !domain->iova_cookie)
36874+		return -EINVAL;
36875+
36876+	iovad = &((struct iommu_dma_cookie *)domain->iova_cookie)->iovad;
36877+	iovad->best_fit = true;
36878+	return 0;
36879+}
36880+EXPORT_SYMBOL(iommu_dma_enable_best_fit_algo);
36881+
36882 /**
36883  * dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API
36884  *                    page flags.
36885@@ -388,6 +434,10 @@ static int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
36886 
36887 	if (attrs & DMA_ATTR_PRIVILEGED)
36888 		prot |= IOMMU_PRIV;
36889+	if (attrs & DMA_ATTR_SYS_CACHE_ONLY)
36890+		prot |= IOMMU_SYS_CACHE;
36891+	if (attrs & DMA_ATTR_SYS_CACHE_ONLY_NWA)
36892+		prot |= IOMMU_SYS_CACHE_NWA;
36893 
36894 	switch (dir) {
36895 	case DMA_BIDIRECTIONAL:
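
The two exported helpers added above are meant to run before a master starts going through the DMA API. A hedged sketch of a caller (the device-init function and the reserved window are made-up examples; the prototypes are assumed to be made visible through linux/dma-iommu.h, which this hunk does not show):

#include <linux/device.h>
#include <linux/dma-iommu.h>	/* assumed location of the new prototypes */
#include <linux/sizes.h>

static int example_dev_iova_setup(struct device *dev)
{
	int ret;

	/* keep a 16 MiB window at 0x10000000 out of the IOVA allocator */
	ret = iommu_dma_reserve_iova(dev, 0x10000000, SZ_16M);
	if (ret)
		return ret;

	/* switch this device's IOVA domain to best-fit allocation */
	return iommu_dma_enable_best_fit_algo(dev);
}
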
36896diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
36897index 9d65557df..0853dd704 100644
36898--- a/drivers/iommu/iommu.c
36899+++ b/drivers/iommu/iommu.c
36900@@ -8,6 +8,7 @@
36901 
36902 #include <linux/device.h>
36903 #include <linux/kernel.h>
36904+#include <linux/bits.h>
36905 #include <linux/bug.h>
36906 #include <linux/types.h>
36907 #include <linux/init.h>
36908@@ -2203,7 +2204,8 @@ void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
36909 		return;
36910 
36911 	mutex_lock(&group->mutex);
36912-	if (iommu_group_device_count(group) != 1) {
36913+	/* Don't break detach if the iommu is shared by more than one master */
36914+	if (iommu_group_device_count(group) < 1) {
36915 		WARN_ON(1);
36916 		goto out_unlock;
36917 	}
36918@@ -2337,38 +2339,85 @@ phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
36919 }
36920 EXPORT_SYMBOL_GPL(iommu_iova_to_phys);
36921 
36922-static size_t iommu_pgsize(struct iommu_domain *domain,
36923-			   unsigned long addr_merge, size_t size)
36924+static size_t iommu_pgsize(struct iommu_domain *domain, unsigned long iova,
36925+			   phys_addr_t paddr, size_t size, size_t *count)
36926 {
36927-	unsigned int pgsize_idx;
36928-	size_t pgsize;
36929+	unsigned int pgsize_idx, pgsize_idx_next;
36930+	unsigned long pgsizes;
36931+	size_t offset, pgsize, pgsize_next;
36932+	unsigned long addr_merge = paddr | iova;
36933 
36934-	/* Max page size that still fits into 'size' */
36935-	pgsize_idx = __fls(size);
36936+	/* Page sizes supported by the hardware and small enough for @size */
36937+	pgsizes = domain->pgsize_bitmap & GENMASK(__fls(size), 0);
36938 
36939-	/* need to consider alignment requirements ? */
36940-	if (likely(addr_merge)) {
36941-		/* Max page size allowed by address */
36942-		unsigned int align_pgsize_idx = __ffs(addr_merge);
36943-		pgsize_idx = min(pgsize_idx, align_pgsize_idx);
36944-	}
36945+	/* Constrain the page sizes further based on the maximum alignment */
36946+	if (likely(addr_merge))
36947+		pgsizes &= GENMASK(__ffs(addr_merge), 0);
36948+
36949+	/* Make sure we have at least one suitable page size */
36950+	BUG_ON(!pgsizes);
36951+
36952+	/* Pick the biggest page size remaining */
36953+	pgsize_idx = __fls(pgsizes);
36954+	pgsize = BIT(pgsize_idx);
36955+	if (!count)
36956+		return pgsize;
36957 
36958-	/* build a mask of acceptable page sizes */
36959-	pgsize = (1UL << (pgsize_idx + 1)) - 1;
36960 
36961-	/* throw away page sizes not supported by the hardware */
36962-	pgsize &= domain->pgsize_bitmap;
36963+	/* Find the next biggest supported page size, if it exists */
36964+	pgsizes = domain->pgsize_bitmap & ~GENMASK(pgsize_idx, 0);
36965+	if (!pgsizes)
36966+		goto out_set_count;
36967 
36968-	/* make sure we're still sane */
36969-	BUG_ON(!pgsize);
36970+	pgsize_idx_next = __ffs(pgsizes);
36971+	pgsize_next = BIT(pgsize_idx_next);
36972 
36973-	/* pick the biggest page */
36974-	pgsize_idx = __fls(pgsize);
36975-	pgsize = 1UL << pgsize_idx;
36976+	/*
36977+	 * There's no point trying a bigger page size unless the virtual
36978+	 * and physical addresses are similarly offset within the larger page.
36979+	 */
36980+	if ((iova ^ paddr) & (pgsize_next - 1))
36981+		goto out_set_count;
36982+
36983+	/* Calculate the offset to the next page size alignment boundary */
36984+	offset = pgsize_next - (addr_merge & (pgsize_next - 1));
36985+
36986+	/*
36987+	 * If size is big enough to accommodate the larger page, reduce
36988+	 * the number of smaller pages.
36989+	 */
36990+	if (offset + pgsize_next <= size)
36991+		size = offset;
36992 
36993+out_set_count:
36994+	*count = size >> pgsize_idx;
36995 	return pgsize;
36996 }
36997 
36998+static int __iommu_map_pages(struct iommu_domain *domain, unsigned long iova,
36999+			     phys_addr_t paddr, size_t size, int prot,
37000+			     gfp_t gfp, size_t *mapped)
37001+{
37002+	const struct iommu_ops *ops = domain->ops;
37003+	size_t pgsize, count;
37004+	int ret;
37005+
37006+	pgsize = iommu_pgsize(domain, iova, paddr, size, &count);
37007+
37008+	pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx count %zu\n",
37009+			 iova, &paddr, pgsize, count);
37010+
37011+	if (ops->map_pages) {
37012+		ret = ops->map_pages(domain, iova, paddr, pgsize, count, prot,
37013+				     gfp, mapped);
37014+	} else {
37015+		ret = ops->map(domain, iova, paddr, pgsize, prot, gfp);
37016+		*mapped = ret ? 0 : pgsize;
37017+	}
37018+
37019+	return ret;
37020+}
37021+
37022 static int __iommu_map(struct iommu_domain *domain, unsigned long iova,
37023 		       phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
37024 {
37025@@ -2379,7 +2428,7 @@ static int __iommu_map(struct iommu_domain *domain, unsigned long iova,
37026 	phys_addr_t orig_paddr = paddr;
37027 	int ret = 0;
37028 
37029-	if (unlikely(ops->map == NULL ||
37030+	if (unlikely(!(ops->map || ops->map_pages) ||
37031 		     domain->pgsize_bitmap == 0UL))
37032 		return -ENODEV;
37033 
37034@@ -2403,18 +2452,21 @@ static int __iommu_map(struct iommu_domain *domain, unsigned long iova,
37035 	pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size);
37036 
37037 	while (size) {
37038-		size_t pgsize = iommu_pgsize(domain, iova | paddr, size);
37039+		size_t mapped = 0;
37040 
37041-		pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx\n",
37042-			 iova, &paddr, pgsize);
37043-		ret = ops->map(domain, iova, paddr, pgsize, prot, gfp);
37044+		ret = __iommu_map_pages(domain, iova, paddr, size, prot, gfp,
37045+					&mapped);
37046+		/*
37047+		 * Some pages may have been mapped, even if an error occurred,
37048+		 * so we should account for those so they can be unmapped.
37049+		 */
37050+		size -= mapped;
37051 
37052 		if (ret)
37053 			break;
37054 
37055-		iova += pgsize;
37056-		paddr += pgsize;
37057-		size -= pgsize;
37058+		iova += mapped;
37059+		paddr += mapped;
37060 	}
37061 
37062 	/* unroll mapping in case something went wrong */
37063@@ -2434,7 +2486,7 @@ static int _iommu_map(struct iommu_domain *domain, unsigned long iova,
37064 
37065 	ret = __iommu_map(domain, iova, paddr, size, prot, gfp);
37066 	if (ret == 0 && ops->iotlb_sync_map)
37067-		ops->iotlb_sync_map(domain);
37068+		ops->iotlb_sync_map(domain, iova, size);
37069 
37070 	return ret;
37071 }
37072@@ -2454,6 +2506,19 @@ int iommu_map_atomic(struct iommu_domain *domain, unsigned long iova,
37073 }
37074 EXPORT_SYMBOL_GPL(iommu_map_atomic);
37075 
37076+static size_t __iommu_unmap_pages(struct iommu_domain *domain,
37077+				  unsigned long iova, size_t size,
37078+				  struct iommu_iotlb_gather *iotlb_gather)
37079+{
37080+	const struct iommu_ops *ops = domain->ops;
37081+	size_t pgsize, count;
37082+
37083+	pgsize = iommu_pgsize(domain, iova, iova, size, &count);
37084+	return ops->unmap_pages ?
37085+	       ops->unmap_pages(domain, iova, pgsize, count, iotlb_gather) :
37086+	       ops->unmap(domain, iova, pgsize, iotlb_gather);
37087+}
37088+
37089 static size_t __iommu_unmap(struct iommu_domain *domain,
37090 			    unsigned long iova, size_t size,
37091 			    struct iommu_iotlb_gather *iotlb_gather)
37092@@ -2463,7 +2528,7 @@ static size_t __iommu_unmap(struct iommu_domain *domain,
37093 	unsigned long orig_iova = iova;
37094 	unsigned int min_pagesz;
37095 
37096-	if (unlikely(ops->unmap == NULL ||
37097+	if (unlikely(!(ops->unmap || ops->unmap_pages) ||
37098 		     domain->pgsize_bitmap == 0UL))
37099 		return 0;
37100 
37101@@ -2491,9 +2556,9 @@ static size_t __iommu_unmap(struct iommu_domain *domain,
37102 	 * or we hit an area that isn't mapped.
37103 	 */
37104 	while (unmapped < size) {
37105-		size_t pgsize = iommu_pgsize(domain, iova, size - unmapped);
37106-
37107-		unmapped_page = ops->unmap(domain, iova, pgsize, iotlb_gather);
37108+		unmapped_page = __iommu_unmap_pages(domain, iova,
37109+						    size - unmapped,
37110+						    iotlb_gather);
37111 		if (!unmapped_page)
37112 			break;
37113 
37114@@ -2540,6 +2605,18 @@ static size_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
37115 	unsigned int i = 0;
37116 	int ret;
37117 
37118+	if (ops->map_sg) {
37119+		ret = ops->map_sg(domain, iova, sg, nents, prot, gfp, &mapped);
37120+
37121+		if (ops->iotlb_sync_map)
37122+			ops->iotlb_sync_map(domain, iova, mapped);
37123+
37124+		if (ret)
37125+			goto out_err;
37126+
37127+		return mapped;
37128+	}
37129+
37130 	while (i <= nents) {
37131 		phys_addr_t s_phys = sg_phys(sg);
37132 
37133@@ -2566,7 +2643,7 @@ static size_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
37134 	}
37135 
37136 	if (ops->iotlb_sync_map)
37137-		ops->iotlb_sync_map(domain);
37138+		ops->iotlb_sync_map(domain, iova, mapped);
37139 	return mapped;
37140 
37141 out_err:
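
The reworked iommu_pgsize() above now also reports how many pages of the chosen size fit, so __iommu_map_pages()/__iommu_unmap_pages() can hand whole runs to drivers that implement map_pages()/unmap_pages(). A simplified user-space sketch of the selection (it keeps the "largest size allowed by the request size and by the iova|paddr alignment" rule but omits the refinement that truncates the count at the next larger page-size boundary; assumes a 64-bit unsigned long and non-zero inputs):

#include <stdio.h>

static int fls_idx(unsigned long x)	/* index of highest set bit, x != 0 */
{
	return 63 - __builtin_clzl(x);
}

static unsigned long pick_pgsize(unsigned long bitmap, unsigned long iova,
				 unsigned long paddr, unsigned long size,
				 unsigned long *count)
{
	unsigned long addr_merge = iova | paddr;
	unsigned long pgsizes;
	int idx;

	/* hardware page sizes no bigger than the requested size */
	pgsizes = bitmap & ((2UL << fls_idx(size)) - 1);

	/* ...and no bigger than what the iova/paddr alignment allows */
	if (addr_merge)
		pgsizes &= (2UL << __builtin_ctzl(addr_merge)) - 1;

	idx = fls_idx(pgsizes);		/* largest remaining page size */
	*count = size >> idx;		/* how many of them 'size' holds */

	return 1UL << idx;
}

int main(void)
{
	unsigned long count;
	/* 4 KiB + 2 MiB supported, 1 MiB request at 4 KiB alignment */
	unsigned long pgsize = pick_pgsize((1UL << 12) | (1UL << 21),
					   0x1000, 0x1000, 0x100000, &count);

	printf("pgsize=0x%lx count=%lu\n", pgsize, count);	/* 0x1000, 256 */
	return 0;
}
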
37142diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
37143index 4600e97ac..cf32b592a 100644
37144--- a/drivers/iommu/iova.c
37145+++ b/drivers/iommu/iova.c
37146@@ -50,6 +50,7 @@ init_iova_domain(struct iova_domain *iovad, unsigned long granule,
37147 	iovad->anchor.pfn_lo = iovad->anchor.pfn_hi = IOVA_ANCHOR;
37148 	rb_link_node(&iovad->anchor.node, NULL, &iovad->rbroot.rb_node);
37149 	rb_insert_color(&iovad->anchor.node, &iovad->rbroot);
37150+	iovad->best_fit = false;
37151 	init_iova_rcaches(iovad);
37152 }
37153 EXPORT_SYMBOL_GPL(init_iova_domain);
37154@@ -64,7 +65,8 @@ static void free_iova_flush_queue(struct iova_domain *iovad)
37155 	if (!has_iova_flush_queue(iovad))
37156 		return;
37157 
37158-	del_timer_sync(&iovad->fq_timer);
37159+	if (timer_pending(&iovad->fq_timer))
37160+		del_timer(&iovad->fq_timer);
37161 
37162 	fq_destroy_all_entries(iovad);
37163 
37164@@ -177,6 +179,24 @@ iova_insert_rbtree(struct rb_root *root, struct iova *iova,
37165 	rb_insert_color(&iova->node, root);
37166 }
37167 
37168+#ifdef CONFIG_IOMMU_LIMIT_IOVA_ALIGNMENT
37169+static unsigned long limit_align_shift(struct iova_domain *iovad,
37170+				       unsigned long shift)
37171+{
37172+	unsigned long max_align_shift;
37173+
37174+	max_align_shift = CONFIG_IOMMU_IOVA_ALIGNMENT + PAGE_SHIFT
37175+		- iova_shift(iovad);
37176+	return min_t(unsigned long, max_align_shift, shift);
37177+}
37178+#else
37179+static unsigned long limit_align_shift(struct iova_domain *iovad,
37180+				       unsigned long shift)
37181+{
37182+	return shift;
37183+}
37184+#endif
37185+
37186 static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
37187 		unsigned long size, unsigned long limit_pfn,
37188 			struct iova *new, bool size_aligned)
37189@@ -184,11 +204,12 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
37190 	struct rb_node *curr, *prev;
37191 	struct iova *curr_iova;
37192 	unsigned long flags;
37193-	unsigned long new_pfn;
37194+	unsigned long new_pfn, low_pfn_new;
37195 	unsigned long align_mask = ~0UL;
37196+	unsigned long high_pfn = limit_pfn, low_pfn = iovad->start_pfn;
37197 
37198 	if (size_aligned)
37199-		align_mask <<= fls_long(size - 1);
37200+		align_mask <<= limit_align_shift(iovad, fls_long(size - 1));
37201 
37202 	/* Walk the tree backwards */
37203 	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
37204@@ -198,15 +219,25 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
37205 
37206 	curr = __get_cached_rbnode(iovad, limit_pfn);
37207 	curr_iova = rb_entry(curr, struct iova, node);
37208+	low_pfn_new = curr_iova->pfn_hi + 1;
37209+
37210+retry:
37211 	do {
37212-		limit_pfn = min(limit_pfn, curr_iova->pfn_lo);
37213-		new_pfn = (limit_pfn - size) & align_mask;
37214+		high_pfn = min(high_pfn, curr_iova->pfn_lo);
37215+		new_pfn = (high_pfn - size) & align_mask;
37216 		prev = curr;
37217 		curr = rb_prev(curr);
37218 		curr_iova = rb_entry(curr, struct iova, node);
37219-	} while (curr && new_pfn <= curr_iova->pfn_hi);
37220-
37221-	if (limit_pfn < size || new_pfn < iovad->start_pfn) {
37222+	} while (curr && new_pfn <= curr_iova->pfn_hi && new_pfn >= low_pfn);
37223+
37224+	if (high_pfn < size || new_pfn < low_pfn) {
37225+		if (low_pfn == iovad->start_pfn && low_pfn_new < limit_pfn) {
37226+			high_pfn = limit_pfn;
37227+			low_pfn = low_pfn_new;
37228+			curr = &iovad->anchor.node;
37229+			curr_iova = rb_entry(curr, struct iova, node);
37230+			goto retry;
37231+		}
37232 		iovad->max32_alloc_size = size;
37233 		goto iova32_full;
37234 	}
37235@@ -227,6 +258,70 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
37236 	return -ENOMEM;
37237 }
37238 
37239+static int __alloc_and_insert_iova_best_fit(struct iova_domain *iovad,
37240+					    unsigned long size,
37241+					    unsigned long limit_pfn,
37242+					    struct iova *new, bool size_aligned)
37243+{
37244+	struct rb_node *curr, *prev;
37245+	struct iova *curr_iova, *prev_iova;
37246+	unsigned long flags;
37247+	unsigned long align_mask = ~0UL;
37248+	struct rb_node *candidate_rb_parent;
37249+	unsigned long new_pfn, candidate_pfn = ~0UL;
37250+	unsigned long gap, candidate_gap = ~0UL;
37251+
37252+	if (size_aligned)
37253+		align_mask <<= limit_align_shift(iovad, fls_long(size - 1));
37254+
37255+	/* Walk the tree backwards */
37256+	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
37257+	curr = &iovad->anchor.node;
37258+	prev = rb_prev(curr);
37259+	for (; prev; curr = prev, prev = rb_prev(curr)) {
37260+		curr_iova = rb_entry(curr, struct iova, node);
37261+		prev_iova = rb_entry(prev, struct iova, node);
37262+
37263+		limit_pfn = min(limit_pfn, curr_iova->pfn_lo);
37264+		new_pfn = (limit_pfn - size) & align_mask;
37265+		gap = curr_iova->pfn_lo - prev_iova->pfn_hi - 1;
37266+		if ((limit_pfn >= size) && (new_pfn > prev_iova->pfn_hi)
37267+				&& (gap < candidate_gap)) {
37268+			candidate_gap = gap;
37269+			candidate_pfn = new_pfn;
37270+			candidate_rb_parent = curr;
37271+			if (gap == size)
37272+				goto insert;
37273+		}
37274+	}
37275+
37276+	curr_iova = rb_entry(curr, struct iova, node);
37277+	limit_pfn = min(limit_pfn, curr_iova->pfn_lo);
37278+	new_pfn = (limit_pfn - size) & align_mask;
37279+	gap = curr_iova->pfn_lo - iovad->start_pfn;
37280+	if (limit_pfn >= size && new_pfn >= iovad->start_pfn &&
37281+			gap < candidate_gap) {
37282+		candidate_gap = gap;
37283+		candidate_pfn = new_pfn;
37284+		candidate_rb_parent = curr;
37285+	}
37286+
37287+insert:
37288+	if (candidate_pfn == ~0UL) {
37289+		spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
37290+		return -ENOMEM;
37291+	}
37292+
37293+	/* pfn_lo will point to size aligned address if size_aligned is set */
37294+	new->pfn_lo = candidate_pfn;
37295+	new->pfn_hi = new->pfn_lo + size - 1;
37296+
37297+	/* If we have 'prev', it's a valid place to start the insertion. */
37298+	iova_insert_rbtree(&iovad->rbroot, new, candidate_rb_parent);
37299+	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
37300+	return 0;
37301+}
37302+
37303 static struct kmem_cache *iova_cache;
37304 static unsigned int iova_cache_users;
37305 static DEFINE_MUTEX(iova_cache_mutex);
37306@@ -302,8 +397,13 @@ alloc_iova(struct iova_domain *iovad, unsigned long size,
37307 	if (!new_iova)
37308 		return NULL;
37309 
37310-	ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn + 1,
37311-			new_iova, size_aligned);
37312+	if (iovad->best_fit) {
37313+		ret = __alloc_and_insert_iova_best_fit(iovad, size,
37314+				limit_pfn + 1, new_iova, size_aligned);
37315+	} else {
37316+		ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn + 1,
37317+				new_iova, size_aligned);
37318+	}
37319 
37320 	if (ret) {
37321 		free_iova_mem(new_iova);
37322@@ -431,6 +531,7 @@ alloc_iova_fast(struct iova_domain *iovad, unsigned long size,
37323 		flush_rcache = false;
37324 		for_each_online_cpu(cpu)
37325 			free_cpu_cached_iovas(cpu, iovad);
37326+		free_global_cached_iovas(iovad);
37327 		goto retry;
37328 	}
37329 
37330@@ -1046,5 +1147,27 @@ void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad)
37331 	}
37332 }
37333 
37334+/*
37335+ * Free all the IOVA ranges in the global cache.
37336+ */
37337+void free_global_cached_iovas(struct iova_domain *iovad)
37338+{
37339+	struct iova_rcache *rcache;
37340+	unsigned long flags;
37341+	int i, j;
37342+
37343+	for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
37344+		rcache = &iovad->rcaches[i];
37345+		spin_lock_irqsave(&rcache->lock, flags);
37346+		for (j = 0; j < rcache->depot_size; ++j) {
37347+			iova_magazine_free_pfns(rcache->depot[j], iovad);
37348+			iova_magazine_free(rcache->depot[j]);
37349+			rcache->depot[j] = NULL;
37350+		}
37351+		rcache->depot_size = 0;
37352+		spin_unlock_irqrestore(&rcache->lock, flags);
37353+	}
37354+}
37355+
37356 MODULE_AUTHOR("Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>");
37357 MODULE_LICENSE("GPL");
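
The best-fit path added to iova.c walks all gaps in the rbtree and keeps the smallest one that still satisfies the size and alignment request, so large contiguous ranges stay available for later allocations. A toy user-space sketch of that selection policy over a flat gap list (the rbtree walk, alignment masking and locking are left out):

#include <stdio.h>

struct gap { unsigned long start, len; };

/* return the start of the smallest gap that can hold 'size', ~0UL if none */
static unsigned long best_fit(const struct gap *g, int n, unsigned long size)
{
	unsigned long best_len = ~0UL, best_start = ~0UL;
	int i;

	for (i = 0; i < n; i++) {
		if (g[i].len >= size && g[i].len < best_len) {
			best_len = g[i].len;
			best_start = g[i].start;
			if (best_len == size)	/* exact fit, stop early */
				break;
		}
	}

	return best_start;
}

int main(void)
{
	const struct gap gaps[] = {
		{ 0x1000, 0x40 }, { 0x5000, 0x10 }, { 0x9000, 0x20 },
	};

	/* a 0x10-pfn request picks the exact-fit gap at 0x5000 */
	printf("0x%lx\n", best_fit(gaps, 3, 0x10));
	return 0;
}
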
37358diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
37359index e5d86b717..b359fcf2c 100644
37360--- a/drivers/iommu/rockchip-iommu.c
37361+++ b/drivers/iommu/rockchip-iommu.c
37362@@ -19,6 +19,7 @@
37363 #include <linux/iopoll.h>
37364 #include <linux/list.h>
37365 #include <linux/mm.h>
37366+#include <linux/module.h>
37367 #include <linux/init.h>
37368 #include <linux/of.h>
37369 #include <linux/of_iommu.h>
37370@@ -27,6 +28,7 @@
37371 #include <linux/pm_runtime.h>
37372 #include <linux/slab.h>
37373 #include <linux/spinlock.h>
37374+#include <soc/rockchip/rockchip_iommu.h>
37375 
37376 /** MMU register offsets */
37377 #define RK_MMU_DTE_ADDR		0x00	/* Directory table address */
37378@@ -75,25 +77,53 @@
37379 #define SPAGE_ORDER 12
37380 #define SPAGE_SIZE (1 << SPAGE_ORDER)
37381 
37382+#define DISABLE_FETCH_DTE_TIME_LIMIT BIT(31)
37383+
37384+#define CMD_RETRY_COUNT 10
37385+
37386  /*
37387   * Support mapping any size that fits in one page table:
37388   *   4 KiB to 4 MiB
37389   */
37390 #define RK_IOMMU_PGSIZE_BITMAP 0x007ff000
37391 
37392+#define DT_LO_MASK 0xfffff000
37393+#define DT_HI_MASK GENMASK_ULL(39, 32)
37394+#define DT_SHIFT   28
37395+
37396+#define DTE_BASE_HI_MASK GENMASK(11, 4)
37397+
37398+#define PAGE_DESC_LO_MASK   0xfffff000
37399+#define PAGE_DESC_HI1_LOWER 32
37400+#define PAGE_DESC_HI1_UPPER 35
37401+#define PAGE_DESC_HI2_LOWER 36
37402+#define PAGE_DESC_HI2_UPPER 39
37403+#define PAGE_DESC_HI_MASK1  GENMASK_ULL(PAGE_DESC_HI1_UPPER, PAGE_DESC_HI1_LOWER)
37404+#define PAGE_DESC_HI_MASK2  GENMASK_ULL(PAGE_DESC_HI2_UPPER, PAGE_DESC_HI2_LOWER)
37405+
37406+#define DTE_HI1_LOWER 8
37407+#define DTE_HI1_UPPER 11
37408+#define DTE_HI2_LOWER 4
37409+#define DTE_HI2_UPPER 7
37410+#define DTE_HI_MASK1  GENMASK(DTE_HI1_UPPER, DTE_HI1_LOWER)
37411+#define DTE_HI_MASK2  GENMASK(DTE_HI2_UPPER, DTE_HI2_LOWER)
37412+
37413+#define PAGE_DESC_HI_SHIFT1 (PAGE_DESC_HI1_LOWER - DTE_HI1_LOWER)
37414+#define PAGE_DESC_HI_SHIFT2 (PAGE_DESC_HI2_LOWER - DTE_HI2_LOWER)
37415+
37416 struct rk_iommu_domain {
37417 	struct list_head iommus;
37418 	u32 *dt; /* page directory table */
37419 	dma_addr_t dt_dma;
37420 	spinlock_t iommus_lock; /* lock for iommus list */
37421 	spinlock_t dt_lock; /* lock for modifying page directory table */
37422+	bool shootdown_entire;
37423 
37424 	struct iommu_domain domain;
37425 };
37426 
37427-/* list of clocks required by IOMMU */
37428-static const char * const rk_iommu_clocks[] = {
37429-	"aclk", "iface",
37430+struct rockchip_iommu_data {
37431+	u32 version;
37432 };
37433 
37434 struct rk_iommu {
37435@@ -104,15 +134,21 @@ struct rk_iommu {
37436 	struct clk_bulk_data *clocks;
37437 	int num_clocks;
37438 	bool reset_disabled;
37439+	bool skip_read; /* rk3126/rk3128 can't read vop iommu registers */
37440+	bool dlr_disable; /* avoid accessing the iommu when runtime ops are called */
37441+	bool cmd_retry;
37442 	struct iommu_device iommu;
37443 	struct list_head node; /* entry in rk_iommu_domain.iommus */
37444 	struct iommu_domain *domain; /* domain to which iommu is attached */
37445 	struct iommu_group *group;
37446+	u32 version;
37447+	bool shootdown_entire;
37448 };
37449 
37450 struct rk_iommudata {
37451 	struct device_link *link; /* runtime PM link from IOMMU to master */
37452 	struct rk_iommu *iommu;
37453+	bool defer_attach;
37454 };
37455 
37456 static struct device *dma_dev;
37457@@ -174,11 +210,32 @@ static struct rk_iommu_domain *to_rk_domain(struct iommu_domain *dom)
37458 #define RK_DTE_PT_ADDRESS_MASK    0xfffff000
37459 #define RK_DTE_PT_VALID           BIT(0)
37460 
37461+/*
37462+ * In v2:
37463+ * 31:12 - PT address bit 31:0
37464+ * 11: 8 - PT address bit 35:32
37465+ *  7: 4 - PT address bit 39:36
37466+ *  3: 1 - Reserved
37467+ *     0 - 1 if PT @ PT address is valid
37468+ */
37469+#define RK_DTE_PT_ADDRESS_MASK_V2 0xfffffff0
37470+
37471 static inline phys_addr_t rk_dte_pt_address(u32 dte)
37472 {
37473 	return (phys_addr_t)dte & RK_DTE_PT_ADDRESS_MASK;
37474 }
37475 
37476+static inline phys_addr_t rk_dte_pt_address_v2(u32 dte)
37477+{
37478+	u64 dte_v2 = dte;
37479+
37480+	dte_v2 = ((dte_v2 & DTE_HI_MASK2) << PAGE_DESC_HI_SHIFT2) |
37481+		 ((dte_v2 & DTE_HI_MASK1) << PAGE_DESC_HI_SHIFT1) |
37482+		 (dte_v2 & PAGE_DESC_LO_MASK);
37483+
37484+	return (phys_addr_t)dte_v2;
37485+}
37486+
37487 static inline bool rk_dte_is_pt_valid(u32 dte)
37488 {
37489 	return dte & RK_DTE_PT_VALID;
37490@@ -189,6 +246,15 @@ static inline u32 rk_mk_dte(dma_addr_t pt_dma)
37491 	return (pt_dma & RK_DTE_PT_ADDRESS_MASK) | RK_DTE_PT_VALID;
37492 }
37493 
37494+static inline u32 rk_mk_dte_v2(dma_addr_t pt_dma)
37495+{
37496+	pt_dma = (pt_dma & PAGE_DESC_LO_MASK) |
37497+		 ((pt_dma & PAGE_DESC_HI_MASK1) >> PAGE_DESC_HI_SHIFT1) |
37498+		 (pt_dma & PAGE_DESC_HI_MASK2) >> PAGE_DESC_HI_SHIFT2;
37499+
37500+	return (pt_dma & RK_DTE_PT_ADDRESS_MASK_V2) | RK_DTE_PT_VALID;
37501+}
37502+
37503 /*
37504  * Each PTE has a Page address, some flags and a valid bit:
37505  * +---------------------+---+-------+-+
37506@@ -215,11 +281,37 @@ static inline u32 rk_mk_dte(dma_addr_t pt_dma)
37507 #define RK_PTE_PAGE_READABLE      BIT(1)
37508 #define RK_PTE_PAGE_VALID         BIT(0)
37509 
37510+/*
37511+ * In v2:
37512+ * 31:12 - Page address bit 31:0
37513+ *  11:9 - Page address bit 34:32
37514+ *   8:4 - Page address bit 39:35
37515+ *     3 - Security
37516+ *     2 - Writable
37517+ *     1 - Readable
37518+ *     0 - 1 if Page @ Page address is valid
37519+ */
37520+#define RK_PTE_PAGE_ADDRESS_MASK_V2  0xfffffff0
37521+#define RK_PTE_PAGE_FLAGS_MASK_V2    0x0000000e
37522+#define RK_PTE_PAGE_READABLE_V2      BIT(1)
37523+#define RK_PTE_PAGE_WRITABLE_V2      BIT(2)
37524+
37525 static inline phys_addr_t rk_pte_page_address(u32 pte)
37526 {
37527 	return (phys_addr_t)pte & RK_PTE_PAGE_ADDRESS_MASK;
37528 }
37529 
37530+static inline phys_addr_t rk_pte_page_address_v2(u32 pte)
37531+{
37532+	u64 pte_v2 = pte;
37533+
37534+	pte_v2 = ((pte_v2 & DTE_HI_MASK2) << PAGE_DESC_HI_SHIFT2) |
37535+		 ((pte_v2 & DTE_HI_MASK1) << PAGE_DESC_HI_SHIFT1) |
37536+		 (pte_v2 & PAGE_DESC_LO_MASK);
37537+
37538+	return (phys_addr_t)pte_v2;
37539+}
37540+
37541 static inline bool rk_pte_is_page_valid(u32 pte)
37542 {
37543 	return pte & RK_PTE_PAGE_VALID;
37544@@ -235,6 +327,20 @@ static u32 rk_mk_pte(phys_addr_t page, int prot)
37545 	return page | flags | RK_PTE_PAGE_VALID;
37546 }
37547 
37548+static u32 rk_mk_pte_v2(phys_addr_t page, int prot)
37549+{
37550+	u32 flags = 0;
37551+
37552+	flags |= (prot & IOMMU_READ) ? RK_PTE_PAGE_READABLE_V2 : 0;
37553+	flags |= (prot & IOMMU_WRITE) ? RK_PTE_PAGE_WRITABLE_V2 : 0;
37554+	page = (page & PAGE_DESC_LO_MASK) |
37555+	       ((page & PAGE_DESC_HI_MASK1) >> PAGE_DESC_HI_SHIFT1) |
37556+	       (page & PAGE_DESC_HI_MASK2) >> PAGE_DESC_HI_SHIFT2;
37557+	page &= RK_PTE_PAGE_ADDRESS_MASK_V2;
37558+
37559+	return page | flags | RK_PTE_PAGE_VALID;
37560+}
37561+
37562 static u32 rk_mk_pte_invalid(u32 pte)
37563 {
37564 	return pte & ~RK_PTE_PAGE_VALID;
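
The v2 helpers above pack a 40-bit table/page address into a 32-bit descriptor: bits 31:12 stay in place, physical bits 35:32 move to descriptor bits 11:8 and physical bits 39:36 to bits 7:4 (PAGE_DESC_HI_SHIFT1 = 24, PAGE_DESC_HI_SHIFT2 = 32). A user-space round-trip sketch of just the address packing (the valid and permission bits are left out):

#include <stdio.h>
#include <stdint.h>

#define LO_MASK  0x00000000fffff000ULL	/* pa bits 31:12 stay put */
#define HI1_MASK 0x0000000f00000000ULL	/* pa bits 35:32 -> desc 11:8 */
#define HI2_MASK 0x000000f000000000ULL	/* pa bits 39:36 -> desc 7:4  */

static uint32_t pack_v2(uint64_t pa)
{
	return (uint32_t)((pa & LO_MASK) |
			  ((pa & HI1_MASK) >> 24) |
			  ((pa & HI2_MASK) >> 32));
}

static uint64_t unpack_v2(uint32_t desc)
{
	uint64_t d = desc;

	return (d & LO_MASK) | ((d & 0xf00) << 24) | ((d & 0x0f0) << 32);
}

int main(void)
{
	uint64_t pa = 0x2345678000ULL;	/* a 40-bit, 4 KiB aligned address */
	uint32_t desc = pack_v2(pa);

	printf("desc=0x%08x back=0x%010llx\n", desc,
	       (unsigned long long)unpack_v2(desc));	/* 0x45678320, 0x2345678000 */
	return 0;
}
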
37565@@ -350,6 +456,10 @@ static int rk_iommu_enable_stall(struct rk_iommu *iommu)
37566 {
37567 	int ret, i;
37568 	bool val;
37569+	int retry_count = 0;
37570+
37571+	if (iommu->skip_read)
37572+		goto read_wa;
37573 
37574 	if (rk_iommu_is_stall_active(iommu))
37575 		return 0;
37576@@ -358,15 +468,22 @@ static int rk_iommu_enable_stall(struct rk_iommu *iommu)
37577 	if (!rk_iommu_is_paging_enabled(iommu))
37578 		return 0;
37579 
37580+read_wa:
37581 	rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_STALL);
37582+	if (iommu->skip_read)
37583+		return 0;
37584 
37585 	ret = readx_poll_timeout(rk_iommu_is_stall_active, iommu, val,
37586 				 val, RK_MMU_POLL_PERIOD_US,
37587 				 RK_MMU_POLL_TIMEOUT_US);
37588-	if (ret)
37589+	if (ret) {
37590 		for (i = 0; i < iommu->num_mmu; i++)
37591-			dev_err(iommu->dev, "Enable stall request timed out, status: %#08x\n",
37592+			dev_err(iommu->dev, "Enable stall request timed out, retry_count = %d, status: %#08x\n",
37593+				retry_count,
37594 				rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));
37595+		if (iommu->cmd_retry && (retry_count++ < CMD_RETRY_COUNT))
37596+			goto read_wa;
37597+	}
37598 
37599 	return ret;
37600 }
37601@@ -375,19 +492,30 @@ static int rk_iommu_disable_stall(struct rk_iommu *iommu)
37602 {
37603 	int ret, i;
37604 	bool val;
37605+	int retry_count = 0;
37606+
37607+	if (iommu->skip_read)
37608+		goto read_wa;
37609 
37610 	if (!rk_iommu_is_stall_active(iommu))
37611 		return 0;
37612 
37613+read_wa:
37614 	rk_iommu_command(iommu, RK_MMU_CMD_DISABLE_STALL);
37615+	if (iommu->skip_read)
37616+		return 0;
37617 
37618 	ret = readx_poll_timeout(rk_iommu_is_stall_active, iommu, val,
37619 				 !val, RK_MMU_POLL_PERIOD_US,
37620 				 RK_MMU_POLL_TIMEOUT_US);
37621-	if (ret)
37622+	if (ret) {
37623 		for (i = 0; i < iommu->num_mmu; i++)
37624-			dev_err(iommu->dev, "Disable stall request timed out, status: %#08x\n",
37625+			dev_err(iommu->dev, "Disable stall request timed out, retry_count = %d, status: %#08x\n",
37626+				retry_count,
37627 				rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));
37628+		if (iommu->cmd_retry && (retry_count++ < CMD_RETRY_COUNT))
37629+			goto read_wa;
37630+	}
37631 
37632 	return ret;
37633 }
37634@@ -396,19 +524,30 @@ static int rk_iommu_enable_paging(struct rk_iommu *iommu)
37635 {
37636 	int ret, i;
37637 	bool val;
37638+	int retry_count = 0;
37639+
37640+	if (iommu->skip_read)
37641+		goto read_wa;
37642 
37643 	if (rk_iommu_is_paging_enabled(iommu))
37644 		return 0;
37645 
37646+read_wa:
37647 	rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_PAGING);
37648+	if (iommu->skip_read)
37649+		return 0;
37650 
37651 	ret = readx_poll_timeout(rk_iommu_is_paging_enabled, iommu, val,
37652 				 val, RK_MMU_POLL_PERIOD_US,
37653 				 RK_MMU_POLL_TIMEOUT_US);
37654-	if (ret)
37655+	if (ret) {
37656 		for (i = 0; i < iommu->num_mmu; i++)
37657-			dev_err(iommu->dev, "Enable paging request timed out, status: %#08x\n",
37658+			dev_err(iommu->dev, "Enable paging request timed out, retry_count = %d, status: %#08x\n",
37659+				retry_count,
37660 				rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));
37661+		if (iommu->cmd_retry && (retry_count++ < CMD_RETRY_COUNT))
37662+			goto read_wa;
37663+	}
37664 
37665 	return ret;
37666 }
37667@@ -417,19 +556,30 @@ static int rk_iommu_disable_paging(struct rk_iommu *iommu)
37668 {
37669 	int ret, i;
37670 	bool val;
37671+	int retry_count = 0;
37672+
37673+	if (iommu->skip_read)
37674+		goto read_wa;
37675 
37676 	if (!rk_iommu_is_paging_enabled(iommu))
37677 		return 0;
37678 
37679+read_wa:
37680 	rk_iommu_command(iommu, RK_MMU_CMD_DISABLE_PAGING);
37681+	if (iommu->skip_read)
37682+		return 0;
37683 
37684 	ret = readx_poll_timeout(rk_iommu_is_paging_enabled, iommu, val,
37685 				 !val, RK_MMU_POLL_PERIOD_US,
37686 				 RK_MMU_POLL_TIMEOUT_US);
37687-	if (ret)
37688+	if (ret) {
37689 		for (i = 0; i < iommu->num_mmu; i++)
37690-			dev_err(iommu->dev, "Disable paging request timed out, status: %#08x\n",
37691+			dev_err(iommu->dev, "Disable paging request timed out, retry_count = %d, status: %#08x\n",
37692+				retry_count,
37693 				rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));
37694+		if (iommu->cmd_retry && (retry_count++ < CMD_RETRY_COUNT))
37695+			goto read_wa;
37696+	}
37697 
37698 	return ret;
37699 }
37700@@ -439,25 +589,40 @@ static int rk_iommu_force_reset(struct rk_iommu *iommu)
37701 	int ret, i;
37702 	u32 dte_addr;
37703 	bool val;
37704+	u32 address_mask;
37705 
37706 	if (iommu->reset_disabled)
37707 		return 0;
37708 
37709+	if (iommu->skip_read)
37710+		goto read_wa;
37711+
37712 	/*
37713 	 * Check if register DTE_ADDR is working by writing DTE_ADDR_DUMMY
37714 	 * and verifying that upper 5 nybbles are read back.
37715 	 */
37716+
37717+	/*
37718+	 * In v2: upper 7 nybbles are read back.
37719+	 */
37720 	for (i = 0; i < iommu->num_mmu; i++) {
37721 		rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, DTE_ADDR_DUMMY);
37722 
37723+		if (iommu->version >= 0x2)
37724+			address_mask = RK_DTE_PT_ADDRESS_MASK_V2;
37725+		else
37726+			address_mask = RK_DTE_PT_ADDRESS_MASK;
37727 		dte_addr = rk_iommu_read(iommu->bases[i], RK_MMU_DTE_ADDR);
37728-		if (dte_addr != (DTE_ADDR_DUMMY & RK_DTE_PT_ADDRESS_MASK)) {
37729+		if (dte_addr != (DTE_ADDR_DUMMY & address_mask)) {
37730 			dev_err(iommu->dev, "Error during raw reset. MMU_DTE_ADDR is not functioning\n");
37731 			return -EFAULT;
37732 		}
37733 	}
37734 
37735+read_wa:
37736 	rk_iommu_command(iommu, RK_MMU_CMD_FORCE_RESET);
37737+	if (iommu->skip_read)
37738+		return 0;
37739 
37740 	ret = readx_poll_timeout(rk_iommu_is_reset_done, iommu, val,
37741 				 val, RK_MMU_FORCE_RESET_TIMEOUT_US,
37742@@ -490,6 +655,10 @@ static void log_iova(struct rk_iommu *iommu, int index, dma_addr_t iova)
37743 
37744 	mmu_dte_addr = rk_iommu_read(base, RK_MMU_DTE_ADDR);
37745 	mmu_dte_addr_phys = (phys_addr_t)mmu_dte_addr;
37746+	if (iommu->version >= 0x2) {
37747+		mmu_dte_addr_phys = (mmu_dte_addr_phys & DT_LO_MASK) |
37748+				    ((mmu_dte_addr_phys & DTE_BASE_HI_MASK) << DT_SHIFT);
37749+	}
37750 
37751 	dte_addr_phys = mmu_dte_addr_phys + (4 * dte_index);
37752 	dte_addr = phys_to_virt(dte_addr_phys);
37753@@ -498,14 +667,20 @@ static void log_iova(struct rk_iommu *iommu, int index, dma_addr_t iova)
37754 	if (!rk_dte_is_pt_valid(dte))
37755 		goto print_it;
37756 
37757-	pte_addr_phys = rk_dte_pt_address(dte) + (pte_index * 4);
37758+	if (iommu->version >= 0x2)
37759+		pte_addr_phys = rk_dte_pt_address_v2(dte) + (pte_index * 4);
37760+	else
37761+		pte_addr_phys = rk_dte_pt_address(dte) + (pte_index * 4);
37762 	pte_addr = phys_to_virt(pte_addr_phys);
37763 	pte = *pte_addr;
37764 
37765 	if (!rk_pte_is_page_valid(pte))
37766 		goto print_it;
37767 
37768-	page_addr_phys = rk_pte_page_address(pte) + page_offset;
37769+	if (iommu->version >= 0x2)
37770+		page_addr_phys = rk_pte_page_address_v2(pte) + page_offset;
37771+	else
37772+		page_addr_phys = rk_pte_page_address(pte) + page_offset;
37773 	page_flags = pte & RK_PTE_PAGE_FLAGS_MASK;
37774 
37775 print_it:
37776@@ -522,6 +697,7 @@ static irqreturn_t rk_iommu_irq(int irq, void *dev_id)
37777 	struct rk_iommu *iommu = dev_id;
37778 	u32 status;
37779 	u32 int_status;
37780+	u32 int_mask;
37781 	dma_addr_t iova;
37782 	irqreturn_t ret = IRQ_NONE;
37783 	int i, err;
37784@@ -561,12 +737,20 @@ static irqreturn_t rk_iommu_irq(int irq, void *dev_id)
37785 			 */
37786 			if (iommu->domain)
37787 				report_iommu_fault(iommu->domain, iommu->dev, iova,
37788-						   flags);
37789+						   status);
37790 			else
37791 				dev_err(iommu->dev, "Page fault while iommu not attached to domain?\n");
37792 
37793 			rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
37794-			rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_PAGE_FAULT_DONE);
37795+
37796+			/*
37797+			 * The master may clear the int_mask to keep the iommu from
37798+			 * re-entering the interrupt while it is mapping. So we postpone
37799+			 * sending the PAGE_FAULT_DONE command until mapping has finished.
37800+			 */
37801+			int_mask = rk_iommu_read(iommu->bases[i], RK_MMU_INT_MASK);
37802+			if (int_mask != 0x0)
37803+				rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_PAGE_FAULT_DONE);
37804 		}
37805 
37806 		if (int_status & RK_MMU_IRQ_BUS_ERROR)
37807@@ -614,6 +798,34 @@ static phys_addr_t rk_iommu_iova_to_phys(struct iommu_domain *domain,
37808 	return phys;
37809 }
37810 
37811+static phys_addr_t rk_iommu_iova_to_phys_v2(struct iommu_domain *domain,
37812+					    dma_addr_t iova)
37813+{
37814+	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
37815+	unsigned long flags;
37816+	phys_addr_t pt_phys, phys = 0;
37817+	u32 dte, pte;
37818+	u32 *page_table;
37819+
37820+	spin_lock_irqsave(&rk_domain->dt_lock, flags);
37821+
37822+	dte = rk_domain->dt[rk_iova_dte_index(iova)];
37823+	if (!rk_dte_is_pt_valid(dte))
37824+		goto out;
37825+
37826+	pt_phys = rk_dte_pt_address_v2(dte);
37827+	page_table = (u32 *)phys_to_virt(pt_phys);
37828+	pte = page_table[rk_iova_pte_index(iova)];
37829+	if (!rk_pte_is_page_valid(pte))
37830+		goto out;
37831+
37832+	phys = rk_pte_page_address_v2(pte) + rk_iova_page_offset(iova);
37833+out:
37834+	spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
37835+
37836+	return phys;
37837+}
37838+
37839 static void rk_iommu_zap_iova(struct rk_iommu_domain *rk_domain,
37840 			      dma_addr_t iova, size_t size)
37841 {
37842@@ -690,6 +902,44 @@ static u32 *rk_dte_get_page_table(struct rk_iommu_domain *rk_domain,
37843 	return (u32 *)phys_to_virt(pt_phys);
37844 }
37845 
37846+static u32 *rk_dte_get_page_table_v2(struct rk_iommu_domain *rk_domain,
37847+				     dma_addr_t iova)
37848+{
37849+	u32 *page_table, *dte_addr;
37850+	u32 dte_index, dte;
37851+	phys_addr_t pt_phys;
37852+	dma_addr_t pt_dma;
37853+
37854+	assert_spin_locked(&rk_domain->dt_lock);
37855+
37856+	dte_index = rk_iova_dte_index(iova);
37857+	dte_addr = &rk_domain->dt[dte_index];
37858+	dte = *dte_addr;
37859+	if (rk_dte_is_pt_valid(dte))
37860+		goto done;
37861+
37862+	page_table = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32);
37863+	if (!page_table)
37864+		return ERR_PTR(-ENOMEM);
37865+
37866+	pt_dma = dma_map_single(dma_dev, page_table, SPAGE_SIZE, DMA_TO_DEVICE);
37867+	if (dma_mapping_error(dma_dev, pt_dma)) {
37868+		dev_err(dma_dev, "DMA mapping error while allocating page table\n");
37869+		free_page((unsigned long)page_table);
37870+		return ERR_PTR(-ENOMEM);
37871+	}
37872+
37873+	dte = rk_mk_dte_v2(pt_dma);
37874+	*dte_addr = dte;
37875+
37876+	rk_table_flush(rk_domain, pt_dma, NUM_PT_ENTRIES);
37877+	rk_table_flush(rk_domain,
37878+		       rk_domain->dt_dma + dte_index * sizeof(u32), 1);
37879+done:
37880+	pt_phys = rk_dte_pt_address_v2(dte);
37881+	return (u32 *)phys_to_virt(pt_phys);
37882+}
37883+
37884 static size_t rk_iommu_unmap_iova(struct rk_iommu_domain *rk_domain,
37885 				  u32 *pte_addr, dma_addr_t pte_dma,
37886 				  size_t size)
37887@@ -741,7 +991,9 @@ static int rk_iommu_map_iova(struct rk_iommu_domain *rk_domain, u32 *pte_addr,
37888 	 * We only zap the first and last iova, since only they could have
37889 	 * dte or pte shared with an existing mapping.
37890 	 */
37891-	rk_iommu_zap_iova_first_last(rk_domain, iova, size);
37892+	/* Do not zap the tlb cache line if shootdown_entire is set */
37893+	if (!rk_domain->shootdown_entire)
37894+		rk_iommu_zap_iova_first_last(rk_domain, iova, size);
37895 
37896 	return 0;
37897 unwind:
37898@@ -757,6 +1009,53 @@ static int rk_iommu_map_iova(struct rk_iommu_domain *rk_domain, u32 *pte_addr,
37899 	return -EADDRINUSE;
37900 }
37901 
37902+static int rk_iommu_map_iova_v2(struct rk_iommu_domain *rk_domain, u32 *pte_addr,
37903+				dma_addr_t pte_dma, dma_addr_t iova,
37904+				phys_addr_t paddr, size_t size, int prot)
37905+{
37906+	unsigned int pte_count;
37907+	unsigned int pte_total = size / SPAGE_SIZE;
37908+	phys_addr_t page_phys;
37909+
37910+	assert_spin_locked(&rk_domain->dt_lock);
37911+
37912+	for (pte_count = 0; pte_count < pte_total; pte_count++) {
37913+		u32 pte = pte_addr[pte_count];
37914+
37915+		if (rk_pte_is_page_valid(pte))
37916+			goto unwind;
37917+
37918+		pte_addr[pte_count] = rk_mk_pte_v2(paddr, prot);
37919+
37920+		paddr += SPAGE_SIZE;
37921+	}
37922+
37923+	rk_table_flush(rk_domain, pte_dma, pte_total);
37924+
37925+	/*
37926+	 * Zap the first and last iova to evict from iotlb any previously
37927+	 * mapped cachelines holding stale values for its dte and pte.
37928+	 * We only zap the first and last iova, since only they could have
37929+	 * dte or pte shared with an existing mapping.
37930+	 */
37931+	/* Do not zap the tlb cache line if shootdown_entire is set */
37932+	if (!rk_domain->shootdown_entire)
37933+		rk_iommu_zap_iova_first_last(rk_domain, iova, size);
37934+
37935+	return 0;
37936+unwind:
37937+	/* Unmap the range of iovas that we just mapped */
37938+	rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma,
37939+			    pte_count * SPAGE_SIZE);
37940+
37941+	iova += pte_count * SPAGE_SIZE;
37942+	page_phys = rk_pte_page_address_v2(pte_addr[pte_count]);
37943+	pr_err("iova: %pad already mapped to %pa cannot remap to phys: %pa prot: %#x\n",
37944+	       &iova, &page_phys, &paddr, prot);
37945+
37946+	return -EADDRINUSE;
37947+}
37948+
37949 static int rk_iommu_map(struct iommu_domain *domain, unsigned long _iova,
37950 			phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
37951 {
37952@@ -764,7 +1063,7 @@ static int rk_iommu_map(struct iommu_domain *domain, unsigned long _iova,
37953 	unsigned long flags;
37954 	dma_addr_t pte_dma, iova = (dma_addr_t)_iova;
37955 	u32 *page_table, *pte_addr;
37956-	u32 dte_index, pte_index;
37957+	u32 dte, pte_index;
37958 	int ret;
37959 
37960 	spin_lock_irqsave(&rk_domain->dt_lock, flags);
37961@@ -782,10 +1081,10 @@ static int rk_iommu_map(struct iommu_domain *domain, unsigned long _iova,
37962 		return PTR_ERR(page_table);
37963 	}
37964 
37965-	dte_index = rk_domain->dt[rk_iova_dte_index(iova)];
37966+	dte = rk_domain->dt[rk_iova_dte_index(iova)];
37967 	pte_index = rk_iova_pte_index(iova);
37968 	pte_addr = &page_table[pte_index];
37969-	pte_dma = rk_dte_pt_address(dte_index) + pte_index * sizeof(u32);
37970+	pte_dma = rk_dte_pt_address(dte) + pte_index * sizeof(u32);
37971 	ret = rk_iommu_map_iova(rk_domain, pte_addr, pte_dma, iova,
37972 				paddr, size, prot);
37973 
37974@@ -794,6 +1093,43 @@ static int rk_iommu_map(struct iommu_domain *domain, unsigned long _iova,
37975 	return ret;
37976 }
37977 
37978+static int rk_iommu_map_v2(struct iommu_domain *domain, unsigned long _iova,
37979+			phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
37980+{
37981+	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
37982+	unsigned long flags;
37983+	dma_addr_t pte_dma, iova = (dma_addr_t)_iova;
37984+	u32 *page_table, *pte_addr;
37985+	u32 dte, pte_index;
37986+	int ret;
37987+
37988+	spin_lock_irqsave(&rk_domain->dt_lock, flags);
37989+
37990+	/*
37991+	 * pgsize_bitmap specifies iova sizes that fit in one page table
37992+	 * (1024 4-KiB pages = 4 MiB).
37993+	 * So, size will always be 4096 <= size <= 4194304.
37994+	 * Since iommu_map() guarantees that both iova and size will be
37995+	 * aligned, we will always only be mapping from a single dte here.
37996+	 */
37997+	page_table = rk_dte_get_page_table_v2(rk_domain, iova);
37998+	if (IS_ERR(page_table)) {
37999+		spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
38000+		return PTR_ERR(page_table);
38001+	}
38002+
38003+	dte = rk_domain->dt[rk_iova_dte_index(iova)];
38004+	pte_index = rk_iova_pte_index(iova);
38005+	pte_addr = &page_table[pte_index];
38006+	pte_dma = rk_dte_pt_address_v2(dte) + pte_index * sizeof(u32);
38007+	ret = rk_iommu_map_iova_v2(rk_domain, pte_addr, pte_dma, iova,
38008+				   paddr, size, prot);
38009+
38010+	spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
38011+
38012+	return ret;
38013+}
38014+
38015 static size_t rk_iommu_unmap(struct iommu_domain *domain, unsigned long _iova,
38016 			     size_t size, struct iommu_iotlb_gather *gather)
38017 {
38018@@ -834,6 +1170,77 @@ static size_t rk_iommu_unmap(struct iommu_domain *domain, unsigned long _iova,
38019 	return unmap_size;
38020 }
38021 
38022+static size_t rk_iommu_unmap_v2(struct iommu_domain *domain, unsigned long _iova,
38023+				size_t size, struct iommu_iotlb_gather *gather)
38024+{
38025+	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
38026+	unsigned long flags;
38027+	dma_addr_t pte_dma, iova = (dma_addr_t)_iova;
38028+	phys_addr_t pt_phys;
38029+	u32 dte;
38030+	u32 *pte_addr;
38031+	size_t unmap_size;
38032+
38033+	spin_lock_irqsave(&rk_domain->dt_lock, flags);
38034+
38035+	/*
38036+	 * pgsize_bitmap specifies iova sizes that fit in one page table
38037+	 * (1024 4-KiB pages = 4 MiB).
38038+	 * So, size will always be 4096 <= size <= 4194304.
38039+	 * Since iommu_unmap() guarantees that both iova and size will be
38040+	 * aligned, we will always only be unmapping from a single dte here.
38041+	 */
38042+	dte = rk_domain->dt[rk_iova_dte_index(iova)];
38043+	/* Just return 0 if iova is unmapped */
38044+	if (!rk_dte_is_pt_valid(dte)) {
38045+		spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
38046+		return 0;
38047+	}
38048+
38049+	pt_phys = rk_dte_pt_address_v2(dte);
38050+	pte_addr = (u32 *)phys_to_virt(pt_phys) + rk_iova_pte_index(iova);
38051+	pte_dma = pt_phys + rk_iova_pte_index(iova) * sizeof(u32);
38052+	unmap_size = rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma, size);
38053+
38054+	spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
38055+
38056+	/* Shootdown iotlb entries for iova range that was just unmapped */
38057+	/* Do not zap the tlb cache line if shootdown_entire is set */
38058+	if (!rk_domain->shootdown_entire)
38059+		rk_iommu_zap_iova(rk_domain, iova, unmap_size);
38060+
38061+	return unmap_size;
38062+}
38063+
38064+static void rk_iommu_flush_tlb_all(struct iommu_domain *domain)
38065+{
38066+	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
38067+	struct list_head *pos;
38068+	unsigned long flags;
38069+	int i;
38070+
38071+	spin_lock_irqsave(&rk_domain->iommus_lock, flags);
38072+	list_for_each(pos, &rk_domain->iommus) {
38073+		struct rk_iommu *iommu;
38074+		int ret;
38075+
38076+		iommu = list_entry(pos, struct rk_iommu, node);
38077+
38078+		ret = pm_runtime_get_if_in_use(iommu->dev);
38079+		if (WARN_ON_ONCE(ret < 0))
38080+			continue;
38081+		if (ret) {
38082+			WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks));
38083+			for (i = 0; i < iommu->num_mmu; i++)
38084+				rk_iommu_write(iommu->bases[i], RK_MMU_COMMAND,
38085+					       RK_MMU_CMD_ZAP_CACHE);
38086+			clk_bulk_disable(iommu->num_clocks, iommu->clocks);
38087+			pm_runtime_put(iommu->dev);
38088+		}
38089+	}
38090+	spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
38091+}
38092+
38093 static struct rk_iommu *rk_iommu_from_dev(struct device *dev)
38094 {
38095 	struct rk_iommudata *data = dev_iommu_priv_get(dev);
38096@@ -858,12 +1265,28 @@ static void rk_iommu_disable(struct rk_iommu *iommu)
38097 	clk_bulk_disable(iommu->num_clocks, iommu->clocks);
38098 }
38099 
38100+int rockchip_iommu_disable(struct device *dev)
38101+{
38102+	struct rk_iommu *iommu;
38103+
38104+	iommu = rk_iommu_from_dev(dev);
38105+	if (!iommu)
38106+		return -ENODEV;
38107+
38108+	rk_iommu_disable(iommu);
38109+
38110+	return 0;
38111+}
38112+EXPORT_SYMBOL(rockchip_iommu_disable);
38113+
38114 /* Must be called with iommu powered on and attached */
38115 static int rk_iommu_enable(struct rk_iommu *iommu)
38116 {
38117 	struct iommu_domain *domain = iommu->domain;
38118 	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
38119 	int ret, i;
38120+	u32 dt_v2;
38121+	u32 auto_gate;
38122 
38123 	ret = clk_bulk_enable(iommu->num_clocks, iommu->clocks);
38124 	if (ret)
38125@@ -878,10 +1301,21 @@ static int rk_iommu_enable(struct rk_iommu *iommu)
38126 		goto out_disable_stall;
38127 
38128 	for (i = 0; i < iommu->num_mmu; i++) {
38129-		rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR,
38130-			       rk_domain->dt_dma);
38131+		if (iommu->version >= 0x2) {
38132+			dt_v2 = (rk_domain->dt_dma & DT_LO_MASK) |
38133+				((rk_domain->dt_dma & DT_HI_MASK) >> DT_SHIFT);
38134+			rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, dt_v2);
38135+		} else {
38136+			rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR,
38137+				       rk_domain->dt_dma);
38138+		}
38139 		rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
38140 		rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, RK_MMU_IRQ_MASK);
38141+
38142+		/* Workaround for the iommu getting blocked: force BIT(31) to 1 */
38143+		auto_gate = rk_iommu_read(iommu->bases[i], RK_MMU_AUTO_GATING);
38144+		auto_gate |= DISABLE_FETCH_DTE_TIME_LIMIT;
38145+		rk_iommu_write(iommu->bases[i], RK_MMU_AUTO_GATING, auto_gate);
38146 	}
38147 
38148 	ret = rk_iommu_enable_paging(iommu);
38149@@ -893,6 +1327,18 @@ static int rk_iommu_enable(struct rk_iommu *iommu)
38150 	return ret;
38151 }
38152 
38153+int rockchip_iommu_enable(struct device *dev)
38154+{
38155+	struct rk_iommu *iommu;
38156+
38157+	iommu = rk_iommu_from_dev(dev);
38158+	if (!iommu)
38159+		return -ENODEV;
38160+
38161+	return rk_iommu_enable(iommu);
38162+}
38163+EXPORT_SYMBOL(rockchip_iommu_enable);
38164+
38165 static void rk_iommu_detach_device(struct iommu_domain *domain,
38166 				   struct device *dev)
38167 {
38168@@ -908,8 +1354,7 @@ static void rk_iommu_detach_device(struct iommu_domain *domain,
38169 
38170 	dev_dbg(dev, "Detaching from iommu domain\n");
38171 
38172-	/* iommu already detached */
38173-	if (iommu->domain != domain)
38174+	if (!iommu->domain)
38175 		return;
38176 
38177 	iommu->domain = NULL;
38178@@ -944,19 +1389,20 @@ static int rk_iommu_attach_device(struct iommu_domain *domain,
38179 
38180 	dev_dbg(dev, "Attaching to iommu domain\n");
38181 
38182-	/* iommu already attached */
38183-	if (iommu->domain == domain)
38184-		return 0;
38185-
38186 	if (iommu->domain)
38187 		rk_iommu_detach_device(iommu->domain, dev);
38188 
38189 	iommu->domain = domain;
38190 
38191+	/* Attaching a NULL domain disables the iommu */
38192+	if (!domain)
38193+		return 0;
38194+
38195 	spin_lock_irqsave(&rk_domain->iommus_lock, flags);
38196 	list_add_tail(&iommu->node, &rk_domain->iommus);
38197 	spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
38198 
38199+	rk_domain->shootdown_entire = iommu->shootdown_entire;
38200 	ret = pm_runtime_get_if_in_use(iommu->dev);
38201 	if (!ret || WARN_ON_ONCE(ret < 0))
38202 		return 0;
38203@@ -1054,6 +1500,35 @@ static void rk_iommu_domain_free(struct iommu_domain *domain)
38204 	kfree(rk_domain);
38205 }
38206 
38207+static void rk_iommu_domain_free_v2(struct iommu_domain *domain)
38208+{
38209+	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
38210+	int i;
38211+
38212+	WARN_ON(!list_empty(&rk_domain->iommus));
38213+
38214+	for (i = 0; i < NUM_DT_ENTRIES; i++) {
38215+		u32 dte = rk_domain->dt[i];
38216+
38217+		if (rk_dte_is_pt_valid(dte)) {
38218+			phys_addr_t pt_phys = rk_dte_pt_address_v2(dte);
38219+			u32 *page_table = phys_to_virt(pt_phys);
38220+
38221+			dma_unmap_single(dma_dev, pt_phys,
38222+					 SPAGE_SIZE, DMA_TO_DEVICE);
38223+			free_page((unsigned long)page_table);
38224+		}
38225+	}
38226+
38227+	dma_unmap_single(dma_dev, rk_domain->dt_dma,
38228+			 SPAGE_SIZE, DMA_TO_DEVICE);
38229+	free_page((unsigned long)rk_domain->dt);
38230+
38231+	if (domain->type == IOMMU_DOMAIN_DMA)
38232+		iommu_put_dma_cookie(&rk_domain->domain);
38233+	kfree(rk_domain);
38234+}
38235+
38236 static struct iommu_device *rk_iommu_probe_device(struct device *dev)
38237 {
38238 	struct rk_iommudata *data;
38239@@ -1068,6 +1543,16 @@ static struct iommu_device *rk_iommu_probe_device(struct device *dev)
38240 	data->link = device_link_add(dev, iommu->dev,
38241 				     DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME);
38242 
38243+	data->defer_attach = false;
38244+
38245+	/* set max segment size for dev, needed for single chunk map */
38246+	if (!dev->dma_parms)
38247+		dev->dma_parms = kzalloc(sizeof(*dev->dma_parms), GFP_KERNEL);
38248+	if (!dev->dma_parms)
38249+		return ERR_PTR(-ENOMEM);
38250+
38251+	dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
38252+
38253 	return &iommu->iommu;
38254 }
38255 
38256@@ -1087,6 +1572,14 @@ static struct iommu_group *rk_iommu_device_group(struct device *dev)
38257 	return iommu_group_ref_get(iommu->group);
38258 }
38259 
38260+static bool rk_iommu_is_attach_deferred(struct iommu_domain *domain,
38261+					struct device *dev)
38262+{
38263+	struct rk_iommudata *data = dev_iommu_priv_get(dev);
38264+
38265+	return data->defer_attach;
38266+}
38267+
38268 static int rk_iommu_of_xlate(struct device *dev,
38269 			     struct of_phandle_args *args)
38270 {
38271@@ -1100,6 +1593,10 @@ static int rk_iommu_of_xlate(struct device *dev,
38272 	iommu_dev = of_find_device_by_node(args->np);
38273 
38274 	data->iommu = platform_get_drvdata(iommu_dev);
38275+
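+	/*
+	 * Defer the attach for VOP display controllers, presumably so a
+	 * framebuffer mapping left active by the bootloader is not torn
+	 * down before the display driver takes over.
+	 */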
38276+	if (strstr(dev_name(dev), "vop"))
38277+		data->defer_attach = true;
38278+
38279 	dev_iommu_priv_set(dev, data);
38280 
38281 	platform_device_put(iommu_dev);
38282@@ -1107,21 +1604,90 @@ static int rk_iommu_of_xlate(struct device *dev,
38283 	return 0;
38284 }
38285 
38286-static const struct iommu_ops rk_iommu_ops = {
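+/*
+ * Exported helpers for master drivers that handle page faults themselves:
+ * rk_iommu_mask_irq() silences the MMU interrupt while the fault is being
+ * resolved, and rk_iommu_unmask_irq() zaps the TLB, re-arms the interrupt
+ * and completes the pending page fault.
+ */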
38287+void rk_iommu_mask_irq(struct device *dev)
38288+{
38289+	struct rk_iommu *iommu = rk_iommu_from_dev(dev);
38290+	int i;
38291+
38292+	if (!iommu)
38293+		return;
38294+
38295+	for (i = 0; i < iommu->num_mmu; i++)
38296+		rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, 0);
38297+}
38298+EXPORT_SYMBOL(rk_iommu_mask_irq);
38299+
38300+void rk_iommu_unmask_irq(struct device *dev)
38301+{
38302+	struct rk_iommu *iommu = rk_iommu_from_dev(dev);
38303+	int i;
38304+
38305+	if (!iommu)
38306+		return;
38307+
38308+	for (i = 0; i < iommu->num_mmu; i++) {
38309+		/* Zap the TLB in case a mapping was added during the page fault */
38310+		rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
38311+		rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, RK_MMU_IRQ_MASK);
38312+		/* Leave the iommu in page-fault state until the mapping is finished */
38313+		rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_PAGE_FAULT_DONE);
38314+	}
38315+}
38316+EXPORT_SYMBOL(rk_iommu_unmask_irq);
38317+
38318+static struct iommu_ops rk_iommu_ops = {
38319 	.domain_alloc = rk_iommu_domain_alloc,
38320 	.domain_free = rk_iommu_domain_free,
38321 	.attach_dev = rk_iommu_attach_device,
38322 	.detach_dev = rk_iommu_detach_device,
38323 	.map = rk_iommu_map,
38324 	.unmap = rk_iommu_unmap,
38325+	.flush_iotlb_all = rk_iommu_flush_tlb_all,
38326 	.probe_device = rk_iommu_probe_device,
38327 	.release_device = rk_iommu_release_device,
38328 	.iova_to_phys = rk_iommu_iova_to_phys,
38329+	.is_attach_deferred = rk_iommu_is_attach_deferred,
38330 	.device_group = rk_iommu_device_group,
38331 	.pgsize_bitmap = RK_IOMMU_PGSIZE_BITMAP,
38332 	.of_xlate = rk_iommu_of_xlate,
38333 };
38334 
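+/*
+ * The v2 ops differ from v1 only in the callbacks that touch the page
+ * tables (map, unmap, iova_to_phys, domain_free), which use the wider v2
+ * DTE/PTE address layout; everything else is shared with v1.
+ */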
38335+static struct iommu_ops rk_iommu_ops_v2 = {
38336+	.domain_alloc = rk_iommu_domain_alloc,
38337+	.domain_free = rk_iommu_domain_free_v2,
38338+	.attach_dev = rk_iommu_attach_device,
38339+	.detach_dev = rk_iommu_detach_device,
38340+	.map = rk_iommu_map_v2,
38341+	.unmap = rk_iommu_unmap_v2,
38342+	.flush_iotlb_all = rk_iommu_flush_tlb_all,
38343+	.probe_device = rk_iommu_probe_device,
38344+	.release_device = rk_iommu_release_device,
38345+	.iova_to_phys = rk_iommu_iova_to_phys_v2,
38346+	.is_attach_deferred = rk_iommu_is_attach_deferred,
38347+	.device_group = rk_iommu_device_group,
38348+	.pgsize_bitmap = RK_IOMMU_PGSIZE_BITMAP,
38349+	.of_xlate = rk_iommu_of_xlate,
38350+};
38351+
38352+static const struct rockchip_iommu_data iommu_data_v1 = {
38353+	.version = 0x1,
38354+};
38355+
38356+static const struct rockchip_iommu_data iommu_data_v2 = {
38357+	.version = 0x2,
38358+};
38359+
38360+static const struct of_device_id rk_iommu_dt_ids[] = {
38361+	{
38362+		.compatible = "rockchip,iommu",
38363+		.data = &iommu_data_v1,
38364+	}, {
38365+		.compatible = "rockchip,iommu-v2",
38366+		.data = &iommu_data_v2,
38367+	},
38368+	{ /* sentinel */ }
38369+};
38370+
38371 static int rk_iommu_probe(struct platform_device *pdev)
38372 {
38373 	struct device *dev = &pdev->dev;
38374@@ -1129,11 +1695,21 @@ static int rk_iommu_probe(struct platform_device *pdev)
38375 	struct resource *res;
38376 	int num_res = pdev->num_resources;
38377 	int err, i;
38378+	const struct of_device_id *match;
38379+	struct rockchip_iommu_data *data;
38380 
38381 	iommu = devm_kzalloc(dev, sizeof(*iommu), GFP_KERNEL);
38382 	if (!iommu)
38383 		return -ENOMEM;
38384 
38385+	match = of_match_device(rk_iommu_dt_ids, dev);
38386+	if (!match)
38387+		return -EINVAL;
38388+
38389+	data = (struct rockchip_iommu_data *)match->data;
38390+	iommu->version = data->version;
38391+	dev_info(dev, "version = %x\n", iommu->version);
38392+
38393 	platform_set_drvdata(pdev, iommu);
38394 	iommu->dev = dev;
38395 	iommu->num_mmu = 0;
38396@@ -1161,26 +1737,30 @@ static int rk_iommu_probe(struct platform_device *pdev)
38397 
38398 	iommu->reset_disabled = device_property_read_bool(dev,
38399 					"rockchip,disable-mmu-reset");
38400-
38401-	iommu->num_clocks = ARRAY_SIZE(rk_iommu_clocks);
38402-	iommu->clocks = devm_kcalloc(iommu->dev, iommu->num_clocks,
38403-				     sizeof(*iommu->clocks), GFP_KERNEL);
38404-	if (!iommu->clocks)
38405-		return -ENOMEM;
38406-
38407-	for (i = 0; i < iommu->num_clocks; ++i)
38408-		iommu->clocks[i].id = rk_iommu_clocks[i];
38409+	iommu->skip_read = device_property_read_bool(dev,
38410+					"rockchip,skip-mmu-read");
38411+	iommu->dlr_disable = device_property_read_bool(dev,
38412+					"rockchip,disable-device-link-resume");
38413+	iommu->shootdown_entire = device_property_read_bool(dev,
38414+					"rockchip,shootdown-entire");
38415+
38416+	if (of_machine_is_compatible("rockchip,rv1126") ||
38417+	    of_machine_is_compatible("rockchip,rv1109"))
38418+		iommu->cmd_retry = device_property_read_bool(dev,
38419+					"rockchip,enable-cmd-retry");
38420 
38421 	/*
38422 	 * iommu clocks should be present for all new devices and devicetrees
38423 	 * but there are older devicetrees without clocks out in the wild.
38424 	 * So clocks as optional for the time being.
38425 	 */
38426-	err = devm_clk_bulk_get(iommu->dev, iommu->num_clocks, iommu->clocks);
38427+	err = devm_clk_bulk_get_all(dev, &iommu->clocks);
38428 	if (err == -ENOENT)
38429 		iommu->num_clocks = 0;
38430-	else if (err)
38431+	else if (err < 0)
38432 		return err;
38433+	else
38434+		iommu->num_clocks = err;
38435 
38436 	err = clk_bulk_prepare(iommu->num_clocks, iommu->clocks);
38437 	if (err)
38438@@ -1196,7 +1776,10 @@ static int rk_iommu_probe(struct platform_device *pdev)
38439 	if (err)
38440 		goto err_put_group;
38441 
38442-	iommu_device_set_ops(&iommu->iommu, &rk_iommu_ops);
38443+	if (iommu->version >= 0x2)
38444+		iommu_device_set_ops(&iommu->iommu, &rk_iommu_ops_v2);
38445+	else
38446+		iommu_device_set_ops(&iommu->iommu, &rk_iommu_ops);
38447 	iommu_device_set_fwnode(&iommu->iommu, &dev->of_node->fwnode);
38448 
38449 	err = iommu_device_register(&iommu->iommu);
38450@@ -1211,10 +1794,16 @@ static int rk_iommu_probe(struct platform_device *pdev)
38451 	if (!dma_dev)
38452 		dma_dev = &pdev->dev;
38453 
38454-	bus_set_iommu(&platform_bus_type, &rk_iommu_ops);
38455+	if (iommu->version >= 0x2)
38456+		bus_set_iommu(&platform_bus_type, &rk_iommu_ops_v2);
38457+	else
38458+		bus_set_iommu(&platform_bus_type, &rk_iommu_ops);
38459 
38460 	pm_runtime_enable(dev);
38461 
38462+	if (iommu->skip_read)
38463+		goto skip_request_irq;
38464+
38465 	for (i = 0; i < iommu->num_irq; i++) {
38466 		int irq = platform_get_irq(pdev, i);
38467 
38468@@ -1229,6 +1818,7 @@ static int rk_iommu_probe(struct platform_device *pdev)
38469 		}
38470 	}
38471 
38472+skip_request_irq:
38473 	return 0;
38474 err_remove_sysfs:
38475 	iommu_device_sysfs_remove(&iommu->iommu);
38476@@ -1260,6 +1850,9 @@ static int __maybe_unused rk_iommu_suspend(struct device *dev)
38477 	if (!iommu->domain)
38478 		return 0;
38479 
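+	/* "rockchip,disable-device-link-resume": skip disabling/re-enabling the IOMMU across system PM. */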
38480+	if (iommu->dlr_disable)
38481+		return 0;
38482+
38483 	rk_iommu_disable(iommu);
38484 	return 0;
38485 }
38486@@ -1271,6 +1864,9 @@ static int __maybe_unused rk_iommu_resume(struct device *dev)
38487 	if (!iommu->domain)
38488 		return 0;
38489 
38490+	if (iommu->dlr_disable)
38491+		return 0;
38492+
38493 	return rk_iommu_enable(iommu);
38494 }
38495 
38496@@ -1280,11 +1876,6 @@ static const struct dev_pm_ops rk_iommu_pm_ops = {
38497 				pm_runtime_force_resume)
38498 };
38499 
38500-static const struct of_device_id rk_iommu_dt_ids[] = {
38501-	{ .compatible = "rockchip,iommu" },
38502-	{ /* sentinel */ }
38503-};
38504-
38505 static struct platform_driver rk_iommu_driver = {
38506 	.probe = rk_iommu_probe,
38507 	.shutdown = rk_iommu_shutdown,
38508@@ -1301,3 +1892,8 @@ static int __init rk_iommu_init(void)
38509 	return platform_driver_register(&rk_iommu_driver);
38510 }
38511 subsys_initcall(rk_iommu_init);
38512+
38513+MODULE_DESCRIPTION("IOMMU API for Rockchip");
38514+MODULE_AUTHOR("Simon Xue <xxm@rock-chips.com> and Daniel Kurtz <djkurtz@chromium.org>");
38515+MODULE_ALIAS("platform:rockchip-iommu");
38516+MODULE_LICENSE("GPL v2");
38517diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
38518index d8cb5bcd6..ed18e141e 100644
38519--- a/drivers/irqchip/irq-gic-v3-its.c
38520+++ b/drivers/irqchip/irq-gic-v3-its.c
38521@@ -2167,6 +2167,8 @@ static struct page *its_allocate_prop_table(gfp_t gfp_flags)
38522 {
38523 	struct page *prop_page;
38524 
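+	/*
+	 * RK3568/RK3566 appear to only support 32-bit addressing for the
+	 * ITS tables, so force the allocation below 4GiB on those SoCs.
+	 */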
38525+	if (of_machine_is_compatible("rockchip,rk3568") || of_machine_is_compatible("rockchip,rk3566"))
38526+		gfp_flags |= GFP_DMA32;
38527 	prop_page = alloc_pages(gfp_flags, get_order(LPI_PROPBASE_SZ));
38528 	if (!prop_page)
38529 		return NULL;
38530@@ -2290,6 +2292,7 @@ static int its_setup_baser(struct its_node *its, struct its_baser *baser,
38531 	u32 alloc_pages, psz;
38532 	struct page *page;
38533 	void *base;
38534+	gfp_t gfp_flags;
38535 
38536 	psz = baser->psz;
38537 	alloc_pages = (PAGE_ORDER_TO_SIZE(order) / psz);
38538@@ -2301,7 +2304,10 @@ static int its_setup_baser(struct its_node *its, struct its_baser *baser,
38539 		order = get_order(GITS_BASER_PAGES_MAX * psz);
38540 	}
38541 
38542-	page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, order);
38543+	gfp_flags = GFP_KERNEL | __GFP_ZERO;
38544+	if (of_machine_is_compatible("rockchip,rk3568") || of_machine_is_compatible("rockchip,rk3566"))
38545+		gfp_flags |= GFP_DMA32;
38546+	page = alloc_pages_node(its->numa_node, gfp_flags, order);
38547 	if (!page)
38548 		return -ENOMEM;
38549 
38550@@ -2348,6 +2354,16 @@ static int its_setup_baser(struct its_node *its, struct its_baser *baser,
38551 	its_write_baser(its, baser, val);
38552 	tmp = baser->val;
38553 
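+	/*
+	 * These Rockchip SoCs appear to lack a cache-coherent ITS: force the
+	 * non-shareable fallback below, or, if the table already reads back
+	 * as non-shareable, clean it to the point of coherency by hand.
+	 */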
38554+	if (IS_ENABLED(CONFIG_NO_GKI) &&
38555+	    (of_machine_is_compatible("rockchip,rk3568") ||
38556+	     of_machine_is_compatible("rockchip,rk3566") ||
38557+	     of_machine_is_compatible("rockchip,rk3588"))) {
38558+		if (tmp & GITS_BASER_SHAREABILITY_MASK)
38559+			tmp &= ~GITS_BASER_SHAREABILITY_MASK;
38560+		else
38561+			gic_flush_dcache_to_poc(base, PAGE_ORDER_TO_SIZE(order));
38562+	}
38563+
38564 	if ((val ^ tmp) & GITS_BASER_SHAREABILITY_MASK) {
38565 		/*
38566 		 * Shareability didn't stick. Just use
38567@@ -2930,6 +2946,8 @@ static struct page *its_allocate_pending_table(gfp_t gfp_flags)
38568 {
38569 	struct page *pend_page;
38570 
38571+	if (of_machine_is_compatible("rockchip,rk3568") || of_machine_is_compatible("rockchip,rk3566"))
38572+		gfp_flags |= GFP_DMA32;
38573 	pend_page = alloc_pages(gfp_flags | __GFP_ZERO,
38574 				get_order(LPI_PENDBASE_SZ));
38575 	if (!pend_page)
38576@@ -3077,6 +3095,12 @@ static void its_cpu_init_lpis(void)
38577 	gicr_write_propbaser(val, rbase + GICR_PROPBASER);
38578 	tmp = gicr_read_propbaser(rbase + GICR_PROPBASER);
38579 
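+	/* Same non-coherent GIC workaround as in its_setup_baser() above. */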
38580+	if (IS_ENABLED(CONFIG_NO_GKI) &&
38581+	    (of_machine_is_compatible("rockchip,rk3568") ||
38582+	     of_machine_is_compatible("rockchip,rk3566") ||
38583+	     of_machine_is_compatible("rockchip,rk3588")))
38584+		tmp &= ~GICR_PROPBASER_SHAREABILITY_MASK;
38585+
38586 	if ((tmp ^ val) & GICR_PROPBASER_SHAREABILITY_MASK) {
38587 		if (!(tmp & GICR_PROPBASER_SHAREABILITY_MASK)) {
38588 			/*
38589@@ -3101,6 +3125,12 @@ static void its_cpu_init_lpis(void)
38590 	gicr_write_pendbaser(val, rbase + GICR_PENDBASER);
38591 	tmp = gicr_read_pendbaser(rbase + GICR_PENDBASER);
38592 
38593+	if (IS_ENABLED(CONFIG_NO_GKI) &&
38594+	    (of_machine_is_compatible("rockchip,rk3568") ||
38595+	     of_machine_is_compatible("rockchip,rk3566") ||
38596+	     of_machine_is_compatible("rockchip,rk3588")))
38597+		tmp &= ~GICR_PENDBASER_SHAREABILITY_MASK;
38598+
38599 	if (!(tmp & GICR_PENDBASER_SHAREABILITY_MASK)) {
38600 		/*
38601 		 * The HW reports non-shareable, we must remove the
38602@@ -3263,7 +3293,11 @@ static bool its_alloc_table_entry(struct its_node *its,
38603 
38604 	/* Allocate memory for 2nd level table */
38605 	if (!table[idx]) {
38606-		page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO,
38607+		gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO;
38608+
38609+		if (of_machine_is_compatible("rockchip,rk3568") || of_machine_is_compatible("rockchip,rk3566"))
38610+			gfp_flags |= GFP_DMA32;
38611+		page = alloc_pages_node(its->numa_node, gfp_flags,
38612 					get_order(baser->psz));
38613 		if (!page)
38614 			return false;
38615@@ -3352,6 +3386,7 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
38616 	int nr_lpis;
38617 	int nr_ites;
38618 	int sz;
38619+	gfp_t gfp_flags;
38620 
38621 	if (!its_alloc_device_table(its, dev_id))
38622 		return NULL;
38623@@ -3367,7 +3402,10 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
38624 	nr_ites = max(2, nvecs);
38625 	sz = nr_ites * (FIELD_GET(GITS_TYPER_ITT_ENTRY_SIZE, its->typer) + 1);
38626 	sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
38627-	itt = kzalloc_node(sz, GFP_KERNEL, its->numa_node);
38628+	gfp_flags = GFP_KERNEL;
38629+	if (of_machine_is_compatible("rockchip,rk3568") || of_machine_is_compatible("rockchip,rk3566"))
38630+		gfp_flags |= GFP_DMA32;
38631+	itt = kzalloc_node(sz, gfp_flags, its->numa_node);
38632 	if (alloc_lpis) {
38633 		lpi_map = its_lpi_alloc(nvecs, &lpi_base, &nr_lpis);
38634 		if (lpi_map)
38635@@ -3841,8 +3879,6 @@ static void its_vpe_schedule(struct its_vpe *vpe)
38636 	val |= vpe->idai ? GICR_VPENDBASER_IDAI : 0;
38637 	val |= GICR_VPENDBASER_Valid;
38638 	gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
38639-
38640-	its_wait_vpt_parse_complete();
38641 }
38642 
38643 static void its_vpe_deschedule(struct its_vpe *vpe)
38644@@ -3890,6 +3926,10 @@ static int its_vpe_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
38645 		its_vpe_deschedule(vpe);
38646 		return 0;
38647 
38648+	case COMMIT_VPE:
38649+		its_wait_vpt_parse_complete();
38650+		return 0;
38651+
38652 	case INVALL_VPE:
38653 		its_vpe_invall(vpe);
38654 		return 0;
38655@@ -4051,8 +4091,6 @@ static void its_vpe_4_1_schedule(struct its_vpe *vpe,
38656 	val |= FIELD_PREP(GICR_VPENDBASER_4_1_VPEID, vpe->vpe_id);
38657 
38658 	gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
38659-
38660-	its_wait_vpt_parse_complete();
38661 }
38662 
38663 static void its_vpe_4_1_deschedule(struct its_vpe *vpe,
38664@@ -4127,6 +4165,10 @@ static int its_vpe_4_1_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
38665 		its_vpe_4_1_deschedule(vpe, info);
38666 		return 0;
38667 
38668+	case COMMIT_VPE:
38669+		its_wait_vpt_parse_complete();
38670+		return 0;
38671+
38672 	case INVALL_VPE:
38673 		its_vpe_4_1_invall(vpe);
38674 		return 0;
38675@@ -4489,7 +4531,7 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq
38676 
38677 	if (err) {
38678 		if (i > 0)
38679-			its_vpe_irq_domain_free(domain, virq, i);
38680+			its_vpe_irq_domain_free(domain, virq, i - 1);
38681 
38682 		its_lpi_free(bitmap, base, nr_ids);
38683 		its_free_prop_table(vprop_page);
38684@@ -4945,6 +4987,7 @@ static int __init its_probe_one(struct resource *res,
38685 	u64 baser, tmp, typer;
38686 	struct page *page;
38687 	int err;
38688+	gfp_t gfp_flags;
38689 
38690 	its_base = ioremap(res->start, SZ_64K);
38691 	if (!its_base) {
38692@@ -5013,7 +5056,10 @@ static int __init its_probe_one(struct resource *res,
38693 
38694 	its->numa_node = numa_node;
38695 
38696-	page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO,
38697+	gfp_flags = GFP_KERNEL | __GFP_ZERO;
38698+	if (of_machine_is_compatible("rockchip,rk3568") || of_machine_is_compatible("rockchip,rk3566"))
38699+		gfp_flags |= GFP_DMA32;
38700+	page = alloc_pages_node(its->numa_node, gfp_flags,
38701 				get_order(ITS_CMD_QUEUE_SZ));
38702 	if (!page) {
38703 		err = -ENOMEM;
38704@@ -5044,6 +5090,12 @@ static int __init its_probe_one(struct resource *res,
38705 	gits_write_cbaser(baser, its->base + GITS_CBASER);
38706 	tmp = gits_read_cbaser(its->base + GITS_CBASER);
38707 
38708+	if (IS_ENABLED(CONFIG_NO_GKI) &&
38709+	    (of_machine_is_compatible("rockchip,rk3568") ||
38710+	     of_machine_is_compatible("rockchip,rk3566") ||
38711+	     of_machine_is_compatible("rockchip,rk3588")))
38712+		tmp &= ~GITS_CBASER_SHAREABILITY_MASK;
38713+
38714 	if ((tmp ^ baser) & GITS_CBASER_SHAREABILITY_MASK) {
38715 		if (!(tmp & GITS_CBASER_SHAREABILITY_MASK)) {
38716 			/*
38717diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
38718index 4c8f18f0c..bafdbb18e 100644
38719--- a/drivers/irqchip/irq-gic-v3.c
38720+++ b/drivers/irqchip/irq-gic-v3.c
38721@@ -18,6 +18,8 @@
38722 #include <linux/percpu.h>
38723 #include <linux/refcount.h>
38724 #include <linux/slab.h>
38725+#include <linux/syscore_ops.h>
38726+#include <linux/wakeup_reason.h>
38727 
38728 #include <linux/irqchip.h>
38729 #include <linux/irqchip/arm-gic-common.h>
38730@@ -42,20 +44,7 @@ struct redist_region {
38731 	void __iomem		*redist_base;
38732 	phys_addr_t		phys_base;
38733 	bool			single_redist;
38734-};
38735 
38736-struct gic_chip_data {
38737-	struct fwnode_handle	*fwnode;
38738-	void __iomem		*dist_base;
38739-	struct redist_region	*redist_regions;
38740-	struct rdists		rdists;
38741-	struct irq_domain	*domain;
38742-	u64			redist_stride;
38743-	u32			nr_redist_regions;
38744-	u64			flags;
38745-	bool			has_rss;
38746-	unsigned int		ppi_nr;
38747-	struct partition_desc	**ppi_descs;
38748 };
38749 
38750 static struct gic_chip_data gic_data __read_mostly;
38751@@ -725,6 +714,7 @@ static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs
38752 
38753 	if (handle_domain_irq(gic_data.domain, irqnr, regs)) {
38754 		WARN_ONCE(true, "Unexpected interrupt received!\n");
38755+		log_abnormal_wakeup_reason("unexpected HW IRQ %u", irqnr);
38756 		gic_deactivate_unhandled(irqnr);
38757 	}
38758 }
38759@@ -915,22 +905,6 @@ static int __gic_update_rdist_properties(struct redist_region *region,
38760 {
38761 	u64 typer = gic_read_typer(ptr + GICR_TYPER);
38762 
38763-	/* Boot-time cleanip */
38764-	if ((typer & GICR_TYPER_VLPIS) && (typer & GICR_TYPER_RVPEID)) {
38765-		u64 val;
38766-
38767-		/* Deactivate any present vPE */
38768-		val = gicr_read_vpendbaser(ptr + SZ_128K + GICR_VPENDBASER);
38769-		if (val & GICR_VPENDBASER_Valid)
38770-			gicr_write_vpendbaser(GICR_VPENDBASER_PendingLast,
38771-					      ptr + SZ_128K + GICR_VPENDBASER);
38772-
38773-		/* Mark the VPE table as invalid */
38774-		val = gicr_read_vpropbaser(ptr + SZ_128K + GICR_VPROPBASER);
38775-		val &= ~GICR_VPROPBASER_4_1_VALID;
38776-		gicr_write_vpropbaser(val, ptr + SZ_128K + GICR_VPROPBASER);
38777-	}
38778-
38779 	gic_data.rdists.has_vlpis &= !!(typer & GICR_TYPER_VLPIS);
38780 
38781 	/* RVPEID implies some form of DirectLPI, no matter what the doc says... :-/ */
38782@@ -1324,6 +1298,22 @@ static void gic_cpu_pm_init(void)
38783 #else
38784 static inline void gic_cpu_pm_init(void) { }
38785 #endif /* CONFIG_CPU_PM */
38786+#ifdef CONFIG_PM
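+/*
+ * Empty resume hook, exported for vendor PM code and registered as a
+ * syscore op so GIC state could be restored early on resume if needed.
+ */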
38787+void gic_resume(void)
38788+{
38789+}
38790+EXPORT_SYMBOL_GPL(gic_resume);
38791+static struct syscore_ops gic_syscore_ops = {
38792+	.resume = gic_resume,
38793+};
38794+static void gic_syscore_init(void)
38795+{
38796+	register_syscore_ops(&gic_syscore_ops);
38797+}
38798+#else
38799+static inline void gic_syscore_init(void) { }
38800+void gic_resume(void) { }
38801+#endif
38802 
38803 static struct irq_chip gic_chip = {
38804 	.name			= "GICv3",
38805@@ -1787,6 +1777,7 @@ static int __init gic_init_bases(void __iomem *dist_base,
38806 	gic_cpu_init();
38807 	gic_smp_init();
38808 	gic_cpu_pm_init();
38809+	gic_syscore_init();
38810 
38811 	if (gic_dist_supports_lpis()) {
38812 		its_init(handle, &gic_data.rdists, gic_data.domain);
38813diff --git a/drivers/irqchip/irq-gic-v4.c b/drivers/irqchip/irq-gic-v4.c
38814index 0c18714ae..5d1dc9915 100644
38815--- a/drivers/irqchip/irq-gic-v4.c
38816+++ b/drivers/irqchip/irq-gic-v4.c
38817@@ -232,6 +232,8 @@ int its_make_vpe_non_resident(struct its_vpe *vpe, bool db)
38818 	if (!ret)
38819 		vpe->resident = false;
38820 
38821+	vpe->ready = false;
38822+
38823 	return ret;
38824 }
38825 
38826@@ -258,6 +260,23 @@ int its_make_vpe_resident(struct its_vpe *vpe, bool g0en, bool g1en)
38827 	return ret;
38828 }
38829 
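+/*
+ * Wait for the redistributor to finish parsing the VPT of a vPE that has
+ * just been scheduled.  The wait used to live in the schedule path; it is
+ * now a separate COMMIT_VPE step so callers can issue it later.
+ */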
38830+int its_commit_vpe(struct its_vpe *vpe)
38831+{
38832+	struct its_cmd_info info = {
38833+		.cmd_type = COMMIT_VPE,
38834+	};
38835+	int ret;
38836+
38837+	WARN_ON(preemptible());
38838+
38839+	ret = its_send_vpe_cmd(vpe, &info);
38840+	if (!ret)
38841+		vpe->ready = true;
38842+
38843+	return ret;
38844+}
38845+
38846+
38847 int its_invall_vpe(struct its_vpe *vpe)
38848 {
38849 	struct its_cmd_info info = {
38850diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
38851index 7e152bbb4..119e8c2e8 100644
38852--- a/drivers/media/platform/Kconfig
38853+++ b/drivers/media/platform/Kconfig
38854@@ -211,6 +211,7 @@ config VIDEO_MEDIATEK_JPEG
38855 	depends on MTK_IOMMU_V1 || MTK_IOMMU || COMPILE_TEST
38856 	depends on VIDEO_DEV && VIDEO_V4L2
38857 	depends on ARCH_MEDIATEK || COMPILE_TEST
38858+	depends on MTK_SMI || (COMPILE_TEST && MTK_SMI=n)
38859 	select VIDEOBUF2_DMA_CONTIG
38860 	select V4L2_MEM2MEM_DEV
38861 	help
38862@@ -238,6 +239,7 @@ config VIDEO_MEDIATEK_MDP
38863 	depends on MTK_IOMMU || COMPILE_TEST
38864 	depends on VIDEO_DEV && VIDEO_V4L2
38865 	depends on ARCH_MEDIATEK || COMPILE_TEST
38866+	depends on MTK_SMI || (COMPILE_TEST && MTK_SMI=n)
38867 	select VIDEOBUF2_DMA_CONTIG
38868 	select V4L2_MEM2MEM_DEV
38869 	select VIDEO_MEDIATEK_VPU
38870@@ -258,6 +260,7 @@ config VIDEO_MEDIATEK_VCODEC
38871 	# our dependencies, to avoid missing symbols during link.
38872 	depends on VIDEO_MEDIATEK_VPU || !VIDEO_MEDIATEK_VPU
38873 	depends on MTK_SCP || !MTK_SCP
38874+	depends on MTK_SMI || (COMPILE_TEST && MTK_SMI=n)
38875 	select VIDEOBUF2_DMA_CONTIG
38876 	select V4L2_MEM2MEM_DEV
38877 	select VIDEO_MEDIATEK_VCODEC_VPU if VIDEO_MEDIATEK_VPU
38878diff --git a/drivers/media/platform/Makefile b/drivers/media/platform/Makefile
38879index 62b6cdc8c..df76a5ca5 100644
38880--- a/drivers/media/platform/Makefile
38881+++ b/drivers/media/platform/Makefile
38882@@ -54,6 +54,7 @@ obj-$(CONFIG_VIDEO_RENESAS_VSP1)	+= vsp1/
38883 
38884 obj-$(CONFIG_VIDEO_ROCKCHIP_RGA)	+= rockchip/rga/
38885 
38886+
38887 obj-y	+= omap/
38888 
38889 obj-$(CONFIG_VIDEO_AM437X_VPFE)		+= am437x/
38890diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
38891index 282f3d238..5f3f90123 100644
38892--- a/drivers/media/usb/uvc/uvc_driver.c
38893+++ b/drivers/media/usb/uvc/uvc_driver.c
38894@@ -12,6 +12,7 @@
38895 #include <linux/module.h>
38896 #include <linux/slab.h>
38897 #include <linux/usb.h>
38898+#include <linux/usb/quirks.h>
38899 #include <linux/videodev2.h>
38900 #include <linux/vmalloc.h>
38901 #include <linux/wait.h>
38902@@ -2341,7 +2342,11 @@ static int uvc_probe(struct usb_interface *intf,
38903 	}
38904 
38905 	uvc_trace(UVC_TRACE_PROBE, "UVC device initialized.\n");
38906-	usb_enable_autosuspend(udev);
38907+	if (udev->quirks & USB_QUIRK_AUTO_SUSPEND ||
38908+	    udev->parent->quirks & USB_QUIRK_AUTO_SUSPEND)
38909+		uvc_printk(KERN_INFO, "auto-suspend is blacklisted for this device\n");
38910+	else
38911+		usb_enable_autosuspend(udev);
38912 	return 0;
38913 
38914 error:
38915diff --git a/drivers/media/v4l2-core/v4l2-async.c b/drivers/media/v4l2-core/v4l2-async.c
38916index 33babe6e8..1b5d4d99e 100644
38917--- a/drivers/media/v4l2-core/v4l2-async.c
38918+++ b/drivers/media/v4l2-core/v4l2-async.c
38919@@ -555,6 +555,60 @@ int v4l2_async_notifier_register(struct v4l2_device *v4l2_dev,
38920 }
38921 EXPORT_SYMBOL(v4l2_async_notifier_register);
38922 
38923+#if IS_ENABLED(CONFIG_NO_GKI)
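+/*
+ * Vendor helper: walk the notifier tree, drop sub-devices that never
+ * showed up from the waiting lists, and then retry completion so the
+ * V4L2 device can finish probing with the sub-devices that did register.
+ */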
38924+static int __v4l2_async_notifier_clr_unready_dev(
38925+	struct v4l2_async_notifier *notifier)
38926+{
38927+	struct v4l2_subdev *sd, *tmp;
38928+	int clr_num = 0;
38929+
38930+	list_for_each_entry_safe(sd, tmp, &notifier->done, async_list) {
38931+		struct v4l2_async_notifier *subdev_notifier =
38932+			v4l2_async_find_subdev_notifier(sd);
38933+
38934+		if (subdev_notifier)
38935+			clr_num += __v4l2_async_notifier_clr_unready_dev(
38936+					subdev_notifier);
38937+	}
38938+
38939+	list_for_each_entry_safe(sd, tmp, &notifier->waiting, async_list) {
38940+		list_del_init(&sd->async_list);
38941+		sd->asd = NULL;
38942+		sd->dev = NULL;
38943+		clr_num++;
38944+	}
38945+
38946+	return clr_num;
38947+}
38948+
38949+int v4l2_async_notifier_clr_unready_dev(struct v4l2_async_notifier *notifier)
38950+{
38951+	int ret = 0;
38952+	int clr_num = 0;
38953+
38954+	mutex_lock(&list_lock);
38955+
38956+	while (notifier->parent)
38957+		notifier = notifier->parent;
38958+
38959+	if (!notifier->v4l2_dev)
38960+		goto out;
38961+
38962+	clr_num = __v4l2_async_notifier_clr_unready_dev(notifier);
38963+	dev_info(notifier->v4l2_dev->dev,
38964+		 "cleared %d unready subdev(s)\n", clr_num);
38965+
38966+	if (clr_num > 0)
38967+		ret = v4l2_async_notifier_try_complete(notifier);
38968+
38969+out:
38970+	mutex_unlock(&list_lock);
38971+
38972+	return ret;
38973+}
38974+EXPORT_SYMBOL(v4l2_async_notifier_clr_unready_dev);
38975+#endif
38976+
38977 int v4l2_async_subdev_notifier_register(struct v4l2_subdev *sd,
38978 					struct v4l2_async_notifier *notifier)
38979 {
38980diff --git a/drivers/mfd/rk808.c b/drivers/mfd/rk808.c
38981index d109b9f14..fd755d98a 100644
38982--- a/drivers/mfd/rk808.c
38983+++ b/drivers/mfd/rk808.c
38984@@ -2,7 +2,7 @@
38985 /*
38986  * MFD core driver for Rockchip RK808/RK818
38987  *
38988- * Copyright (c) 2014, Fuzhou Rockchip Electronics Co., Ltd
38989+ * Copyright (c) 2014-2018, Fuzhou Rockchip Electronics Co., Ltd
38990  *
38991  * Author: Chris Zhong <zyw@rock-chips.com>
38992  * Author: Zhang Qing <zhangqing@rock-chips.com>
38993@@ -18,7 +18,11 @@
38994 #include <linux/mfd/core.h>
38995 #include <linux/module.h>
38996 #include <linux/of_device.h>
38997+#include <linux/reboot.h>
38998 #include <linux/regmap.h>
38999+#include <linux/syscore_ops.h>
39000+#include <linux/pinctrl/consumer.h>
39001+#include <linux/pinctrl/devinfo.h>
39002 
39003 struct rk808_reg_data {
39004 	int addr;
39005@@ -65,22 +69,61 @@ static bool rk817_is_volatile_reg(struct device *dev, unsigned int reg)
39006 	switch (reg) {
39007 	case RK817_SECONDS_REG ... RK817_WEEKS_REG:
39008 	case RK817_RTC_STATUS_REG:
39009+	case RK817_ADC_CONFIG0 ... RK817_CURE_ADC_K0:
39010+	case RK817_CHRG_STS:
39011+	case RK817_CHRG_OUT:
39012+	case RK817_CHRG_IN:
39013+	case RK817_SYS_STS:
39014 	case RK817_INT_STS_REG0:
39015 	case RK817_INT_STS_REG1:
39016 	case RK817_INT_STS_REG2:
39017-	case RK817_SYS_STS:
39018 		return true;
39019 	}
39020 
39021-	return true;
39022+	return false;
39023+}
39024+
39025+static bool rk818_is_volatile_reg(struct device *dev, unsigned int reg)
39026+{
39027+	/*
39028+	 * Notes:
39029+	 * - Technically the ROUND_30s bit makes RTC_CTRL_REG volatile, but
39030+	 *   we don't use that feature.  It's better to cache.
39031+	 * - It's unlikely we care that RK808_DEVCTRL_REG is volatile since
39032+	 *   its bits only get cleared when we shut off anyway, but better safe.
39033+	 */
39034+
39035+	switch (reg) {
39036+	case RK808_SECONDS_REG ... RK808_WEEKS_REG:
39037+	case RK808_RTC_STATUS_REG:
39038+	case RK808_VB_MON_REG:
39039+	case RK808_THERMAL_REG:
39040+	case RK808_DCDC_EN_REG:
39041+	case RK808_LDO_EN_REG:
39042+	case RK808_DCDC_UV_STS_REG:
39043+	case RK808_LDO_UV_STS_REG:
39044+	case RK808_DCDC_PG_REG:
39045+	case RK808_LDO_PG_REG:
39046+	case RK808_DEVCTRL_REG:
39047+	case RK808_INT_STS_REG1:
39048+	case RK808_INT_STS_REG2:
39049+	case RK808_INT_STS_MSK_REG1:
39050+	case RK808_INT_STS_MSK_REG2:
39051+	case RK816_INT_STS_REG1:
39052+	case RK816_INT_STS_MSK_REG1:
39053+	case RK818_SUP_STS_REG ... RK818_SAVE_DATA19:
39054+		return true;
39055+	}
39056+
39057+	return false;
39058 }
39059 
39060 static const struct regmap_config rk818_regmap_config = {
39061 	.reg_bits = 8,
39062 	.val_bits = 8,
39063-	.max_register = RK818_USB_CTRL_REG,
39064+	.max_register = RK818_SAVE_DATA19,
39065 	.cache_type = REGCACHE_RBTREE,
39066-	.volatile_reg = rk808_is_volatile_reg,
39067+	.volatile_reg = rk818_is_volatile_reg,
39068 };
39069 
39070 static const struct regmap_config rk805_regmap_config = {
39071@@ -99,11 +142,20 @@ static const struct regmap_config rk808_regmap_config = {
39072 	.volatile_reg = rk808_is_volatile_reg,
39073 };
39074 
39075+static const struct regmap_config rk816_regmap_config = {
39076+	.reg_bits = 8,
39077+	.val_bits = 8,
39078+	.max_register = RK816_DATA18_REG,
39079+	.cache_type = REGCACHE_RBTREE,
39080+	.volatile_reg = rk818_is_volatile_reg,
39081+};
39082+
39083 static const struct regmap_config rk817_regmap_config = {
39084 	.reg_bits = 8,
39085 	.val_bits = 8,
39086 	.max_register = RK817_GPIO_INT_CFG,
39087-	.cache_type = REGCACHE_NONE,
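+	/*
+	 * With num_reg_defaults_raw set and no defaults table, regmap reads
+	 * the whole register range from the chip once to seed the cache.
+	 */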
39088+	.num_reg_defaults_raw = RK817_GPIO_INT_CFG + 1,
39089+	.cache_type = REGCACHE_RBTREE,
39090 	.volatile_reg = rk817_is_volatile_reg,
39091 };
39092 
39093@@ -111,18 +163,27 @@ static struct resource rtc_resources[] = {
39094 	DEFINE_RES_IRQ(RK808_IRQ_RTC_ALARM),
39095 };
39096 
39097+static struct resource rk816_rtc_resources[] = {
39098+	DEFINE_RES_IRQ(RK816_IRQ_RTC_ALARM),
39099+};
39100+
39101 static struct resource rk817_rtc_resources[] = {
39102 	DEFINE_RES_IRQ(RK817_IRQ_RTC_ALARM),
39103 };
39104 
39105 static struct resource rk805_key_resources[] = {
39106-	DEFINE_RES_IRQ(RK805_IRQ_PWRON_RISE),
39107 	DEFINE_RES_IRQ(RK805_IRQ_PWRON_FALL),
39108+	DEFINE_RES_IRQ(RK805_IRQ_PWRON_RISE),
39109+};
39110+
39111+static struct resource rk816_pwrkey_resources[] = {
39112+	DEFINE_RES_IRQ(RK816_IRQ_PWRON_FALL),
39113+	DEFINE_RES_IRQ(RK816_IRQ_PWRON_RISE),
39114 };
39115 
39116 static struct resource rk817_pwrkey_resources[] = {
39117-	DEFINE_RES_IRQ(RK817_IRQ_PWRON_RISE),
39118 	DEFINE_RES_IRQ(RK817_IRQ_PWRON_FALL),
39119+	DEFINE_RES_IRQ(RK817_IRQ_PWRON_RISE),
39120 };
39121 
39122 static const struct mfd_cell rk805s[] = {
39123@@ -150,9 +211,28 @@ static const struct mfd_cell rk808s[] = {
39124 	},
39125 };
39126 
39127+static const struct mfd_cell rk816s[] = {
39128+	{ .name = "rk808-clkout", },
39129+	{ .name = "rk808-regulator", },
39130+	{ .name = "rk805-pinctrl", },
39131+	{ .name = "rk816-battery", .of_compatible = "rk816-battery", },
39132+	{
39133+		.name = "rk805-pwrkey",
39134+		.num_resources = ARRAY_SIZE(rk816_pwrkey_resources),
39135+		.resources = &rk816_pwrkey_resources[0],
39136+	},
39137+	{
39138+		.name = "rk808-rtc",
39139+		.num_resources = ARRAY_SIZE(rk816_rtc_resources),
39140+		.resources = &rk816_rtc_resources[0],
39141+	},
39142+};
39143+
39144 static const struct mfd_cell rk817s[] = {
39145 	{ .name = "rk808-clkout",},
39146 	{ .name = "rk808-regulator",},
39147+	{ .name = "rk817-battery", .of_compatible = "rk817,battery", },
39148+	{ .name = "rk817-charger", .of_compatible = "rk817,charger", },
39149 	{
39150 		.name = "rk805-pwrkey",
39151 		.num_resources = ARRAY_SIZE(rk817_pwrkey_resources),
39152@@ -163,11 +243,17 @@ static const struct mfd_cell rk817s[] = {
39153 		.num_resources = ARRAY_SIZE(rk817_rtc_resources),
39154 		.resources = &rk817_rtc_resources[0],
39155 	},
39156+	{
39157+		.name = "rk817-codec",
39158+		.of_compatible = "rockchip,rk817-codec",
39159+	},
39160 };
39161 
39162 static const struct mfd_cell rk818s[] = {
39163 	{ .name = "rk808-clkout", },
39164 	{ .name = "rk808-regulator", },
39165+	{ .name = "rk818-battery", .of_compatible = "rk818-battery", },
39166+	{ .name = "rk818-charger", },
39167 	{
39168 		.name = "rk808-rtc",
39169 		.num_resources = ARRAY_SIZE(rtc_resources),
39170@@ -176,16 +262,18 @@ static const struct mfd_cell rk818s[] = {
39171 };
39172 
39173 static const struct rk808_reg_data rk805_pre_init_reg[] = {
39174-	{RK805_BUCK1_CONFIG_REG, RK805_BUCK1_2_ILMAX_MASK,
39175-				 RK805_BUCK1_2_ILMAX_4000MA},
39176-	{RK805_BUCK2_CONFIG_REG, RK805_BUCK1_2_ILMAX_MASK,
39177-				 RK805_BUCK1_2_ILMAX_4000MA},
39178-	{RK805_BUCK3_CONFIG_REG, RK805_BUCK3_4_ILMAX_MASK,
39179-				 RK805_BUCK3_ILMAX_3000MA},
39180-	{RK805_BUCK4_CONFIG_REG, RK805_BUCK3_4_ILMAX_MASK,
39181-				 RK805_BUCK4_ILMAX_3500MA},
39182 	{RK805_BUCK4_CONFIG_REG, BUCK_ILMIN_MASK, BUCK_ILMIN_400MA},
39183+	{RK805_GPIO_IO_POL_REG, SLP_SD_MSK, SLEEP_FUN},
39184 	{RK805_THERMAL_REG, TEMP_HOTDIE_MSK, TEMP115C},
39185+	{RK808_RTC_CTRL_REG, RTC_STOP, RTC_STOP},
39186+};
39187+
39188+static struct rk808_reg_data rk805_suspend_reg[] = {
39189+	{RK805_BUCK3_CONFIG_REG, PWM_MODE_MSK, AUTO_PWM_MODE},
39190+};
39191+
39192+static struct rk808_reg_data rk805_resume_reg[] = {
39193+	{RK805_BUCK3_CONFIG_REG, PWM_MODE_MSK, FPWM_MODE},
39194 };
39195 
39196 static const struct rk808_reg_data rk808_pre_init_reg[] = {
39197@@ -195,11 +283,40 @@ static const struct rk808_reg_data rk808_pre_init_reg[] = {
39198 	{ RK808_BUCK1_CONFIG_REG, BUCK1_RATE_MASK,  BUCK_ILMIN_200MA },
39199 	{ RK808_BUCK2_CONFIG_REG, BUCK2_RATE_MASK,  BUCK_ILMIN_200MA },
39200 	{ RK808_DCDC_UV_ACT_REG,  BUCK_UV_ACT_MASK, BUCK_UV_ACT_DISABLE},
39201+	{ RK808_RTC_CTRL_REG, RTC_STOP, RTC_STOP},
39202 	{ RK808_VB_MON_REG,       MASK_ALL,         VB_LO_ACT |
39203 						    VB_LO_SEL_3500MV },
39204 };
39205 
39206+static const struct rk808_reg_data rk816_pre_init_reg[] = {
39207+	/* buck4 Max ILMIT*/
39208+	{ RK816_BUCK4_CONFIG_REG, REG_WRITE_MSK, BUCK4_MAX_ILIMIT },
39209+	/* hotdie temperature: 105c*/
39210+	{ RK816_THERMAL_REG, REG_WRITE_MSK, TEMP105C },
39211+	/* set buck 12.5mv/us */
39212+	{ RK816_BUCK1_CONFIG_REG, BUCK_RATE_MSK, BUCK_RATE_12_5MV_US },
39213+	{ RK816_BUCK2_CONFIG_REG, BUCK_RATE_MSK, BUCK_RATE_12_5MV_US },
39214+	/* enable RTC_PERIOD & RTC_ALARM int */
39215+	{ RK816_INT_STS_MSK_REG2, REG_WRITE_MSK, RTC_PERIOD_ALARM_INT_EN },
39216+	/* set bat 3.0 low and act shutdown */
39217+	{ RK816_VB_MON_REG, VBAT_LOW_VOL_MASK | VBAT_LOW_ACT_MASK,
39218+	  RK816_VBAT_LOW_3V0 | EN_VABT_LOW_SHUT_DOWN },
39219+	/* enable PWRON rising/falling int */
39220+	{ RK816_INT_STS_MSK_REG1, REG_WRITE_MSK, RK816_PWRON_FALL_RISE_INT_EN },
39221+	/* enable PLUG IN/OUT int */
39222+	{ RK816_INT_STS_MSK_REG3, REG_WRITE_MSK, PLUGIN_OUT_INT_EN },
39223+	/* clear int flags */
39224+	{ RK816_INT_STS_REG1, REG_WRITE_MSK, ALL_INT_FLAGS_ST },
39225+	{ RK816_INT_STS_REG2, REG_WRITE_MSK, ALL_INT_FLAGS_ST },
39226+	{ RK816_INT_STS_REG3, REG_WRITE_MSK, ALL_INT_FLAGS_ST },
39227+	{ RK816_DCDC_EN_REG2, BOOST_EN_MASK, BOOST_DISABLE },
39228+	/* set the write mask bit to 1, otherwise 'is_enabled()' gets the wrong status */
39229+	{ RK816_LDO_EN_REG1, REGS_WMSK, REGS_WMSK },
39230+	{ RK816_LDO_EN_REG2, REGS_WMSK, REGS_WMSK },
39231+};
39232+
39233 static const struct rk808_reg_data rk817_pre_init_reg[] = {
39234+	{RK817_SYS_CFG(3), RK817_SLPPOL_MSK, RK817_SLPPOL_L},
39235 	{RK817_RTC_CTRL_REG, RTC_STOP, RTC_STOP},
39236 	{RK817_GPIO_INT_CFG, RK817_INT_POL_MSK, RK817_INT_POL_L},
39237 	{RK817_SYS_CFG(1), RK817_HOTDIE_TEMP_MSK | RK817_TSD_TEMP_MSK,
39238@@ -220,8 +337,10 @@ static const struct rk808_reg_data rk818_pre_init_reg[] = {
39239 	{ RK818_H5V_EN_REG,	  BIT(1),	    RK818_REF_RDY_CTRL },
39240 	/* enable HDMI 5V */
39241 	{ RK818_H5V_EN_REG,	  BIT(0),	    RK818_H5V_EN },
39242+	{ RK808_RTC_CTRL_REG, RTC_STOP, RTC_STOP},
39243 	{ RK808_VB_MON_REG,	  MASK_ALL,	    VB_LO_ACT |
39244 						    VB_LO_SEL_3500MV },
39245+	{RK808_CLK32OUT_REG, CLK32KOUT2_FUNC_MASK, CLK32KOUT2_FUNC},
39246 };
39247 
39248 static const struct regmap_irq rk805_irqs[] = {
39249@@ -301,6 +420,70 @@ static const struct regmap_irq rk808_irqs[] = {
39250 	},
39251 };
39252 
39253+static struct rk808_reg_data rk816_suspend_reg[] = {
39254+	/* set bat 3.4v low and act irq */
39255+	{ RK816_VB_MON_REG, VBAT_LOW_VOL_MASK | VBAT_LOW_ACT_MASK,
39256+	  RK816_VBAT_LOW_3V4 | EN_VBAT_LOW_IRQ },
39257+};
39258+
39259+static struct rk808_reg_data rk816_resume_reg[] = {
39260+	/* set bat 3.0v low and act shutdown */
39261+	{ RK816_VB_MON_REG, VBAT_LOW_VOL_MASK | VBAT_LOW_ACT_MASK,
39262+	  RK816_VBAT_LOW_3V0 | EN_VABT_LOW_SHUT_DOWN },
39263+};
39264+
39265+static const struct regmap_irq rk816_irqs[] = {
39266+	/* INT_STS */
39267+	[RK816_IRQ_PWRON_FALL] = {
39268+		.mask = RK816_IRQ_PWRON_FALL_MSK,
39269+		.reg_offset = 0,
39270+	},
39271+	[RK816_IRQ_PWRON_RISE] = {
39272+		.mask = RK816_IRQ_PWRON_RISE_MSK,
39273+		.reg_offset = 0,
39274+	},
39275+	[RK816_IRQ_VB_LOW] = {
39276+		.mask = RK816_IRQ_VB_LOW_MSK,
39277+		.reg_offset = 1,
39278+	},
39279+	[RK816_IRQ_PWRON] = {
39280+		.mask = RK816_IRQ_PWRON_MSK,
39281+		.reg_offset = 1,
39282+	},
39283+	[RK816_IRQ_PWRON_LP] = {
39284+		.mask = RK816_IRQ_PWRON_LP_MSK,
39285+		.reg_offset = 1,
39286+	},
39287+	[RK816_IRQ_HOTDIE] = {
39288+		.mask = RK816_IRQ_HOTDIE_MSK,
39289+		.reg_offset = 1,
39290+	},
39291+	[RK816_IRQ_RTC_ALARM] = {
39292+		.mask = RK816_IRQ_RTC_ALARM_MSK,
39293+		.reg_offset = 1,
39294+	},
39295+	[RK816_IRQ_RTC_PERIOD] = {
39296+		.mask = RK816_IRQ_RTC_PERIOD_MSK,
39297+		.reg_offset = 1,
39298+	},
39299+	[RK816_IRQ_USB_OV] = {
39300+		.mask = RK816_IRQ_USB_OV_MSK,
39301+		.reg_offset = 1,
39302+	},
39303+};
39304+
39305+static struct rk808_reg_data rk818_suspend_reg[] = {
39306+	/* set bat 3.4v low and act irq */
39307+	{ RK808_VB_MON_REG, VBAT_LOW_VOL_MASK | VBAT_LOW_ACT_MASK,
39308+	  RK808_VBAT_LOW_3V4 | EN_VBAT_LOW_IRQ },
39309+};
39310+
39311+static struct rk808_reg_data rk818_resume_reg[] = {
39312+	/* set bat 3.0v low and act shutdown */
39313+	{ RK808_VB_MON_REG, VBAT_LOW_VOL_MASK | VBAT_LOW_ACT_MASK,
39314+	  RK808_VBAT_LOW_3V0 | EN_VABT_LOW_SHUT_DOWN },
39315+};
39316+
39317 static const struct regmap_irq rk818_irqs[] = {
39318 	/* INT_STS */
39319 	[RK818_IRQ_VOUT_LO] = {
39320@@ -421,6 +604,61 @@ static const struct regmap_irq_chip rk808_irq_chip = {
39321 	.init_ack_masked = true,
39322 };
39323 
39324+static const struct regmap_irq rk816_battery_irqs[] = {
39325+	/* INT_STS */
39326+	[RK816_IRQ_PLUG_IN] = {
39327+		.mask = RK816_IRQ_PLUG_IN_MSK,
39328+		.reg_offset = 0,
39329+	},
39330+	[RK816_IRQ_PLUG_OUT] = {
39331+		.mask = RK816_IRQ_PLUG_OUT_MSK,
39332+		.reg_offset = 0,
39333+	},
39334+	[RK816_IRQ_CHG_OK] = {
39335+		.mask = RK816_IRQ_CHG_OK_MSK,
39336+		.reg_offset = 0,
39337+	},
39338+	[RK816_IRQ_CHG_TE] = {
39339+		.mask = RK816_IRQ_CHG_TE_MSK,
39340+		.reg_offset = 0,
39341+	},
39342+	[RK816_IRQ_CHG_TS] = {
39343+		.mask = RK816_IRQ_CHG_TS_MSK,
39344+		.reg_offset = 0,
39345+	},
39346+	[RK816_IRQ_CHG_CVTLIM] = {
39347+		.mask = RK816_IRQ_CHG_CVTLIM_MSK,
39348+		.reg_offset = 0,
39349+	},
39350+	[RK816_IRQ_DISCHG_ILIM] = {
39351+		.mask = RK816_IRQ_DISCHG_ILIM_MSK,
39352+		.reg_offset = 0,
39353+	},
39354+};
39355+
39356+static struct regmap_irq_chip rk816_irq_chip = {
39357+	.name = "rk816",
39358+	.irqs = rk816_irqs,
39359+	.num_irqs = ARRAY_SIZE(rk816_irqs),
39360+	.num_regs = 2,
39361+	.irq_reg_stride = 3,
39362+	.status_base = RK816_INT_STS_REG1,
39363+	.mask_base = RK816_INT_STS_MSK_REG1,
39364+	.ack_base = RK816_INT_STS_REG1,
39365+	.init_ack_masked = true,
39366+};
39367+
39368+static struct regmap_irq_chip rk816_battery_irq_chip = {
39369+	.name = "rk816_battery",
39370+	.irqs = rk816_battery_irqs,
39371+	.num_irqs = ARRAY_SIZE(rk816_battery_irqs),
39372+	.num_regs = 1,
39373+	.status_base = RK816_INT_STS_REG3,
39374+	.mask_base = RK816_INT_STS_MSK_REG3,
39375+	.ack_base = RK816_INT_STS_REG3,
39376+	.init_ack_masked = true,
39377+};
39378+
39379 static struct regmap_irq_chip rk817_irq_chip = {
39380 	.name = "rk817",
39381 	.irqs = rk817_irqs,
39382@@ -446,8 +684,70 @@ static const struct regmap_irq_chip rk818_irq_chip = {
39383 };
39384 
39385 static struct i2c_client *rk808_i2c_client;
39386+static struct rk808_reg_data *suspend_reg, *resume_reg;
39387+static int suspend_reg_num, resume_reg_num;
39388+
39389+static void rk805_device_shutdown_prepare(void)
39390+{
39391+	int ret;
39392+	struct rk808 *rk808 = i2c_get_clientdata(rk808_i2c_client);
39393+
39394+	if (!rk808)
39395+		return;
39396+
39397+	ret = regmap_update_bits(rk808->regmap,
39398+				 RK805_GPIO_IO_POL_REG,
39399+				 SLP_SD_MSK, SHUTDOWN_FUN);
39400+	if (ret)
39401+		dev_err(&rk808_i2c_client->dev, "Failed to shutdown device!\n");
39402+}
39403+
39404+static void rk817_shutdown_prepare(void)
39405+{
39406+	int ret;
39407+	struct rk808 *rk808 = i2c_get_clientdata(rk808_i2c_client);
39408+
39409+	/* disable the RTC interrupts when powering off */
39410+	regmap_update_bits(rk808->regmap,
39411+			   RK817_INT_STS_MSK_REG0,
39412+			   (0x3 << 5), (0x3 << 5));
39413+	regmap_update_bits(rk808->regmap,
39414+			   RK817_RTC_INT_REG,
39415+			   (0x3 << 2), (0x0 << 2));
39416+
39417+	if (rk808->pins && rk808->pins->p && rk808->pins->power_off) {
39418+		ret = regmap_update_bits(rk808->regmap,
39419+					 RK817_SYS_CFG(3),
39420+					 RK817_SLPPIN_FUNC_MSK,
39421+					 SLPPIN_NULL_FUN);
39422+		if (ret)
39423+			pr_err("shutdown: config SLPPIN_NULL_FUN error!\n");
39424+
39425+		ret = regmap_update_bits(rk808->regmap,
39426+					 RK817_SYS_CFG(3),
39427+					 RK817_SLPPOL_MSK,
39428+					 RK817_SLPPOL_H);
39429+		if (ret)
39430+			pr_err("shutdown: config RK817_SLPPOL_H error!\n");
39431+
39432+		ret = pinctrl_select_state(rk808->pins->p,
39433+					   rk808->pins->power_off);
39434+		if (ret)
39435+			pr_info("%s:failed to activate pwroff state\n",
39436+				__func__);
39437+	}
39438+
39439+	/* pmic sleep shutdown function */
39440+	ret = regmap_update_bits(rk808->regmap,
39441+				 RK817_SYS_CFG(3),
39442+				 RK817_SLPPIN_FUNC_MSK, SLPPIN_DN_FUN);
39443+	if (ret)
39444+		dev_err(&rk808_i2c_client->dev, "Failed to shutdown device!\n");
39445+	/* the PMIC needs the SCL clock to synchronize the register write */
39446+	mdelay(2);
39447+}
39448 
39449-static void rk808_pm_power_off(void)
39450+static void rk8xx_device_shutdown(void)
39451 {
39452 	int ret;
39453 	unsigned int reg, bit;
39454@@ -462,6 +762,10 @@ static void rk808_pm_power_off(void)
39455 		reg = RK808_DEVCTRL_REG,
39456 		bit = DEV_OFF_RST;
39457 		break;
39458+	case RK816_ID:
39459+		reg = RK816_DEV_CTRL_REG;
39460+		bit = DEV_OFF;
39461+		break;
39462 	case RK818_ID:
39463 		reg = RK818_DEVCTRL_REG;
39464 		bit = DEV_OFF;
39465@@ -469,42 +773,345 @@ static void rk808_pm_power_off(void)
39466 	default:
39467 		return;
39468 	}
39469+
39470 	ret = regmap_update_bits(rk808->regmap, reg, bit, bit);
39471 	if (ret)
39472 		dev_err(&rk808_i2c_client->dev, "Failed to shutdown device!\n");
39473 }
39474 
39475-static void rk8xx_shutdown(struct i2c_client *client)
39476+/* Called in syscore shutdown */
39477+static void (*pm_shutdown)(void);
39478+
39479+static void rk8xx_syscore_shutdown(void)
39480 {
39481-	struct rk808 *rk808 = i2c_get_clientdata(client);
39482 	int ret;
39483+	struct rk808 *rk808 = i2c_get_clientdata(rk808_i2c_client);
39484 
39485-	switch (rk808->variant) {
39486-	case RK805_ID:
39487-		ret = regmap_update_bits(rk808->regmap,
39488-					 RK805_GPIO_IO_POL_REG,
39489-					 SLP_SD_MSK,
39490-					 SHUTDOWN_FUN);
39491+	if (!rk808) {
39492+		dev_warn(&rk808_i2c_client->dev,
39493+			 "no rk808 data, nothing to do\n");
39494+		return;
39495+	}
39496+
39497+	/* disable the RTC interrupts when powering off */
39498+	regmap_update_bits(rk808->regmap,
39499+			   RK808_INT_STS_MSK_REG1,
39500+			   (0x3 << 5), (0x3 << 5));
39501+	regmap_update_bits(rk808->regmap,
39502+			   RK808_RTC_INT_REG,
39503+			   (0x3 << 2), (0x0 << 2));
39504+	/*
39505+	 * For PMICs that power off their supplies by writing a register over
39506+	 * the I2C bus, it's better to do the power off here in syscore shutdown.
39507+	 *
39508+	 * By the time the kernel's "pm_power_off" callback runs, the I2C bus
39509+	 * may already have been stopped, or the PMIC may be unable to get a
39510+	 * transfer through while too many devices are competing for the bus.
39511+	 */
39512+	if (system_state == SYSTEM_POWER_OFF) {
39513+		if (rk808->variant == RK809_ID || rk808->variant == RK817_ID) {
39514+			ret = regmap_update_bits(rk808->regmap,
39515+						 RK817_SYS_CFG(3),
39516+						 RK817_SLPPIN_FUNC_MSK,
39517+						 SLPPIN_DN_FUN);
39518+			if (ret) {
39519+				dev_warn(&rk808_i2c_client->dev,
39520+					 "Cannot switch to power down function\n");
39521+			}
39522+		}
39523+
39524+		if (pm_shutdown) {
39525+			dev_info(&rk808_i2c_client->dev, "System power off\n");
39526+			pm_shutdown();
39527+			mdelay(10);
39528+			dev_info(&rk808_i2c_client->dev,
39529+				 "Power off failed !\n");
39530+			while (1)
39531+				;
39532+		}
39533+	}
39534+}
39535+
39536+static struct syscore_ops rk808_syscore_ops = {
39537+	.shutdown = rk8xx_syscore_shutdown,
39538+};
39539+
39540+/*
39541+ * RK8xx PMICs do the real power off in syscore shutdown.  If "pm_power_off"
39542+ * is not assigned (e.g. PSCI is not enabled), we still have to provide a
39543+ * dummy callback, otherwise the reboot syscall silently turns into a halt:
39544+ *
39545+ * if ((cmd == LINUX_REBOOT_CMD_POWER_OFF) && !pm_power_off)
39546+ *		cmd = LINUX_REBOOT_CMD_HALT;
39547+ */
39548+static void rk808_pm_power_off_dummy(void)
39549+{
39550+	pr_info("Dummy power off for RK8xx PMICs, should never reach here!\n");
39551+
39552+	while (1)
39553+		;
39554+}
39555+
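+/*
+ * Debug hook behind a sysfs attribute: "r <hex addr>" dumps a PMIC
+ * register and "w <hex addr> <hex value>" writes one; results go to the
+ * kernel log via pr_info().
+ */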
39556+static ssize_t rk8xx_dbg_store(struct device *dev,
39557+			       struct device_attribute *attr,
39558+			       const char *buf, size_t count)
39559+{
39560+	int ret;
39561+	char cmd;
39562+	u32 input[2], addr, data;
39563+	struct rk808 *rk808 = i2c_get_clientdata(rk808_i2c_client);
39564+
39565+	ret = sscanf(buf, "%c ", &cmd);
39566+	if (ret != 1) {
39567+		pr_err("Unknown command\n");
39568+		goto out;
39569+	}
39570+	switch (cmd) {
39571+	case 'w':
39572+		ret = sscanf(buf, "%c %x %x ", &cmd, &input[0], &input[1]);
39573+		if (ret != 3) {
39574+			pr_err("error! cmd format: echo w [addr] [value]\n");
39575+			goto out;
39576+		}
39577+		addr = input[0] & 0xff;
39578+		data = input[1] & 0xff;
39579+		pr_info("cmd : %c %x %x\n\n", cmd, input[0], input[1]);
39580+		regmap_write(rk808->regmap, addr, data);
39581+		regmap_read(rk808->regmap, addr, &data);
39582+		pr_info("new: %x %x\n", addr, data);
39583 		break;
39584-	case RK809_ID:
39585-	case RK817_ID:
39586-		ret = regmap_update_bits(rk808->regmap,
39587-					 RK817_SYS_CFG(3),
39588-					 RK817_SLPPIN_FUNC_MSK,
39589-					 SLPPIN_DN_FUN);
39590+	case 'r':
39591+		ret = sscanf(buf, "%c %x ", &cmd, &input[0]);
39592+		if (ret != 2) {
39593+			pr_err("error! cmd format: echo r [addr]\n");
39594+			goto out;
39595+		}
39596+		pr_info("cmd : %c %x\n\n", cmd, input[0]);
39597+		addr = input[0] & 0xff;
39598+		regmap_read(rk808->regmap, addr, &data);
39599+		pr_info("%x %x\n", input[0], data);
39600 		break;
39601 	default:
39602-		return;
39603+		pr_err("Unknown command\n");
39604+		break;
39605+	}
39606+
39607+out:
39608+	return count;
39609+}
39610+
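+/*
+ * Register the "rk805-pinctrl" child device, then look up the default,
+ * pmic-power-off, pmic-sleep and pmic-reset pinctrl states; the default
+ * state (and, when present, the reset state) is applied immediately.
+ */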
39611+static int rk817_pinctrl_init(struct device *dev, struct rk808 *rk808)
39612+{
39613+	int ret;
39614+	struct platform_device	*pinctrl_dev;
39615+	struct pinctrl_state *default_st;
39616+
39617+	pinctrl_dev = platform_device_alloc("rk805-pinctrl", -1);
39618+	if (!pinctrl_dev) {
39619+		dev_err(dev, "Alloc pinctrl dev failed!\n");
39620+		return -ENOMEM;
39621 	}
39622+
39623+	pinctrl_dev->dev.parent = dev;
39624+
39625+	ret = platform_device_add(pinctrl_dev);
39626+
39627+	if (ret) {
39628+		platform_device_put(pinctrl_dev);
39629+		dev_err(dev, "Add rk805-pinctrl dev failed!\n");
39630+		return ret;
39631+	}
39632+	if (dev->pins && !IS_ERR(dev->pins->p)) {
39633+		dev_info(dev, "already got a pinctrl handle\n");
39634+		return 0;
39635+	}
39636+
39637+	rk808->pins = devm_kzalloc(dev, sizeof(struct rk808_pin_info),
39638+				   GFP_KERNEL);
39639+	if (!rk808->pins)
39640+		return -ENOMEM;
39641+
39642+	rk808->pins->p = devm_pinctrl_get(dev);
39643+	if (IS_ERR(rk808->pins->p)) {
39644+		rk808->pins->p = NULL;
39645+		dev_err(dev, "no pinctrl handle\n");
39646+		return 0;
39647+	}
39648+
39649+	default_st = pinctrl_lookup_state(rk808->pins->p,
39650+					  PINCTRL_STATE_DEFAULT);
39651+
39652+	if (IS_ERR(default_st)) {
39653+		dev_dbg(dev, "no default pinctrl state\n");
39654+			return -EINVAL;
39655+		return -EINVAL;
39656+
39657+	ret = pinctrl_select_state(rk808->pins->p, default_st);
39658+	if (ret) {
39659+		dev_dbg(dev, "failed to activate default pinctrl state\n");
39660+		return -EINVAL;
39661+	}
39662+
39663+	rk808->pins->power_off = pinctrl_lookup_state(rk808->pins->p,
39664+						      "pmic-power-off");
39665+	if (IS_ERR(rk808->pins->power_off)) {
39666+		rk808->pins->power_off = NULL;
39667+		dev_dbg(dev, "no power-off pinctrl state\n");
39668+	}
39669+
39670+	rk808->pins->sleep = pinctrl_lookup_state(rk808->pins->p,
39671+						  "pmic-sleep");
39672+	if (IS_ERR(rk808->pins->sleep)) {
39673+		rk808->pins->sleep = NULL;
39674+		dev_dbg(dev, "no sleep-setting state\n");
39675+	}
39676+
39677+	rk808->pins->reset = pinctrl_lookup_state(rk808->pins->p,
39678+						  "pmic-reset");
39679+	if (IS_ERR(rk808->pins->reset)) {
39680+		rk808->pins->reset = NULL;
39681+		dev_dbg(dev, "no reset-setting pinctrl state\n");
39682+		return 0;
39683+	}
39684+
39685+	ret = pinctrl_select_state(rk808->pins->p, rk808->pins->reset);
39686+
39687 	if (ret)
39688-		dev_warn(&client->dev,
39689-			 "Cannot switch to power down function\n");
39690+		dev_dbg(dev, "failed to activate reset-setting pinctrl state\n");
39691+
39692+	return 0;
39693+}
39694+
39695+struct rk817_reboot_data_t {
39696+	struct rk808 *rk808;
39697+	struct notifier_block reboot_notifier;
39698+};
39699+
39700+static struct rk817_reboot_data_t rk817_reboot_data;
39701+
39702+static int rk817_reboot_notifier_handler(struct notifier_block *nb,
39703+					 unsigned long action, void *cmd)
39704+{
39705+	struct rk817_reboot_data_t *data;
39706+	struct device *dev;
39707+	int value, power_en_active0, power_en_active1;
39708+	int ret, i;
39709+	static const char * const pmic_rst_reg_only_cmd[] = {
39710+		"loader", "bootloader", "fastboot", "recovery",
39711+		"ums", "panic", "watchdog", "charge",
39712+	};
39713+
39714+	data = container_of(nb, struct rk817_reboot_data_t, reboot_notifier);
39715+	dev = &data->rk808->i2c->dev;
39716+
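+	/*
+	 * Copy the regulator enable state latched in POWER_EN_SAVE0/1 back
+	 * into POWER_EN_REG(0..3) before restarting; the 0xf0 in the high
+	 * nibble of each write presumably acts as the write-enable mask for
+	 * the low nibble.
+	 */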
39717+	regmap_read(data->rk808->regmap, RK817_POWER_EN_SAVE0,
39718+		    &power_en_active0);
39719+	if (power_en_active0 != 0) {
39720+		regmap_read(data->rk808->regmap, RK817_POWER_EN_SAVE1,
39721+			    &power_en_active1);
39722+		value = power_en_active0 & 0x0f;
39723+		regmap_write(data->rk808->regmap,
39724+			     RK817_POWER_EN_REG(0),
39725+			     value | 0xf0);
39726+		value = (power_en_active0 & 0xf0) >> 4;
39727+		regmap_write(data->rk808->regmap,
39728+			     RK817_POWER_EN_REG(1),
39729+			     value | 0xf0);
39730+		value = power_en_active1 & 0x0f;
39731+		regmap_write(data->rk808->regmap,
39732+			     RK817_POWER_EN_REG(2),
39733+			     value | 0xf0);
39734+		value = (power_en_active1 & 0xf0) >> 4;
39735+		regmap_write(data->rk808->regmap,
39736+			     RK817_POWER_EN_REG(3),
39737+			     value | 0xf0);
39738+	} else {
39739+		dev_info(dev, "reboot: not restore POWER_EN\n");
39740+	}
39741+
39742+	if (action != SYS_RESTART || !cmd)
39743+		return NOTIFY_OK;
39744+
39745+	/*
39746+	 * On a system restart there are two possible reset actions for the
39747+	 * PMIC sleep pin, if the board hardware supports them:
39748+	 *
39749+	 *	0b'00: reset the PMIC itself completely.
39750+	 *	0b'01: reset only the 'RST'-related registers.
39751+	 *
39752+	 * In the 0b'00 case the PMIC resets itself, which triggers a SoC
39753+	 * NPOR reset at the same time, so commands such as "reboot loader/
39754+	 * bootloader/recovery" no longer take effect.
39755+	 *
39756+	 * Check whether this reboot cmd is one we expect to handle via 0b'01.
39757+	 */
39758+	for (i = 0; i < ARRAY_SIZE(pmic_rst_reg_only_cmd); i++) {
39759+		if (!strcmp(cmd, pmic_rst_reg_only_cmd[i])) {
39760+			ret = regmap_update_bits(data->rk808->regmap,
39761+						 RK817_SYS_CFG(3),
39762+						 RK817_RST_FUNC_MSK,
39763+						 RK817_RST_FUNC_REG);
39764+			if (ret)
39765+				dev_err(dev, "reboot: force RK817_RST_FUNC_REG error!\n");
39766+			else
39767+				dev_info(dev, "reboot: force RK817_RST_FUNC_REG ok!\n");
39768+			break;
39769+		}
39770+	}
39771+
39772+	return NOTIFY_OK;
39773 }
39774 
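+/*
+ * Parse the Rockchip-specific DT knobs: "fb-inner-reg-idxs" appears to
+ * select the internal/external feedback divider for DCDC3, and
+ * "pmic-reset-func" picks what the sleep/reset pin does on a restart.
+ * Also registers the reboot notifier defined above.
+ */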
39775+static void rk817_of_property_prepare(struct rk808 *rk808, struct device *dev)
39776+{
39777+	u32 inner;
39778+	int ret, func, msk, val;
39779+	struct device_node *np = dev->of_node;
39780+
39781+	ret = of_property_read_u32_index(np, "fb-inner-reg-idxs", 0, &inner);
39782+	if (!ret && inner == RK817_ID_DCDC3)
39783+		regmap_update_bits(rk808->regmap, RK817_POWER_CONFIG,
39784+				   RK817_BUCK3_FB_RES_MSK,
39785+				   RK817_BUCK3_FB_RES_INTER);
39786+	else
39787+		regmap_update_bits(rk808->regmap, RK817_POWER_CONFIG,
39788+				   RK817_BUCK3_FB_RES_MSK,
39789+				   RK817_BUCK3_FB_RES_EXT);
39790+	dev_info(dev, "support dcdc3 fb mode:%d, %d\n", ret, inner);
39791+
39792+	ret = of_property_read_u32(np, "pmic-reset-func", &func);
39793+
39794+	msk = RK817_SLPPIN_FUNC_MSK | RK817_RST_FUNC_MSK;
39795+	val = SLPPIN_NULL_FUN;
39796+
39797+	if (!ret && func < RK817_RST_FUNC_CNT) {
39798+		val |= RK817_RST_FUNC_MSK &
39799+		       (func << RK817_RST_FUNC_SFT);
39800+	} else {
39801+		val |= RK817_RST_FUNC_REG;
39802+	}
39803+
39804+	regmap_update_bits(rk808->regmap, RK817_SYS_CFG(3), msk, val);
39805+
39806+	dev_info(dev, "support pmic reset mode:%d,%d\n", ret, func);
39807+
39808+	rk817_reboot_data.rk808 = rk808;
39809+	rk817_reboot_data.reboot_notifier.notifier_call =
39810+		rk817_reboot_notifier_handler;
39811+	ret = register_reboot_notifier(&rk817_reboot_data.reboot_notifier);
39812+	if (ret)
39813+		dev_err(dev, "failed to register reboot nb\n");
39814+}
39815+
39816+static struct kobject *rk8xx_kobj;
39817+static struct device_attribute rk8xx_attrs =
39818+		__ATTR(rk8xx_dbg, 0200, NULL, rk8xx_dbg_store);
39819+
39820 static const struct of_device_id rk808_of_match[] = {
39821 	{ .compatible = "rockchip,rk805" },
39822 	{ .compatible = "rockchip,rk808" },
39823 	{ .compatible = "rockchip,rk809" },
39824+	{ .compatible = "rockchip,rk816" },
39825 	{ .compatible = "rockchip,rk817" },
39826 	{ .compatible = "rockchip,rk818" },
39827 	{ },
39828@@ -517,13 +1124,20 @@ static int rk808_probe(struct i2c_client *client,
39829 	struct device_node *np = client->dev.of_node;
39830 	struct rk808 *rk808;
39831 	const struct rk808_reg_data *pre_init_reg;
39832+	const struct regmap_irq_chip *battery_irq_chip = NULL;
39833 	const struct mfd_cell *cells;
39834+	unsigned char pmic_id_msb, pmic_id_lsb;
39835+	u8 on_source = 0, off_source = 0;
39836+	unsigned int on, off;
39837+	int pm_off = 0, msb, lsb;
39838 	int nr_pre_init_regs;
39839 	int nr_cells;
39840-	int msb, lsb;
39841-	unsigned char pmic_id_msb, pmic_id_lsb;
39842 	int ret;
39843 	int i;
39844+	void (*of_property_prepare_fn)(struct rk808 *rk808,
39845+				       struct device *dev) = NULL;
39846+	int (*pinctrl_init)(struct device *dev, struct rk808 *rk808) = NULL;
39847+	void (*device_shutdown_fn)(void) = NULL;
39848 
39849 	rk808 = devm_kzalloc(&client->dev, sizeof(*rk808), GFP_KERNEL);
39850 	if (!rk808)
39851@@ -564,6 +1178,14 @@ static int rk808_probe(struct i2c_client *client,
39852 		nr_pre_init_regs = ARRAY_SIZE(rk805_pre_init_reg);
39853 		cells = rk805s;
39854 		nr_cells = ARRAY_SIZE(rk805s);
39855+		on_source = RK805_ON_SOURCE_REG;
39856+		off_source = RK805_OFF_SOURCE_REG;
39857+		suspend_reg = rk805_suspend_reg;
39858+		suspend_reg_num = ARRAY_SIZE(rk805_suspend_reg);
39859+		resume_reg = rk805_resume_reg;
39860+		resume_reg_num = ARRAY_SIZE(rk805_resume_reg);
39861+		device_shutdown_fn = rk8xx_device_shutdown;
39862+		rk808->pm_pwroff_prep_fn = rk805_device_shutdown_prepare;
39863 		break;
39864 	case RK808_ID:
39865 		rk808->regmap_cfg = &rk808_regmap_config;
39866@@ -572,6 +1194,23 @@ static int rk808_probe(struct i2c_client *client,
39867 		nr_pre_init_regs = ARRAY_SIZE(rk808_pre_init_reg);
39868 		cells = rk808s;
39869 		nr_cells = ARRAY_SIZE(rk808s);
39870+		device_shutdown_fn = rk8xx_device_shutdown;
39871+		break;
39872+	case RK816_ID:
39873+		rk808->regmap_cfg = &rk816_regmap_config;
39874+		rk808->regmap_irq_chip = &rk816_irq_chip;
39875+		battery_irq_chip = &rk816_battery_irq_chip;
39876+		pre_init_reg = rk816_pre_init_reg;
39877+		nr_pre_init_regs = ARRAY_SIZE(rk816_pre_init_reg);
39878+		cells = rk816s;
39879+		nr_cells = ARRAY_SIZE(rk816s);
39880+		on_source = RK816_ON_SOURCE_REG;
39881+		off_source = RK816_OFF_SOURCE_REG;
39882+		suspend_reg = rk816_suspend_reg;
39883+		suspend_reg_num = ARRAY_SIZE(rk816_suspend_reg);
39884+		resume_reg = rk816_resume_reg;
39885+		resume_reg_num = ARRAY_SIZE(rk816_resume_reg);
39886+		device_shutdown_fn = rk8xx_device_shutdown;
39887 		break;
39888 	case RK818_ID:
39889 		rk808->regmap_cfg = &rk818_regmap_config;
39890@@ -580,6 +1219,13 @@ static int rk808_probe(struct i2c_client *client,
39891 		nr_pre_init_regs = ARRAY_SIZE(rk818_pre_init_reg);
39892 		cells = rk818s;
39893 		nr_cells = ARRAY_SIZE(rk818s);
39894+		on_source = RK818_ON_SOURCE_REG;
39895+		off_source = RK818_OFF_SOURCE_REG;
39896+		suspend_reg = rk818_suspend_reg;
39897+		suspend_reg_num = ARRAY_SIZE(rk818_suspend_reg);
39898+		resume_reg = rk818_resume_reg;
39899+		resume_reg_num = ARRAY_SIZE(rk818_resume_reg);
39900+		device_shutdown_fn = rk8xx_device_shutdown;
39901 		break;
39902 	case RK809_ID:
39903 	case RK817_ID:
39904@@ -589,6 +1235,11 @@ static int rk808_probe(struct i2c_client *client,
39905 		nr_pre_init_regs = ARRAY_SIZE(rk817_pre_init_reg);
39906 		cells = rk817s;
39907 		nr_cells = ARRAY_SIZE(rk817s);
39908+		on_source = RK817_ON_SOURCE_REG;
39909+		off_source = RK817_OFF_SOURCE_REG;
39910+		rk808->pm_pwroff_prep_fn = rk817_shutdown_prepare;
39911+		of_property_prepare_fn = rk817_of_property_prepare;
39912+		pinctrl_init = rk817_pinctrl_init;
39913 		break;
39914 	default:
39915 		dev_err(&client->dev, "Unsupported RK8XX ID %lu\n",
39916@@ -597,6 +1248,7 @@ static int rk808_probe(struct i2c_client *client,
39917 	}
39918 
39919 	rk808->i2c = client;
39920+	rk808_i2c_client = client;
39921 	i2c_set_clientdata(client, rk808);
39922 
39923 	rk808->regmap = devm_regmap_init_i2c(client, rk808->regmap_cfg);
39924@@ -605,11 +1257,50 @@ static int rk808_probe(struct i2c_client *client,
39925 		return PTR_ERR(rk808->regmap);
39926 	}
39927 
39928+	if (on_source && off_source) {
39929+		ret = regmap_read(rk808->regmap, on_source, &on);
39930+		if (ret) {
39931+			dev_err(&client->dev, "read 0x%x failed\n", on_source);
39932+			return ret;
39933+		}
39934+
39935+		ret = regmap_read(rk808->regmap, off_source, &off);
39936+		if (ret) {
39937+			dev_err(&client->dev, "read 0x%x failed\n", off_source);
39938+			return ret;
39939+		}
39940+
39941+		dev_info(&client->dev, "source: on=0x%02x, off=0x%02x\n",
39942+			 on, off);
39943+	}
39944+
39945 	if (!client->irq) {
39946 		dev_err(&client->dev, "No interrupt support, no core IRQ\n");
39947 		return -EINVAL;
39948 	}
39949 
39950+	if (of_property_prepare_fn)
39951+		of_property_prepare_fn(rk808, &client->dev);
39952+
39953+	for (i = 0; i < nr_pre_init_regs; i++) {
39954+		ret = regmap_update_bits(rk808->regmap,
39955+					 pre_init_reg[i].addr,
39956+					 pre_init_reg[i].mask,
39957+					 pre_init_reg[i].value);
39958+		if (ret) {
39959+			dev_err(&client->dev,
39960+				"0x%x write err\n",
39961+				pre_init_reg[i].addr);
39962+			return ret;
39963+		}
39964+	}
39965+
39966+	if (pinctrl_init) {
39967+		ret = pinctrl_init(&client->dev, rk808);
39968+		if (ret)
39969+			return ret;
39970+	}
39971+
39972 	ret = regmap_add_irq_chip(rk808->regmap, client->irq,
39973 				  IRQF_ONESHOT, -1,
39974 				  rk808->regmap_irq_chip, &rk808->irq_data);
39975@@ -618,15 +1309,15 @@ static int rk808_probe(struct i2c_client *client,
39976 		return ret;
39977 	}
39978 
39979-	for (i = 0; i < nr_pre_init_regs; i++) {
39980-		ret = regmap_update_bits(rk808->regmap,
39981-					pre_init_reg[i].addr,
39982-					pre_init_reg[i].mask,
39983-					pre_init_reg[i].value);
39984+	if (battery_irq_chip) {
39985+		ret = regmap_add_irq_chip(rk808->regmap, client->irq,
39986+					  IRQF_ONESHOT | IRQF_SHARED, -1,
39987+					  battery_irq_chip,
39988+					  &rk808->battery_irq_data);
39989 		if (ret) {
39990 			dev_err(&client->dev,
39991-				"0x%x write err\n",
39992-				pre_init_reg[i].addr);
39993+				"Failed to add battery irq_chip %d\n", ret);
39994+			regmap_del_irq_chip(client->irq, rk808->irq_data);
39995 			return ret;
39996 		}
39997 	}
39998@@ -639,15 +1330,34 @@ static int rk808_probe(struct i2c_client *client,
39999 		goto err_irq;
40000 	}
40001 
40002-	if (of_property_read_bool(np, "rockchip,system-power-controller")) {
40003-		rk808_i2c_client = client;
40004-		pm_power_off = rk808_pm_power_off;
40005+	pm_off = of_property_read_bool(np, "rockchip,system-power-controller");
40006+	if (pm_off) {
40007+		if (!pm_power_off_prepare)
40008+			pm_power_off_prepare = rk808->pm_pwroff_prep_fn;
40009+
40010+		if (device_shutdown_fn) {
40011+			register_syscore_ops(&rk808_syscore_ops);
40012+			/* power off the system in syscore shutdown */
40013+			pm_shutdown = device_shutdown_fn;
40014+		}
40015+	}
40016+
40017+	rk8xx_kobj = kobject_create_and_add("rk8xx", NULL);
40018+	if (rk8xx_kobj) {
40019+		ret = sysfs_create_file(rk8xx_kobj, &rk8xx_attrs.attr);
40020+		if (ret)
40021+			dev_err(&client->dev, "create rk8xx sysfs error\n");
40022 	}
40023 
40024+	if (!pm_power_off)
40025+		pm_power_off = rk808_pm_power_off_dummy;
40026+
40027 	return 0;
40028 
40029 err_irq:
40030 	regmap_del_irq_chip(client->irq, rk808->irq_data);
40031+	if (battery_irq_chip)
40032+		regmap_del_irq_chip(client->irq, rk808->battery_irq_data);
40033 	return ret;
40034 }
40035 
40036@@ -656,21 +1366,45 @@ static int rk808_remove(struct i2c_client *client)
40037 	struct rk808 *rk808 = i2c_get_clientdata(client);
40038 
40039 	regmap_del_irq_chip(client->irq, rk808->irq_data);
40040+	mfd_remove_devices(&client->dev);
40041 
40042 	/**
40043 	 * pm_power_off may points to a function from another module.
40044 	 * Check if the pointer is set by us and only then overwrite it.
40045 	 */
40046-	if (pm_power_off == rk808_pm_power_off)
40047+	if (pm_power_off == rk808_pm_power_off_dummy)
40048 		pm_power_off = NULL;
40049 
40050+	/**
40051+	 * As above, check if the pointer is set by us before overwriting it.
40052+	 */
40053+	if (rk808->pm_pwroff_prep_fn &&
40054+	    pm_power_off_prepare == rk808->pm_pwroff_prep_fn)
40055+		pm_power_off_prepare = NULL;
40056+
40057+	if (pm_shutdown)
40058+		unregister_syscore_ops(&rk808_syscore_ops);
40059+
40060 	return 0;
40061 }
40062 
40063 static int __maybe_unused rk8xx_suspend(struct device *dev)
40064 {
40065-	struct rk808 *rk808 = i2c_get_clientdata(to_i2c_client(dev));
40066-	int ret = 0;
40067+	struct rk808 *rk808 = i2c_get_clientdata(rk808_i2c_client);
40068+	int i, ret = 0;
40069+	int value;
40070+
40071+	for (i = 0; i < suspend_reg_num; i++) {
40072+		ret = regmap_update_bits(rk808->regmap,
40073+					 suspend_reg[i].addr,
40074+					 suspend_reg[i].mask,
40075+					 suspend_reg[i].value);
40076+		if (ret) {
40077+			dev_err(dev, "0x%x write err\n",
40078+				suspend_reg[i].addr);
40079+			return ret;
40080+		}
40081+	}
40082 
40083 	switch (rk808->variant) {
40084 	case RK805_ID:
40085@@ -681,10 +1415,34 @@ static int __maybe_unused rk8xx_suspend(struct device *dev)
40086 		break;
40087 	case RK809_ID:
40088 	case RK817_ID:
40089-		ret = regmap_update_bits(rk808->regmap,
40090-					 RK817_SYS_CFG(3),
40091-					 RK817_SLPPIN_FUNC_MSK,
40092-					 SLPPIN_SLP_FUN);
40093+		if (rk808->pins && rk808->pins->p && rk808->pins->sleep) {
40094+			ret = regmap_update_bits(rk808->regmap,
40095+						 RK817_SYS_CFG(3),
40096+						 RK817_SLPPIN_FUNC_MSK,
40097+						 SLPPIN_NULL_FUN);
40098+			if (ret) {
40099+				dev_err(dev, "suspend: config SLPPIN_NULL_FUN error!\n");
40100+				return ret;
40101+			}
40102+
40103+			ret = regmap_update_bits(rk808->regmap,
40104+						 RK817_SYS_CFG(3),
40105+						 RK817_SLPPOL_MSK,
40106+						 RK817_SLPPOL_H);
40107+			if (ret) {
40108+				dev_err(dev, "suspend: config RK817_SLPPOL_H error!\n");
40109+				return ret;
40110+			}
40111+
40112+			/* the PMIC needs the SCL clock to synchronize its registers */
40113+			regmap_read(rk808->regmap, RK817_SYS_STS, &value);
40114+			mdelay(2);
40115+			ret = pinctrl_select_state(rk808->pins->p, rk808->pins->sleep);
40116+			if (ret) {
40117+				dev_err(dev, "failed to act slp pinctrl state\n");
40118+				return ret;
40119+			}
40120+		}
40121 		break;
40122 	default:
40123 		break;
40124@@ -695,16 +1453,51 @@ static int __maybe_unused rk8xx_suspend(struct device *dev)
40125 
40126 static int __maybe_unused rk8xx_resume(struct device *dev)
40127 {
40128-	struct rk808 *rk808 = i2c_get_clientdata(to_i2c_client(dev));
40129-	int ret = 0;
40130+	struct rk808 *rk808 = i2c_get_clientdata(rk808_i2c_client);
40131+	int i, ret = 0;
40132+	int value;
40133+
40134+	for (i = 0; i < resume_reg_num; i++) {
40135+		ret = regmap_update_bits(rk808->regmap,
40136+					 resume_reg[i].addr,
40137+					 resume_reg[i].mask,
40138+					 resume_reg[i].value);
40139+		if (ret) {
40140+			dev_err(dev, "0x%x write err\n",
40141+				resume_reg[i].addr);
40142+			return ret;
40143+		}
40144+	}
40145 
40146 	switch (rk808->variant) {
40147 	case RK809_ID:
40148 	case RK817_ID:
40149-		ret = regmap_update_bits(rk808->regmap,
40150-					 RK817_SYS_CFG(3),
40151-					 RK817_SLPPIN_FUNC_MSK,
40152-					 SLPPIN_NULL_FUN);
40153+		if (rk808->pins && rk808->pins->p && rk808->pins->reset) {
40154+			ret = regmap_update_bits(rk808->regmap,
40155+						 RK817_SYS_CFG(3),
40156+						 RK817_SLPPIN_FUNC_MSK,
40157+						 SLPPIN_NULL_FUN);
40158+			if (ret) {
40159+				dev_err(dev, "resume: config SLPPIN_NULL_FUN error!\n");
40160+				return ret;
40161+			}
40162+
40163+			ret = regmap_update_bits(rk808->regmap,
40164+						 RK817_SYS_CFG(3),
40165+						 RK817_SLPPOL_MSK,
40166+						 RK817_SLPPOL_L);
40167+			if (ret) {
40168+				dev_err(dev, "resume: config RK817_SLPPOL_L error!\n");
40169+				return ret;
40170+			}
40171+
40172+			/* the PMIC needs the SCL clock to synchronize its registers */
40173+			regmap_read(rk808->regmap, RK817_SYS_STS, &value);
40174+			mdelay(2);
40175+			ret = pinctrl_select_state(rk808->pins->p, rk808->pins->reset);
40176+			if (ret)
40177+				dev_dbg(dev, "failed to act reset pinctrl state\n");
40178+		}
40179 		break;
40180 	default:
40181 		break;
40182@@ -712,7 +1505,7 @@ static int __maybe_unused rk8xx_resume(struct device *dev)
40183 
40184 	return ret;
40185 }
40186-static SIMPLE_DEV_PM_OPS(rk8xx_pm_ops, rk8xx_suspend, rk8xx_resume);
40187+SIMPLE_DEV_PM_OPS(rk8xx_pm_ops, rk8xx_suspend, rk8xx_resume);
40188 
40189 static struct i2c_driver rk808_i2c_driver = {
40190 	.driver = {
40191@@ -722,10 +1515,23 @@ static struct i2c_driver rk808_i2c_driver = {
40192 	},
40193 	.probe    = rk808_probe,
40194 	.remove   = rk808_remove,
40195-	.shutdown = rk8xx_shutdown,
40196 };
40197 
40198+#ifdef CONFIG_ROCKCHIP_THUNDER_BOOT
40199+static int __init rk808_i2c_driver_init(void)
40200+{
40201+	return i2c_add_driver(&rk808_i2c_driver);
40202+}
40203+subsys_initcall(rk808_i2c_driver_init);
40204+
40205+static void __exit rk808_i2c_driver_exit(void)
40206+{
40207+	i2c_del_driver(&rk808_i2c_driver);
40208+}
40209+module_exit(rk808_i2c_driver_exit);
40210+#else
40211 module_i2c_driver(rk808_i2c_driver);
40212+#endif
40213 
40214 MODULE_LICENSE("GPL");
40215 MODULE_AUTHOR("Chris Zhong <zyw@rock-chips.com>");
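For context, the cmd string that rk817_reboot_notifier_handler() compares against pmic_rst_reg_only_cmd[] is the optional argument userspace passes to the reboot syscall (e.g. "reboot recovery"); it reaches the handler through the kernel's reboot notifier chain. A minimal sketch of such a notifier, with the handler name and command string purely illustrative:

#include <linux/notifier.h>
#include <linux/printk.h>
#include <linux/reboot.h>
#include <linux/string.h>

static int demo_reboot_notify(struct notifier_block *nb,
			      unsigned long action, void *data)
{
	const char *cmd = data;		/* NULL for a plain "reboot" */

	/* SYS_RESTART is the only action that carries a command string */
	if (action == SYS_RESTART && cmd && !strcmp(cmd, "recovery"))
		pr_info("recovery reboot requested\n");

	return NOTIFY_OK;
}

static struct notifier_block demo_reboot_nb = {
	.notifier_call = demo_reboot_notify,
};

/* registered once, e.g. from probe: register_reboot_notifier(&demo_reboot_nb); */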
40216diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
40217index 6622e3262..99cf93f58 100644
40218--- a/drivers/mmc/core/block.c
40219+++ b/drivers/mmc/core/block.c
40220@@ -47,10 +47,13 @@
40221 
40222 #include <linux/uaccess.h>
40223 
40224+#include <trace/hooks/mmc_core.h>
40225+
40226 #include "queue.h"
40227 #include "block.h"
40228 #include "core.h"
40229 #include "card.h"
40230+#include "../../../vendor/include/linux/crypto.h"
40231 #include "host.h"
40232 #include "bus.h"
40233 #include "mmc_ops.h"
40234@@ -541,7 +544,6 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
40235 		return mmc_sanitize(card);
40236 
40237 	mmc_wait_for_req(card->host, &mrq);
40238-	memcpy(&idata->ic.response, cmd.resp, sizeof(cmd.resp));
40239 
40240 	if (cmd.error) {
40241 		dev_err(mmc_dev(card->host), "%s: cmd error %d\n",
40242@@ -591,6 +593,8 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
40243 	if (idata->ic.postsleep_min_us)
40244 		usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);
40245 
40246+	memcpy(&(idata->ic.response), cmd.resp, sizeof(cmd.resp));
40247+
40248 	if (idata->rpmb || (cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) {
40249 		/*
40250 		 * Ensure RPMB/R1B command has completed by polling CMD13
40251@@ -961,6 +965,11 @@ static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host,
40252 		struct mmc_blk_data *main_md =
40253 			dev_get_drvdata(&host->card->dev);
40254 		int part_err;
40255+		bool allow = true;
40256+
40257+		trace_android_vh_mmc_blk_reset(host, err, &allow);
40258+		if (!allow)
40259+			return -ENODEV;
40260 
40261 		main_md->part_curr = main_md->part_type;
40262 		part_err = mmc_blk_part_switch(host->card, md->part_type);
40263@@ -1271,6 +1280,8 @@ static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq,
40264 
40265 	memset(brq, 0, sizeof(struct mmc_blk_request));
40266 
40267+	mmc_crypto_prepare_req(mqrq);
40268+
40269 	brq->mrq.data = &brq->data;
40270 	brq->mrq.tag = req->tag;
40271 
40272@@ -1804,6 +1815,7 @@ static void mmc_blk_mq_rw_recovery(struct mmc_queue *mq, struct request *req)
40273 	    err && mmc_blk_reset(md, card->host, type)) {
40274 		pr_err("%s: recovery failed!\n", req->rq_disk->disk_name);
40275 		mqrq->retries = MMC_NO_RETRIES;
40276+		trace_android_vh_mmc_blk_mq_rw_recovery(card);
40277 		return;
40278 	}
40279 
40280@@ -2897,6 +2909,9 @@ static void mmc_blk_remove_debugfs(struct mmc_card *card,
40281 
40282 #endif /* CONFIG_DEBUG_FS */
40283 
40284+struct mmc_card *this_card;
40285+EXPORT_SYMBOL(this_card);
40286+
40287 static int mmc_blk_probe(struct mmc_card *card)
40288 {
40289 	struct mmc_blk_data *md, *part_md;
40290@@ -2932,6 +2947,11 @@ static int mmc_blk_probe(struct mmc_card *card)
40291 
40292 	dev_set_drvdata(&card->dev, md);
40293 
40294+#if defined(CONFIG_MMC_DW_ROCKCHIP) || defined(CONFIG_MMC_SDHCI_OF_ARASAN)
40295+	if (card->type == MMC_TYPE_MMC)
40296+		this_card = card;
40297+#endif
40298+
40299 	if (mmc_add_disk(md))
40300 		goto out;
40301 
40302@@ -2968,6 +2988,12 @@ static void mmc_blk_remove(struct mmc_card *card)
40303 	struct mmc_blk_data *md = dev_get_drvdata(&card->dev);
40304 
40305 	mmc_blk_remove_debugfs(card, md);
40306+
40307+	#if defined(CONFIG_MMC_DW_ROCKCHIP) || defined(CONFIG_MMC_SDHCI_OF_ARASAN)
40308+	if (card->type == MMC_TYPE_MMC)
40309+		this_card = NULL;
40310+	#endif
40311+
40312 	mmc_blk_remove_parts(card, md);
40313 	pm_runtime_get_sync(&card->dev);
40314 	if (md->part_curr != md->part_type) {
40315diff --git a/drivers/mmc/core/block.h b/drivers/mmc/core/block.h
40316index 31153f656..0ce5d97d4 100644
40317--- a/drivers/mmc/core/block.h
40318+++ b/drivers/mmc/core/block.h
40319@@ -17,4 +17,6 @@ struct work_struct;
40320 
40321 void mmc_blk_mq_complete_work(struct work_struct *work);
40322 
40323+extern struct mmc_card *this_card;
40324+
40325 #endif
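The exported this_card pointer declared here hands vendor code outside the MMC core a reference to the boot eMMC once mmc_blk_probe() has run. A minimal, illustrative consumer (function name hypothetical) might look like:

#include <linux/mmc/card.h>

extern struct mmc_card *this_card;

/* Read the first CID word of the boot eMMC, or 0 if it is not present yet. */
static u32 demo_emmc_cid_word0(void)
{
	if (!this_card)
		return 0;

	return this_card->raw_cid[0];
}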
40326diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
40327index 8f2465394..34b406fd7 100644
40328--- a/drivers/mmc/core/core.c
40329+++ b/drivers/mmc/core/core.c
40330@@ -37,6 +37,7 @@
40331 
40332 #include "core.h"
40333 #include "card.h"
40334+#include "../../../vendor/include/linux/crypto.h"
40335 #include "bus.h"
40336 #include "host.h"
40337 #include "sdio_bus.h"
40338@@ -50,8 +51,6 @@
40339 #define MMC_ERASE_TIMEOUT_MS	(60 * 1000) /* 60 s */
40340 #define SD_DISCARD_TIMEOUT_MS	(250)
40341 
40342-static const unsigned freqs[] = { 400000, 300000, 200000, 100000 };
40343-
40344 /*
40345  * Enabling software CRCs on the data blocks can be a significant (30%)
40346  * performance cost, and for other reasons may not always be desired.
40347@@ -916,6 +915,7 @@ void mmc_set_clock(struct mmc_host *host, unsigned int hz)
40348 	host->ios.clock = hz;
40349 	mmc_set_ios(host);
40350 }
40351+EXPORT_SYMBOL_GPL(mmc_set_clock);
40352 
40353 int mmc_execute_tuning(struct mmc_card *card)
40354 {
40355@@ -995,7 +995,10 @@ void mmc_set_initial_state(struct mmc_host *host)
40356 		host->ops->hs400_enhanced_strobe(host, &host->ios);
40357 
40358 	mmc_set_ios(host);
40359+
40360+	mmc_crypto_set_initial_state(host);
40361 }
40362+EXPORT_SYMBOL_GPL(mmc_set_initial_state);
40363 
40364 /**
40365  * mmc_vdd_to_ocrbitnum - Convert a voltage to the OCR bit number
40366@@ -1265,6 +1268,7 @@ void mmc_set_timing(struct mmc_host *host, unsigned int timing)
40367 	host->ios.timing = timing;
40368 	mmc_set_ios(host);
40369 }
40370+EXPORT_SYMBOL_GPL(mmc_set_timing);
40371 
40372 /*
40373  * Select appropriate driver type for host.
40374@@ -2068,6 +2072,7 @@ int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen)
40375 }
40376 EXPORT_SYMBOL(mmc_set_blocklen);
40377 
40378+#ifndef CONFIG_ROCKCHIP_THUNDER_BOOT
40379 static void mmc_hw_reset_for_init(struct mmc_host *host)
40380 {
40381 	mmc_pwrseq_reset(host);
40382@@ -2076,6 +2081,7 @@ static void mmc_hw_reset_for_init(struct mmc_host *host)
40383 		return;
40384 	host->ops->hw_reset(host);
40385 }
40386+#endif
40387 
40388 /**
40389  * mmc_hw_reset - reset the card in hardware
40390@@ -2148,7 +2154,9 @@ static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq)
40391 	 * Some eMMCs (with VCCQ always on) may not be reset after power up, so
40392 	 * do a hardware reset if possible.
40393 	 */
40394+#ifndef CONFIG_ROCKCHIP_THUNDER_BOOT
40395 	mmc_hw_reset_for_init(host);
40396+#endif
40397 
40398 	/*
40399 	 * sdio_reset sends CMD52 to reset card.  Since we do not know
40400diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h
40401index a6c814fdb..61284786a 100644
40402--- a/drivers/mmc/core/core.h
40403+++ b/drivers/mmc/core/core.h
40404@@ -17,6 +17,8 @@ struct mmc_request;
40405 
40406 #define MMC_CMD_RETRIES        3
40407 
40408+static const unsigned int freqs[] = { 400000, 300000, 200000, 100000 };
40409+
40410 struct mmc_bus_ops {
40411 	void (*remove)(struct mmc_host *);
40412 	void (*detect)(struct mmc_host *);
40413@@ -30,6 +32,8 @@ struct mmc_bus_ops {
40414 	int (*hw_reset)(struct mmc_host *);
40415 	int (*sw_reset)(struct mmc_host *);
40416 	bool (*cache_enabled)(struct mmc_host *);
40417+
40418+	//ANDROID_VENDOR_DATA_ARRAY(1, 2);
40419 };
40420 
40421 void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops);
40422@@ -41,7 +45,7 @@ struct device_node *mmc_of_find_child_device(struct mmc_host *host,
40423 void mmc_init_erase(struct mmc_card *card);
40424 
40425 void mmc_set_chip_select(struct mmc_host *host, int mode);
40426-void mmc_set_clock(struct mmc_host *host, unsigned int hz);
40427+extern void mmc_set_clock(struct mmc_host *host, unsigned int hz);
40428 void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode);
40429 void mmc_set_bus_width(struct mmc_host *host, unsigned int width);
40430 u32 mmc_select_voltage(struct mmc_host *host, u32 ocr);
40431@@ -81,6 +85,8 @@ int mmc_attach_mmc(struct mmc_host *host);
40432 int mmc_attach_sd(struct mmc_host *host);
40433 int mmc_attach_sdio(struct mmc_host *host);
40434 
40435+int sdio_reset_comm(struct mmc_card *card);
40436+
40437 /* Module parameters */
40438 extern bool use_spi_crc;
40439 
40440diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
40441index 87807ef01..6da97f26f 100644
40442--- a/drivers/mmc/core/mmc.c
40443+++ b/drivers/mmc/core/mmc.c
40444@@ -9,13 +9,15 @@
40445 
40446 #include <linux/err.h>
40447 #include <linux/of.h>
40448+#include <linux/of_address.h>
40449 #include <linux/slab.h>
40450 #include <linux/stat.h>
40451 #include <linux/pm_runtime.h>
40452-
40453+#include <linux/mm.h>
40454 #include <linux/mmc/host.h>
40455 #include <linux/mmc/card.h>
40456 #include <linux/mmc/mmc.h>
40457+#include <linux/resource.h>
40458 
40459 #include "core.h"
40460 #include "card.h"
40461@@ -65,6 +67,7 @@ static const unsigned int taac_mant[] = {
40462 /*
40463  * Given the decoded CSD structure, decode the raw CID to our CID structure.
40464  */
40465+#ifndef CONFIG_ROCKCHIP_THUNDER_BOOT
40466 static int mmc_decode_cid(struct mmc_card *card)
40467 {
40468 	u32 *resp = card->raw_cid;
40469@@ -116,6 +119,7 @@ static int mmc_decode_cid(struct mmc_card *card)
40470 
40471 	return 0;
40472 }
40473+#endif
40474 
40475 static void mmc_set_erase_size(struct mmc_card *card)
40476 {
40477@@ -662,14 +666,72 @@ static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd)
40478 	return err;
40479 }
40480 
40481+#ifdef CONFIG_ROCKCHIP_THUNDER_BOOT
40482+static void *mmc_tb_map_ecsd(phys_addr_t start, size_t len)
40483+{
40484+	int i;
40485+	void *vaddr;
40486+	pgprot_t pgprot = PAGE_KERNEL;
40487+	phys_addr_t phys;
40488+	int npages = PAGE_ALIGN(len) / PAGE_SIZE;
40489+	struct page **p = vmalloc(sizeof(struct page *) * npages);
40490+
40491+	if (!p)
40492+		return NULL;
40493+
40494+	phys = start;
40495+	for (i = 0; i < npages; i++) {
40496+		p[i] = phys_to_page(phys);
40497+		phys += PAGE_SIZE;
40498+	}
40499+
40500+	vaddr = vmap(p, npages, VM_MAP, pgprot);
40501+	vfree(p);
40502+
40503+	return vaddr;
40504+}
40505+#endif
40506+
40507 static int mmc_read_ext_csd(struct mmc_card *card)
40508 {
40509 	u8 *ext_csd;
40510 	int err;
40511-
40512+#ifdef CONFIG_ROCKCHIP_THUNDER_BOOT
40513+	void *ecsd;
40514+	bool valid_ecsd = false;
40515+	struct device_node *mem;
40516+	struct resource reg;
40517+	struct device *dev = card->host->parent;
40518+#endif
40519 	if (!mmc_can_ext_csd(card))
40520 		return 0;
40521 
40522+#ifdef CONFIG_ROCKCHIP_THUNDER_BOOT
40523+	mem = of_parse_phandle(dev->of_node, "memory-region-ecsd", 0);
40524+	if (mem) {
40525+		err = of_address_to_resource(mem, 0, &reg);
40526+		if (err < 0) {
40527+			dev_err(dev, "failed to get resource\n");
40528+			goto get_ecsd;
40529+		}
40530+
40531+		ecsd = mmc_tb_map_ecsd(reg.start, resource_size(&reg));
40532+		if (!ecsd)
40533+			goto get_ecsd;
40534+
40535+		if (readl(ecsd + SZ_512) == 0x55aa55aa) {
40536+			ext_csd = ecsd;
40537+			valid_ecsd = true;
40538+			goto decode;
40539+		} else {
40540+			dev_dbg(dev, "invalid ecsd tag!\n");
40541+		}
40542+	} else {
40543+		dev_info(dev, "cannot find \"memory-region-ecsd\" property\n");
40544+	}
40545+
40546+get_ecsd:
40547+#endif
40548 	err = mmc_get_ext_csd(card, &ext_csd);
40549 	if (err) {
40550 		/* If the host or the card can't do the switch,
40551@@ -694,12 +756,22 @@ static int mmc_read_ext_csd(struct mmc_card *card)
40552 
40553 		return err;
40554 	}
40555-
40556+#ifdef CONFIG_ROCKCHIP_THUNDER_BOOT
40557+decode:
40558+#endif
40559 	err = mmc_decode_ext_csd(card, ext_csd);
40560+#ifdef CONFIG_ROCKCHIP_THUNDER_BOOT
40561+	if (!valid_ecsd)
40562+		kfree(ext_csd);
40563+	else
40564+		vunmap(ecsd);
40565+#else
40566 	kfree(ext_csd);
40567+#endif
40568 	return err;
40569 }
40570 
40571+#ifndef CONFIG_ROCKCHIP_THUNDER_BOOT
40572 static int mmc_compare_ext_csds(struct mmc_card *card, unsigned bus_width)
40573 {
40574 	u8 *bw_ext_csd;
40575@@ -772,6 +844,7 @@ static int mmc_compare_ext_csds(struct mmc_card *card, unsigned bus_width)
40576 	kfree(bw_ext_csd);
40577 	return err;
40578 }
40579+#endif
40580 
40581 MMC_DEV_ATTR(cid, "%08x%08x%08x%08x\n", card->raw_cid[0], card->raw_cid[1],
40582 	card->raw_cid[2], card->raw_cid[3]);
40583@@ -972,7 +1045,7 @@ static int mmc_select_powerclass(struct mmc_card *card)
40584 /*
40585  * Set the bus speed for the selected speed mode.
40586  */
40587-static void mmc_set_bus_speed(struct mmc_card *card)
40588+void mmc_set_bus_speed(struct mmc_card *card)
40589 {
40590 	unsigned int max_dtr = (unsigned int)-1;
40591 
40592@@ -992,7 +1065,7 @@ static void mmc_set_bus_speed(struct mmc_card *card)
40593  * If the bus width is changed successfully, return the selected width value.
40594  * Zero is returned instead of error value if the wide width is not supported.
40595  */
40596-static int mmc_select_bus_width(struct mmc_card *card)
40597+int mmc_select_bus_width(struct mmc_card *card)
40598 {
40599 	static unsigned ext_csd_bits[] = {
40600 		EXT_CSD_BUS_WIDTH_8,
40601@@ -1041,11 +1114,12 @@ static int mmc_select_bus_width(struct mmc_card *card)
40602 		 * compare ext_csd previously read in 1 bit mode
40603 		 * against ext_csd at new bus width
40604 		 */
40605+#ifndef CONFIG_ROCKCHIP_THUNDER_BOOT
40606 		if (!(host->caps & MMC_CAP_BUS_WIDTH_TEST))
40607 			err = mmc_compare_ext_csds(card, bus_width);
40608 		else
40609 			err = mmc_bus_test(card, bus_width);
40610-
40611+#endif
40612 		if (!err) {
40613 			err = bus_width;
40614 			break;
40615@@ -1057,11 +1131,12 @@ static int mmc_select_bus_width(struct mmc_card *card)
40616 
40617 	return err;
40618 }
40619+EXPORT_SYMBOL_GPL(mmc_select_bus_width);
40620 
40621 /*
40622  * Switch to the high-speed mode
40623  */
40624-static int mmc_select_hs(struct mmc_card *card)
40625+int mmc_select_hs(struct mmc_card *card)
40626 {
40627 	int err;
40628 
40629@@ -1075,11 +1150,12 @@ static int mmc_select_hs(struct mmc_card *card)
40630 
40631 	return err;
40632 }
40633+EXPORT_SYMBOL_GPL(mmc_select_hs);
40634 
40635 /*
40636  * Activate wide bus and DDR if supported.
40637  */
40638-static int mmc_select_hs_ddr(struct mmc_card *card)
40639+int mmc_select_hs_ddr(struct mmc_card *card)
40640 {
40641 	struct mmc_host *host = card->host;
40642 	u32 bus_width, ext_csd_bits;
40643@@ -1148,8 +1224,9 @@ static int mmc_select_hs_ddr(struct mmc_card *card)
40644 
40645 	return err;
40646 }
40647+EXPORT_SYMBOL_GPL(mmc_select_hs_ddr);
40648 
40649-static int mmc_select_hs400(struct mmc_card *card)
40650+int mmc_select_hs400(struct mmc_card *card)
40651 {
40652 	struct mmc_host *host = card->host;
40653 	unsigned int max_dtr;
40654@@ -1235,6 +1312,7 @@ static int mmc_select_hs400(struct mmc_card *card)
40655 	       __func__, err);
40656 	return err;
40657 }
40658+EXPORT_SYMBOL_GPL(mmc_select_hs400);
40659 
40660 int mmc_hs200_to_hs400(struct mmc_card *card)
40661 {
40662@@ -1378,12 +1456,9 @@ static int mmc_select_hs400es(struct mmc_card *card)
40663 		goto out_err;
40664 	}
40665 
40666-	/*
40667-	 * Bump to HS timing and frequency. Some cards don't handle
40668-	 * SEND_STATUS reliably at the initial frequency.
40669-	 */
40670 	mmc_set_timing(host, MMC_TIMING_MMC_HS);
40671-	mmc_set_bus_speed(card);
40672+	/* Set clock immediately after changing timing */
40673+	mmc_set_clock(host, card->ext_csd.hs_max_dtr);
40674 
40675 	err = mmc_switch_status(card, true);
40676 	if (err)
40677@@ -1446,7 +1521,7 @@ static int mmc_select_hs400es(struct mmc_card *card)
40678 static int mmc_select_hs200(struct mmc_card *card)
40679 {
40680 	struct mmc_host *host = card->host;
40681-	unsigned int old_timing, old_signal_voltage, old_clock;
40682+	unsigned int old_timing, old_signal_voltage;
40683 	int err = -EINVAL;
40684 	u8 val;
40685 
40686@@ -1477,17 +1552,8 @@ static int mmc_select_hs200(struct mmc_card *card)
40687 				   false, true);
40688 		if (err)
40689 			goto err;
40690-
40691-		/*
40692-		 * Bump to HS timing and frequency. Some cards don't handle
40693-		 * SEND_STATUS reliably at the initial frequency.
40694-		 * NB: We can't move to full (HS200) speeds until after we've
40695-		 * successfully switched over.
40696-		 */
40697 		old_timing = host->ios.timing;
40698-		old_clock = host->ios.clock;
40699 		mmc_set_timing(host, MMC_TIMING_MMC_HS200);
40700-		mmc_set_clock(card->host, card->ext_csd.hs_max_dtr);
40701 
40702 		/*
40703 		 * For HS200, CRC errors are not a reliable way to know the
40704@@ -1500,10 +1566,8 @@ static int mmc_select_hs200(struct mmc_card *card)
40705 		 * mmc_select_timing() assumes timing has not changed if
40706 		 * it is a switch error.
40707 		 */
40708-		if (err == -EBADMSG) {
40709-			mmc_set_clock(host, old_clock);
40710+		if (err == -EBADMSG)
40711 			mmc_set_timing(host, old_timing);
40712-		}
40713 	}
40714 err:
40715 	if (err) {
40716@@ -1520,7 +1584,7 @@ static int mmc_select_hs200(struct mmc_card *card)
40717 /*
40718  * Activate High Speed, HS200 or HS400ES mode if supported.
40719  */
40720-static int mmc_select_timing(struct mmc_card *card)
40721+int mmc_select_timing(struct mmc_card *card)
40722 {
40723 	int err = 0;
40724 
40725@@ -1545,12 +1609,13 @@ static int mmc_select_timing(struct mmc_card *card)
40726 	mmc_set_bus_speed(card);
40727 	return 0;
40728 }
40729+EXPORT_SYMBOL_GPL(mmc_select_timing);
40730 
40731 /*
40732  * Execute tuning sequence to seek the proper bus operating
40733  * conditions for HS200 and HS400, which sends CMD21 to the device.
40734  */
40735-static int mmc_hs200_tuning(struct mmc_card *card)
40736+int mmc_hs200_tuning(struct mmc_card *card)
40737 {
40738 	struct mmc_host *host = card->host;
40739 
40740@@ -1565,6 +1630,7 @@ static int mmc_hs200_tuning(struct mmc_card *card)
40741 
40742 	return mmc_execute_tuning(card);
40743 }
40744+EXPORT_SYMBOL_GPL(mmc_hs200_tuning);
40745 
40746 /*
40747  * Handle the detection and initialisation of a card.
40748@@ -1593,7 +1659,9 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
40749 	 * respond.
40750 	 * mmc_go_idle is needed for eMMC that are asleep
40751 	 */
40752+#ifndef CONFIG_ROCKCHIP_THUNDER_BOOT
40753 	mmc_go_idle(host);
40754+#endif
40755 
40756 	/* The extra bit indicates that we support high capacity */
40757 	err = mmc_send_op_cond(host, ocr | (1 << 30), &rocr);
40758@@ -1638,7 +1706,9 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
40759 		card->ocr = ocr;
40760 		card->type = MMC_TYPE_MMC;
40761 		card->rca = 1;
40762+#ifndef CONFIG_ROCKCHIP_THUNDER_BOOT
40763 		memcpy(card->raw_cid, cid, sizeof(card->raw_cid));
40764+#endif
40765 	}
40766 
40767 	/*
40768@@ -1669,9 +1739,11 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
40769 		err = mmc_decode_csd(card);
40770 		if (err)
40771 			goto free_card;
40772+#ifndef CONFIG_ROCKCHIP_THUNDER_BOOT
40773 		err = mmc_decode_cid(card);
40774 		if (err)
40775 			goto free_card;
40776+#endif
40777 	}
40778 
40779 	/*
40780@@ -1814,6 +1886,7 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
40781 	/*
40782 	 * Enable HPI feature (if supported)
40783 	 */
40784+#ifndef CONFIG_ROCKCHIP_THUNDER_BOOT
40785 	if (card->ext_csd.hpi) {
40786 		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
40787 				EXT_CSD_HPI_MGMT, 1,
40788@@ -1829,7 +1902,7 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
40789 			card->ext_csd.hpi_en = 1;
40790 		}
40791 	}
40792-
40793+#endif
40794 	/*
40795 	 * If cache size is higher than 0, this indicates the existence of cache
40796 	 * and it can be turned on. Note that some eMMCs from Micron has been
40797@@ -2110,14 +2183,29 @@ static int mmc_suspend(struct mmc_host *host)
40798 static int _mmc_resume(struct mmc_host *host)
40799 {
40800 	int err = 0;
40801+	int i;
40802 
40803 	mmc_claim_host(host);
40804 
40805 	if (!mmc_card_suspended(host->card))
40806 		goto out;
40807 
40808-	mmc_power_up(host, host->card->ocr);
40809-	err = mmc_init_card(host, host->card->ocr, host->card);
40810+	/*
40811+	 * Try to fall back host->f_init to a lower frequency
40812+	 * if initializing the mmc card fails after resume.
40813+	 */
40814+	for (i = 0; i < ARRAY_SIZE(freqs); i++) {
40815+		if (host->f_init < max(freqs[i], host->f_min))
40816+			continue;
40817+		else
40818+			host->f_init = max(freqs[i], host->f_min);
40819+
40820+		mmc_power_up(host, host->card->ocr);
40821+		err = mmc_init_card(host, host->card->ocr, host->card);
40822+		if (!err)
40823+			break;
40824+	}
40825+
40826 	mmc_card_clr_suspended(host->card);
40827 
40828 out:
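The resume path above now retries mmc_init_card() while walking host->f_init down the freqs[] table (400/300/200/100 kHz), clamping each candidate to host->f_min. The frequency selection on its own, as an illustrative standalone helper:

#include <linux/kernel.h>

static const unsigned int demo_freqs[] = { 400000, 300000, 200000, 100000 };

/* Pick the highest retry frequency that is <= f_init and >= f_min. */
static unsigned int demo_next_init_freq(unsigned int f_init, unsigned int f_min)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(demo_freqs); i++) {
		unsigned int f = max(demo_freqs[i], f_min);

		if (f_init >= f)
			return f;
	}

	return f_min;
}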
40829diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
40830index ebad70e44..2715e5031 100644
40831--- a/drivers/mmc/core/mmc_ops.c
40832+++ b/drivers/mmc/core/mmc_ops.c
40833@@ -145,24 +145,26 @@ int mmc_go_idle(struct mmc_host *host)
40834 	 * rules that must accommodate non-MMC slaves which this layer
40835 	 * won't even know about.
40836 	 */
40837+#ifndef CONFIG_ROCKCHIP_THUNDER_BOOT
40838 	if (!mmc_host_is_spi(host)) {
40839 		mmc_set_chip_select(host, MMC_CS_HIGH);
40840 		mmc_delay(1);
40841 	}
40842-
40843+#endif
40844 	cmd.opcode = MMC_GO_IDLE_STATE;
40845 	cmd.arg = 0;
40846 	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_NONE | MMC_CMD_BC;
40847 
40848 	err = mmc_wait_for_cmd(host, &cmd, 0);
40849 
40850+#ifndef CONFIG_ROCKCHIP_THUNDER_BOOT
40851 	mmc_delay(1);
40852 
40853 	if (!mmc_host_is_spi(host)) {
40854 		mmc_set_chip_select(host, MMC_CS_DONTCARE);
40855 		mmc_delay(1);
40856 	}
40857-
40858+#endif
40859 	host->use_spi_crc = 0;
40860 
40861 	return err;
40862@@ -182,6 +184,15 @@ int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
40863 		if (err)
40864 			break;
40865 
40866+		/*
40867+		 * According to eMMC specification v5.1 section A6.1, the R3
40868+		 * response value should be 0x00FF8080, 0x40FF8080, 0x80FF8080
40869+		 * or 0xC0FF8080. The eMMC device may be abnormal if it reports
40870+		 * any other OCR value.
40871+		 */
40872+		if ((cmd.resp[0] & 0xFFFFFF) != 0x00FF8080)
40873+			continue;
40874+
40875 		/* wait until reset completes */
40876 		if (mmc_host_is_spi(host)) {
40877 			if (!(cmd.resp[0] & R1_SPI_IDLE))
40878@@ -193,8 +204,6 @@ int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
40879 
40880 		err = -ETIMEDOUT;
40881 
40882-		mmc_delay(10);
40883-
40884 		/*
40885 		 * According to eMMC specification v5.1 section 6.4.3, we
40886 		 * should issue CMD1 repeatedly in the idle state until
40887@@ -204,6 +213,11 @@ int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
40888 		 */
40889 		if (!ocr && !mmc_host_is_spi(host))
40890 			cmd.arg = cmd.resp[0] | BIT(30);
40891+#ifndef CONFIG_ROCKCHIP_THUNDER_BOOT
40892+		mmc_delay(1);
40893+#else
40894+		udelay(1);
40895+#endif
40896 	}
40897 
40898 	if (rocr && !mmc_host_is_spi(host))
40899@@ -989,9 +1003,10 @@ int mmc_flush_cache(struct mmc_card *card)
40900 	int err = 0;
40901 
40902 	if (mmc_cache_enabled(card->host)) {
40903-		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
40904-				 EXT_CSD_FLUSH_CACHE, 1,
40905-				 MMC_CACHE_FLUSH_TIMEOUT_MS);
40906+		err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
40907+				   EXT_CSD_FLUSH_CACHE, 1,
40908+				   MMC_CACHE_FLUSH_TIMEOUT_MS, 0,
40909+				   false, false);
40910 		if (err)
40911 			pr_err("%s: cache flush error %d\n",
40912 					mmc_hostname(card->host), err);
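The new OCR sanity check in mmc_send_op_cond() keeps polling CMD1 whenever the low 24 bits of the R3 response differ from 0xFF8080; the four values quoted in the comment only differ in the top byte, which the 0xFFFFFF mask discards. The same test, as an illustrative helper:

#include <linux/types.h>

/* True if the R3/OCR response matches one of 0x00FF8080, 0x40FF8080,
 * 0x80FF8080 or 0xC0FF8080 (only bits [31:24] may differ). */
static bool demo_r3_ocr_ok(u32 resp)
{
	return (resp & 0xFFFFFF) == 0x00FF8080;
}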
40913diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
40914index 002426e3c..2e2db41be 100644
40915--- a/drivers/mmc/core/queue.c
40916+++ b/drivers/mmc/core/queue.c
40917@@ -19,6 +19,7 @@
40918 #include "block.h"
40919 #include "core.h"
40920 #include "card.h"
40921+#include "../../../vendor/include/linux/crypto.h"
40922 #include "host.h"
40923 
40924 #define MMC_DMA_MAP_MERGE_SEGMENTS	512
40925@@ -70,6 +71,7 @@ enum mmc_issue_type mmc_issue_type(struct mmc_queue *mq, struct request *req)
40926 
40927 	return MMC_ISSUE_SYNC;
40928 }
40929+EXPORT_SYMBOL_GPL(mmc_issue_type);
40930 
40931 static void __mmc_cqe_recovery_notifier(struct mmc_queue *mq)
40932 {
40933@@ -407,6 +409,8 @@ static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
40934 	mutex_init(&mq->complete_lock);
40935 
40936 	init_waitqueue_head(&mq->wait);
40937+
40938+	mmc_crypto_setup_queue(mq->queue, host);
40939 }
40940 
40941 static inline bool mmc_merge_capable(struct mmc_host *host)
40942diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
40943index 868b121ce..e7f2eebd8 100644
40944--- a/drivers/mmc/core/sd.c
40945+++ b/drivers/mmc/core/sd.c
40946@@ -18,6 +18,8 @@
40947 #include <linux/mmc/mmc.h>
40948 #include <linux/mmc/sd.h>
40949 
40950+#include <trace/hooks/mmc_core.h>
40951+
40952 #include "core.h"
40953 #include "card.h"
40954 #include "host.h"
40955@@ -462,6 +464,8 @@ static void sd_update_bus_speed_mode(struct mmc_card *card)
40956 		    SD_MODE_UHS_SDR12)) {
40957 			card->sd_bus_speed = UHS_SDR12_BUS_SPEED;
40958 	}
40959+
40960+	trace_android_vh_sd_update_bus_speed_mode(card);
40961 }
40962 
40963 static int sd_set_bus_speed_mode(struct mmc_card *card, u8 *status)
40964@@ -1228,6 +1232,49 @@ static int _mmc_sd_suspend(struct mmc_host *host)
40965 	return err;
40966 }
40967 
40968+static int _mmc_sd_shutdown(struct mmc_host *host)
40969+{
40970+	int err = 0;
40971+
40972+	if (WARN_ON(!host) || WARN_ON(!host->card))
40973+		return 0;
40974+
40975+	mmc_claim_host(host);
40976+
40977+	if (mmc_card_suspended(host->card))
40978+		goto out;
40979+
40980+	if (!mmc_host_is_spi(host))
40981+		err = mmc_deselect_cards(host);
40982+
40983+	if (!err) {
40984+		mmc_power_off(host);
40985+		mmc_card_set_suspended(host->card);
40986+	}
40987+
40988+	host->ios.signal_voltage = MMC_SIGNAL_VOLTAGE_330;
40989+	host->ios.vdd = fls(host->ocr_avail) - 1;
40990+	mmc_regulator_set_vqmmc(host, &host->ios);
40991+	pr_info("Set signal voltage to initial state\n");
40992+
40993+out:
40994+	mmc_release_host(host);
40995+	return err;
40996+}
40997+
40998+static int mmc_sd_shutdown(struct mmc_host *host)
40999+{
41000+	int err;
41001+
41002+	err = _mmc_sd_shutdown(host);
41003+	if (!err) {
41004+		pm_runtime_disable(&host->card->dev);
41005+		pm_runtime_set_suspended(&host->card->dev);
41006+	}
41007+
41008+	return err;
41009+}
41010+
41011 /*
41012  * Callback for suspend
41013  */
41014@@ -1322,7 +1369,7 @@ static const struct mmc_bus_ops mmc_sd_ops = {
41015 	.suspend = mmc_sd_suspend,
41016 	.resume = mmc_sd_resume,
41017 	.alive = mmc_sd_alive,
41018-	.shutdown = mmc_sd_suspend,
41019+	.shutdown = mmc_sd_shutdown,
41020 	.hw_reset = mmc_sd_hw_reset,
41021 };
41022 
41023@@ -1396,5 +1443,7 @@ int mmc_attach_sd(struct mmc_host *host)
41024 	pr_err("%s: error %d whilst initialising SD card\n",
41025 		mmc_hostname(host), err);
41026 
41027+	trace_android_vh_mmc_attach_sd(host, ocr, err);
41028+
41029 	return err;
41030 }
41031diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
41032index 99a4ce68d..108b0f2ae 100644
41033--- a/drivers/mmc/core/sdio.c
41034+++ b/drivers/mmc/core/sdio.c
41035@@ -1308,3 +1308,49 @@ int mmc_attach_sdio(struct mmc_host *host)
41036 	return err;
41037 }
41038 
41039+int sdio_reset_comm(struct mmc_card *card)
41040+{
41041+	struct mmc_host *host = card->host;
41042+	u32 ocr;
41043+	u32 rocr;
41044+	int err;
41045+
41046+#ifdef CONFIG_SDIO_KEEPALIVE
41047+	if (host->chip_alive)
41048+		host->chip_alive = 0;
41049+#endif
41050+
41051+	pr_info("%s()\n", __func__);
41052+	mmc_claim_host(host);
41053+
41054+	mmc_retune_disable(host);
41055+
41056+	mmc_power_cycle(host, host->card->ocr);
41057+	mmc_go_idle(host);
41058+
41059+	mmc_set_clock(host, host->f_min);
41060+
41061+	err = mmc_send_io_op_cond(host, 0, &ocr);
41062+	if (err)
41063+		goto err;
41064+
41065+	rocr = mmc_select_voltage(host, ocr);
41066+	if (!rocr) {
41067+		err = -EINVAL;
41068+		goto err;
41069+	}
41070+
41071+	err = mmc_sdio_init_card(host, rocr, card);
41072+	if (err)
41073+		goto err;
41074+
41075+	mmc_release_host(host);
41076+	return 0;
41077+err:
41078+	pr_err("%s: Error resetting SDIO communications (%d)\n",
41079+	       mmc_hostname(host), err);
41080+	mmc_release_host(host);
41081+	return err;
41083+}
41084+EXPORT_SYMBOL(sdio_reset_comm);
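sdio_reset_comm() is intended for SDIO function drivers (typically WiFi) that need the core to re-run the card initialisation sequence after a firmware hang. A rough usage sketch, with all driver-side names hypothetical:

#include <linux/mmc/card.h>
#include <linux/mmc/sdio_func.h>

extern int sdio_reset_comm(struct mmc_card *card);

static int demo_wifi_recover(struct sdio_func *func)
{
	int err;

	/* re-initialise the card that backs this SDIO function */
	err = sdio_reset_comm(func->card);
	if (err)
		return err;

	/* then re-enable the I/O function itself */
	sdio_claim_host(func);
	err = sdio_enable_func(func);
	sdio_release_host(func);

	return err;
}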
41085diff --git a/drivers/mmc/core/slot-gpio.c b/drivers/mmc/core/slot-gpio.c
41086index 05e907451..298877a50 100644
41087--- a/drivers/mmc/core/slot-gpio.c
41088+++ b/drivers/mmc/core/slot-gpio.c
41089@@ -14,6 +14,8 @@
41090 #include <linux/module.h>
41091 #include <linux/slab.h>
41092 
41093+#include <trace/hooks/mmc_core.h>
41094+
41095 #include "slot-gpio.h"
41096 
41097 struct mmc_gpio {
41098@@ -30,6 +32,11 @@ static irqreturn_t mmc_gpio_cd_irqt(int irq, void *dev_id)
41099 	/* Schedule a card detection after a debounce timeout */
41100 	struct mmc_host *host = dev_id;
41101 	struct mmc_gpio *ctx = host->slot.handler_priv;
41102+	bool allow = true;
41103+
41104+	trace_android_vh_mmc_gpio_cd_irqt(host, &allow);
41105+	if (!allow)
41106+		return IRQ_HANDLED;
41107 
41108 	host->trigger_card_event = true;
41109 	mmc_detect_change(host, msecs_to_jiffies(ctx->cd_debounce_delay_ms));
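trace_android_vh_mmc_gpio_cd_irqt() is an Android vendor hook declared in trace/hooks/mmc_core.h; a vendor module attaches a probe through the generated register helper. A rough sketch, assuming the usual vendor-hook probe signature (registered data pointer first, then the hook arguments):

#include <linux/module.h>
#include <trace/hooks/mmc_core.h>

/* Vendor probe: set *allow to false to swallow the card-detect interrupt. */
static void demo_cd_irq_filter(void *data, struct mmc_host *host, bool *allow)
{
	*allow = true;
}

static int __init demo_hook_init(void)
{
	return register_trace_android_vh_mmc_gpio_cd_irqt(demo_cd_irq_filter, NULL);
}
module_init(demo_hook_init);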
41110diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
41111index 82e1fbd6b..c73238181 100644
41112--- a/drivers/mmc/host/Kconfig
41113+++ b/drivers/mmc/host/Kconfig
41114@@ -544,6 +544,7 @@ config MMC_SDHCI_MSM
41115 	depends on MMC_SDHCI_PLTFM
41116 	select MMC_SDHCI_IO_ACCESSORS
41117 	select MMC_CQHCI
41118+	select QCOM_SCM if MMC_CRYPTO && ARCH_QCOM
41119 	help
41120 	  This selects the Secure Digital Host Controller Interface (SDHCI)
41121 	  support present in Qualcomm SOCs. The controller supports
41122diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
41123index 451c25fc2..d4fec19a1 100644
41124--- a/drivers/mmc/host/Makefile
41125+++ b/drivers/mmc/host/Makefile
41126@@ -103,7 +103,7 @@ obj-$(CONFIG_MMC_SDHCI_MICROCHIP_PIC32)	+= sdhci-pic32.o
41127 obj-$(CONFIG_MMC_SDHCI_BRCMSTB)		+= sdhci-brcmstb.o
41128 obj-$(CONFIG_MMC_SDHCI_OMAP)		+= sdhci-omap.o
41129 obj-$(CONFIG_MMC_SDHCI_SPRD)		+= sdhci-sprd.o
41130-obj-$(CONFIG_MMC_CQHCI)			+= cqhci.o
41131+#obj-$(CONFIG_MMC_CQHCI)			+= cqhci.o
41132 obj-$(CONFIG_MMC_HSQ)			+= mmc_hsq.o
41133 
41134 ifeq ($(CONFIG_CB710_DEBUG),y)
41135diff --git a/drivers/mmc/host/cqhci.h b/drivers/mmc/host/cqhci.h
41136index 89bf6adbc..ba9387ed9 100644
41137--- a/drivers/mmc/host/cqhci.h
41138+++ b/drivers/mmc/host/cqhci.h
41139@@ -22,10 +22,13 @@
41140 
41141 /* capabilities */
41142 #define CQHCI_CAP			0x04
41143+#define CQHCI_CAP_CS			0x10000000 /* Crypto Support */
41144+
41145 /* configuration */
41146 #define CQHCI_CFG			0x08
41147 #define CQHCI_DCMD			0x00001000
41148 #define CQHCI_TASK_DESC_SZ		0x00000100
41149+#define CQHCI_CRYPTO_GENERAL_ENABLE	0x00000002
41150 #define CQHCI_ENABLE			0x00000001
41151 
41152 /* control */
41153@@ -39,8 +42,11 @@
41154 #define CQHCI_IS_TCC			BIT(1)
41155 #define CQHCI_IS_RED			BIT(2)
41156 #define CQHCI_IS_TCL			BIT(3)
41157+#define CQHCI_IS_GCE			BIT(4) /* General Crypto Error */
41158+#define CQHCI_IS_ICCE			BIT(5) /* Invalid Crypto Config Error */
41159 
41160-#define CQHCI_IS_MASK (CQHCI_IS_TCC | CQHCI_IS_RED)
41161+#define CQHCI_IS_MASK (CQHCI_IS_TCC | CQHCI_IS_RED | \
41162+		       CQHCI_IS_GCE | CQHCI_IS_ICCE)
41163 
41164 /* interrupt status enable */
41165 #define CQHCI_ISTE			0x14
41166@@ -78,6 +84,9 @@
41167 /* task clear */
41168 #define CQHCI_TCLR			0x38
41169 
41170+/* task descriptor processing error */
41171+#define CQHCI_TDPE			0x3c
41172+
41173 /* send status config 1 */
41174 #define CQHCI_SSC1			0x40
41175 #define CQHCI_SSC1_CBC_MASK		GENMASK(19, 16)
41176@@ -107,6 +116,10 @@
41177 /* command response argument */
41178 #define CQHCI_CRA			0x5C
41179 
41180+/* crypto capabilities */
41181+#define CQHCI_CCAP			0x100
41182+#define CQHCI_CRYPTOCAP			0x104
41183+
41184 #define CQHCI_INT_ALL			0xF
41185 #define CQHCI_IC_DEFAULT_ICCTH		31
41186 #define CQHCI_IC_DEFAULT_ICTOVAL	1
41187@@ -133,11 +146,70 @@
41188 #define CQHCI_CMD_TIMING(x)		(((x) & 1) << 22)
41189 #define CQHCI_RESP_TYPE(x)		(((x) & 0x3) << 23)
41190 
41191+/* crypto task descriptor fields (for bits 64-127 of task descriptor) */
41192+#define CQHCI_CRYPTO_ENABLE_BIT		(1ULL << 47)
41193+#define CQHCI_CRYPTO_KEYSLOT(x)		((u64)(x) << 32)
41194+
41195 /* transfer descriptor fields */
41196 #define CQHCI_DAT_LENGTH(x)		(((x) & 0xFFFF) << 16)
41197 #define CQHCI_DAT_ADDR_LO(x)		(((x) & 0xFFFFFFFF) << 32)
41198 #define CQHCI_DAT_ADDR_HI(x)		(((x) & 0xFFFFFFFF) << 0)
41199 
41200+/* CCAP - Crypto Capability 100h */
41201+union cqhci_crypto_capabilities {
41202+	__le32 reg_val;
41203+	struct {
41204+		u8 num_crypto_cap;
41205+		u8 config_count;
41206+		u8 reserved;
41207+		u8 config_array_ptr;
41208+	};
41209+};
41210+
41211+enum cqhci_crypto_key_size {
41212+	CQHCI_CRYPTO_KEY_SIZE_INVALID	= 0,
41213+	CQHCI_CRYPTO_KEY_SIZE_128	= 1,
41214+	CQHCI_CRYPTO_KEY_SIZE_192	= 2,
41215+	CQHCI_CRYPTO_KEY_SIZE_256	= 3,
41216+	CQHCI_CRYPTO_KEY_SIZE_512	= 4,
41217+};
41218+
41219+enum cqhci_crypto_alg {
41220+	CQHCI_CRYPTO_ALG_AES_XTS		= 0,
41221+	CQHCI_CRYPTO_ALG_BITLOCKER_AES_CBC	= 1,
41222+	CQHCI_CRYPTO_ALG_AES_ECB		= 2,
41223+	CQHCI_CRYPTO_ALG_ESSIV_AES_CBC		= 3,
41224+};
41225+
41226+/* x-CRYPTOCAP - Crypto Capability X */
41227+union cqhci_crypto_cap_entry {
41228+	__le32 reg_val;
41229+	struct {
41230+		u8 algorithm_id;
41231+		u8 sdus_mask; /* Supported data unit size mask */
41232+		u8 key_size;
41233+		u8 reserved;
41234+	};
41235+};
41236+
41237+#define CQHCI_CRYPTO_CONFIGURATION_ENABLE (1 << 7)
41238+#define CQHCI_CRYPTO_KEY_MAX_SIZE 64
41239+/* x-CRYPTOCFG - Crypto Configuration X */
41240+union cqhci_crypto_cfg_entry {
41241+	__le32 reg_val[32];
41242+	struct {
41243+		u8 crypto_key[CQHCI_CRYPTO_KEY_MAX_SIZE];
41244+		u8 data_unit_size;
41245+		u8 crypto_cap_idx;
41246+		u8 reserved_1;
41247+		u8 config_enable;
41248+		u8 reserved_multi_host;
41249+		u8 reserved_2;
41250+		u8 vsb[2];
41251+		u8 reserved_3[56];
41252+	};
41253+};
41254+
41255 struct cqhci_host_ops;
41256 struct mmc_host;
41257 struct mmc_request;
41258@@ -196,6 +268,12 @@ struct cqhci_host {
41259 	struct completion halt_comp;
41260 	wait_queue_head_t wait_queue;
41261 	struct cqhci_slot *slot;
41262+
41263+#ifdef CONFIG_MMC_CRYPTO
41264+	union cqhci_crypto_capabilities crypto_capabilities;
41265+	union cqhci_crypto_cap_entry *crypto_cap_array;
41266+	u32 crypto_cfg_register;
41267+#endif
41268 };
41269 
41270 struct cqhci_host_ops {
41271@@ -208,6 +286,10 @@ struct cqhci_host_ops {
41272 				 u64 *data);
41273 	void (*pre_enable)(struct mmc_host *mmc);
41274 	void (*post_disable)(struct mmc_host *mmc);
41275+#ifdef CONFIG_MMC_CRYPTO
41276+	int (*program_key)(struct cqhci_host *cq_host,
41277+			   const union cqhci_crypto_cfg_entry *cfg, int slot);
41278+#endif
41279 };
41280 
41281 static inline void cqhci_writel(struct cqhci_host *host, u32 val, int reg)
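The crypto capability and configuration unions are meant to map directly onto the CQHCI crypto register window: each capability entry is one 32-bit word, and each configuration entry is 32 words (a 64-byte key followed by control bytes and padding, 128 bytes in total). A compile-time sanity check, assuming cqhci.h is included, could read:

#include <linux/build_bug.h>

static inline void demo_cqhci_crypto_layout_check(void)
{
	BUILD_BUG_ON(sizeof(union cqhci_crypto_cap_entry) != 4);
	BUILD_BUG_ON(sizeof(union cqhci_crypto_cfg_entry) != 128);
}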
41282diff --git a/drivers/mmc/host/dw_mmc-rockchip.c b/drivers/mmc/host/dw_mmc-rockchip.c
41283index 753502ce3..4a788d4c4 100644
41284--- a/drivers/mmc/host/dw_mmc-rockchip.c
41285+++ b/drivers/mmc/host/dw_mmc-rockchip.c
41286@@ -22,6 +22,9 @@ struct dw_mci_rockchip_priv_data {
41287 	struct clk		*sample_clk;
41288 	int			default_sample_phase;
41289 	int			num_phases;
41290+	bool			use_v2_tuning;
41291+	int			last_degree;
41292+	u32			f_min;
41293 };
41294 
41295 static void dw_mci_rk3288_set_ios(struct dw_mci *host, struct mmc_ios *ios)
41296@@ -43,6 +46,11 @@ static void dw_mci_rk3288_set_ios(struct dw_mci *host, struct mmc_ios *ios)
41297 	 * Note: div can only be 0 or 1, but div must be set to 1 for eMMC
41298 	 * DDR52 8-bit mode.
41299 	 */
41300+	if (ios->clock < priv->f_min) {
41301+		ios->clock = priv->f_min;
41302+		host->slot->clock = ios->clock;
41303+	}
41304+
41305 	if (ios->bus_width == MMC_BUS_WIDTH_8 &&
41306 	    ios->timing == MMC_TIMING_MMC_DDR52)
41307 		cclkin = 2 * ios->clock * RK3288_CLKGEN_DIV;
41308@@ -61,7 +69,7 @@ static void dw_mci_rk3288_set_ios(struct dw_mci *host, struct mmc_ios *ios)
41309 	}
41310 
41311 	/* Make sure we use phases which we can enumerate with */
41312-	if (!IS_ERR(priv->sample_clk))
41313+	if (!IS_ERR(priv->sample_clk) && ios->timing <= MMC_TIMING_SD_HS)
41314 		clk_set_phase(priv->sample_clk, priv->default_sample_phase);
41315 
41316 	/*
41317@@ -132,6 +140,49 @@ static void dw_mci_rk3288_set_ios(struct dw_mci *host, struct mmc_ios *ios)
41318 #define TUNING_ITERATION_TO_PHASE(i, num_phases) \
41319 		(DIV_ROUND_UP((i) * 360, num_phases))
41320 
41321+static int dw_mci_v2_execute_tuning(struct dw_mci_slot *slot, u32 opcode)
41322+{
41323+	struct dw_mci *host = slot->host;
41324+	struct dw_mci_rockchip_priv_data *priv = host->priv;
41325+	struct mmc_host *mmc = slot->mmc;
41326+	u32 degrees[4] = {0, 90, 180, 270}, degree;
41327+	int i;
41328+	static bool inherit = true;
41329+
41330+	if (inherit) {
41331+		inherit = false;
41332+		i = clk_get_phase(priv->sample_clk) / 90;
41333+		degree = degrees[i];
41334+		goto done;
41335+	}
41336+
41337+	/*
41338+	 * v2 tuning only supports 4 degrees in theory.
41339+	 * First we inherit the sample phase from the firmware, which should
41340+	 * work fine, at least initially.
41341+	 * If retuning is needed, we search forward from the last good phase
41342+	 * through the degree list, wrapping around until one works.
41343+	 * It is very unlikely that all 4 fixed phases fail.
41344+	 */
41345+	for (i = 0; i < ARRAY_SIZE(degrees); i++) {
41346+		degree = degrees[i] + priv->last_degree;
41347+		degree = degree % 360;
41348+		clk_set_phase(priv->sample_clk, degree);
41349+		if (!mmc_send_tuning(mmc, opcode, NULL))
41350+			break;
41351+	}
41352+
41353+	if (i == ARRAY_SIZE(degrees)) {
41354+		dev_warn(host->dev, "All phases bad!");
41355+		dev_warn(host->dev, "All phases bad!\n");
41356+	}
41357+
41358+done:
41359+	dev_info(host->dev, "Successfully tuned phase to %d\n", degree);
41360+	priv->last_degree = degree;
41361+	return 0;
41362+}
41363+
41364 static int dw_mci_rk3288_execute_tuning(struct dw_mci_slot *slot, u32 opcode)
41365 {
41366 	struct dw_mci *host = slot->host;
41367@@ -148,13 +199,20 @@ static int dw_mci_rk3288_execute_tuning(struct dw_mci_slot *slot, u32 opcode)
41368 	unsigned int range_count = 0;
41369 	int longest_range_len = -1;
41370 	int longest_range = -1;
41371-	int middle_phase;
41372+	int middle_phase, real_middle_phase;
41373 
41374 	if (IS_ERR(priv->sample_clk)) {
41375 		dev_err(host->dev, "Tuning clock (sample_clk) not defined.\n");
41376 		return -EIO;
41377 	}
41378 
41379+	if (priv->use_v2_tuning) {
41380+		ret = dw_mci_v2_execute_tuning(slot, opcode);
41381+		if (!ret)
41382+			return 0;
41383+		/* Otherwise we continue using fine tuning */
41384+	}
41385+
41386 	ranges = kmalloc_array(priv->num_phases / 2 + 1,
41387 			       sizeof(*ranges), GFP_KERNEL);
41388 	if (!ranges)
41389@@ -162,6 +220,9 @@ static int dw_mci_rk3288_execute_tuning(struct dw_mci_slot *slot, u32 opcode)
41390 
41391 	/* Try each phase and extract good ranges */
41392 	for (i = 0; i < priv->num_phases; ) {
41393+		/* Cannot guarantee any phases larger than 270 would work well */
41394+		if (TUNING_ITERATION_TO_PHASE(i, priv->num_phases) > 270)
41395+			break;
41396 		clk_set_phase(priv->sample_clk,
41397 			      TUNING_ITERATION_TO_PHASE(i, priv->num_phases));
41398 
41399@@ -246,12 +307,30 @@ static int dw_mci_rk3288_execute_tuning(struct dw_mci_slot *slot, u32 opcode)
41400 
41401 	middle_phase = ranges[longest_range].start + longest_range_len / 2;
41402 	middle_phase %= priv->num_phases;
41403+	real_middle_phase = TUNING_ITERATION_TO_PHASE(middle_phase, priv->num_phases);
41404+
41405+	/*
41406+	 * Since we cut out 270 ~ 360, the original algorithm may still
41407+	 * roll ranges before and after 270 together in some corner
41408+	 * cases, so adjust the result to avoid using any middle phase
41409+	 * located between 270 and 360.
41410+	 * By calculation, this happens when the bad phases lie between
41411+	 * 90 ~ 180, so all other phases are fine to choose; picking 270
41412+	 * is the better choice in those cases. If the bad phases extend
41413+	 * beyond 180, the rolled-over middle phase would be bigger than
41414+	 * 315, so we choose 360.
41415+	 */
41416+	if (real_middle_phase > 270) {
41417+		if (real_middle_phase < 315)
41418+			real_middle_phase = 270;
41419+		else
41420+			real_middle_phase = 360;
41421+	}
41422+
41423 	dev_info(host->dev, "Successfully tuned phase to %d\n",
41424-		 TUNING_ITERATION_TO_PHASE(middle_phase, priv->num_phases));
41425+		 real_middle_phase);
41426 
41427-	clk_set_phase(priv->sample_clk,
41428-		      TUNING_ITERATION_TO_PHASE(middle_phase,
41429-						priv->num_phases));
41430+	clk_set_phase(priv->sample_clk, real_middle_phase);
41431 
41432 free:
41433 	kfree(ranges);
41434@@ -267,6 +346,17 @@ static int dw_mci_rk3288_parse_dt(struct dw_mci *host)
41435 	if (!priv)
41436 		return -ENOMEM;
41437 
41438+	/*
41439+	 * RK356X SoCs only support 375KHz for ID mode, so any clk request
41440+	 * less than 1.6MHz (2 * 400KHz * RK3288_CLKGEN_DIV) should be
41441+	 * clamped to 375KHz.
41442+	 */
41443+	if (of_device_is_compatible(host->dev->of_node,
41444+				    "rockchip,rk3568-dw-mshc"))
41445+		priv->f_min = 375000;
41446+	else
41447+		priv->f_min = 100000;
41448+
41449 	if (of_property_read_u32(np, "rockchip,desired-num-phases",
41450 					&priv->num_phases))
41451 		priv->num_phases = 360;
41452@@ -275,6 +365,9 @@ static int dw_mci_rk3288_parse_dt(struct dw_mci *host)
41453 					&priv->default_sample_phase))
41454 		priv->default_sample_phase = 0;
41455 
41456+	if (of_property_read_bool(np, "rockchip,use-v2-tuning"))
41457+		priv->use_v2_tuning = true;
41458+
41459 	priv->drv_clk = devm_clk_get(host->dev, "ciu-drive");
41460 	if (IS_ERR(priv->drv_clk))
41461 		dev_dbg(host->dev, "ciu-drive not available\n");
41462@@ -297,6 +390,7 @@ static int dw_mci_rockchip_init(struct dw_mci *host)
41463 				    "rockchip,rk3288-dw-mshc"))
41464 		host->bus_hz /= RK3288_CLKGEN_DIV;
41465 
41466+	host->need_xfer_timer = true;
41467 	return 0;
41468 }
41469 
41470@@ -335,28 +429,43 @@ static int dw_mci_rockchip_probe(struct platform_device *pdev)
41471 	const struct dw_mci_drv_data *drv_data;
41472 	const struct of_device_id *match;
41473 	int ret;
41474+	bool use_rpm = true;
41475 
41476 	if (!pdev->dev.of_node)
41477 		return -ENODEV;
41478 
41479+	if (!device_property_read_bool(&pdev->dev, "non-removable") &&
41480+	    !device_property_read_bool(&pdev->dev, "cd-gpios"))
41481+		use_rpm = false;
41482+
41483 	match = of_match_node(dw_mci_rockchip_match, pdev->dev.of_node);
41484 	drv_data = match->data;
41485 
41486+	/*
41487+	 * Increase the rpm usage count so that pm_runtime_force_resume
41488+	 * calls the rpm resume callback.
41489+	 */
41490 	pm_runtime_get_noresume(&pdev->dev);
41491 	pm_runtime_set_active(&pdev->dev);
41492-	pm_runtime_enable(&pdev->dev);
41493-	pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
41494-	pm_runtime_use_autosuspend(&pdev->dev);
41495+
41496+	if (use_rpm) {
41497+		pm_runtime_enable(&pdev->dev);
41498+		pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
41499+		pm_runtime_use_autosuspend(&pdev->dev);
41500+	}
41501 
41502 	ret = dw_mci_pltfm_register(pdev, drv_data);
41503 	if (ret) {
41504-		pm_runtime_disable(&pdev->dev);
41505-		pm_runtime_set_suspended(&pdev->dev);
41506+		if (use_rpm) {
41507+			pm_runtime_disable(&pdev->dev);
41508+			pm_runtime_set_suspended(&pdev->dev);
41509+		}
41510 		pm_runtime_put_noidle(&pdev->dev);
41511 		return ret;
41512 	}
41513 
41514-	pm_runtime_put_autosuspend(&pdev->dev);
41515+	if (use_rpm)
41516+		pm_runtime_put_autosuspend(&pdev->dev);
41517 
41518 	return 0;
41519 }
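With the default 360 desired phases, TUNING_ITERATION_TO_PHASE(i, num_phases) maps iteration i to i degrees, and the tuning loop above now stops once that passes 270. The rollback adjustment then pins any middle phase above 270 to either 270 or 360; the clamp on its own, as an illustrative sketch:

/* Middle phases in (270, 315) fall back to 270; 315 and above wrap to 360. */
static int demo_adjust_middle_phase(int deg)
{
	if (deg > 270)
		deg = (deg < 315) ? 270 : 360;

	return deg;
}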
41520diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
41521index a6170f80b..338fac33f 100644
41522--- a/drivers/mmc/host/dw_mmc.c
41523+++ b/drivers/mmc/host/dw_mmc.c
41524@@ -18,6 +18,7 @@
41525 #include <linux/iopoll.h>
41526 #include <linux/ioport.h>
41527 #include <linux/module.h>
41528+#include <linux/of_address.h>
41529 #include <linux/platform_device.h>
41530 #include <linux/pm_runtime.h>
41531 #include <linux/seq_file.h>
41532@@ -35,6 +36,7 @@
41533 #include <linux/of.h>
41534 #include <linux/of_gpio.h>
41535 #include <linux/mmc/slot-gpio.h>
41536+#include <linux/soc/rockchip/rockchip_decompress.h>
41537 
41538 #include "dw_mmc.h"
41539 
41540@@ -208,6 +210,7 @@ static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset)
41541 static void dw_mci_wait_while_busy(struct dw_mci *host, u32 cmd_flags)
41542 {
41543 	u32 status;
41544+	u32 delay = 10;
41545 
41546 	/*
41547 	 * Databook says that before issuing a new data transfer command
41548@@ -217,12 +220,17 @@ static void dw_mci_wait_while_busy(struct dw_mci *host, u32 cmd_flags)
41549 	 * ...also allow sending for SDMMC_CMD_VOLT_SWITCH where busy is
41550 	 * expected.
41551 	 */
41552+#ifdef CONFIG_ROCKCHIP_THUNDER_BOOT
41553+	if (host->slot->mmc->caps2 & MMC_CAP2_NO_SD &&
41554+	    host->slot->mmc->caps2 & MMC_CAP2_NO_SDIO)
41555+		delay = 0;
41556+#endif
41557 	if ((cmd_flags & SDMMC_CMD_PRV_DAT_WAIT) &&
41558 	    !(cmd_flags & SDMMC_CMD_VOLT_SWITCH)) {
41559 		if (readl_poll_timeout_atomic(host->regs + SDMMC_STATUS,
41560 					      status,
41561 					      !(status & SDMMC_STATUS_BUSY),
41562-					      10, 500 * USEC_PER_MSEC))
41563+					      delay, 500 * USEC_PER_MSEC))
41564 			dev_err(host->dev, "Busy; trying anyway\n");
41565 	}
41566 }
41567@@ -491,6 +499,10 @@ static void dw_mci_dmac_complete_dma(void *arg)
41568 		set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
41569 		tasklet_schedule(&host->tasklet);
41570 	}
41571+
41572+	if (host->need_xfer_timer &&
41573+	    host->dir_status == DW_MCI_RECV_STATUS)
41574+		del_timer(&host->xfer_timer);
41575 }
41576 
41577 static int dw_mci_idmac_init(struct dw_mci *host)
41578@@ -1923,6 +1935,30 @@ static void dw_mci_set_drto(struct dw_mci *host)
41579 	spin_unlock_irqrestore(&host->irq_lock, irqflags);
41580 }
41581 
41582+static void dw_mci_set_xfer_timeout(struct dw_mci *host)
41583+{
41584+	unsigned int xfer_clks;
41585+	unsigned int xfer_div;
41586+	unsigned int xfer_ms;
41587+	unsigned long irqflags;
41588+
41589+	xfer_clks = mci_readl(host, TMOUT) >> 8;
41590+	xfer_div = (mci_readl(host, CLKDIV) & 0xff) * 2;
41591+	if (xfer_div == 0)
41592+		xfer_div = 1;
41593+	xfer_ms = DIV_ROUND_UP_ULL((u64)MSEC_PER_SEC * xfer_clks * xfer_div,
41594+				   host->bus_hz);
41595+
41596+	/* add a bit of spare time */
41597+	xfer_ms += 100;
41598+
41599+	spin_lock_irqsave(&host->irq_lock, irqflags);
41600+	if (!test_bit(EVENT_XFER_COMPLETE, &host->pending_events))
41601+		mod_timer(&host->xfer_timer,
41602+			  jiffies + msecs_to_jiffies(xfer_ms));
41603+	spin_unlock_irqrestore(&host->irq_lock, irqflags);
41604+}
41605+
41606 static bool dw_mci_clear_pending_cmd_complete(struct dw_mci *host)
41607 {
41608 	if (!test_bit(EVENT_CMD_COMPLETE, &host->pending_events))
41609@@ -2061,6 +2097,9 @@ static void dw_mci_tasklet_func(unsigned long priv)
41610 				 */
41611 				if (host->dir_status == DW_MCI_RECV_STATUS)
41612 					dw_mci_set_drto(host);
41613+				if (host->need_xfer_timer &&
41614+				    host->dir_status == DW_MCI_RECV_STATUS)
41615+					dw_mci_set_xfer_timeout(host);
41616 				break;
41617 			}
41618 
41619@@ -2535,6 +2574,8 @@ static void dw_mci_read_data_pio(struct dw_mci *host, bool dto)
41620 	host->sg = NULL;
41621 	smp_wmb(); /* drain writebuffer */
41622 	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
41623+	if (host->need_xfer_timer)
41624+		del_timer(&host->xfer_timer);
41625 }
41626 
41627 static void dw_mci_write_data_pio(struct dw_mci *host)
41628@@ -2647,6 +2688,9 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
41629 			del_timer(&host->cto_timer);
41630 			mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
41631 			host->cmd_status = pending;
41632+			if (host->need_xfer_timer &&
41633+			    host->dir_status == DW_MCI_RECV_STATUS)
41634+				del_timer(&host->xfer_timer);
41635 			smp_wmb(); /* drain writebuffer */
41636 			set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
41637 
41638@@ -3032,6 +3076,36 @@ static void dw_mci_cto_timer(struct timer_list *t)
41639 	spin_unlock_irqrestore(&host->irq_lock, irqflags);
41640 }
41641 
41642+static void dw_mci_xfer_timer(struct timer_list *t)
41643+{
41644+	struct dw_mci *host = from_timer(host, t, xfer_timer);
41645+	unsigned long irqflags;
41646+
41647+	spin_lock_irqsave(&host->irq_lock, irqflags);
41648+
41649+	if (test_bit(EVENT_XFER_COMPLETE, &host->pending_events)) {
41650+		/* Presumably the interrupt handler couldn't delete the timer */
41651+		dev_warn(host->dev, "xfer when already completed\n");
41652+		goto exit;
41653+	}
41654+
41655+	switch (host->state) {
41656+	case STATE_SENDING_DATA:
41657+		host->data_status = SDMMC_INT_DRTO;
41658+		set_bit(EVENT_DATA_ERROR, &host->pending_events);
41659+		set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
41660+		tasklet_schedule(&host->tasklet);
41661+		break;
41662+	default:
41663+		dev_warn(host->dev, "Unexpected xfer timeout, state %d\n",
41664+			 host->state);
41665+		break;
41666+	}
41667+
41668+exit:
41669+	spin_unlock_irqrestore(&host->irq_lock, irqflags);
41670+}
41671+
41672 static void dw_mci_dto_timer(struct timer_list *t)
41673 {
41674 	struct dw_mci *host = from_timer(host, t, dto_timer);
41675@@ -3161,6 +3235,60 @@ int dw_mci_probe(struct dw_mci *host)
41676 	int width, i, ret = 0;
41677 	u32 fifo_size;
41678 
41679+#if defined(CONFIG_ROCKCHIP_THUNDER_BOOT) && defined(CONFIG_ROCKCHIP_HW_DECOMPRESS)
41680+	struct resource idmac, ramdisk_src, ramdisk_dst;
41681+	struct device_node *dma, *rds, *rdd;
41682+	struct device *dev = host->dev;
41683+	u32 intr;
41684+
41685+	if (host->slot->mmc->caps2 & MMC_CAP2_NO_SD &&
41686+	    host->slot->mmc->caps2 & MMC_CAP2_NO_SDIO) {
41687+		if (readl_poll_timeout(host->regs + SDMMC_STATUS,
41688+				fifo_size,
41689+				!(fifo_size & (BIT(10) | GENMASK(7, 4))),
41690+				0, 500 * USEC_PER_MSEC))
41691+			dev_err(dev, "Controller is occupied!\n");
41692+
41693+		if (readl_poll_timeout(host->regs + SDMMC_IDSTS,
41694+				fifo_size, !(fifo_size & GENMASK(16, 13)),
41695+				0, 500 * USEC_PER_MSEC))
41696+			dev_err(dev, "DMA is still running!\n");
41697+
41698+		intr = mci_readl(host, RINTSTS);
41699+		if (intr & DW_MCI_CMD_ERROR_FLAGS || intr & DW_MCI_DATA_ERROR_FLAGS) {
41700+			WARN_ON(1);
41701+			return -EINVAL;
41702+		}
41703+
41704+		/* Release idmac descriptor */
41705+		dma = of_parse_phandle(dev->of_node, "memory-region-idamc", 0);
41706+		if (dma) {
41707+			ret = of_address_to_resource(dma, 0, &idmac);
41708+			if (ret >= 0)
41709+				free_reserved_area(phys_to_virt(idmac.start),
41710+					phys_to_virt(idmac.start) + resource_size(&idmac),
41711+					-1, NULL);
41712+		}
41713+
41714+		/* Parse ramdisk addr and help start decompressing */
41715+		rds = of_parse_phandle(dev->of_node, "memory-region-src", 0);
41716+		rdd = of_parse_phandle(dev->of_node, "memory-region-dst", 0);
41717+		if (rds && rdd) {
41718+			if (of_address_to_resource(rds, 0, &ramdisk_src) >= 0 &&
41719+				of_address_to_resource(rdd, 0, &ramdisk_dst) >= 0)
41720+				/*
41721+				 * The decompress HW driver will free the reserved area of
41722+				 * memory-region-src.
41723+				 */
41724+				ret = rk_decom_start(GZIP_MOD, ramdisk_src.start,
41725+						     ramdisk_dst.start,
41726+						     resource_size(&ramdisk_dst));
41727+			if (ret < 0)
41728+				dev_err(dev, "fail to start decom\n");
41729+		}
41730+	}
41731+#endif
41732+
41733 	if (!host->pdata) {
41734 		host->pdata = dw_mci_parse_dt(host);
41735 		if (IS_ERR(host->pdata))
41736@@ -3225,6 +3353,8 @@ int dw_mci_probe(struct dw_mci *host)
41737 	timer_setup(&host->cmd11_timer, dw_mci_cmd11_timer, 0);
41738 	timer_setup(&host->cto_timer, dw_mci_cto_timer, 0);
41739 	timer_setup(&host->dto_timer, dw_mci_dto_timer, 0);
41740+	if (host->need_xfer_timer)
41741+		timer_setup(&host->xfer_timer, dw_mci_xfer_timer, 0);
41742 
41743 	spin_lock_init(&host->lock);
41744 	spin_lock_init(&host->irq_lock);
41745diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
41746index da5923a92..0b53484e9 100644
41747--- a/drivers/mmc/host/dw_mmc.h
41748+++ b/drivers/mmc/host/dw_mmc.h
41749@@ -230,6 +230,8 @@ struct dw_mci {
41750 	struct timer_list       cmd11_timer;
41751 	struct timer_list       cto_timer;
41752 	struct timer_list       dto_timer;
41753+	bool			need_xfer_timer;
41754+	struct timer_list       xfer_timer;
41755 };
41756 
41757 /* DMA ops for Internal/External DMAC interface */
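
As a worked illustration of the arithmetic in dw_mci_set_xfer_timeout() above (a sketch only; the sample register values are assumptions, not taken from real hardware):

static unsigned int xfer_timeout_ms(unsigned long long tmout,
				    unsigned long long clkdiv,
				    unsigned long long bus_hz)
{
	unsigned long long clks = tmout >> 8;		/* TMOUT data timeout field */
	unsigned long long div = (clkdiv & 0xff) * 2;	/* CLKDIV card clock divider */

	if (div == 0)
		div = 1;

	/* round up to whole milliseconds, then add the 100 ms of slack */
	return (unsigned int)((1000ULL * clks * div + bus_hz - 1) / bus_hz + 100);
}

With, say, TMOUT = 0xFFFFFF00 (maximum data timeout), CLKDIV = 0 and a 50 MHz bus clock, this yields 16777215 clocks / 50 MHz, roughly 336 ms, plus the 100 ms margin, so the xfer timer fires about 436 ms after the transfer starts.
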
41758diff --git a/drivers/mmc/host/sdhci-of-dwcmshc.c b/drivers/mmc/host/sdhci-of-dwcmshc.c
41759index 59d8d96ce..e38844c2d 100644
41760--- a/drivers/mmc/host/sdhci-of-dwcmshc.c
41761+++ b/drivers/mmc/host/sdhci-of-dwcmshc.c
41762@@ -9,9 +9,13 @@
41763 
41764 #include <linux/clk.h>
41765 #include <linux/dma-mapping.h>
41766+#include <linux/iopoll.h>
41767 #include <linux/kernel.h>
41768 #include <linux/module.h>
41769 #include <linux/of.h>
41770+#include <linux/of_device.h>
41771+#include <linux/pm_runtime.h>
41772+#include <linux/reset.h>
41773 #include <linux/sizes.h>
41774 
41775 #include "sdhci-pltfm.h"
41776@@ -21,11 +25,74 @@
41777 /* DWCMSHC specific Mode Select value */
41778 #define DWCMSHC_CTRL_HS400		0x7
41779 
41780+#define DWCMSHC_VER_ID			0x500
41781+#define DWCMSHC_VER_TYPE		0x504
41782+#define DWCMSHC_HOST_CTRL3		0x508
41783+#define DWCMSHC_EMMC_CONTROL		0x52c
41784+#define DWCMSHC_EMMC_ATCTRL		0x540
41785+
41786+/* Rockchip-specific registers */
41787+#define DWCMSHC_EMMC_DLL_CTRL		0x800
41788+#define DWCMSHC_EMMC_DLL_RXCLK		0x804
41789+#define DWCMSHC_EMMC_DLL_TXCLK		0x808
41790+#define DWCMSHC_EMMC_DLL_STRBIN		0x80c
41791+#define DECMSHC_EMMC_DLL_CMDOUT		0x810
41792+#define DWCMSHC_EMMC_DLL_STATUS0	0x840
41793+
41794+#define DWCMSHC_EMMC_DLL_START		BIT(0)
41795+#define DWCMSHC_EMMC_DLL_LOCKED		BIT(8)
41796+#define DWCMSHC_EMMC_DLL_TIMEOUT	BIT(9)
41797+#define DWCMSHC_EMMC_DLL_START_POINT	16
41798+#define DWCMSHC_EMMC_DLL_INC		8
41799+#define DWCMSHC_EMMC_DLL_DLYENA		BIT(27)
41800+
41801+#define DLL_TXCLK_TAPNUM_DEFAULT	0x10
41802+#define DLL_TXCLK_TAPNUM_90_DEGREES	0xA
41803+#define DLL_TXCLK_TAPNUM_FROM_SW	BIT(24)
41804+#define DLL_TXCLK_NO_INVERTER		BIT(29)
41805+
41806+#define DLL_STRBIN_TAPNUM_DEFAULT	0x8
41807+#define DLL_STRBIN_TAPNUM_FROM_SW	BIT(24)
41808+#define DLL_STRBIN_DELAY_NUM_SEL	BIT(26)
41809+#define DLL_STRBIN_DELAY_NUM_OFFSET	16
41810+#define DLL_STRBIN_DELAY_NUM_DEFAULT	0x16
41811+
41812+#define DLL_RXCLK_NO_INVERTER		BIT(29)
41813+
41814+#define DWCMSHC_CARD_IS_EMMC		BIT(0)
41815+#define DWCMSHC_ENHANCED_STROBE		BIT(8)
41816+
41817+#define DLL_CMDOUT_TAPNUM_90_DEGREES	0x8
41818+#define DLL_CMDOUT_TAPNUM_FROM_SW	BIT(24)
41819+#define DLL_CMDOUT_SRC_CLK_NEG		BIT(28)
41820+#define DLL_CMDOUT_EN_SRC_CLK_NEG	BIT(29)
41821+
41822+#define DLL_LOCK_WO_TMOUT(x) \
41823+	((((x) & DWCMSHC_EMMC_DLL_LOCKED) == DWCMSHC_EMMC_DLL_LOCKED) && \
41824+	(((x) & DWCMSHC_EMMC_DLL_TIMEOUT) == 0))
41825+#define ROCKCHIP_MAX_CLKS		3
41826+
41827 #define BOUNDARY_OK(addr, len) \
41828 	((addr | (SZ_128M - 1)) == ((addr + len - 1) | (SZ_128M - 1)))
41829 
41830 struct dwcmshc_priv {
41831 	struct clk	*bus_clk;
41832+	u32 cclk_rate;
41833+
41834+	/* Rockchip-specific optional clocks */
41835+	struct clk_bulk_data rockchip_clks[ROCKCHIP_MAX_CLKS];
41836+	struct reset_control *reset;
41837+	int txclk_tapnum;
41838+	unsigned int actual_clk;
41839+	u32 flags;
41840+};
41841+
41842+struct dwcmshc_driver_data {
41843+	const struct sdhci_pltfm_data *pdata;
41844+	u32 flags;
41845+#define RK_PLATFROM		BIT(0)
41846+#define RK_DLL_CMD_OUT		BIT(1)
41847+#define RK_RXCLK_NO_INVERTER	BIT(2)
41848 };
41849 
41850 /*
41851@@ -77,7 +144,7 @@ static void dwcmshc_request(struct mmc_host *mmc, struct mmc_request *mrq)
41852 static void dwcmshc_set_uhs_signaling(struct sdhci_host *host,
41853 				      unsigned int timing)
41854 {
41855-	u16 ctrl_2;
41856+	u16 ctrl_2, ctrl;
41857 
41858 	ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
41859 	/* Select Bus Speed Mode for host */
41860@@ -95,11 +162,157 @@ static void dwcmshc_set_uhs_signaling(struct sdhci_host *host,
41861 	else if ((timing == MMC_TIMING_UHS_DDR50) ||
41862 		 (timing == MMC_TIMING_MMC_DDR52))
41863 		ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
41864-	else if (timing == MMC_TIMING_MMC_HS400)
41865+	else if (timing == MMC_TIMING_MMC_HS400) {
41866+		/* set CARD_IS_EMMC bit to enable Data Strobe for HS400 */
41867+		ctrl = sdhci_readw(host, DWCMSHC_EMMC_CONTROL);
41868+		ctrl |= DWCMSHC_CARD_IS_EMMC;
41869+		sdhci_writew(host, ctrl, DWCMSHC_EMMC_CONTROL);
41870+
41871 		ctrl_2 |= DWCMSHC_CTRL_HS400;
41872+	}
41873+
41874 	sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
41875 }
41876 
41877+static void dwcmshc_hs400_enhanced_strobe(struct mmc_host *mmc,
41878+					  struct mmc_ios *ios)
41879+{
41880+	u32 vendor;
41881+	struct sdhci_host *host = mmc_priv(mmc);
41882+
41883+	vendor = sdhci_readl(host, DWCMSHC_EMMC_CONTROL);
41884+	if (ios->enhanced_strobe)
41885+		vendor |= DWCMSHC_ENHANCED_STROBE;
41886+	else
41887+		vendor &= ~DWCMSHC_ENHANCED_STROBE;
41888+
41889+	sdhci_writel(host, vendor, DWCMSHC_EMMC_CONTROL);
41890+}
41891+
41892+static void dwcmshc_rk_set_clock(struct sdhci_host *host, unsigned int clock)
41893+{
41894+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
41895+	struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host);
41896+	u32 txclk_tapnum, extra;
41897+	int err;
41898+
41899+	host->mmc->actual_clock = 0;
41900+
41901+	if (clock == 0) {
41902+		/* Disable interface clock at initial state. */
41903+		sdhci_set_clock(host, clock);
41904+		return;
41905+	}
41906+
41907+	/* Rockchip platforms only support 375 kHz for identification mode */
41908+	if (clock <= 400000)
41909+		clock = 375000;
41910+
41911+	err = clk_set_rate(pltfm_host->clk, clock);
41912+	if (err)
41913+		dev_err(mmc_dev(host->mmc), "fail to set clock %d", clock);
41914+
41915+	sdhci_set_clock(host, clock);
41916+
41917+	/* Disable cmd conflict check */
41918+	extra = sdhci_readl(host, DWCMSHC_HOST_CTRL3);
41919+	extra &= ~BIT(0);
41920+	sdhci_writel(host, extra, DWCMSHC_HOST_CTRL3);
41921+
41922+	if (clock <= 52000000) {
41923+		/* Disable DLL and reset both the sample and drive clocks */
41924+		sdhci_writel(host, 0, DWCMSHC_EMMC_DLL_CTRL);
41925+		sdhci_writel(host, 0, DWCMSHC_EMMC_DLL_RXCLK);
41926+		sdhci_writel(host, 0, DWCMSHC_EMMC_DLL_TXCLK);
41927+		sdhci_writel(host, 0, DECMSHC_EMMC_DLL_CMDOUT);
41928+		/*
41929+		 * Before switching to HS400ES mode the driver enables
41930+		 * enhanced strobe first, so the strobe-in parameters for
41931+		 * enhanced strobe need to be configured here in advance.
41932+		 */
41933+		extra = DWCMSHC_EMMC_DLL_DLYENA |
41934+			DLL_STRBIN_DELAY_NUM_SEL |
41935+			DLL_STRBIN_DELAY_NUM_DEFAULT << DLL_STRBIN_DELAY_NUM_OFFSET;
41936+		sdhci_writel(host, extra, DWCMSHC_EMMC_DLL_STRBIN);
41937+		return;
41938+	}
41939+
41940+	/* Reset DLL */
41941+	sdhci_writel(host, BIT(1), DWCMSHC_EMMC_DLL_CTRL);
41942+	udelay(1);
41943+	sdhci_writel(host, 0x0, DWCMSHC_EMMC_DLL_CTRL);
41944+
41945+	/*
41946+	 * We shouldn't set DLL_RXCLK_NO_INVERTER for identification mode,
41947+	 * but we must set it for higher-speed modes.
41948+	 */
41949+	extra = DWCMSHC_EMMC_DLL_DLYENA;
41950+	if (priv->flags & RK_RXCLK_NO_INVERTER)
41951+		extra |= DLL_RXCLK_NO_INVERTER;
41952+	sdhci_writel(host, extra, DWCMSHC_EMMC_DLL_RXCLK);
41953+
41954+	/* Init DLL settings */
41955+	extra = 0x5 << DWCMSHC_EMMC_DLL_START_POINT |
41956+		0x2 << DWCMSHC_EMMC_DLL_INC |
41957+		DWCMSHC_EMMC_DLL_START;
41958+	sdhci_writel(host, extra, DWCMSHC_EMMC_DLL_CTRL);
41959+	err = readl_poll_timeout(host->ioaddr + DWCMSHC_EMMC_DLL_STATUS0,
41960+				 extra, DLL_LOCK_WO_TMOUT(extra), 1,
41961+				 500 * USEC_PER_MSEC);
41962+	if (err) {
41963+		dev_err(mmc_dev(host->mmc), "DLL lock timeout!\n");
41964+		return;
41965+	}
41966+
41967+	extra = 0x1 << 16 | /* tune clock stop en */
41968+		0x2 << 17 | /* pre-change delay */
41969+		0x3 << 19;  /* post-change delay */
41970+	sdhci_writel(host, extra, DWCMSHC_EMMC_ATCTRL);
41971+
41972+	txclk_tapnum = priv->txclk_tapnum;
41973+
41974+	if ((priv->flags & RK_DLL_CMD_OUT) &&
41975+	    host->mmc->ios.timing == MMC_TIMING_MMC_HS400) {
41976+		txclk_tapnum = DLL_TXCLK_TAPNUM_90_DEGREES;
41977+
41978+		extra = DLL_CMDOUT_SRC_CLK_NEG |
41979+			DLL_CMDOUT_EN_SRC_CLK_NEG |
41980+			DWCMSHC_EMMC_DLL_DLYENA |
41981+			DLL_CMDOUT_TAPNUM_90_DEGREES |
41982+			DLL_CMDOUT_TAPNUM_FROM_SW;
41983+		sdhci_writel(host, extra, DECMSHC_EMMC_DLL_CMDOUT);
41984+	}
41985+
41986+	extra = DWCMSHC_EMMC_DLL_DLYENA |
41987+		DLL_TXCLK_TAPNUM_FROM_SW |
41988+		DLL_RXCLK_NO_INVERTER |
41989+		txclk_tapnum;
41990+	sdhci_writel(host, extra, DWCMSHC_EMMC_DLL_TXCLK);
41991+
41992+	extra = DWCMSHC_EMMC_DLL_DLYENA |
41993+		DLL_STRBIN_TAPNUM_DEFAULT |
41994+		DLL_STRBIN_TAPNUM_FROM_SW;
41995+	sdhci_writel(host, extra, DWCMSHC_EMMC_DLL_STRBIN);
41996+}
41997+
41998+static void rockchip_sdhci_reset(struct sdhci_host *host, u8 mask)
41999+{
42000+	struct sdhci_pltfm_host *pltfm_host;
42001+	struct dwcmshc_priv *priv;
42002+
42003+	if (mask & SDHCI_RESET_ALL) {
42004+		pltfm_host = sdhci_priv(host);
42005+		priv = sdhci_pltfm_priv(pltfm_host);
42006+		if (!IS_ERR_OR_NULL(priv->reset)) {
42007+			reset_control_assert(priv->reset);
42008+			udelay(1);
42009+			reset_control_deassert(priv->reset);
42010+		}
42011+	}
42012+
42013+	sdhci_reset(host, mask);
42014+}
42015+
42016 static const struct sdhci_ops sdhci_dwcmshc_ops = {
42017 	.set_clock		= sdhci_set_clock,
42018 	.set_bus_width		= sdhci_set_bus_width,
42019@@ -109,21 +322,118 @@ static const struct sdhci_ops sdhci_dwcmshc_ops = {
42020 	.adma_write_desc	= dwcmshc_adma_write_desc,
42021 };
42022 
42023+static const struct sdhci_ops sdhci_dwcmshc_rk_ops = {
42024+	.set_clock		= dwcmshc_rk_set_clock,
42025+	.set_bus_width		= sdhci_set_bus_width,
42026+	.set_uhs_signaling	= dwcmshc_set_uhs_signaling,
42027+	.get_max_clock		= sdhci_pltfm_clk_get_max_clock,
42028+	.reset			= rockchip_sdhci_reset,
42029+	.adma_write_desc	= dwcmshc_adma_write_desc,
42030+};
42031+
42032 static const struct sdhci_pltfm_data sdhci_dwcmshc_pdata = {
42033 	.ops = &sdhci_dwcmshc_ops,
42034 	.quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
42035 	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
42036 };
42037 
42038+static const struct sdhci_pltfm_data sdhci_dwcmshc_rk_pdata = {
42039+	.ops = &sdhci_dwcmshc_rk_ops,
42040+	.quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN |
42041+		  SDHCI_QUIRK_BROKEN_TIMEOUT_VAL,
42042+	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
42043+		   SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN,
42044+};
42045+
42046+static const struct dwcmshc_driver_data dwcmshc_drvdata = {
42047+	.pdata = &sdhci_dwcmshc_pdata,
42048+	.flags = 0,
42049+};
42050+
42051+static const struct dwcmshc_driver_data rk3568_drvdata = {
42052+	.pdata = &sdhci_dwcmshc_rk_pdata,
42053+	.flags = RK_PLATFROM | RK_RXCLK_NO_INVERTER,
42054+};
42055+
42056+static const struct dwcmshc_driver_data rk3588_drvdata = {
42057+	.pdata = &sdhci_dwcmshc_rk_pdata,
42058+	.flags = RK_PLATFROM | RK_DLL_CMD_OUT,
42059+};
42060+
42061+static int rockchip_pltf_init(struct sdhci_host *host, struct dwcmshc_priv *priv)
42062+{
42063+	int err;
42064+
42065+	priv->rockchip_clks[0].id = "axi";
42066+	priv->rockchip_clks[1].id = "block";
42067+	priv->rockchip_clks[2].id = "timer";
42068+	err = devm_clk_bulk_get_optional(mmc_dev(host->mmc), ROCKCHIP_MAX_CLKS,
42069+					 priv->rockchip_clks);
42070+	if (err) {
42071+		dev_err(mmc_dev(host->mmc), "failed to get clocks %d\n", err);
42072+		return err;
42073+	}
42074+
42075+	err = clk_bulk_prepare_enable(ROCKCHIP_MAX_CLKS, priv->rockchip_clks);
42076+	if (err) {
42077+		dev_err(mmc_dev(host->mmc), "failed to enable clocks %d\n", err);
42078+		return err;
42079+	}
42080+
42081+	if (of_property_read_u32(mmc_dev(host->mmc)->of_node, "rockchip,txclk-tapnum",
42082+				 &priv->txclk_tapnum))
42083+		priv->txclk_tapnum = DLL_TXCLK_TAPNUM_DEFAULT;
42084+
42085+	/* Disable cmd conflict check */
42086+	sdhci_writel(host, 0x0, DWCMSHC_HOST_CTRL3);
42087+	/* Reset previous settings */
42088+	sdhci_writel(host, 0, DWCMSHC_EMMC_DLL_TXCLK);
42089+	sdhci_writel(host, 0, DWCMSHC_EMMC_DLL_STRBIN);
42090+
42091+	/*
42092+	 * Don't support high-speed bus modes at low clock speeds, as the
42093+	 * DLL cannot be used under those conditions.
42094+	 */
42095+	if (host->mmc->f_max <= 52000000) {
42096+		host->mmc->caps2 &= ~(MMC_CAP2_HS200 | MMC_CAP2_HS400);
42097+		host->mmc->caps &= ~(MMC_CAP_3_3V_DDR | MMC_CAP_1_8V_DDR);
42098+	}
42099+
42100+	return 0;
42101+}
42102+
42103+static const struct of_device_id sdhci_dwcmshc_dt_ids[] = {
42104+	{
42105+		.compatible = "snps,dwcmshc-sdhci",
42106+		.data = &dwcmshc_drvdata,
42107+	},
42108+	{
42109+		.compatible = "rockchip,dwcmshc-sdhci",
42110+		.data = &rk3568_drvdata,
42111+	},
42112+	{
42113+		.compatible = "rockchip,rk3588-dwcmshc",
42114+		.data = &rk3588_drvdata,
42115+	},
42116+	{},
42117+};
42118+
42119 static int dwcmshc_probe(struct platform_device *pdev)
42120 {
42121 	struct sdhci_pltfm_host *pltfm_host;
42122 	struct sdhci_host *host;
42123 	struct dwcmshc_priv *priv;
42124+	const struct dwcmshc_driver_data *drv_data;
42125 	int err;
42126 	u32 extra;
42127 
42128-	host = sdhci_pltfm_init(pdev, &sdhci_dwcmshc_pdata,
42129+	drv_data = of_device_get_match_data(&pdev->dev);
42130+	if (!drv_data) {
42131+		dev_err(&pdev->dev, "Error: No device match data found\n");
42132+		return -ENODEV;
42133+	}
42134+
42135+	host = sdhci_pltfm_init(pdev, drv_data->pdata,
42136 				sizeof(struct dwcmshc_priv));
42137 	if (IS_ERR(host))
42138 		return PTR_ERR(host);
42139@@ -139,6 +449,8 @@ static int dwcmshc_probe(struct platform_device *pdev)
42140 	pltfm_host = sdhci_priv(host);
42141 	priv = sdhci_pltfm_priv(pltfm_host);
42142 
42143+	priv->reset = devm_reset_control_array_get_exclusive(&pdev->dev);
42144+
42145 	pltfm_host->clk = devm_clk_get(&pdev->dev, "core");
42146 	if (IS_ERR(pltfm_host->clk)) {
42147 		err = PTR_ERR(pltfm_host->clk);
42148@@ -160,16 +472,33 @@ static int dwcmshc_probe(struct platform_device *pdev)
42149 	sdhci_get_of_property(pdev);
42150 
42151 	host->mmc_host_ops.request = dwcmshc_request;
42152+	host->mmc_host_ops.hs400_enhanced_strobe =
42153+		dwcmshc_hs400_enhanced_strobe;
42154 
42155 	err = sdhci_add_host(host);
42156 	if (err)
42157 		goto err_clk;
42158 
42159+	priv->flags = drv_data->flags;
42160+	if (drv_data->flags & RK_PLATFROM) {
42161+		err = rockchip_pltf_init(host, priv);
42162+		if (err)
42163+			goto err_clk;
42164+	}
42165+
42166+	pm_runtime_get_noresume(&pdev->dev);
42167+	pm_runtime_set_active(&pdev->dev);
42168+	pm_runtime_enable(&pdev->dev);
42169+	pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
42170+	pm_runtime_use_autosuspend(&pdev->dev);
42171+	pm_runtime_put_autosuspend(&pdev->dev);
42172+
42173 	return 0;
42174 
42175 err_clk:
42176 	clk_disable_unprepare(pltfm_host->clk);
42177 	clk_disable_unprepare(priv->bus_clk);
42178+	clk_bulk_disable_unprepare(ROCKCHIP_MAX_CLKS, priv->rockchip_clks);
42179 free_pltfm:
42180 	sdhci_pltfm_free(pdev);
42181 	return err;
42182@@ -185,6 +514,7 @@ static int dwcmshc_remove(struct platform_device *pdev)
42183 
42184 	clk_disable_unprepare(pltfm_host->clk);
42185 	clk_disable_unprepare(priv->bus_clk);
42186+	clk_bulk_disable_unprepare(ROCKCHIP_MAX_CLKS, priv->rockchip_clks);
42187 
42188 	sdhci_pltfm_free(pdev);
42189 
42190@@ -207,6 +537,7 @@ static int dwcmshc_suspend(struct device *dev)
42191 	if (!IS_ERR(priv->bus_clk))
42192 		clk_disable_unprepare(priv->bus_clk);
42193 
42194+	clk_bulk_disable_unprepare(ROCKCHIP_MAX_CLKS, priv->rockchip_clks);
42195 	return ret;
42196 }
42197 
42198@@ -227,15 +558,51 @@ static int dwcmshc_resume(struct device *dev)
42199 			return ret;
42200 	}
42201 
42202+	ret = clk_bulk_prepare_enable(ROCKCHIP_MAX_CLKS, priv->rockchip_clks);
42203+	if (ret)
42204+		return ret;
42205+
42206 	return sdhci_resume_host(host);
42207 }
42208-#endif
42209 
42210-static SIMPLE_DEV_PM_OPS(dwcmshc_pmops, dwcmshc_suspend, dwcmshc_resume);
42211+static int dwcmshc_runtime_suspend(struct device *dev)
42212+{
42213+	struct sdhci_host *host = dev_get_drvdata(dev);
42214+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
42215+	struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host);
42216 
42217-static const struct of_device_id sdhci_dwcmshc_dt_ids[] = {
42218-	{ .compatible = "snps,dwcmshc-sdhci" },
42219-	{}
42220+	priv->actual_clk = host->mmc->actual_clock;
42221+	sdhci_set_clock(host, 0);
42222+	priv->cclk_rate = clk_get_rate(pltfm_host->clk);
42223+	clk_set_rate(pltfm_host->clk, 24000000);
42224+	clk_bulk_disable_unprepare(ROCKCHIP_MAX_CLKS, priv->rockchip_clks);
42225+
42226+	return 0;
42227+}
42228+
42229+static int dwcmshc_runtime_resume(struct device *dev)
42230+{
42231+	struct sdhci_host *host = dev_get_drvdata(dev);
42232+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
42233+	struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host);
42234+	int ret = 0;
42235+
42236+	clk_set_rate(pltfm_host->clk, priv->cclk_rate);
42237+	sdhci_set_clock(host, priv->actual_clk);
42238+	ret = clk_bulk_prepare_enable(ROCKCHIP_MAX_CLKS, priv->rockchip_clks);
42239+	/*
42240+	 * The DLL will not lock again after the frequency reduction,
42241+	 * so it needs to be reconfigured.
42242+	 */
42243+	dwcmshc_rk_set_clock(host, priv->cclk_rate);
42244+
42245+	return ret;
42246+}
42247+#endif
42248+
42249+static const struct dev_pm_ops dwcmshc_pmops = {
42250+	SET_SYSTEM_SLEEP_PM_OPS(dwcmshc_suspend, dwcmshc_resume)
42251+	SET_RUNTIME_PM_OPS(dwcmshc_runtime_suspend, dwcmshc_runtime_resume, NULL)
42252 };
42253 MODULE_DEVICE_TABLE(of, sdhci_dwcmshc_dt_ids);
42254 
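
A small standalone restatement of the DLL_LOCK_WO_TMOUT() condition that dwcmshc_rk_set_clock() polls on DWCMSHC_EMMC_DLL_STATUS0 (plain C outside the kernel; the bit positions are taken from the defines above):

#include <stdbool.h>
#include <stdint.h>

static bool emmc_dll_usable(uint32_t status0)
{
	bool locked  = status0 & (1u << 8);	/* DWCMSHC_EMMC_DLL_LOCKED */
	bool timeout = status0 & (1u << 9);	/* DWCMSHC_EMMC_DLL_TIMEOUT */

	/* usable only when the DLL locked and did not time out */
	return locked && !timeout;
}

In the patch this check gates the high-speed path: clocks at or below 52 MHz bypass the DLL entirely, while faster clocks start the DLL and only go on to program the CMDOUT/TXCLK/STRBIN tap numbers once this condition is met.
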
42255diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c
42256index b3365b34c..fad503820 100644
42257--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c
42258+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c
42259@@ -71,7 +71,6 @@ static int dwmac_generic_probe(struct platform_device *pdev)
42260 
42261 static const struct of_device_id dwmac_generic_match[] = {
42262 	{ .compatible = "st,spear600-gmac"},
42263-	{ .compatible = "snps,dwmac-3.40a"},
42264 	{ .compatible = "snps,dwmac-3.50a"},
42265 	{ .compatible = "snps,dwmac-3.610"},
42266 	{ .compatible = "snps,dwmac-3.70a"},
42267diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
42268index e7fbc9b30..4a6f95200 100644
42269--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
42270+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
42271@@ -11,6 +11,7 @@
42272 #include <linux/bitops.h>
42273 #include <linux/clk.h>
42274 #include <linux/phy.h>
42275+#include <linux/phy/phy.h>
42276 #include <linux/of_net.h>
42277 #include <linux/gpio.h>
42278 #include <linux/module.h>
42279@@ -22,22 +23,30 @@
42280 #include <linux/mfd/syscon.h>
42281 #include <linux/regmap.h>
42282 #include <linux/pm_runtime.h>
42283-
42284+#include <linux/soc/rockchip/rk_vendor_storage.h>
42285 #include "stmmac_platform.h"
42286+#include <linux/dwmac-rk-tool.h>
42287+
42288+#define MAX_ETH		2
42289 
42290 struct rk_priv_data;
42291 struct rk_gmac_ops {
42292 	void (*set_to_rgmii)(struct rk_priv_data *bsp_priv,
42293 			     int tx_delay, int rx_delay);
42294 	void (*set_to_rmii)(struct rk_priv_data *bsp_priv);
42295+	void (*set_to_sgmii)(struct rk_priv_data *bsp_priv);
42296+	void (*set_to_qsgmii)(struct rk_priv_data *bsp_priv);
42297 	void (*set_rgmii_speed)(struct rk_priv_data *bsp_priv, int speed);
42298 	void (*set_rmii_speed)(struct rk_priv_data *bsp_priv, int speed);
42299+	void (*set_clock_selection)(struct rk_priv_data *bsp_priv, bool input,
42300+				    bool enable);
42301 	void (*integrated_phy_powerup)(struct rk_priv_data *bsp_priv);
42302 };
42303 
42304 struct rk_priv_data {
42305 	struct platform_device *pdev;
42306 	phy_interface_t phy_iface;
42307+	int bus_id;
42308 	struct regulator *regulator;
42309 	bool suspended;
42310 	const struct rk_gmac_ops *ops;
42311@@ -56,6 +65,7 @@ struct rk_priv_data {
42312 	struct clk *aclk_mac;
42313 	struct clk *pclk_mac;
42314 	struct clk *clk_phy;
42315+	struct clk *pclk_xpcs;
42316 
42317 	struct reset_control *phy_reset;
42318 
42319@@ -63,8 +73,132 @@ struct rk_priv_data {
42320 	int rx_delay;
42321 
42322 	struct regmap *grf;
42323+	struct regmap *php_grf;
42324+	struct regmap *xpcs;
42325 };
42326 
42327+/* XPCS */
42328+#define XPCS_APB_INCREMENT		(0x4)
42329+#define XPCS_APB_MASK			GENMASK_ULL(20, 0)
42330+
42331+#define SR_MII_BASE			(0x1F0000)
42332+#define SR_MII1_BASE			(0x1A0000)
42333+
42334+#define VR_MII_DIG_CTRL1		(0x8000)
42335+#define VR_MII_AN_CTRL			(0x8001)
42336+#define VR_MII_AN_INTR_STS		(0x8002)
42337+#define VR_MII_LINK_TIMER_CTRL		(0x800A)
42338+
42339+#define SR_MII_CTRL_AN_ENABLE		\
42340+	(BMCR_ANENABLE | BMCR_ANRESTART | BMCR_FULLDPLX | BMCR_SPEED1000)
42341+#define MII_MAC_AUTO_SW			(0x0200)
42342+#define PCS_MODE_OFFSET			(0x1)
42343+#define MII_AN_INTR_EN			(0x1)
42344+#define PCS_SGMII_MODE			(0x2 << PCS_MODE_OFFSET)
42345+#define PCS_QSGMII_MODE			(0X3 << PCS_MODE_OFFSET)
42346+#define VR_MII_CTRL_SGMII_AN_EN		(PCS_SGMII_MODE | MII_AN_INTR_EN)
42347+#define VR_MII_CTRL_QSGMII_AN_EN	(PCS_QSGMII_MODE | MII_AN_INTR_EN)
42348+
42349+#define SR_MII_OFFSET(_x) ({		\
42350+	typeof(_x) (x) = (_x); \
42351+	(((x) == 0) ? SR_MII_BASE : (SR_MII1_BASE + ((x) - 1) * 0x10000)); \
42352+})
42353+
42354+static int xpcs_read(void *priv, int reg)
42355+{
42356+	struct rk_priv_data *bsp_priv = (struct rk_priv_data *)priv;
42357+	int ret, val;
42358+
42359+	ret = regmap_read(bsp_priv->xpcs,
42360+			  (u32)(reg * XPCS_APB_INCREMENT) & XPCS_APB_MASK,
42361+			  &val);
42362+	if (ret)
42363+		return ret;
42364+
42365+	return val;
42366+}
42367+
42368+static int xpcs_write(void *priv, int reg, u16 value)
42369+{
42370+	struct rk_priv_data *bsp_priv = (struct rk_priv_data *)priv;
42371+
42372+	return regmap_write(bsp_priv->xpcs,
42373+			    (reg * XPCS_APB_INCREMENT) & XPCS_APB_MASK, value);
42374+}
42375+
42376+static int xpcs_poll_reset(struct rk_priv_data *bsp_priv, int dev)
42377+{
42378+	/* Poll until the reset bit clears (50ms per retry == 0.6 sec) */
42379+	unsigned int retries = 12;
42380+	int ret;
42381+
42382+	do {
42383+		msleep(50);
42384+		ret = xpcs_read(bsp_priv, SR_MII_OFFSET(dev) + MDIO_CTRL1);
42385+		if (ret < 0)
42386+			return ret;
42387+	} while (ret & MDIO_CTRL1_RESET && --retries);
42388+
42389+	return (ret & MDIO_CTRL1_RESET) ? -ETIMEDOUT : 0;
42390+}
42391+
42392+static int xpcs_soft_reset(struct rk_priv_data *bsp_priv, int dev)
42393+{
42394+	int ret;
42395+
42396+	ret = xpcs_write(bsp_priv, SR_MII_OFFSET(dev) + MDIO_CTRL1,
42397+			 MDIO_CTRL1_RESET);
42398+	if (ret < 0)
42399+		return ret;
42400+
42401+	return xpcs_poll_reset(bsp_priv, dev);
42402+}
42403+
42404+static int xpcs_setup(struct rk_priv_data *bsp_priv, int mode)
42405+{
42406+	int ret, i, id = bsp_priv->bus_id;
42407+	u32 val;
42408+
42409+	if (mode == PHY_INTERFACE_MODE_QSGMII && id > 0)
42410+		return 0;
42411+
42412+	ret = xpcs_soft_reset(bsp_priv, id);
42413+	if (ret) {
42414+		dev_err(&bsp_priv->pdev->dev, "xpcs_soft_reset fail %d\n", ret);
42415+		return ret;
42416+	}
42417+
42418+	xpcs_write(bsp_priv, SR_MII_OFFSET(0) + VR_MII_AN_INTR_STS, 0x0);
42419+	xpcs_write(bsp_priv, SR_MII_OFFSET(0) + VR_MII_LINK_TIMER_CTRL, 0x1);
42420+
42421+	if (mode == PHY_INTERFACE_MODE_SGMII)
42422+		xpcs_write(bsp_priv, SR_MII_OFFSET(0) + VR_MII_AN_CTRL,
42423+			   VR_MII_CTRL_SGMII_AN_EN);
42424+	else
42425+		xpcs_write(bsp_priv, SR_MII_OFFSET(0) + VR_MII_AN_CTRL,
42426+			   VR_MII_CTRL_QSGMII_AN_EN);
42427+
42428+	if (mode == PHY_INTERFACE_MODE_QSGMII) {
42429+		for (i = 0; i < 4; i++) {
42430+			val = xpcs_read(bsp_priv,
42431+					SR_MII_OFFSET(i) + VR_MII_DIG_CTRL1);
42432+			xpcs_write(bsp_priv,
42433+				   SR_MII_OFFSET(i) + VR_MII_DIG_CTRL1,
42434+				   val | MII_MAC_AUTO_SW);
42435+			xpcs_write(bsp_priv, SR_MII_OFFSET(i) + MII_BMCR,
42436+				   SR_MII_CTRL_AN_ENABLE);
42437+		}
42438+	} else {
42439+		val = xpcs_read(bsp_priv, SR_MII_OFFSET(id) + VR_MII_DIG_CTRL1);
42440+		xpcs_write(bsp_priv, SR_MII_OFFSET(id) + VR_MII_DIG_CTRL1,
42441+			   val | MII_MAC_AUTO_SW);
42442+		xpcs_write(bsp_priv, SR_MII_OFFSET(id) + MII_BMCR,
42443+			   SR_MII_CTRL_AN_ENABLE);
42444+	}
42445+
42446+	return ret;
42447+}
42448+
42449 #define HIWORD_UPDATE(val, mask, shift) \
42450 		((val) << (shift) | (mask) << ((shift) + 16))
42451 
42452@@ -72,8 +206,16 @@ struct rk_priv_data {
42453 #define GRF_CLR_BIT(nr)	(BIT(nr+16))
42454 
42455 #define DELAY_ENABLE(soc, tx, rx) \
42456-	(((tx) ? soc##_GMAC_TXCLK_DLY_ENABLE : soc##_GMAC_TXCLK_DLY_DISABLE) | \
42457-	 ((rx) ? soc##_GMAC_RXCLK_DLY_ENABLE : soc##_GMAC_RXCLK_DLY_DISABLE))
42458+	((((tx) >= 0) ? soc##_GMAC_TXCLK_DLY_ENABLE : soc##_GMAC_TXCLK_DLY_DISABLE) | \
42459+	 (((rx) >= 0) ? soc##_GMAC_RXCLK_DLY_ENABLE : soc##_GMAC_RXCLK_DLY_DISABLE))
42460+
42461+#define DELAY_ENABLE_BY_ID(soc, tx, rx, id) \
42462+	((((tx) >= 0) ? soc##_GMAC_TXCLK_DLY_ENABLE(id) : soc##_GMAC_TXCLK_DLY_DISABLE(id)) | \
42463+	 (((rx) >= 0) ? soc##_GMAC_RXCLK_DLY_ENABLE(id) : soc##_GMAC_RXCLK_DLY_DISABLE(id)))
42464+
42465+#define DELAY_VALUE(soc, tx, rx) \
42466+	((((tx) >= 0) ? soc##_GMAC_CLK_TX_DL_CFG(tx) : 0) | \
42467+	 (((rx) >= 0) ? soc##_GMAC_CLK_RX_DL_CFG(rx) : 0))
42468 
42469 #define PX30_GRF_GMAC_CON1		0x0904
42470 
42471@@ -133,6 +275,127 @@ static const struct rk_gmac_ops px30_ops = {
42472 	.set_rmii_speed = px30_set_rmii_speed,
42473 };
42474 
42475+#define RK1808_GRF_GMAC_CON0		0X0900
42476+#define RK1808_GRF_GMAC_CON1		0X0904
42477+
42478+/* RK1808_GRF_GMAC_CON0 */
42479+#define RK1808_GMAC_CLK_RX_DL_CFG(val)	HIWORD_UPDATE(val, 0x7F, 8)
42480+#define RK1808_GMAC_CLK_TX_DL_CFG(val)	HIWORD_UPDATE(val, 0x7F, 0)
42481+
42482+/* RK1808_GRF_GMAC_CON1 */
42483+#define RK1808_GMAC_PHY_INTF_SEL_RGMII	\
42484+		(GRF_BIT(4) | GRF_CLR_BIT(5) | GRF_CLR_BIT(6))
42485+#define RK1808_GMAC_PHY_INTF_SEL_RMII	\
42486+		(GRF_CLR_BIT(4) | GRF_CLR_BIT(5) | GRF_BIT(6))
42487+#define RK1808_GMAC_FLOW_CTRL		GRF_BIT(3)
42488+#define RK1808_GMAC_FLOW_CTRL_CLR	GRF_CLR_BIT(3)
42489+#define RK1808_GMAC_SPEED_10M		GRF_CLR_BIT(2)
42490+#define RK1808_GMAC_SPEED_100M		GRF_BIT(2)
42491+#define RK1808_GMAC_RXCLK_DLY_ENABLE	GRF_BIT(1)
42492+#define RK1808_GMAC_RXCLK_DLY_DISABLE	GRF_CLR_BIT(1)
42493+#define RK1808_GMAC_TXCLK_DLY_ENABLE	GRF_BIT(0)
42494+#define RK1808_GMAC_TXCLK_DLY_DISABLE	GRF_CLR_BIT(0)
42495+
42496+static void rk1808_set_to_rgmii(struct rk_priv_data *bsp_priv,
42497+				int tx_delay, int rx_delay)
42498+{
42499+	struct device *dev = &bsp_priv->pdev->dev;
42500+
42501+	if (IS_ERR(bsp_priv->grf)) {
42502+		dev_err(dev, "Missing rockchip,grf property\n");
42503+		return;
42504+	}
42505+
42506+	regmap_write(bsp_priv->grf, RK1808_GRF_GMAC_CON1,
42507+		     RK1808_GMAC_PHY_INTF_SEL_RGMII |
42508+		     DELAY_ENABLE(RK1808, tx_delay, rx_delay));
42509+
42510+	regmap_write(bsp_priv->grf, RK1808_GRF_GMAC_CON0,
42511+		     DELAY_VALUE(RK1808, tx_delay, rx_delay));
42512+}
42513+
42514+static void rk1808_set_to_rmii(struct rk_priv_data *bsp_priv)
42515+{
42516+	struct device *dev = &bsp_priv->pdev->dev;
42517+
42518+	if (IS_ERR(bsp_priv->grf)) {
42519+		dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
42520+		return;
42521+	}
42522+
42523+	regmap_write(bsp_priv->grf, RK1808_GRF_GMAC_CON1,
42524+		     RK1808_GMAC_PHY_INTF_SEL_RMII);
42525+}
42526+
42527+static void rk1808_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
42528+{
42529+	struct device *dev = &bsp_priv->pdev->dev;
42530+	int ret;
42531+
42532+	if (IS_ERR(bsp_priv->grf)) {
42533+		dev_err(dev, "Missing rockchip,grf property\n");
42534+		return;
42535+	}
42536+
42537+	if (speed == 10) {
42538+		ret = clk_set_rate(bsp_priv->clk_mac_speed, 2500000);
42539+		if (ret)
42540+			dev_err(dev, "%s: set clk_mac_speed rate 2500000 failed: %d\n",
42541+				__func__, ret);
42542+	} else if (speed == 100) {
42543+		ret = clk_set_rate(bsp_priv->clk_mac_speed, 25000000);
42544+		if (ret)
42545+			dev_err(dev, "%s: set clk_mac_speed rate 25000000 failed: %d\n",
42546+				__func__, ret);
42547+	} else if (speed == 1000) {
42548+		ret = clk_set_rate(bsp_priv->clk_mac_speed, 125000000);
42549+		if (ret)
42550+			dev_err(dev, "%s: set clk_mac_speed rate 125000000 failed: %d\n",
42551+				__func__, ret);
42552+	} else {
42553+		dev_err(dev, "unknown speed value for RGMII! speed=%d", speed);
42554+	}
42555+}
42556+
42557+static void rk1808_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
42558+{
42559+	struct device *dev = &bsp_priv->pdev->dev;
42560+	int ret;
42561+
42562+	if (IS_ERR(bsp_priv->clk_mac_speed)) {
42563+		dev_err(dev, "%s: Missing clk_mac_speed clock\n", __func__);
42564+		return;
42565+	}
42566+
42567+	if (speed == 10) {
42568+		regmap_write(bsp_priv->grf, RK1808_GRF_GMAC_CON1,
42569+			     RK1808_GMAC_SPEED_10M);
42570+
42571+		ret = clk_set_rate(bsp_priv->clk_mac_speed, 2500000);
42572+		if (ret)
42573+			dev_err(dev, "%s: set clk_mac_speed rate 2500000 failed: %d\n",
42574+				__func__, ret);
42575+	} else if (speed == 100) {
42576+		regmap_write(bsp_priv->grf, RK1808_GRF_GMAC_CON1,
42577+			     RK1808_GMAC_SPEED_100M);
42578+
42579+		ret = clk_set_rate(bsp_priv->clk_mac_speed, 25000000);
42580+		if (ret)
42581+			dev_err(dev, "%s: set clk_mac_speed rate 25000000 failed: %d\n",
42582+				__func__, ret);
42583+
42584+	} else {
42585+		dev_err(dev, "unknown speed value for RMII! speed=%d", speed);
42586+	}
42587+}
42588+
42589+static const struct rk_gmac_ops rk1808_ops = {
42590+	.set_to_rgmii = rk1808_set_to_rgmii,
42591+	.set_to_rmii = rk1808_set_to_rmii,
42592+	.set_rgmii_speed = rk1808_set_rgmii_speed,
42593+	.set_rmii_speed = rk1808_set_rmii_speed,
42594+};
42595+
42596 #define RK3128_GRF_MAC_CON0	0x0168
42597 #define RK3128_GRF_MAC_CON1	0x016c
42598 
42599@@ -176,8 +439,7 @@ static void rk3128_set_to_rgmii(struct rk_priv_data *bsp_priv,
42600 		     RK3128_GMAC_RMII_MODE_CLR);
42601 	regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON0,
42602 		     DELAY_ENABLE(RK3128, tx_delay, rx_delay) |
42603-		     RK3128_GMAC_CLK_RX_DL_CFG(rx_delay) |
42604-		     RK3128_GMAC_CLK_TX_DL_CFG(tx_delay));
42605+		     DELAY_VALUE(RK3128, tx_delay, rx_delay));
42606 }
42607 
42608 static void rk3128_set_to_rmii(struct rk_priv_data *bsp_priv)
42609@@ -293,8 +555,7 @@ static void rk3228_set_to_rgmii(struct rk_priv_data *bsp_priv,
42610 		     DELAY_ENABLE(RK3228, tx_delay, rx_delay));
42611 
42612 	regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON0,
42613-		     RK3228_GMAC_CLK_RX_DL_CFG(rx_delay) |
42614-		     RK3228_GMAC_CLK_TX_DL_CFG(tx_delay));
42615+		     DELAY_VALUE(RK3228, tx_delay, rx_delay));
42616 }
42617 
42618 static void rk3228_set_to_rmii(struct rk_priv_data *bsp_priv)
42619@@ -414,8 +675,7 @@ static void rk3288_set_to_rgmii(struct rk_priv_data *bsp_priv,
42620 		     RK3288_GMAC_RMII_MODE_CLR);
42621 	regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON3,
42622 		     DELAY_ENABLE(RK3288, tx_delay, rx_delay) |
42623-		     RK3288_GMAC_CLK_RX_DL_CFG(rx_delay) |
42624-		     RK3288_GMAC_CLK_TX_DL_CFG(tx_delay));
42625+		     DELAY_VALUE(RK3288, tx_delay, rx_delay));
42626 }
42627 
42628 static void rk3288_set_to_rmii(struct rk_priv_data *bsp_priv)
42629@@ -482,6 +742,64 @@ static const struct rk_gmac_ops rk3288_ops = {
42630 	.set_rmii_speed = rk3288_set_rmii_speed,
42631 };
42632 
42633+#define RK3308_GRF_MAC_CON0		0x04a0
42634+
42635+/* RK3308_GRF_MAC_CON0 */
42636+#define RK3308_MAC_PHY_INTF_SEL_RMII	(GRF_CLR_BIT(2) | GRF_CLR_BIT(3) | \
42637+					GRF_BIT(4))
42638+#define RK3308_MAC_SPEED_10M		GRF_CLR_BIT(0)
42639+#define Rk3308_MAC_SPEED_100M		GRF_BIT(0)
42640+
42641+static void rk3308_set_to_rmii(struct rk_priv_data *bsp_priv)
42642+{
42643+	struct device *dev = &bsp_priv->pdev->dev;
42644+
42645+	if (IS_ERR(bsp_priv->grf)) {
42646+		dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
42647+		return;
42648+	}
42649+
42650+	regmap_write(bsp_priv->grf, RK3308_GRF_MAC_CON0,
42651+		     RK3308_MAC_PHY_INTF_SEL_RMII);
42652+}
42653+
42654+static void rk3308_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
42655+{
42656+	struct device *dev = &bsp_priv->pdev->dev;
42657+	int ret;
42658+
42659+	if (IS_ERR(bsp_priv->clk_mac_speed)) {
42660+		dev_err(dev, "%s: Missing clk_mac_speed clock\n", __func__);
42661+		return;
42662+	}
42663+
42664+	if (speed == 10) {
42665+		regmap_write(bsp_priv->grf, RK3308_GRF_MAC_CON0,
42666+			     RK3308_MAC_SPEED_10M);
42667+
42668+		ret = clk_set_rate(bsp_priv->clk_mac_speed, 2500000);
42669+		if (ret)
42670+			dev_err(dev, "%s: set clk_mac_speed rate 2500000 failed: %d\n",
42671+				__func__, ret);
42672+	} else if (speed == 100) {
42673+		regmap_write(bsp_priv->grf, RK3308_GRF_MAC_CON0,
42674+			     Rk3308_MAC_SPEED_100M);
42675+
42676+		ret = clk_set_rate(bsp_priv->clk_mac_speed, 25000000);
42677+		if (ret)
42678+			dev_err(dev, "%s: set clk_mac_speed rate 25000000 failed: %d\n",
42679+				__func__, ret);
42680+
42681+	} else {
42682+		dev_err(dev, "unknown speed value for RMII! speed=%d", speed);
42683+	}
42684+}
42685+
42686+static const struct rk_gmac_ops rk3308_ops = {
42687+	.set_to_rmii = rk3308_set_to_rmii,
42688+	.set_rmii_speed = rk3308_set_rmii_speed,
42689+};
42690+
42691 #define RK3328_GRF_MAC_CON0	0x0900
42692 #define RK3328_GRF_MAC_CON1	0x0904
42693 #define RK3328_GRF_MAC_CON2	0x0908
42694@@ -528,12 +846,10 @@ static void rk3328_set_to_rgmii(struct rk_priv_data *bsp_priv,
42695 	regmap_write(bsp_priv->grf, RK3328_GRF_MAC_CON1,
42696 		     RK3328_GMAC_PHY_INTF_SEL_RGMII |
42697 		     RK3328_GMAC_RMII_MODE_CLR |
42698-		     RK3328_GMAC_RXCLK_DLY_ENABLE |
42699-		     RK3328_GMAC_TXCLK_DLY_ENABLE);
42700+		     DELAY_ENABLE(RK3328, tx_delay, rx_delay));
42701 
42702 	regmap_write(bsp_priv->grf, RK3328_GRF_MAC_CON0,
42703-		     RK3328_GMAC_CLK_RX_DL_CFG(rx_delay) |
42704-		     RK3328_GMAC_CLK_TX_DL_CFG(tx_delay));
42705+		     DELAY_VALUE(RK3328, tx_delay, rx_delay));
42706 }
42707 
42708 static void rk3328_set_to_rmii(struct rk_priv_data *bsp_priv)
42709@@ -658,8 +974,7 @@ static void rk3366_set_to_rgmii(struct rk_priv_data *bsp_priv,
42710 		     RK3366_GMAC_RMII_MODE_CLR);
42711 	regmap_write(bsp_priv->grf, RK3366_GRF_SOC_CON7,
42712 		     DELAY_ENABLE(RK3366, tx_delay, rx_delay) |
42713-		     RK3366_GMAC_CLK_RX_DL_CFG(rx_delay) |
42714-		     RK3366_GMAC_CLK_TX_DL_CFG(tx_delay));
42715+		     DELAY_VALUE(RK3366, tx_delay, rx_delay));
42716 }
42717 
42718 static void rk3366_set_to_rmii(struct rk_priv_data *bsp_priv)
42719@@ -769,8 +1084,7 @@ static void rk3368_set_to_rgmii(struct rk_priv_data *bsp_priv,
42720 		     RK3368_GMAC_RMII_MODE_CLR);
42721 	regmap_write(bsp_priv->grf, RK3368_GRF_SOC_CON16,
42722 		     DELAY_ENABLE(RK3368, tx_delay, rx_delay) |
42723-		     RK3368_GMAC_CLK_RX_DL_CFG(rx_delay) |
42724-		     RK3368_GMAC_CLK_TX_DL_CFG(tx_delay));
42725+		     DELAY_VALUE(RK3368, tx_delay, rx_delay));
42726 }
42727 
42728 static void rk3368_set_to_rmii(struct rk_priv_data *bsp_priv)
42729@@ -880,8 +1194,7 @@ static void rk3399_set_to_rgmii(struct rk_priv_data *bsp_priv,
42730 		     RK3399_GMAC_RMII_MODE_CLR);
42731 	regmap_write(bsp_priv->grf, RK3399_GRF_SOC_CON6,
42732 		     DELAY_ENABLE(RK3399, tx_delay, rx_delay) |
42733-		     RK3399_GMAC_CLK_RX_DL_CFG(rx_delay) |
42734-		     RK3399_GMAC_CLK_TX_DL_CFG(tx_delay));
42735+		     DELAY_VALUE(RK3399, tx_delay, rx_delay));
42736 }
42737 
42738 static void rk3399_set_to_rmii(struct rk_priv_data *bsp_priv)
42739@@ -948,6 +1261,283 @@ static const struct rk_gmac_ops rk3399_ops = {
42740 	.set_rmii_speed = rk3399_set_rmii_speed,
42741 };
42742 
42743+#define RK3568_GRF_GMAC0_CON0		0X0380
42744+#define RK3568_GRF_GMAC0_CON1		0X0384
42745+#define RK3568_GRF_GMAC1_CON0		0X0388
42746+#define RK3568_GRF_GMAC1_CON1		0X038c
42747+
42748+/* RK3568_GRF_GMAC0_CON1 && RK3568_GRF_GMAC1_CON1 */
42749+#define RK3568_GMAC_GMII_MODE			GRF_BIT(7)
42750+#define RK3568_GMAC_PHY_INTF_SEL_RGMII	\
42751+		(GRF_BIT(4) | GRF_CLR_BIT(5) | GRF_CLR_BIT(6))
42752+#define RK3568_GMAC_PHY_INTF_SEL_RMII	\
42753+		(GRF_CLR_BIT(4) | GRF_CLR_BIT(5) | GRF_BIT(6))
42754+#define RK3568_GMAC_FLOW_CTRL			GRF_BIT(3)
42755+#define RK3568_GMAC_FLOW_CTRL_CLR		GRF_CLR_BIT(3)
42756+#define RK3568_GMAC_RXCLK_DLY_ENABLE		GRF_BIT(1)
42757+#define RK3568_GMAC_RXCLK_DLY_DISABLE		GRF_CLR_BIT(1)
42758+#define RK3568_GMAC_TXCLK_DLY_ENABLE		GRF_BIT(0)
42759+#define RK3568_GMAC_TXCLK_DLY_DISABLE		GRF_CLR_BIT(0)
42760+
42761+/* RK3568_GRF_GMAC0_CON0 && RK3568_GRF_GMAC1_CON0 */
42762+#define RK3568_GMAC_CLK_RX_DL_CFG(val)	HIWORD_UPDATE(val, 0x7F, 8)
42763+#define RK3568_GMAC_CLK_TX_DL_CFG(val)	HIWORD_UPDATE(val, 0x7F, 0)
42764+
42765+#define RK3568_PIPE_GRF_XPCS_CON0	0X0040
42766+
42767+#define RK3568_PIPE_GRF_XPCS_QGMII_MAC_SEL	GRF_BIT(0)
42768+#define RK3568_PIPE_GRF_XPCS_SGMII_MAC_SEL	GRF_BIT(1)
42769+#define RK3568_PIPE_GRF_XPCS_PHY_READY		GRF_BIT(2)
42770+
42771+static void rk3568_set_to_sgmii(struct rk_priv_data *bsp_priv)
42772+{
42773+	struct device *dev = &bsp_priv->pdev->dev;
42774+	u32 offset_con1;
42775+
42776+	if (IS_ERR(bsp_priv->grf)) {
42777+		dev_err(dev, "%s: Missing rockchip,grfs property\n", __func__);
42778+		return;
42779+	}
42780+
42781+	offset_con1 = bsp_priv->bus_id == 1 ? RK3568_GRF_GMAC1_CON1 :
42782+					      RK3568_GRF_GMAC0_CON1;
42783+	regmap_write(bsp_priv->grf, offset_con1, RK3568_GMAC_GMII_MODE);
42784+
42785+	xpcs_setup(bsp_priv, PHY_INTERFACE_MODE_SGMII);
42786+}
42787+
42788+static void rk3568_set_to_qsgmii(struct rk_priv_data *bsp_priv)
42789+{
42790+	struct device *dev = &bsp_priv->pdev->dev;
42791+	u32 offset_con1;
42792+
42793+	if (IS_ERR(bsp_priv->grf)) {
42794+		dev_err(dev, "%s: Missing rockchip,grfs property\n", __func__);
42795+		return;
42796+	}
42797+
42798+	offset_con1 = bsp_priv->bus_id == 1 ? RK3568_GRF_GMAC1_CON1 :
42799+					      RK3568_GRF_GMAC0_CON1;
42800+	regmap_write(bsp_priv->grf, offset_con1, RK3568_GMAC_GMII_MODE);
42801+
42802+	xpcs_setup(bsp_priv, PHY_INTERFACE_MODE_QSGMII);
42803+}
42804+
42805+static void rk3568_set_to_rgmii(struct rk_priv_data *bsp_priv,
42806+				int tx_delay, int rx_delay)
42807+{
42808+	struct device *dev = &bsp_priv->pdev->dev;
42809+	u32 offset_con0, offset_con1;
42810+
42811+	if (IS_ERR(bsp_priv->grf)) {
42812+		dev_err(dev, "Missing rockchip,grf property\n");
42813+		return;
42814+	}
42815+
42816+	offset_con0 = (bsp_priv->bus_id == 1) ? RK3568_GRF_GMAC1_CON0 :
42817+						RK3568_GRF_GMAC0_CON0;
42818+	offset_con1 = (bsp_priv->bus_id == 1) ? RK3568_GRF_GMAC1_CON1 :
42819+						RK3568_GRF_GMAC0_CON1;
42820+
42821+	regmap_write(bsp_priv->grf, offset_con1,
42822+		     RK3568_GMAC_PHY_INTF_SEL_RGMII |
42823+		     DELAY_ENABLE(RK3568, tx_delay, rx_delay));
42824+
42825+	regmap_write(bsp_priv->grf, offset_con0,
42826+		     DELAY_VALUE(RK3568, tx_delay, rx_delay));
42827+}
42828+
42829+static void rk3568_set_to_rmii(struct rk_priv_data *bsp_priv)
42830+{
42831+	struct device *dev = &bsp_priv->pdev->dev;
42832+	u32 offset_con1;
42833+
42834+	if (IS_ERR(bsp_priv->grf)) {
42835+		dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
42836+		return;
42837+	}
42838+
42839+	offset_con1 = (bsp_priv->bus_id == 1) ? RK3568_GRF_GMAC1_CON1 :
42840+						RK3568_GRF_GMAC0_CON1;
42841+
42842+	regmap_write(bsp_priv->grf, offset_con1, RK3568_GMAC_PHY_INTF_SEL_RMII);
42843+}
42844+
42845+static void rk3568_set_gmac_speed(struct rk_priv_data *bsp_priv, int speed)
42846+{
42847+	struct device *dev = &bsp_priv->pdev->dev;
42848+	unsigned long rate;
42849+	int ret;
42850+
42851+	switch (speed) {
42852+	case 10:
42853+		rate = 2500000;
42854+		break;
42855+	case 100:
42856+		rate = 25000000;
42857+		break;
42858+	case 1000:
42859+		rate = 125000000;
42860+		break;
42861+	default:
42862+		dev_err(dev, "unknown speed value for GMAC speed=%d", speed);
42863+		return;
42864+	}
42865+
42866+	ret = clk_set_rate(bsp_priv->clk_mac_speed, rate);
42867+	if (ret)
42868+		dev_err(dev, "%s: set clk_mac_speed rate %ld failed %d\n",
42869+			__func__, rate, ret);
42870+}
42871+
42872+static const struct rk_gmac_ops rk3568_ops = {
42873+	.set_to_rgmii = rk3568_set_to_rgmii,
42874+	.set_to_rmii = rk3568_set_to_rmii,
42875+	.set_to_sgmii = rk3568_set_to_sgmii,
42876+	.set_to_qsgmii = rk3568_set_to_qsgmii,
42877+	.set_rgmii_speed = rk3568_set_gmac_speed,
42878+	.set_rmii_speed = rk3568_set_gmac_speed,
42879+};
42880+
42881+/* sys_grf */
42882+#define RK3588_GRF_GMAC_CON7			0X031c
42883+#define RK3588_GRF_GMAC_CON8			0X0320
42884+#define RK3588_GRF_GMAC_CON9			0X0324
42885+
42886+#define RK3588_GMAC_RXCLK_DLY_ENABLE(id)	GRF_BIT(2 * (id) + 3)
42887+#define RK3588_GMAC_RXCLK_DLY_DISABLE(id)	GRF_CLR_BIT(2 * (id) + 3)
42888+#define RK3588_GMAC_TXCLK_DLY_ENABLE(id)	GRF_BIT(2 * (id) + 2)
42889+#define RK3588_GMAC_TXCLK_DLY_DISABLE(id)	GRF_CLR_BIT(2 * (id) + 2)
42890+
42891+#define RK3588_GMAC_CLK_RX_DL_CFG(val)		HIWORD_UPDATE(val, 0xFF, 8)
42892+#define RK3588_GMAC_CLK_TX_DL_CFG(val)		HIWORD_UPDATE(val, 0xFF, 0)
42893+
42894+/* php_grf */
42895+#define RK3588_GRF_GMAC_CON0			0X0008
42896+#define RK3588_GRF_CLK_CON1			0X0070
42897+
42898+#define RK3588_GMAC_PHY_INTF_SEL_RGMII(id)	\
42899+	(GRF_BIT(3 + (id) * 6) | GRF_CLR_BIT(4 + (id) * 6) | GRF_CLR_BIT(5 + (id) * 6))
42900+#define RK3588_GMAC_PHY_INTF_SEL_RMII(id)	\
42901+	(GRF_CLR_BIT(3 + (id) * 6) | GRF_CLR_BIT(4 + (id) * 6) | GRF_BIT(5 + (id) * 6))
42902+
42903+#define RK3588_GMAC_CLK_RMII_MODE(id)		GRF_BIT(5 * (id))
42904+#define RK3588_GMAC_CLK_RGMII_MODE(id)		GRF_CLR_BIT(5 * (id))
42905+
42906+#define RK3588_GMAC_CLK_SELET_CRU(id)		GRF_BIT(5 * (id) + 4)
42907+#define RK3588_GMAC_CLK_SELET_IO(id)		GRF_CLR_BIT(5 * (id) + 4)
42908+
42909+#define RK3588_GMA_CLK_RMII_DIV2(id)		GRF_BIT(5 * (id) + 2)
42910+#define RK3588_GMA_CLK_RMII_DIV20(id)		GRF_CLR_BIT(5 * (id) + 2)
42911+
42912+#define RK3588_GMAC_CLK_RGMII_DIV1(id)		\
42913+			(GRF_CLR_BIT(5 * (id) + 2) | GRF_CLR_BIT(5 * (id) + 3))
42914+#define RK3588_GMAC_CLK_RGMII_DIV5(id)		\
42915+			(GRF_BIT(5 * (id) + 2) | GRF_BIT(5 * (id) + 3))
42916+#define RK3588_GMAC_CLK_RGMII_DIV50(id)		\
42917+			(GRF_CLR_BIT(5 * (id) + 2) | GRF_BIT(5 * (id) + 3))
42918+
42919+#define RK3588_GMAC_CLK_RMII_GATE(id)		GRF_BIT(5 * (id) + 1)
42920+#define RK3588_GMAC_CLK_RMII_NOGATE(id)		GRF_CLR_BIT(5 * (id) + 1)
42921+
42922+static void rk3588_set_to_rgmii(struct rk_priv_data *bsp_priv,
42923+				int tx_delay, int rx_delay)
42924+{
42925+	struct device *dev = &bsp_priv->pdev->dev;
42926+	u32 offset_con, id = bsp_priv->bus_id;
42927+
42928+	if (IS_ERR(bsp_priv->grf) || IS_ERR(bsp_priv->php_grf)) {
42929+		dev_err(dev, "Missing rockchip,grf or rockchip,php_grf property\n");
42930+		return;
42931+	}
42932+
42933+	offset_con = bsp_priv->bus_id == 1 ? RK3588_GRF_GMAC_CON9 :
42934+					     RK3588_GRF_GMAC_CON8;
42935+
42936+	regmap_write(bsp_priv->php_grf, RK3588_GRF_GMAC_CON0,
42937+		     RK3588_GMAC_PHY_INTF_SEL_RGMII(id));
42938+
42939+	regmap_write(bsp_priv->php_grf, RK3588_GRF_CLK_CON1,
42940+		     RK3588_GMAC_CLK_RGMII_MODE(id));
42941+
42942+	regmap_write(bsp_priv->grf, RK3588_GRF_GMAC_CON7,
42943+		     DELAY_ENABLE_BY_ID(RK3588, tx_delay, rx_delay, id));
42944+
42945+	regmap_write(bsp_priv->grf, offset_con,
42946+		     DELAY_VALUE(RK3588, tx_delay, rx_delay));
42947+}
42948+
42949+static void rk3588_set_to_rmii(struct rk_priv_data *bsp_priv)
42950+{
42951+	struct device *dev = &bsp_priv->pdev->dev;
42952+
42953+	if (IS_ERR(bsp_priv->php_grf)) {
42954+		dev_err(dev, "%s: Missing rockchip,php_grf property\n", __func__);
42955+		return;
42956+	}
42957+
42958+	regmap_write(bsp_priv->php_grf, RK3588_GRF_GMAC_CON0,
42959+		     RK3588_GMAC_PHY_INTF_SEL_RMII(bsp_priv->bus_id));
42960+
42961+	regmap_write(bsp_priv->php_grf, RK3588_GRF_CLK_CON1,
42962+		     RK3588_GMAC_CLK_RMII_MODE(bsp_priv->bus_id));
42963+}
42964+
42965+static void rk3588_set_gmac_speed(struct rk_priv_data *bsp_priv, int speed)
42966+{
42967+	struct device *dev = &bsp_priv->pdev->dev;
42968+	unsigned int val = 0, id = bsp_priv->bus_id;
42969+
42970+	switch (speed) {
42971+	case 10:
42972+		if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RMII)
42973+			val = RK3588_GMA_CLK_RMII_DIV20(id);
42974+		else
42975+			val = RK3588_GMAC_CLK_RGMII_DIV50(id);
42976+		break;
42977+	case 100:
42978+		if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RMII)
42979+			val = RK3588_GMA_CLK_RMII_DIV2(id);
42980+		else
42981+			val = RK3588_GMAC_CLK_RGMII_DIV5(id);
42982+		break;
42983+	case 1000:
42984+		if (bsp_priv->phy_iface != PHY_INTERFACE_MODE_RMII)
42985+			val = RK3588_GMAC_CLK_RGMII_DIV1(id);
42986+		else
42987+			goto err;
42988+		break;
42989+	default:
42990+		goto err;
42991+	}
42992+
42993+	regmap_write(bsp_priv->php_grf, RK3588_GRF_CLK_CON1, val);
42994+
42995+	return;
42996+err:
42997+	dev_err(dev, "unknown speed value for GMAC speed=%d", speed);
42998+}
42999+
43000+static void rk3588_set_clock_selection(struct rk_priv_data *bsp_priv, bool input,
43001+				       bool enable)
43002+{
43003+	unsigned int val = input ? RK3588_GMAC_CLK_SELET_IO(bsp_priv->bus_id) :
43004+				   RK3588_GMAC_CLK_SELET_CRU(bsp_priv->bus_id);
43005+
43006+	val |= enable ? RK3588_GMAC_CLK_RMII_NOGATE(bsp_priv->bus_id) :
43007+			RK3588_GMAC_CLK_RMII_GATE(bsp_priv->bus_id);
43008+
43009+	regmap_write(bsp_priv->php_grf, RK3588_GRF_CLK_CON1, val);
43010+}
43011+
43012+static const struct rk_gmac_ops rk3588_ops = {
43013+	.set_to_rgmii = rk3588_set_to_rgmii,
43014+	.set_to_rmii = rk3588_set_to_rmii,
43015+	.set_rgmii_speed = rk3588_set_gmac_speed,
43016+	.set_rmii_speed = rk3588_set_gmac_speed,
43017+	.set_clock_selection = rk3588_set_clock_selection,
43018+};
43019+
43020 #define RV1108_GRF_GMAC_CON0		0X0900
43021 
43022 /* RV1108_GRF_GMAC_CON0 */
43023@@ -1000,6 +1590,123 @@ static const struct rk_gmac_ops rv1108_ops = {
43024 	.set_rmii_speed = rv1108_set_rmii_speed,
43025 };
43026 
43027+#define RV1126_GRF_GMAC_CON0		0X0070
43028+#define RV1126_GRF_GMAC_CON1		0X0074
43029+#define RV1126_GRF_GMAC_CON2		0X0078
43030+
43031+/* RV1126_GRF_GMAC_CON0 */
43032+#define RV1126_GMAC_PHY_INTF_SEL_RGMII	\
43033+		(GRF_BIT(4) | GRF_CLR_BIT(5) | GRF_CLR_BIT(6))
43034+#define RV1126_GMAC_PHY_INTF_SEL_RMII	\
43035+		(GRF_CLR_BIT(4) | GRF_CLR_BIT(5) | GRF_BIT(6))
43036+#define RV1126_GMAC_FLOW_CTRL			GRF_BIT(7)
43037+#define RV1126_GMAC_FLOW_CTRL_CLR		GRF_CLR_BIT(7)
43038+#define RV1126_M0_GMAC_RXCLK_DLY_ENABLE		GRF_BIT(1)
43039+#define RV1126_M0_GMAC_RXCLK_DLY_DISABLE	GRF_CLR_BIT(1)
43040+#define RV1126_M0_GMAC_TXCLK_DLY_ENABLE		GRF_BIT(0)
43041+#define RV1126_M0_GMAC_TXCLK_DLY_DISABLE	GRF_CLR_BIT(0)
43042+#define RV1126_M1_GMAC_RXCLK_DLY_ENABLE		GRF_BIT(3)
43043+#define RV1126_M1_GMAC_RXCLK_DLY_DISABLE	GRF_CLR_BIT(3)
43044+#define RV1126_M1_GMAC_TXCLK_DLY_ENABLE		GRF_BIT(2)
43045+#define RV1126_M1_GMAC_TXCLK_DLY_DISABLE	GRF_CLR_BIT(2)
43046+
43047+/* RV1126_GRF_GMAC_CON1 && RV1126_GRF_GMAC_CON2 */
43048+#define RV1126_GMAC_CLK_RX_DL_CFG(val)	HIWORD_UPDATE(val, 0x7F, 8)
43049+#define RV1126_GMAC_CLK_TX_DL_CFG(val)	HIWORD_UPDATE(val, 0x7F, 0)
43050+
43051+static void rv1126_set_to_rgmii(struct rk_priv_data *bsp_priv,
43052+				int tx_delay, int rx_delay)
43053+{
43054+	struct device *dev = &bsp_priv->pdev->dev;
43055+
43056+	if (IS_ERR(bsp_priv->grf)) {
43057+		dev_err(dev, "Missing rockchip,grf property\n");
43058+		return;
43059+	}
43060+
43061+	regmap_write(bsp_priv->grf, RV1126_GRF_GMAC_CON0,
43062+		     RV1126_GMAC_PHY_INTF_SEL_RGMII |
43063+		     DELAY_ENABLE(RV1126_M0, tx_delay, rx_delay) |
43064+		     DELAY_ENABLE(RV1126_M1, tx_delay, rx_delay));
43065+
43066+	regmap_write(bsp_priv->grf, RV1126_GRF_GMAC_CON1,
43067+		     DELAY_VALUE(RV1126, tx_delay, rx_delay));
43068+
43069+	regmap_write(bsp_priv->grf, RV1126_GRF_GMAC_CON2,
43070+		     DELAY_VALUE(RV1126, tx_delay, rx_delay));
43071+}
43072+
43073+static void rv1126_set_to_rmii(struct rk_priv_data *bsp_priv)
43074+{
43075+	struct device *dev = &bsp_priv->pdev->dev;
43076+
43077+	if (IS_ERR(bsp_priv->grf)) {
43078+		dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
43079+		return;
43080+	}
43081+
43082+	regmap_write(bsp_priv->grf, RV1126_GRF_GMAC_CON0,
43083+		     RV1126_GMAC_PHY_INTF_SEL_RMII);
43084+}
43085+
43086+static void rv1126_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
43087+{
43088+	struct device *dev = &bsp_priv->pdev->dev;
43089+	unsigned long rate;
43090+	int ret;
43091+
43092+	switch (speed) {
43093+	case 10:
43094+		rate = 2500000;
43095+		break;
43096+	case 100:
43097+		rate = 25000000;
43098+		break;
43099+	case 1000:
43100+		rate = 125000000;
43101+		break;
43102+	default:
43103+		dev_err(dev, "unknown speed value for RGMII speed=%d", speed);
43104+		return;
43105+	}
43106+
43107+	ret = clk_set_rate(bsp_priv->clk_mac_speed, rate);
43108+	if (ret)
43109+		dev_err(dev, "%s: set clk_mac_speed rate %ld failed %d\n",
43110+			__func__, rate, ret);
43111+}
43112+
43113+static void rv1126_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
43114+{
43115+	struct device *dev = &bsp_priv->pdev->dev;
43116+	unsigned long rate;
43117+	int ret;
43118+
43119+	switch (speed) {
43120+	case 10:
43121+		rate = 2500000;
43122+		break;
43123+	case 100:
43124+		rate = 25000000;
43125+		break;
43126+	default:
43127+		dev_err(dev, "unknown speed value for RGMII speed=%d", speed);
43128+		return;
43129+	}
43130+
43131+	ret = clk_set_rate(bsp_priv->clk_mac_speed, rate);
43132+	if (ret)
43133+		dev_err(dev, "%s: set clk_mac_speed rate %ld failed %d\n",
43134+			__func__, rate, ret);
43135+}
43136+
43137+static const struct rk_gmac_ops rv1126_ops = {
43138+	.set_to_rgmii = rv1126_set_to_rgmii,
43139+	.set_to_rmii = rv1126_set_to_rmii,
43140+	.set_rgmii_speed = rv1126_set_rgmii_speed,
43141+	.set_rmii_speed = rv1126_set_rmii_speed,
43142+};
43143+
43144 #define RK_GRF_MACPHY_CON0		0xb00
43145 #define RK_GRF_MACPHY_CON1		0xb04
43146 #define RK_GRF_MACPHY_CON2		0xb08
43147@@ -1090,6 +1797,12 @@ static int rk_gmac_clk_init(struct plat_stmmacenet_data *plat)
43148 				dev_err(dev, "cannot get clock %s\n",
43149 					"clk_mac_refout");
43150 		}
43151+	} else if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_SGMII ||
43152+		   bsp_priv->phy_iface == PHY_INTERFACE_MODE_QSGMII) {
43153+		bsp_priv->pclk_xpcs = devm_clk_get(dev, "pclk_xpcs");
43154+		if (IS_ERR(bsp_priv->pclk_xpcs))
43155+			dev_err(dev, "cannot get clock %s\n",
43156+				"pclk_xpcs");
43157 	}
43158 
43159 	bsp_priv->clk_mac_speed = devm_clk_get(dev, "clk_mac_speed");
43160@@ -1103,14 +1816,17 @@ static int rk_gmac_clk_init(struct plat_stmmacenet_data *plat)
43161 			clk_set_rate(bsp_priv->clk_mac, 50000000);
43162 	}
43163 
43164-	if (plat->phy_node && bsp_priv->integrated_phy) {
43165+	if (plat->phy_node) {
43166 		bsp_priv->clk_phy = of_clk_get(plat->phy_node, 0);
43167-		if (IS_ERR(bsp_priv->clk_phy)) {
43168-			ret = PTR_ERR(bsp_priv->clk_phy);
43169-			dev_err(dev, "Cannot get PHY clock: %d\n", ret);
43170-			return -EINVAL;
43171+		/* If this is not the integrated PHY, clk_phy is optional */
43172+		if (bsp_priv->integrated_phy) {
43173+			if (IS_ERR(bsp_priv->clk_phy)) {
43174+				ret = PTR_ERR(bsp_priv->clk_phy);
43175+				dev_err(dev, "Cannot get PHY clock: %d\n", ret);
43176+				return -EINVAL;
43177+			}
43178+			clk_set_rate(bsp_priv->clk_phy, 50000000);
43179 		}
43180-		clk_set_rate(bsp_priv->clk_phy, 50000000);
43181 	}
43182 
43183 	return 0;
43184@@ -1151,11 +1867,18 @@ static int gmac_clk_enable(struct rk_priv_data *bsp_priv, bool enable)
43185 			if (!IS_ERR(bsp_priv->clk_mac_speed))
43186 				clk_prepare_enable(bsp_priv->clk_mac_speed);
43187 
43188+			if (!IS_ERR(bsp_priv->pclk_xpcs))
43189+				clk_prepare_enable(bsp_priv->pclk_xpcs);
43190+
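+			/*
+			 * SoCs that can select the MAC clock source (external
+			 * input vs. internal PLL), such as RK3588, configure
+			 * and ungate it here.
+			 */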
43191+			if (bsp_priv->ops && bsp_priv->ops->set_clock_selection)
43192+				bsp_priv->ops->set_clock_selection(bsp_priv,
43193+					       bsp_priv->clock_input, true);
43194+
43195 			/**
43196 			 * if (!IS_ERR(bsp_priv->clk_mac))
43197 			 *	clk_prepare_enable(bsp_priv->clk_mac);
43198 			 */
43199-			mdelay(5);
43200+			usleep_range(100, 200);
43201 			bsp_priv->clk_enabled = true;
43202 		}
43203 	} else {
43204@@ -1177,6 +1900,12 @@ static int gmac_clk_enable(struct rk_priv_data *bsp_priv, bool enable)
43205 			clk_disable_unprepare(bsp_priv->mac_clk_tx);
43206 
43207 			clk_disable_unprepare(bsp_priv->clk_mac_speed);
43208+
43209+			clk_disable_unprepare(bsp_priv->pclk_xpcs);
43210+
43211+			if (bsp_priv->ops && bsp_priv->ops->set_clock_selection)
43212+				bsp_priv->ops->set_clock_selection(bsp_priv,
43213+					      bsp_priv->clock_input, false);
43214 			/**
43215 			 * if (!IS_ERR(bsp_priv->clk_mac))
43216 			 *	clk_disable_unprepare(bsp_priv->clk_mac);
43217@@ -1188,7 +1917,7 @@ static int gmac_clk_enable(struct rk_priv_data *bsp_priv, bool enable)
43218 	return 0;
43219 }
43220 
43221-static int phy_power_on(struct rk_priv_data *bsp_priv, bool enable)
43222+static int rk_gmac_phy_power_on(struct rk_priv_data *bsp_priv, bool enable)
43223 {
43224 	struct regulator *ldo = bsp_priv->regulator;
43225 	int ret;
43226@@ -1226,6 +1955,7 @@ static struct rk_priv_data *rk_gmac_setup(struct platform_device *pdev,
43227 
43228 	of_get_phy_mode(dev->of_node, &bsp_priv->phy_iface);
43229 	bsp_priv->ops = ops;
43230+	bsp_priv->bus_id = plat->bus_id;
43231 
43232 	bsp_priv->regulator = devm_regulator_get_optional(dev, "phy");
43233 	if (IS_ERR(bsp_priv->regulator)) {
43234@@ -1252,7 +1982,7 @@ static struct rk_priv_data *rk_gmac_setup(struct platform_device *pdev,
43235 
43236 	ret = of_property_read_u32(dev->of_node, "tx_delay", &value);
43237 	if (ret) {
43238-		bsp_priv->tx_delay = 0x30;
43239+		bsp_priv->tx_delay = -1;
43240 		dev_err(dev, "Can not read property: tx_delay.");
43241 		dev_err(dev, "set tx_delay to 0x%x\n",
43242 			bsp_priv->tx_delay);
43243@@ -1263,7 +1993,7 @@ static struct rk_priv_data *rk_gmac_setup(struct platform_device *pdev,
43244 
43245 	ret = of_property_read_u32(dev->of_node, "rx_delay", &value);
43246 	if (ret) {
43247-		bsp_priv->rx_delay = 0x10;
43248+		bsp_priv->rx_delay = -1;
43249 		dev_err(dev, "Can not read property: rx_delay.");
43250 		dev_err(dev, "set rx_delay to 0x%x\n",
43251 			bsp_priv->rx_delay);
43252@@ -1274,6 +2004,20 @@ static struct rk_priv_data *rk_gmac_setup(struct platform_device *pdev,
43253 
43254 	bsp_priv->grf = syscon_regmap_lookup_by_phandle(dev->of_node,
43255 							"rockchip,grf");
43256+	bsp_priv->php_grf = syscon_regmap_lookup_by_phandle(dev->of_node,
43257+							    "rockchip,php_grf");
43258+	bsp_priv->xpcs = syscon_regmap_lookup_by_phandle(dev->of_node,
43259+							 "rockchip,xpcs");
43260+	if (!IS_ERR(bsp_priv->xpcs)) {
43261+		struct phy *comphy;
43262+
43263+		comphy = devm_of_phy_get(&pdev->dev, dev->of_node, NULL);
43264+		if (IS_ERR(comphy)) {
43265+			dev_err(dev, "devm_of_phy_get error\n");
43266+		} else if (phy_init(comphy)) {
43267+			dev_err(dev, "phy_init error\n");
43268+		}
43269+	}
43270 
43271 	if (plat->phy_node) {
43272 		bsp_priv->integrated_phy = of_property_read_bool(plat->phy_node,
43273@@ -1307,30 +2051,45 @@ static int rk_gmac_powerup(struct rk_priv_data *bsp_priv)
43274 	switch (bsp_priv->phy_iface) {
43275 	case PHY_INTERFACE_MODE_RGMII:
43276 		dev_info(dev, "init for RGMII\n");
43277-		bsp_priv->ops->set_to_rgmii(bsp_priv, bsp_priv->tx_delay,
43278-					    bsp_priv->rx_delay);
43279+		if (bsp_priv->ops && bsp_priv->ops->set_to_rgmii)
43280+			bsp_priv->ops->set_to_rgmii(bsp_priv, bsp_priv->tx_delay,
43281+						    bsp_priv->rx_delay);
43282 		break;
43283 	case PHY_INTERFACE_MODE_RGMII_ID:
43284 		dev_info(dev, "init for RGMII_ID\n");
43285-		bsp_priv->ops->set_to_rgmii(bsp_priv, 0, 0);
43286+		if (bsp_priv->ops && bsp_priv->ops->set_to_rgmii)
43287+			bsp_priv->ops->set_to_rgmii(bsp_priv, -1, -1);
43288 		break;
43289 	case PHY_INTERFACE_MODE_RGMII_RXID:
43290 		dev_info(dev, "init for RGMII_RXID\n");
43291-		bsp_priv->ops->set_to_rgmii(bsp_priv, bsp_priv->tx_delay, 0);
43292+		if (bsp_priv->ops && bsp_priv->ops->set_to_rgmii)
43293+			bsp_priv->ops->set_to_rgmii(bsp_priv, bsp_priv->tx_delay, -1);
43294 		break;
43295 	case PHY_INTERFACE_MODE_RGMII_TXID:
43296 		dev_info(dev, "init for RGMII_TXID\n");
43297-		bsp_priv->ops->set_to_rgmii(bsp_priv, 0, bsp_priv->rx_delay);
43298+		if (bsp_priv->ops && bsp_priv->ops->set_to_rgmii)
43299+			bsp_priv->ops->set_to_rgmii(bsp_priv, -1, bsp_priv->rx_delay);
43300 		break;
43301 	case PHY_INTERFACE_MODE_RMII:
43302 		dev_info(dev, "init for RMII\n");
43303-		bsp_priv->ops->set_to_rmii(bsp_priv);
43304+		if (bsp_priv->ops && bsp_priv->ops->set_to_rmii)
43305+			bsp_priv->ops->set_to_rmii(bsp_priv);
43306+		break;
43307+	case PHY_INTERFACE_MODE_SGMII:
43308+		dev_info(dev, "init for SGMII\n");
43309+		if (bsp_priv->ops && bsp_priv->ops->set_to_sgmii)
43310+			bsp_priv->ops->set_to_sgmii(bsp_priv);
43311+		break;
43312+	case PHY_INTERFACE_MODE_QSGMII:
43313+		dev_info(dev, "init for QSGMII\n");
43314+		if (bsp_priv->ops && bsp_priv->ops->set_to_qsgmii)
43315+			bsp_priv->ops->set_to_qsgmii(bsp_priv);
43316 		break;
43317 	default:
43318 		dev_err(dev, "NO interface defined!\n");
43319 	}
43320 
43321-	ret = phy_power_on(bsp_priv, true);
43322+	ret = rk_gmac_phy_power_on(bsp_priv, true);
43323 	if (ret) {
43324 		gmac_clk_enable(bsp_priv, false);
43325 		return ret;
43326@@ -1351,7 +2110,7 @@ static void rk_gmac_powerdown(struct rk_priv_data *gmac)
43327 
43328 	pm_runtime_put_sync(&gmac->pdev->dev);
43329 
43330-	phy_power_on(gmac, false);
43331+	rk_gmac_phy_power_on(gmac, false);
43332 	gmac_clk_enable(gmac, false);
43333 }
43334 
43335@@ -1365,16 +2124,96 @@ static void rk_fix_speed(void *priv, unsigned int speed)
43336 	case PHY_INTERFACE_MODE_RGMII_ID:
43337 	case PHY_INTERFACE_MODE_RGMII_RXID:
43338 	case PHY_INTERFACE_MODE_RGMII_TXID:
43339-		bsp_priv->ops->set_rgmii_speed(bsp_priv, speed);
43340+		if (bsp_priv->ops && bsp_priv->ops->set_rgmii_speed)
43341+			bsp_priv->ops->set_rgmii_speed(bsp_priv, speed);
43342 		break;
43343 	case PHY_INTERFACE_MODE_RMII:
43344-		bsp_priv->ops->set_rmii_speed(bsp_priv, speed);
43345+		if (bsp_priv->ops && bsp_priv->ops->set_rmii_speed)
43346+			bsp_priv->ops->set_rmii_speed(bsp_priv, speed);
43347+		break;
43348+	case PHY_INTERFACE_MODE_SGMII:
43349+	case PHY_INTERFACE_MODE_QSGMII:
43350 		break;
43351 	default:
43352 		dev_err(dev, "unsupported interface %d", bsp_priv->phy_iface);
43353 	}
43354 }
43355 
43356+void dwmac_rk_set_rgmii_delayline(struct stmmac_priv *priv,
43357+				  int tx_delay, int rx_delay)
43358+{
43359+	struct rk_priv_data *bsp_priv = priv->plat->bsp_priv;
43360+
43361+	if (bsp_priv->ops && bsp_priv->ops->set_to_rgmii) {
43362+		bsp_priv->ops->set_to_rgmii(bsp_priv, tx_delay, rx_delay);
43363+		bsp_priv->tx_delay = tx_delay;
43364+		bsp_priv->rx_delay = rx_delay;
43365+	}
43366+}
43367+EXPORT_SYMBOL(dwmac_rk_set_rgmii_delayline);
43368+
43369+void dwmac_rk_get_rgmii_delayline(struct stmmac_priv *priv,
43370+				  int *tx_delay, int *rx_delay)
43371+{
43372+	struct rk_priv_data *bsp_priv = priv->plat->bsp_priv;
43373+
43374+	if (!bsp_priv->ops || !bsp_priv->ops->set_to_rgmii)
43375+		return;
43376+
43377+	*tx_delay = bsp_priv->tx_delay;
43378+	*rx_delay = bsp_priv->rx_delay;
43379+}
43380+EXPORT_SYMBOL(dwmac_rk_get_rgmii_delayline);
43381+
43382+int dwmac_rk_get_phy_interface(struct stmmac_priv *priv)
43383+{
43384+	struct rk_priv_data *bsp_priv = priv->plat->bsp_priv;
43385+
43386+	return bsp_priv->phy_iface;
43387+}
43388+EXPORT_SYMBOL(dwmac_rk_get_phy_interface);
43389+
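+/*
+ * Fetch the MAC address from the Rockchip vendor storage: all interfaces
+ * share a single LAN_MAC_ID blob of ETH_ALEN * MAX_ETH bytes, indexed by
+ * bus_id.  If no valid address is stored, a random one is generated and
+ * written back so it stays stable across reboots.  (LAN_MAC_ID, MAX_ETH and
+ * rk_vendor_read/write are provided by the vendor storage headers.)
+ */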
43390+static void rk_get_eth_addr(void *priv, unsigned char *addr)
43391+{
43392+	struct rk_priv_data *bsp_priv = priv;
43393+	struct device *dev = &bsp_priv->pdev->dev;
43394+	unsigned char ethaddr[ETH_ALEN * MAX_ETH] = {0};
43395+	int ret, id = bsp_priv->bus_id;
43396+
43397+	if (is_valid_ether_addr(addr))
43398+		goto out;
43399+
43400+	if (id < 0 || id >= MAX_ETH) {
43401+		dev_err(dev, "%s: Invalid ethernet bus id %d\n", __func__, id);
43402+		return;
43403+	}
43404+
43405+	ret = rk_vendor_read(LAN_MAC_ID, ethaddr, ETH_ALEN * MAX_ETH);
43406+	if (ret <= 0 ||
43407+	    !is_valid_ether_addr(&ethaddr[id * ETH_ALEN])) {
43408+		dev_err(dev, "%s: rk_vendor_read eth mac address failed (%d)\n",
43409+			__func__, ret);
43410+		random_ether_addr(&ethaddr[id * ETH_ALEN]);
43411+		memcpy(addr, &ethaddr[id * ETH_ALEN], ETH_ALEN);
43412+		dev_err(dev, "%s: generate random eth mac address: %pM\n", __func__, addr);
43413+
43414+		ret = rk_vendor_write(LAN_MAC_ID, ethaddr, ETH_ALEN * MAX_ETH);
43415+		if (ret != 0)
43416+			dev_err(dev, "%s: rk_vendor_write eth mac address failed (%d)\n",
43417+				__func__, ret);
43418+
43419+		ret = rk_vendor_read(LAN_MAC_ID, ethaddr, ETH_ALEN * MAX_ETH);
43420+		if (ret != ETH_ALEN * MAX_ETH)
43421+			dev_err(dev, "%s: id: %d rk_vendor_read eth mac address failed (%d)\n",
43422+				__func__, id, ret);
43423+	} else {
43424+		memcpy(addr, &ethaddr[id * ETH_ALEN], ETH_ALEN);
43425+	}
43426+
43427+out:
43428+	dev_err(dev, "%s: mac address: %pM\n", __func__, addr);
43429+}
43430+
43431 static int rk_gmac_probe(struct platform_device *pdev)
43432 {
43433 	struct plat_stmmacenet_data *plat_dat;
43434@@ -1396,8 +2235,11 @@ static int rk_gmac_probe(struct platform_device *pdev)
43435 	if (IS_ERR(plat_dat))
43436 		return PTR_ERR(plat_dat);
43437 
43438-	plat_dat->has_gmac = true;
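+	/*
+	 * dwmac-4.20a based SoCs (e.g. RK3568/RK3588) take the GMAC4 code
+	 * path, so only claim the legacy GMAC feature set for older cores.
+	 */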
43439+	if (!of_device_is_compatible(pdev->dev.of_node, "snps,dwmac-4.20a"))
43440+		plat_dat->has_gmac = true;
43441+
43442 	plat_dat->fix_mac_speed = rk_fix_speed;
43443+	plat_dat->get_eth_addr = rk_get_eth_addr;
43444 
43445 	plat_dat->bsp_priv = rk_gmac_setup(pdev, plat_dat, data);
43446 	if (IS_ERR(plat_dat->bsp_priv)) {
43447@@ -1417,6 +2259,10 @@ static int rk_gmac_probe(struct platform_device *pdev)
43448 	if (ret)
43449 		goto err_gmac_powerdown;
43450 
43451+	ret = dwmac_rk_create_loopback_sysfs(&pdev->dev);
43452+	if (ret)
43453+		goto err_gmac_powerdown;
43454+
43455 	return 0;
43456 
43457 err_gmac_powerdown:
43458@@ -1433,6 +2279,7 @@ static int rk_gmac_remove(struct platform_device *pdev)
43459 	int ret = stmmac_dvr_remove(&pdev->dev);
43460 
43461 	rk_gmac_powerdown(bsp_priv);
43462+	dwmac_rk_remove_loopback_sysfs(&pdev->dev);
43463 
43464 	return ret;
43465 }
43466@@ -1470,14 +2317,19 @@ static SIMPLE_DEV_PM_OPS(rk_gmac_pm_ops, rk_gmac_suspend, rk_gmac_resume);
43467 
43468 static const struct of_device_id rk_gmac_dwmac_match[] = {
43469 	{ .compatible = "rockchip,px30-gmac",	.data = &px30_ops   },
43470+	{ .compatible = "rockchip,rk1808-gmac", .data = &rk1808_ops },
43471 	{ .compatible = "rockchip,rk3128-gmac", .data = &rk3128_ops },
43472 	{ .compatible = "rockchip,rk3228-gmac", .data = &rk3228_ops },
43473 	{ .compatible = "rockchip,rk3288-gmac", .data = &rk3288_ops },
43474+	{ .compatible = "rockchip,rk3308-mac",  .data = &rk3308_ops },
43475 	{ .compatible = "rockchip,rk3328-gmac", .data = &rk3328_ops },
43476 	{ .compatible = "rockchip,rk3366-gmac", .data = &rk3366_ops },
43477 	{ .compatible = "rockchip,rk3368-gmac", .data = &rk3368_ops },
43478 	{ .compatible = "rockchip,rk3399-gmac", .data = &rk3399_ops },
43479+	{ .compatible = "rockchip,rk3568-gmac", .data = &rk3568_ops },
43480+	{ .compatible = "rockchip,rk3588-gmac", .data = &rk3588_ops },
43481 	{ .compatible = "rockchip,rv1108-gmac", .data = &rv1108_ops },
43482+	{ .compatible = "rockchip,rv1126-gmac", .data = &rv1126_ops },
43483 	{ }
43484 };
43485 MODULE_DEVICE_TABLE(of, rk_gmac_dwmac_match);
43486diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
43487index fbf2deafe..2bac49b49 100644
43488--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
43489+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
43490@@ -218,18 +218,11 @@ static void dwmac1000_dump_dma_regs(void __iomem *ioaddr, u32 *reg_space)
43491 				readl(ioaddr + DMA_BUS_MODE + i * 4);
43492 }
43493 
43494-static int dwmac1000_get_hw_feature(void __iomem *ioaddr,
43495-				    struct dma_features *dma_cap)
43496+static void dwmac1000_get_hw_feature(void __iomem *ioaddr,
43497+				     struct dma_features *dma_cap)
43498 {
43499 	u32 hw_cap = readl(ioaddr + DMA_HW_FEATURE);
43500 
43501-	if (!hw_cap) {
43502-		/* 0x00000000 is the value read on old hardware that does not
43503-		 * implement this register
43504-		 */
43505-		return -EOPNOTSUPP;
43506-	}
43507-
43508 	dma_cap->mbps_10_100 = (hw_cap & DMA_HW_FEAT_MIISEL);
43509 	dma_cap->mbps_1000 = (hw_cap & DMA_HW_FEAT_GMIISEL) >> 1;
43510 	dma_cap->half_duplex = (hw_cap & DMA_HW_FEAT_HDSEL) >> 2;
43511@@ -259,8 +252,6 @@ static int dwmac1000_get_hw_feature(void __iomem *ioaddr,
43512 	dma_cap->number_tx_channel = (hw_cap & DMA_HW_FEAT_TXCHCNT) >> 22;
43513 	/* Alternate (enhanced) DESC mode */
43514 	dma_cap->enh_desc = (hw_cap & DMA_HW_FEAT_ENHDESSEL) >> 24;
43515-
43516-	return 0;
43517 }
43518 
43519 static void dwmac1000_rx_watchdog(void __iomem *ioaddr, u32 riwt,
43520diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
43521index 935510cdc..a7249e407 100644
43522--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
43523+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
43524@@ -337,8 +337,8 @@ static void dwmac4_dma_tx_chan_op_mode(void __iomem *ioaddr, int mode,
43525 	writel(mtl_tx_op, ioaddr +  MTL_CHAN_TX_OP_MODE(channel));
43526 }
43527 
43528-static int dwmac4_get_hw_feature(void __iomem *ioaddr,
43529-				 struct dma_features *dma_cap)
43530+static void dwmac4_get_hw_feature(void __iomem *ioaddr,
43531+				  struct dma_features *dma_cap)
43532 {
43533 	u32 hw_cap = readl(ioaddr + GMAC_HW_FEATURE0);
43534 
43535@@ -425,8 +425,6 @@ static int dwmac4_get_hw_feature(void __iomem *ioaddr,
43536 	dma_cap->frpbs = (hw_cap & GMAC_HW_FEAT_FRPBS) >> 11;
43537 	dma_cap->frpsel = (hw_cap & GMAC_HW_FEAT_FRPSEL) >> 10;
43538 	dma_cap->dvlan = (hw_cap & GMAC_HW_FEAT_DVLAN) >> 5;
43539-
43540-	return 0;
43541 }
43542 
43543 /* Enable/disable TSO feature and set MSS */
43544diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
43545index 71e50751e..e40e312c0 100644
43546--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
43547+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
43548@@ -22,7 +22,7 @@ int dwmac4_dma_reset(void __iomem *ioaddr)
43549 
43550 	return readl_poll_timeout(ioaddr + DMA_BUS_MODE, value,
43551 				 !(value & DMA_BUS_MODE_SFT_RESET),
43552-				 10000, 1000000);
43553+				 500, 1000000);
43554 }
43555 
43556 void dwmac4_set_rx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan)
43557diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
43558index a5583d706..77308c5c5 100644
43559--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
43560+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
43561@@ -365,8 +365,8 @@ static int dwxgmac2_dma_interrupt(void __iomem *ioaddr,
43562 	return ret;
43563 }
43564 
43565-static int dwxgmac2_get_hw_feature(void __iomem *ioaddr,
43566-				   struct dma_features *dma_cap)
43567+static void dwxgmac2_get_hw_feature(void __iomem *ioaddr,
43568+				    struct dma_features *dma_cap)
43569 {
43570 	u32 hw_cap;
43571 
43572@@ -439,8 +439,6 @@ static int dwxgmac2_get_hw_feature(void __iomem *ioaddr,
43573 	dma_cap->frpes = (hw_cap & XGMAC_HWFEAT_FRPES) >> 11;
43574 	dma_cap->frpbs = (hw_cap & XGMAC_HWFEAT_FRPPB) >> 9;
43575 	dma_cap->frpsel = (hw_cap & XGMAC_HWFEAT_FRPSEL) >> 3;
43576-
43577-	return 0;
43578 }
43579 
43580 static void dwxgmac2_rx_watchdog(void __iomem *ioaddr, u32 riwt, u32 nchan)
43581diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h
43582index 8b7ec2457..b0b84244e 100644
43583--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h
43584+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h
43585@@ -203,8 +203,8 @@ struct stmmac_dma_ops {
43586 	int (*dma_interrupt) (void __iomem *ioaddr,
43587 			      struct stmmac_extra_stats *x, u32 chan);
43588 	/* If supported then get the optional core features */
43589-	int (*get_hw_feature)(void __iomem *ioaddr,
43590-			      struct dma_features *dma_cap);
43591+	void (*get_hw_feature)(void __iomem *ioaddr,
43592+			       struct dma_features *dma_cap);
43593 	/* Program the HW RX Watchdog */
43594 	void (*rx_watchdog)(void __iomem *ioaddr, u32 riwt, u32 number_chan);
43595 	void (*set_tx_ring_len)(void __iomem *ioaddr, u32 len, u32 chan);
43596@@ -255,7 +255,7 @@ struct stmmac_dma_ops {
43597 #define stmmac_dma_interrupt_status(__priv, __args...) \
43598 	stmmac_do_callback(__priv, dma, dma_interrupt, __args)
43599 #define stmmac_get_hw_feature(__priv, __args...) \
43600-	stmmac_do_callback(__priv, dma, get_hw_feature, __args)
43601+	stmmac_do_void_callback(__priv, dma, get_hw_feature, __args)
43602 #define stmmac_rx_watchdog(__priv, __args...) \
43603 	stmmac_do_void_callback(__priv, dma, rx_watchdog, __args)
43604 #define stmmac_set_tx_ring_len(__priv, __args...) \
43605diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
43606index 1ec000d4c..376a88abb 100644
43607--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
43608+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
43609@@ -236,7 +236,7 @@ static void stmmac_clk_csr_set(struct stmmac_priv *priv)
43610 {
43611 	u32 clk_rate;
43612 
43613-	clk_rate = clk_get_rate(priv->plat->stmmac_clk);
43614+	clk_rate = clk_get_rate(priv->plat->pclk);
43615 
43616 	/* Platform provided default clk_csr would be assumed valid
43617 	 * for all other cases except for the below mentioned ones.
43618@@ -256,7 +256,7 @@ static void stmmac_clk_csr_set(struct stmmac_priv *priv)
43619 			priv->clk_csr = STMMAC_CSR_100_150M;
43620 		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
43621 			priv->clk_csr = STMMAC_CSR_150_250M;
43622-		else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
43623+		else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
43624 			priv->clk_csr = STMMAC_CSR_250_300M;
43625 	}
43626 
43627@@ -628,7 +628,7 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
43628 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
43629 			ptp_v2 = PTP_TCR_TSVER2ENA;
43630 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
43631-			if (priv->synopsys_id < DWMAC_CORE_4_10)
43632+			if (priv->synopsys_id != DWMAC_CORE_5_10)
43633 				ts_event_en = PTP_TCR_TSEVNTENA;
43634 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
43635 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
43636@@ -738,10 +738,19 @@ int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
43637 	struct timespec64 now;
43638 	u32 sec_inc = 0;
43639 	u64 temp = 0;
43640+	int ret;
43641 
43642 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
43643 		return -EOPNOTSUPP;
43644 
43645+	ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
43646+	if (ret < 0) {
43647+		netdev_warn(priv->dev,
43648+			    "failed to enable PTP reference clock: %pe\n",
43649+			    ERR_PTR(ret));
43650+		return ret;
43651+	}
43652+
43653 	stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
43654 	priv->systime_flags = systime_flags;
43655 
43656@@ -807,6 +816,8 @@ static int stmmac_init_ptp(struct stmmac_priv *priv)
43657 	priv->hwts_tx_en = 0;
43658 	priv->hwts_rx_en = 0;
43659 
43660+	stmmac_ptp_register(priv);
43661+
43662 	return 0;
43663 }
43664 
43665@@ -1345,13 +1356,17 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
43666 {
43667 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
43668 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
43669+	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
43670 
43671-	buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);
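+	/* Hardware limited to a 32-bit DMA mask needs RX pages below 4 GiB */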
43672+	if (priv->dma_cap.addr64 <= 32)
43673+		gfp |= GFP_DMA32;
43674+
43675+	buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
43676 	if (!buf->page)
43677 		return -ENOMEM;
43678 
43679 	if (priv->sph) {
43680-		buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool);
43681+		buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
43682 		if (!buf->sec_page)
43683 			return -ENOMEM;
43684 
43685@@ -2343,6 +2358,9 @@ static void stmmac_check_ether_addr(struct stmmac_priv *priv)
43686 {
43687 	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
43688 		stmmac_get_umac_addr(priv, priv->hw, priv->dev->dev_addr, 0);
43689+		if (likely(priv->plat->get_eth_addr))
43690+			priv->plat->get_eth_addr(priv->plat->bsp_priv,
43691+				priv->dev->dev_addr);
43692 		if (!is_valid_ether_addr(priv->dev->dev_addr))
43693 			eth_hw_addr_random(priv->dev);
43694 		dev_info(priv->device, "device MAC address %pM\n",
43695@@ -2694,7 +2712,7 @@ static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
43696 /**
43697  * stmmac_hw_setup - setup mac in a usable state.
43698  *  @dev : pointer to the device structure.
43699- *  @ptp_register: register PTP if set
43700+ *  @init_ptp: initialize PTP if set
43701  *  Description:
43702  *  this is the main function to setup the HW in a usable state because the
43703  *  dma engine is reset, the core registers are configured (e.g. AXI,
43704@@ -2704,7 +2722,7 @@ static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
43705  *  0 on success and an appropriate (-)ve integer as defined in errno.h
43706  *  file on failure.
43707  */
43708-static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
43709+static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
43710 {
43711 	struct stmmac_priv *priv = netdev_priv(dev);
43712 	u32 rx_cnt = priv->plat->rx_queues_to_use;
43713@@ -2760,22 +2778,14 @@ static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
43714 
43715 	stmmac_mmc_setup(priv);
43716 
43717-	if (ptp_register) {
43718-		ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
43719-		if (ret < 0)
43720-			netdev_warn(priv->dev,
43721-				    "failed to enable PTP reference clock: %pe\n",
43722-				    ERR_PTR(ret));
43723+	if (init_ptp) {
43724+		ret = stmmac_init_ptp(priv);
43725+		if (ret == -EOPNOTSUPP)
43726+			netdev_warn(priv->dev, "PTP not supported by HW\n");
43727+		else if (ret)
43728+			netdev_warn(priv->dev, "PTP init failed\n");
43729 	}
43730 
43731-	ret = stmmac_init_ptp(priv);
43732-	if (ret == -EOPNOTSUPP)
43733-		netdev_warn(priv->dev, "PTP not supported by HW\n");
43734-	else if (ret)
43735-		netdev_warn(priv->dev, "PTP init failed\n");
43736-	else if (ptp_register)
43737-		stmmac_ptp_register(priv);
43738-
43739 	priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS;
43740 
43741 	/* Convert the timer from msec to usec */
43742@@ -3690,6 +3700,10 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
43743 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
43744 	int len, dirty = stmmac_rx_dirty(priv, queue);
43745 	unsigned int entry = rx_q->dirty_rx;
43746+	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
43747+
43748+	if (priv->dma_cap.addr64 <= 32)
43749+		gfp |= GFP_DMA32;
43750 
43751 	len = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
43752 
43753@@ -3704,13 +3718,13 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
43754 			p = rx_q->dma_rx + entry;
43755 
43756 		if (!buf->page) {
43757-			buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);
43758+			buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
43759 			if (!buf->page)
43760 				break;
43761 		}
43762 
43763 		if (priv->sph && !buf->sec_page) {
43764-			buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool);
43765+			buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
43766 			if (!buf->sec_page)
43767 				break;
43768 
43769@@ -5072,7 +5086,7 @@ int stmmac_dvr_probe(struct device *device,
43770 		dev_info(priv->device, "TSO feature enabled\n");
43771 	}
43772 
43773-	if (priv->dma_cap.sphen && !priv->plat->sph_disable) {
43774+	if (priv->dma_cap.sphen) {
43775 		ndev->hw_features |= NETIF_F_GRO;
43776 		priv->sph = true;
43777 		dev_info(priv->device, "SPH feature enabled\n");
43778@@ -5444,7 +5458,7 @@ static int __init stmmac_cmdline_opt(char *str)
43779 	char *opt;
43780 
43781 	if (!str || !*str)
43782-		return 1;
43783+		return -EINVAL;
43784 	while ((opt = strsep(&str, ",")) != NULL) {
43785 		if (!strncmp(opt, "debug:", 6)) {
43786 			if (kstrtoint(opt + 6, 0, &debug))
43787@@ -5475,11 +5489,11 @@ static int __init stmmac_cmdline_opt(char *str)
43788 				goto err;
43789 		}
43790 	}
43791-	return 1;
43792+	return 0;
43793 
43794 err:
43795 	pr_err("%s: ERROR broken module parameter conversion", __func__);
43796-	return 1;
43797+	return -EINVAL;
43798 }
43799 
43800 __setup("stmmaceth=", stmmac_cmdline_opt);
43801diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
43802index f02ce0902..4ccd5428a 100644
43803--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
43804+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
43805@@ -508,14 +508,6 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
43806 		plat->pmt = 1;
43807 	}
43808 
43809-	if (of_device_is_compatible(np, "snps,dwmac-3.40a")) {
43810-		plat->has_gmac = 1;
43811-		plat->enh_desc = 1;
43812-		plat->tx_coe = 1;
43813-		plat->bugged_jumbo = 1;
43814-		plat->pmt = 1;
43815-	}
43816-
43817 	if (of_device_is_compatible(np, "snps,dwmac-4.00") ||
43818 	    of_device_is_compatible(np, "snps,dwmac-4.10a") ||
43819 	    of_device_is_compatible(np, "snps,dwmac-4.20a") ||
43820@@ -586,7 +578,7 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
43821 		clk_prepare_enable(plat->stmmac_clk);
43822 	}
43823 
43824-	plat->pclk = devm_clk_get(&pdev->dev, "pclk");
43825+	plat->pclk = devm_clk_get(&pdev->dev, "pclk_mac");
43826 	if (IS_ERR(plat->pclk)) {
43827 		if (PTR_ERR(plat->pclk) == -EPROBE_DEFER)
43828 			goto error_pclk_get;
43829diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
43830index 1505c7451..bf71a345d 100644
43831--- a/drivers/nvmem/core.c
43832+++ b/drivers/nvmem/core.c
43833@@ -222,8 +222,6 @@ static umode_t nvmem_bin_attr_is_visible(struct kobject *kobj,
43834 	struct device *dev = kobj_to_dev(kobj);
43835 	struct nvmem_device *nvmem = to_nvmem_device(dev);
43836 
43837-	attr->size = nvmem->size;
43838-
43839 	return nvmem_bin_attr_get_umode(nvmem);
43840 }
43841 
43842@@ -1668,7 +1666,11 @@ static void __exit nvmem_exit(void)
43843 	bus_unregister(&nvmem_bus_type);
43844 }
43845 
43846+#ifdef CONFIG_ROCKCHIP_THUNDER_BOOT
43847+arch_initcall_sync(nvmem_init);
43848+#else
43849 subsys_initcall(nvmem_init);
43850+#endif
43851 module_exit(nvmem_exit);
43852 
43853 MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@linaro.org");
43854diff --git a/drivers/nvmem/rockchip-efuse.c b/drivers/nvmem/rockchip-efuse.c
43855index e4579de5d..f89018376 100644
43856--- a/drivers/nvmem/rockchip-efuse.c
43857+++ b/drivers/nvmem/rockchip-efuse.c
43858@@ -7,6 +7,7 @@
43859  */
43860 
43861 #include <linux/clk.h>
43862+#include <linux/clk-provider.h>
43863 #include <linux/delay.h>
43864 #include <linux/device.h>
43865 #include <linux/io.h>
43866@@ -16,7 +17,53 @@
43867 #include <linux/of.h>
43868 #include <linux/of_platform.h>
43869 #include <linux/platform_device.h>
43870-
43871+#include <linux/rockchip/rockchip_sip.h>
43872+
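+/*
+ * eFuse timing parameters (_S: start offset, _L: total length), presumably in
+ * eFuse pclk cycles; the inline comments note the extra margin assuming a
+ * 100 MHz clock (10 ns per cycle).
+ */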
43873+#define T_CSB_P_S		0
43874+#define T_PGENB_P_S		0
43875+#define T_LOAD_P_S		0
43876+#define T_ADDR_P_S		0
43877+#define T_STROBE_P_S		(0 + 110) /* 1.1us */
43878+#define T_CSB_P_L		(0 + 110 + 1000 + 20) /* 200ns */
43879+#define T_PGENB_P_L		(0 + 110 + 1000 + 20)
43880+#define T_LOAD_P_L		(0 + 110 + 1000 + 20)
43881+#define T_ADDR_P_L		(0 + 110 + 1000 + 20)
43882+#define T_STROBE_P_L		(0 + 110 + 1000) /* 10us */
43883+#define T_CSB_R_S		0
43884+#define T_PGENB_R_S		0
43885+#define T_LOAD_R_S		0
43886+#define T_ADDR_R_S		2
43887+#define T_STROBE_R_S		(2 + 3)
43888+#define T_CSB_R_L		(2 + 3 + 3 + 3)
43889+#define T_PGENB_R_L		(2 + 3 + 3 + 3)
43890+#define T_LOAD_R_L		(2 + 3 + 3 + 3)
43891+#define T_ADDR_R_L		(2 + 3 + 3 + 2)
43892+#define T_STROBE_R_L		(2 + 3 + 3)
43893+
43894+#define T_CSB_P			0x28
43895+#define T_PGENB_P		0x2c
43896+#define T_LOAD_P		0x30
43897+#define T_ADDR_P		0x34
43898+#define T_STROBE_P		0x38
43899+#define T_CSB_R			0x3c
43900+#define T_PGENB_R		0x40
43901+#define T_LOAD_R		0x44
43902+#define T_ADDR_R		0x48
43903+#define T_STROBE_R		0x4c
43904+
43905+#define RK1808_MOD		0x00
43906+#define RK1808_INT_STATUS	RK3328_INT_STATUS
43907+#define RK1808_DOUT		RK3328_DOUT
43908+#define RK1808_AUTO_CTRL	RK3328_AUTO_CTRL
43909+#define RK1808_USER_MODE	BIT(0)
43910+#define RK1808_INT_FINISH	RK3328_INT_FINISH
43911+#define RK1808_AUTO_ENB		RK3328_AUTO_ENB
43912+#define RK1808_AUTO_RD		RK3328_AUTO_RD
43913+#define RK1808_A_SHIFT		RK3399_A_SHIFT
43914+#define RK1808_A_MASK		RK3399_A_MASK
43915+#define RK1808_NBYTES		RK3399_NBYTES
43916+
43917+#define RK3128_A_SHIFT		7
43918 #define RK3288_A_SHIFT		6
43919 #define RK3288_A_MASK		0x3ff
43920 #define RK3288_PGENB		BIT(3)
43921@@ -49,9 +96,149 @@
43922 struct rockchip_efuse_chip {
43923 	struct device *dev;
43924 	void __iomem *base;
43925-	struct clk *clk;
43926+	struct clk_bulk_data *clks;
43927+	int num_clks;
43928+	phys_addr_t phys;
43929+	struct mutex mutex;
43930 };
43931 
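+/*
+ * Each T_* timing register packs the start value in bits [31:16] and the
+ * length in bits [15:0].
+ */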
43932+static void rk1808_efuse_timing_init(void __iomem *base)
43933+{
43934+	/* enable auto mode */
43935+	writel(readl(base + RK1808_MOD) & (~RK1808_USER_MODE),
43936+	       base + RK1808_MOD);
43937+
43938+	/* setup efuse timing */
43939+	writel((T_CSB_P_S << 16) | T_CSB_P_L, base + T_CSB_P);
43940+	writel((T_PGENB_P_S << 16) | T_PGENB_P_L, base + T_PGENB_P);
43941+	writel((T_LOAD_P_S << 16) | T_LOAD_P_L, base + T_LOAD_P);
43942+	writel((T_ADDR_P_S << 16) | T_ADDR_P_L, base + T_ADDR_P);
43943+	writel((T_STROBE_P_S << 16) | T_STROBE_P_L, base + T_STROBE_P);
43944+	writel((T_CSB_R_S << 16) | T_CSB_R_L, base + T_CSB_R);
43945+	writel((T_PGENB_R_S << 16) | T_PGENB_R_L, base + T_PGENB_R);
43946+	writel((T_LOAD_R_S << 16) | T_LOAD_R_L, base + T_LOAD_R);
43947+	writel((T_ADDR_R_S << 16) | T_ADDR_R_L, base + T_ADDR_R);
43948+	writel((T_STROBE_R_S << 16) | T_STROBE_R_L, base + T_STROBE_R);
43949+}
43950+
43951+static void rk1808_efuse_timing_deinit(void __iomem *base)
43952+{
43953+	/* disable auto mode */
43954+	writel(readl(base + RK1808_MOD) | RK1808_USER_MODE,
43955+	       base + RK1808_MOD);
43956+
43957+	/* clear efuse timing */
43958+	writel(0, base + T_CSB_P);
43959+	writel(0, base + T_PGENB_P);
43960+	writel(0, base + T_LOAD_P);
43961+	writel(0, base + T_ADDR_P);
43962+	writel(0, base + T_STROBE_P);
43963+	writel(0, base + T_CSB_R);
43964+	writel(0, base + T_PGENB_R);
43965+	writel(0, base + T_LOAD_R);
43966+	writel(0, base + T_ADDR_R);
43967+	writel(0, base + T_STROBE_R);
43968+}
43969+
43970+static int rockchip_rk1808_efuse_read(void *context, unsigned int offset,
43971+				      void *val, size_t bytes)
43972+{
43973+	struct rockchip_efuse_chip *efuse = context;
43974+	unsigned int addr_start, addr_end, addr_offset, addr_len;
43975+	u32 out_value, status;
43976+	u8 *buf;
43977+	int ret, i = 0;
43978+
43979+	mutex_lock(&efuse->mutex);
43980+
43981+	ret = clk_bulk_prepare_enable(efuse->num_clks, efuse->clks);
43982+	if (ret < 0) {
43983+		dev_err(efuse->dev, "failed to prepare/enable efuse clk\n");
43984+		goto out;
43985+	}
43986+
43987+	addr_start = rounddown(offset, RK1808_NBYTES) / RK1808_NBYTES;
43988+	addr_end = roundup(offset + bytes, RK1808_NBYTES) / RK1808_NBYTES;
43989+	addr_offset = offset % RK1808_NBYTES;
43990+	addr_len = addr_end - addr_start;
43991+
43992+	buf = kzalloc(sizeof(*buf) * addr_len * RK1808_NBYTES, GFP_KERNEL);
43993+	if (!buf) {
43994+		ret = -ENOMEM;
43995+		goto nomem;
43996+	}
43997+
43998+	rk1808_efuse_timing_init(efuse->base);
43999+
44000+	while (addr_len--) {
44001+		writel(RK1808_AUTO_RD | RK1808_AUTO_ENB |
44002+		       ((addr_start++ & RK1808_A_MASK) << RK1808_A_SHIFT),
44003+		       efuse->base + RK1808_AUTO_CTRL);
44004+		udelay(2);
44005+		status = readl(efuse->base + RK1808_INT_STATUS);
44006+		if (!(status & RK1808_INT_FINISH)) {
44007+			ret = -EIO;
44008+			goto err;
44009+		}
44010+		out_value = readl(efuse->base + RK1808_DOUT);
44011+		writel(RK1808_INT_FINISH, efuse->base + RK1808_INT_STATUS);
44012+
44013+		memcpy(&buf[i], &out_value, RK1808_NBYTES);
44014+		i += RK1808_NBYTES;
44015+	}
44016+	memcpy(val, buf + addr_offset, bytes);
44017+err:
44018+	rk1808_efuse_timing_deinit(efuse->base);
44019+	kfree(buf);
44020+nomem:
44022+	clk_bulk_disable_unprepare(efuse->num_clks, efuse->clks);
44023+out:
44024+	mutex_unlock(&efuse->mutex);
44025+
44026+	return ret;
44027+}
44028+
44029+static int rockchip_rk3128_efuse_read(void *context, unsigned int offset,
44030+				      void *val, size_t bytes)
44031+{
44032+	struct rockchip_efuse_chip *efuse = context;
44033+	u8 *buf = val;
44034+	int ret;
44035+
44036+	ret = clk_bulk_prepare_enable(efuse->num_clks, efuse->clks);
44037+	if (ret < 0) {
44038+		dev_err(efuse->dev, "failed to prepare/enable efuse clk\n");
44039+		return ret;
44040+	}
44041+
44042+	writel(RK3288_LOAD | RK3288_PGENB, efuse->base + REG_EFUSE_CTRL);
44043+	udelay(1);
44044+	while (bytes--) {
44045+		writel(readl(efuse->base + REG_EFUSE_CTRL) &
44046+			     (~(RK3288_A_MASK << RK3128_A_SHIFT)),
44047+			     efuse->base + REG_EFUSE_CTRL);
44048+		writel(readl(efuse->base + REG_EFUSE_CTRL) |
44049+			     ((offset++ & RK3288_A_MASK) << RK3128_A_SHIFT),
44050+			     efuse->base + REG_EFUSE_CTRL);
44051+		udelay(1);
44052+		writel(readl(efuse->base + REG_EFUSE_CTRL) |
44053+			     RK3288_STROBE, efuse->base + REG_EFUSE_CTRL);
44054+		udelay(1);
44055+		*buf++ = readb(efuse->base + REG_EFUSE_DOUT);
44056+		writel(readl(efuse->base + REG_EFUSE_CTRL) &
44057+		       (~RK3288_STROBE), efuse->base + REG_EFUSE_CTRL);
44058+		udelay(1);
44059+	}
44060+
44061+	/* Switch to standby mode */
44062+	writel(RK3288_PGENB | RK3288_CSB, efuse->base + REG_EFUSE_CTRL);
44063+
44064+	clk_bulk_disable_unprepare(efuse->num_clks, efuse->clks);
44065+
44066+	return 0;
44067+}
44068+
44069 static int rockchip_rk3288_efuse_read(void *context, unsigned int offset,
44070 				      void *val, size_t bytes)
44071 {
44072@@ -59,7 +246,7 @@ static int rockchip_rk3288_efuse_read(void *context, unsigned int offset,
44073 	u8 *buf = val;
44074 	int ret;
44075 
44076-	ret = clk_prepare_enable(efuse->clk);
44077+	ret = clk_bulk_prepare_enable(efuse->num_clks, efuse->clks);
44078 	if (ret < 0) {
44079 		dev_err(efuse->dev, "failed to prepare/enable efuse clk\n");
44080 		return ret;
44081@@ -87,7 +274,53 @@ static int rockchip_rk3288_efuse_read(void *context, unsigned int offset,
44082 	/* Switch to standby mode */
44083 	writel(RK3288_PGENB | RK3288_CSB, efuse->base + REG_EFUSE_CTRL);
44084 
44085-	clk_disable_unprepare(efuse->clk);
44086+	clk_bulk_disable_unprepare(efuse->num_clks, efuse->clks);
44087+
44088+	return 0;
44089+}
44090+
44091+static int rockchip_rk3288_efuse_secure_read(void *context,
44092+					     unsigned int offset,
44093+					     void *val, size_t bytes)
44094+{
44095+	struct rockchip_efuse_chip *efuse = context;
44096+	u8 *buf = val;
44097+	u32 wr_val;
44098+	int ret;
44099+
44100+	ret = clk_bulk_prepare_enable(efuse->num_clks, efuse->clks);
44101+	if (ret < 0) {
44102+		dev_err(efuse->dev, "failed to prepare/enable efuse clk\n");
44103+		return ret;
44104+	}
44105+
44106+	sip_smc_secure_reg_write(efuse->phys + REG_EFUSE_CTRL,
44107+				 RK3288_LOAD | RK3288_PGENB);
44108+	udelay(1);
44109+	while (bytes--) {
44110+		wr_val = sip_smc_secure_reg_read(efuse->phys + REG_EFUSE_CTRL) &
44111+			 (~(RK3288_A_MASK << RK3288_A_SHIFT));
44112+		sip_smc_secure_reg_write(efuse->phys + REG_EFUSE_CTRL, wr_val);
44113+		wr_val = sip_smc_secure_reg_read(efuse->phys + REG_EFUSE_CTRL) |
44114+			 ((offset++ & RK3288_A_MASK) << RK3288_A_SHIFT);
44115+		sip_smc_secure_reg_write(efuse->phys + REG_EFUSE_CTRL, wr_val);
44116+		udelay(1);
44117+		wr_val = sip_smc_secure_reg_read(efuse->phys + REG_EFUSE_CTRL) |
44118+			 RK3288_STROBE;
44119+		sip_smc_secure_reg_write(efuse->phys + REG_EFUSE_CTRL, wr_val);
44120+		udelay(1);
44121+		*buf++ = sip_smc_secure_reg_read(efuse->phys + REG_EFUSE_DOUT);
44122+		wr_val = sip_smc_secure_reg_read(efuse->phys + REG_EFUSE_CTRL) &
44123+			 (~RK3288_STROBE);
44124+		sip_smc_secure_reg_write(efuse->phys + REG_EFUSE_CTRL, wr_val);
44125+		udelay(1);
44126+	}
44127+
44128+	/* Switch to standby mode */
44129+	sip_smc_secure_reg_write(efuse->phys + REG_EFUSE_CTRL,
44130+				 RK3288_PGENB | RK3288_CSB);
44131+
44132+	clk_bulk_disable_unprepare(efuse->num_clks, efuse->clks);
44133 
44134 	return 0;
44135 }
44136@@ -101,7 +334,7 @@ static int rockchip_rk3328_efuse_read(void *context, unsigned int offset,
44137 	u8 *buf;
44138 	int ret, i = 0;
44139 
44140-	ret = clk_prepare_enable(efuse->clk);
44141+	ret = clk_bulk_prepare_enable(efuse->num_clks, efuse->clks);
44142 	if (ret < 0) {
44143 		dev_err(efuse->dev, "failed to prepare/enable efuse clk\n");
44144 		return ret;
44145@@ -142,11 +375,56 @@ static int rockchip_rk3328_efuse_read(void *context, unsigned int offset,
44146 err:
44147 	kfree(buf);
44148 nomem:
44149-	clk_disable_unprepare(efuse->clk);
44150+	clk_bulk_disable_unprepare(efuse->num_clks, efuse->clks);
44151 
44152 	return ret;
44153 }
44154 
44155+static int rockchip_rk3368_efuse_read(void *context, unsigned int offset,
44156+				      void *val, size_t bytes)
44157+{
44158+	struct rockchip_efuse_chip *efuse = context;
44159+	u8 *buf = val;
44160+	u32 wr_val;
44161+	int ret;
44162+
44163+	ret = clk_bulk_prepare_enable(efuse->num_clks, efuse->clks);
44164+	if (ret < 0) {
44165+		dev_err(efuse->dev, "failed to prepare/enable efuse clk\n");
44166+		return ret;
44167+	}
44168+
44169+	sip_smc_secure_reg_write(efuse->phys + REG_EFUSE_CTRL,
44170+				 RK3288_LOAD | RK3288_PGENB);
44171+	udelay(1);
44172+	while (bytes--) {
44173+		wr_val = sip_smc_secure_reg_read(efuse->phys + REG_EFUSE_CTRL) &
44174+			 (~(RK3288_A_MASK << RK3288_A_SHIFT));
44175+		sip_smc_secure_reg_write(efuse->phys + REG_EFUSE_CTRL, wr_val);
44176+		wr_val = sip_smc_secure_reg_read(efuse->phys + REG_EFUSE_CTRL) |
44177+			 ((offset++ & RK3288_A_MASK) << RK3288_A_SHIFT);
44178+		sip_smc_secure_reg_write(efuse->phys + REG_EFUSE_CTRL, wr_val);
44179+		udelay(1);
44180+		wr_val = sip_smc_secure_reg_read(efuse->phys + REG_EFUSE_CTRL) |
44181+			 RK3288_STROBE;
44182+		sip_smc_secure_reg_write(efuse->phys + REG_EFUSE_CTRL, wr_val);
44183+		udelay(1);
44184+		*buf++ = sip_smc_secure_reg_read(efuse->phys + REG_EFUSE_DOUT);
44185+		wr_val = sip_smc_secure_reg_read(efuse->phys + REG_EFUSE_CTRL) &
44186+			 (~RK3288_STROBE);
44187+		sip_smc_secure_reg_write(efuse->phys + REG_EFUSE_CTRL, wr_val);
44188+		udelay(1);
44189+	}
44190+
44191+	/* Switch to standby mode */
44192+	sip_smc_secure_reg_write(efuse->phys + REG_EFUSE_CTRL,
44193+				 RK3288_PGENB | RK3288_CSB);
44194+
44195+	clk_bulk_disable_unprepare(efuse->num_clks, efuse->clks);
44196+
44197+	return 0;
44198+}
44199+
44200 static int rockchip_rk3399_efuse_read(void *context, unsigned int offset,
44201 				      void *val, size_t bytes)
44202 {
44203@@ -156,7 +434,7 @@ static int rockchip_rk3399_efuse_read(void *context, unsigned int offset,
44204 	u8 *buf;
44205 	int ret, i = 0;
44206 
44207-	ret = clk_prepare_enable(efuse->clk);
44208+	ret = clk_bulk_prepare_enable(efuse->num_clks, efuse->clks);
44209 	if (ret < 0) {
44210 		dev_err(efuse->dev, "failed to prepare/enable efuse clk\n");
44211 		return ret;
44212@@ -170,8 +448,8 @@ static int rockchip_rk3399_efuse_read(void *context, unsigned int offset,
44213 	buf = kzalloc(array3_size(addr_len, RK3399_NBYTES, sizeof(*buf)),
44214 		      GFP_KERNEL);
44215 	if (!buf) {
44216-		clk_disable_unprepare(efuse->clk);
44217-		return -ENOMEM;
44218+		ret = -ENOMEM;
44219+		goto disable_clks;
44220 	}
44221 
44222 	writel(RK3399_LOAD | RK3399_PGENB | RK3399_STROBSFTSEL | RK3399_RSB,
44223@@ -198,9 +476,10 @@ static int rockchip_rk3399_efuse_read(void *context, unsigned int offset,
44224 
44225 	kfree(buf);
44226 
44227-	clk_disable_unprepare(efuse->clk);
44228+disable_clks:
44229+	clk_bulk_disable_unprepare(efuse->num_clks, efuse->clks);
44230 
44231-	return 0;
44232+	return ret;
44233 }
44234 
44235 static struct nvmem_config econfig = {
44236@@ -212,6 +491,10 @@ static struct nvmem_config econfig = {
44237 
44238 static const struct of_device_id rockchip_efuse_match[] = {
44239 	/* deprecated but kept around for dts binding compatibility */
44240+	{
44241+		.compatible = "rockchip,rk1808-efuse",
44242+		.data = (void *)&rockchip_rk1808_efuse_read,
44243+	},
44244 	{
44245 		.compatible = "rockchip,rockchip-efuse",
44246 		.data = (void *)&rockchip_rk3288_efuse_read,
44247@@ -220,6 +503,10 @@ static const struct of_device_id rockchip_efuse_match[] = {
44248 		.compatible = "rockchip,rk3066a-efuse",
44249 		.data = (void *)&rockchip_rk3288_efuse_read,
44250 	},
44251+	{
44252+		.compatible = "rockchip,rk3128-efuse",
44253+		.data = (void *)&rockchip_rk3128_efuse_read,
44254+	},
44255 	{
44256 		.compatible = "rockchip,rk3188-efuse",
44257 		.data = (void *)&rockchip_rk3288_efuse_read,
44258@@ -233,13 +520,17 @@ static const struct of_device_id rockchip_efuse_match[] = {
44259 		.data = (void *)&rockchip_rk3288_efuse_read,
44260 	},
44261 	{
44262-		.compatible = "rockchip,rk3368-efuse",
44263-		.data = (void *)&rockchip_rk3288_efuse_read,
44264+		.compatible = "rockchip,rk3288-secure-efuse",
44265+		.data = (void *)&rockchip_rk3288_efuse_secure_read,
44266 	},
44267 	{
44268 		.compatible = "rockchip,rk3328-efuse",
44269 		.data = (void *)&rockchip_rk3328_efuse_read,
44270 	},
44271+	{
44272+		.compatible = "rockchip,rk3368-efuse",
44273+		.data = (void *)&rockchip_rk3368_efuse_read,
44274+	},
44275 	{
44276 		.compatible = "rockchip,rk3399-efuse",
44277 		.data = (void *)&rockchip_rk3399_efuse_read,
44278@@ -268,13 +559,16 @@ static int rockchip_efuse_probe(struct platform_device *pdev)
44279 		return -ENOMEM;
44280 
44281 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
44282+	efuse->phys = res->start;
44283 	efuse->base = devm_ioremap_resource(dev, res);
44284 	if (IS_ERR(efuse->base))
44285 		return PTR_ERR(efuse->base);
44286 
44287-	efuse->clk = devm_clk_get(dev, "pclk_efuse");
44288-	if (IS_ERR(efuse->clk))
44289-		return PTR_ERR(efuse->clk);
44290+	efuse->num_clks = devm_clk_bulk_get_all(dev, &efuse->clks);
44291+	if (efuse->num_clks < 1)
44292+		return -ENODEV;
44293+
44294+	mutex_init(&efuse->mutex);
44295 
44296 	efuse->dev = dev;
44297 	if (of_property_read_u32(dev->of_node, "rockchip,efuse-size",
44298@@ -296,6 +590,26 @@ static struct platform_driver rockchip_efuse_driver = {
44299 	},
44300 };
44301 
44302-module_platform_driver(rockchip_efuse_driver);
44303+static int __init rockchip_efuse_init(void)
44304+{
44305+	int ret;
44306+
44307+	ret = platform_driver_register(&rockchip_efuse_driver);
44308+	if (ret) {
44309+		pr_err("failed to register efuse driver\n");
44310+		return ret;
44311+	}
44312+
44313+	return 0;
44314+}
44315+
44316+static void __exit rockchip_efuse_exit(void)
44317+{
44318+	platform_driver_unregister(&rockchip_efuse_driver);
44319+}
44320+
44321+subsys_initcall(rockchip_efuse_init);
44322+module_exit(rockchip_efuse_exit);
44323+
44324 MODULE_DESCRIPTION("rockchip_efuse driver");
44325 MODULE_LICENSE("GPL v2");
44326diff --git a/drivers/nvmem/rockchip-otp.c b/drivers/nvmem/rockchip-otp.c
44327index 9f53bcce2..7ca6e3873 100644
44328--- a/drivers/nvmem/rockchip-otp.c
44329+++ b/drivers/nvmem/rockchip-otp.c
44330@@ -27,6 +27,7 @@
44331 #define OTPC_USER_CTRL			0x0100
44332 #define OTPC_USER_ADDR			0x0104
44333 #define OTPC_USER_ENABLE		0x0108
44334+#define OTPC_USER_QP			0x0120
44335 #define OTPC_USER_Q			0x0124
44336 #define OTPC_INT_STATUS			0x0304
44337 #define OTPC_SBPI_CMD0_OFFSET		0x1000
44338@@ -53,6 +54,84 @@
44339 #define SBPI_ENABLE_MASK		GENMASK(16, 16)
44340 
44341 #define OTPC_TIMEOUT			10000
44342+#define OTPC_TIMEOUT_PROG		100000
44343+#define RK3568_NBYTES			2
44344+
44345+#define RK3588_OTPC_AUTO_CTRL		0x04
44346+#define RK3588_OTPC_AUTO_EN		0x08
44347+#define RK3588_OTPC_INT_ST		0x84
44348+#define RK3588_OTPC_DOUT0		0x20
44349+#define RK3588_NO_SECURE_OFFSET		0x300
44350+#define RK3588_NBYTES			4
44351+#define RK3588_BURST_NUM		1
44352+#define RK3588_BURST_SHIFT		8
44353+#define RK3588_ADDR_SHIFT		16
44354+#define RK3588_AUTO_EN			BIT(0)
44355+#define RK3588_RD_DONE			BIT(1)
44356+
44357+#define RV1126_OTP_NVM_CEB		0x00
44358+#define RV1126_OTP_NVM_RSTB		0x04
44359+#define RV1126_OTP_NVM_ST		0x18
44360+#define RV1126_OTP_NVM_RADDR		0x1C
44361+#define RV1126_OTP_NVM_RSTART		0x20
44362+#define RV1126_OTP_NVM_RDATA		0x24
44363+#define RV1126_OTP_NVM_TRWH		0x28
44364+#define RV1126_OTP_READ_ST		0x30
44365+#define RV1126_OTP_NVM_PRADDR		0x34
44366+#define RV1126_OTP_NVM_PRLEN		0x38
44367+#define RV1126_OTP_NVM_PRDATA		0x3c
44368+#define RV1126_OTP_NVM_FAILTIME		0x40
44369+#define RV1126_OTP_NVM_PRSTART		0x44
44370+#define RV1126_OTP_NVM_PRSTATE		0x48
44371+
44372+/*
44373+ * +----------+------------------+--------------------------+
44374+ * | TYPE     | RANGE(byte)      | NOTE                     |
44375+ * +----------+------------------+--------------------------+
44376+ * | system   | 0x000 ~ 0x0ff    | system info, read only   |
44377+ * +----------+------------------+--------------------------+
44378+ * | oem      | 0x100 ~ 0x1ef    | OEM-customizable area    |
44379+ * +----------+------------------+--------------------------+
44380+ * | reserved | 0x1f0 ~ 0x1f7    | future extension         |
44381+ * +----------+------------------+--------------------------+
44382+ * | wp       | 0x1f8 ~ 0x1ff    | write protection for oem |
44383+ * +----------+------------------+--------------------------+
44384+ *
44385+ * +-----+    +------------------+
44386+ * | wp  | -- | wp for oem range |
44387+ * +-----+    +------------------+
44388+ * | 1f8 |    | 0x100 ~ 0x11f    |
44389+ * +-----+    +------------------+
44390+ * | 1f9 |    | 0x120 ~ 0x13f    |
44391+ * +-----+    +------------------+
44392+ * | 1fa |    | 0x140 ~ 0x15f    |
44393+ * +-----+    +------------------+
44394+ * | 1fb |    | 0x160 ~ 0x17f    |
44395+ * +-----+    +------------------+
44396+ * | 1fc |    | 0x180 ~ 0x19f    |
44397+ * +-----+    +------------------+
44398+ * | 1fd |    | 0x1a0 ~ 0x1bf    |
44399+ * +-----+    +------------------+
44400+ * | 1fe |    | 0x1c0 ~ 0x1df    |
44401+ * +-----+    +------------------+
44402+ * | 1ff |    | 0x1e0 ~ 0x1ef    |
44403+ * +-----+    +------------------+
44404+ */
44405+#define RV1126_OTP_OEM_OFFSET		0x100
44406+#define RV1126_OTP_OEM_SIZE		0xf0
44407+#define RV1126_OTP_WP_OFFSET		0x1f8
44408+#define RV1126_OTP_WP_SIZE		0x8
44409+
44410+/* magic value that enables the OTP write path */
44411+#define ROCKCHIP_OTP_WR_MAGIC		0x524F434B
44412+/* each wp bit masks 32 bits of the OTP NVM */
44413+#define ROCKCHIP_OTP_WP_MASK_NBITS	64
44414+
44415+static unsigned int rockchip_otp_wr_magic;
44416+module_param(rockchip_otp_wr_magic, uint, 0644);
44417+MODULE_PARM_DESC(rockchip_otp_wr_magic, "Magic value that enables the OTP write function.");
44418+
44419+struct rockchip_data;
44420 
44421 struct rockchip_otp {
44422 	struct device *dev;
44423@@ -60,15 +139,19 @@ struct rockchip_otp {
44424 	struct clk_bulk_data	*clks;
44425 	int num_clks;
44426 	struct reset_control *rst;
44427-};
44428-
44429-/* list of required clocks */
44430-static const char * const rockchip_otp_clocks[] = {
44431-	"otp", "apb_pclk", "phy",
44432+	struct nvmem_config *config;
44433+	const struct rockchip_data *data;
44434+	struct mutex mutex;
44435+	DECLARE_BITMAP(wp_mask, ROCKCHIP_OTP_WP_MASK_NBITS);
44436 };
44437 
44438 struct rockchip_data {
44439 	int size;
44440+	const char * const *clocks;
44441+	int num_clks;
44442+	nvmem_reg_read_t reg_read;
44443+	nvmem_reg_write_t reg_write;
44444+	int (*init)(struct rockchip_otp *otp);
44445 };
44446 
44447 static int rockchip_otp_reset(struct rockchip_otp *otp)
44448@@ -92,7 +175,7 @@ static int rockchip_otp_reset(struct rockchip_otp *otp)
44449 	return 0;
44450 }
44451 
44452-static int rockchip_otp_wait_status(struct rockchip_otp *otp, u32 flag)
44453+static int px30_otp_wait_status(struct rockchip_otp *otp, u32 flag)
44454 {
44455 	u32 status = 0;
44456 	int ret;
44457@@ -108,7 +191,7 @@ static int rockchip_otp_wait_status(struct rockchip_otp *otp, u32 flag)
44458 	return 0;
44459 }
44460 
44461-static int rockchip_otp_ecc_enable(struct rockchip_otp *otp, bool enable)
44462+static int px30_otp_ecc_enable(struct rockchip_otp *otp, bool enable)
44463 {
44464 	int ret = 0;
44465 
44466@@ -125,15 +208,15 @@ static int rockchip_otp_ecc_enable(struct rockchip_otp *otp, bool enable)
44467 
44468 	writel(SBPI_ENABLE_MASK | SBPI_ENABLE, otp->base + OTPC_SBPI_CTRL);
44469 
44470-	ret = rockchip_otp_wait_status(otp, OTPC_SBPI_DONE);
44471+	ret = px30_otp_wait_status(otp, OTPC_SBPI_DONE);
44472 	if (ret < 0)
44473 		dev_err(otp->dev, "timeout during ecc_enable\n");
44474 
44475 	return ret;
44476 }
44477 
44478-static int rockchip_otp_read(void *context, unsigned int offset,
44479-			     void *val, size_t bytes)
44480+static int px30_otp_read(void *context, unsigned int offset, void *val,
44481+			 size_t bytes)
44482 {
44483 	struct rockchip_otp *otp = context;
44484 	u8 *buf = val;
44485@@ -151,7 +234,7 @@ static int rockchip_otp_read(void *context, unsigned int offset,
44486 		goto disable_clks;
44487 	}
44488 
44489-	ret = rockchip_otp_ecc_enable(otp, false);
44490+	ret = px30_otp_ecc_enable(otp, false);
44491 	if (ret < 0) {
44492 		dev_err(otp->dev, "rockchip_otp_ecc_enable err\n");
44493 		goto disable_clks;
44494@@ -164,7 +247,7 @@ static int rockchip_otp_read(void *context, unsigned int offset,
44495 		       otp->base + OTPC_USER_ADDR);
44496 		writel(OTPC_USER_FSM_ENABLE | OTPC_USER_FSM_ENABLE_MASK,
44497 		       otp->base + OTPC_USER_ENABLE);
44498-		ret = rockchip_otp_wait_status(otp, OTPC_USER_DONE);
44499+		ret = px30_otp_wait_status(otp, OTPC_USER_DONE);
44500 		if (ret < 0) {
44501 			dev_err(otp->dev, "timeout during read setup\n");
44502 			goto read_end;
44503@@ -180,17 +263,366 @@ static int rockchip_otp_read(void *context, unsigned int offset,
44504 	return ret;
44505 }
44506 
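+/*
+ * RK3568 reads go through the same USER_CTRL/USER_ADDR state machine as PX30
+ * but return 16-bit words (RK3568_NBYTES) with ECC enabled; the OTPC_USER_QP
+ * flag bits checked below are assumed to signal an uncorrectable ECC error.
+ */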
44507+static int rk3568_otp_read(void *context, unsigned int offset, void *val,
44508+			   size_t bytes)
44509+{
44510+	struct rockchip_otp *otp = context;
44511+	unsigned int addr_start, addr_end, addr_offset, addr_len;
44512+	unsigned int otp_qp;
44513+	u32 out_value;
44514+	u8 *buf;
44515+	int ret = 0, i = 0;
44516+
44517+	addr_start = rounddown(offset, RK3568_NBYTES) / RK3568_NBYTES;
44518+	addr_end = roundup(offset + bytes, RK3568_NBYTES) / RK3568_NBYTES;
44519+	addr_offset = offset % RK3568_NBYTES;
44520+	addr_len = addr_end - addr_start;
44521+
44522+	buf = kzalloc(array3_size(addr_len, RK3568_NBYTES, sizeof(*buf)),
44523+		      GFP_KERNEL);
44524+	if (!buf)
44525+		return -ENOMEM;
44526+
44527+	ret = clk_bulk_prepare_enable(otp->num_clks, otp->clks);
44528+	if (ret < 0) {
44529+		dev_err(otp->dev, "failed to prepare/enable clks\n");
44530+		goto out;
44531+	}
44532+
44533+	ret = rockchip_otp_reset(otp);
44534+	if (ret) {
44535+		dev_err(otp->dev, "failed to reset otp phy\n");
44536+		goto disable_clks;
44537+	}
44538+
44539+	ret = px30_otp_ecc_enable(otp, true);
44540+	if (ret < 0) {
44541+		dev_err(otp->dev, "rockchip_otp_ecc_enable err\n");
44542+		goto disable_clks;
44543+	}
44544+
44545+	writel(OTPC_USE_USER | OTPC_USE_USER_MASK, otp->base + OTPC_USER_CTRL);
44546+	udelay(5);
44547+	while (addr_len--) {
44548+		writel(addr_start++ | OTPC_USER_ADDR_MASK,
44549+		       otp->base + OTPC_USER_ADDR);
44550+		writel(OTPC_USER_FSM_ENABLE | OTPC_USER_FSM_ENABLE_MASK,
44551+		       otp->base + OTPC_USER_ENABLE);
44552+		ret = px30_otp_wait_status(otp, OTPC_USER_DONE);
44553+		if (ret < 0) {
44554+			dev_err(otp->dev, "timeout during read setup\n");
44555+			goto read_end;
44556+		}
44557+		otp_qp = readl(otp->base + OTPC_USER_QP);
44558+		if (((otp_qp & 0xc0) == 0xc0) || (otp_qp & 0x20)) {
44559+			ret = -EIO;
44560+			dev_err(otp->dev, "ecc check error during read setup\n");
44561+			goto read_end;
44562+		}
44563+		out_value = readl(otp->base + OTPC_USER_Q);
44564+		memcpy(&buf[i], &out_value, RK3568_NBYTES);
44565+		i += RK3568_NBYTES;
44566+	}
44567+
44568+	memcpy(val, buf + addr_offset, bytes);
44569+
44570+read_end:
44571+	writel(0x0 | OTPC_USE_USER_MASK, otp->base + OTPC_USER_CTRL);
44572+disable_clks:
44573+	clk_bulk_disable_unprepare(otp->num_clks, otp->clks);
44574+out:
44575+	kfree(buf);
44576+
44577+	return ret;
44578+}
44579+
44580+static int rk3588_otp_wait_status(struct rockchip_otp *otp, u32 flag)
44581+{
44582+	u32 status = 0;
44583+	int ret;
44584+
44585+	ret = readl_poll_timeout_atomic(otp->base + RK3588_OTPC_INT_ST, status,
44586+					(status & flag), 1, OTPC_TIMEOUT);
44587+	if (ret)
44588+		return ret;
44589+
44590+	/* clear the interrupt status */
44591+	writel(flag, otp->base + RK3588_OTPC_INT_ST);
44592+
44593+	return 0;
44594+}
44595+
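+/*
+ * RK3588 reads use the auto-read state machine: program the word address and
+ * burst count into AUTO_CTRL, kick AUTO_EN, wait for RD_DONE, then fetch
+ * 32-bit words from DOUT0.  Non-secure software only sees the region starting
+ * at word offset RK3588_NO_SECURE_OFFSET.
+ */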
44596+static int rk3588_otp_read(void *context, unsigned int offset, void *val,
44597+			   size_t bytes)
44598+{
44599+	struct rockchip_otp *otp = context;
44600+	unsigned int addr_start, addr_end, addr_offset, addr_len;
44601+	int ret = 0, i = 0;
44602+	u32 out_value;
44603+	u8 *buf;
44604+
44605+	if (offset >= otp->data->size)
44606+		return -ENOMEM;
44607+	if (offset + bytes > otp->data->size)
44608+		bytes = otp->data->size - offset;
44609+
44610+	addr_start = rounddown(offset, RK3588_NBYTES) / RK3588_NBYTES;
44611+	addr_end = roundup(offset + bytes, RK3588_NBYTES) / RK3588_NBYTES;
44612+	addr_offset = offset % RK3588_NBYTES;
44613+	addr_len = addr_end - addr_start;
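+	/* user-visible OTP words presumably sit behind the secure region, hence the fixed offset */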
44614+	addr_start += RK3588_NO_SECURE_OFFSET;
44615+
44616+	buf = kzalloc(array3_size(addr_len, RK3588_NBYTES, sizeof(*buf)),
44617+		      GFP_KERNEL);
44618+	if (!buf)
44619+		return -ENOMEM;
44620+
44621+	ret = clk_bulk_prepare_enable(otp->num_clks, otp->clks);
44622+	if (ret < 0) {
44623+		dev_err(otp->dev, "failed to prepare/enable clks\n");
44624+		goto out;
44625+	}
44626+
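+	/* one auto-read burst per word: program address and burst length, kick AUTO_EN, then wait for RD_DONE */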
44627+	while (addr_len--) {
44628+		writel((addr_start << RK3588_ADDR_SHIFT) |
44629+		       (RK3588_BURST_NUM << RK3588_BURST_SHIFT),
44630+		       otp->base + RK3588_OTPC_AUTO_CTRL);
44631+		writel(RK3588_AUTO_EN, otp->base + RK3588_OTPC_AUTO_EN);
44632+		ret = rk3588_otp_wait_status(otp, RK3588_RD_DONE);
44633+		if (ret < 0) {
44634+			dev_err(otp->dev, "timeout during read setup\n");
44635+			goto read_end;
44636+		}
44637+
44638+		out_value = readl(otp->base + RK3588_OTPC_DOUT0);
44639+		memcpy(&buf[i], &out_value, RK3588_NBYTES);
44640+		i += RK3588_NBYTES;
44641+		addr_start++;
44642+	}
44643+
44644+	memcpy(val, buf + addr_offset, bytes);
44645+
44646+read_end:
44647+	clk_bulk_disable_unprepare(otp->num_clks, otp->clks);
44648+out:
44649+	kfree(buf);
44650+
44651+	return ret;
44652+}
44653+
44654+static int rv1126_otp_init(struct rockchip_otp *otp)
44655+{
44656+	u32 status = 0;
44657+	int ret;
44658+
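+	/* bring the NVM core up: assert chip enable (CEB is presumably active-low), then release reset, polling the status register after each step */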
44659+	writel(0x0, otp->base + RV1126_OTP_NVM_CEB);
44660+	ret = readl_poll_timeout_atomic(otp->base + RV1126_OTP_NVM_ST, status,
44661+					status & 0x1, 1, OTPC_TIMEOUT);
44662+	if (ret < 0) {
44663+		dev_err(otp->dev, "timeout during set ceb\n");
44664+		return ret;
44665+	}
44666+
44667+	writel(0x1, otp->base + RV1126_OTP_NVM_RSTB);
44668+	ret = readl_poll_timeout_atomic(otp->base + RV1126_OTP_NVM_ST, status,
44669+					status & 0x4, 1, OTPC_TIMEOUT);
44670+	if (ret < 0) {
44671+		dev_err(otp->dev, "timeout during set rstb\n");
44672+		return ret;
44673+	}
44674+
44675+	otp->config->read_only = false;
44676+
44677+	return 0;
44678+}
44679+
44680+static int rv1126_otp_read(void *context, unsigned int offset, void *val,
44681+			   size_t bytes)
44682+{
44683+	struct rockchip_otp *otp = context;
44684+	u32 status = 0;
44685+	u8 *buf = val;
44686+	int ret = 0;
44687+
44688+	while (bytes--) {
44689+		writel(offset++, otp->base + RV1126_OTP_NVM_RADDR);
44690+		writel(0x1, otp->base + RV1126_OTP_NVM_RSTART);
44691+		ret = readl_poll_timeout_atomic(otp->base + RV1126_OTP_READ_ST,
44692+						status, status == 0, 1,
44693+						OTPC_TIMEOUT);
44694+		if (ret < 0) {
44695+			dev_err(otp->dev, "timeout during read setup\n");
44696+			return ret;
44697+		}
44698+
44699+		*buf++ = readb(otp->base + RV1126_OTP_NVM_RDATA);
44700+	}
44701+
44702+	return 0;
44703+}
44704+
44705+static int rv1126_otp_prog(struct rockchip_otp *otp, u32 bit_offset, u32 data,
44706+			   u32 bit_len)
44707+{
44708+	u32 status = 0;
44709+	int ret = 0;
44710+
44711+	if (!data)
44712+		return 0;
44713+
44714+	writel(bit_offset, otp->base + RV1126_OTP_NVM_PRADDR);
44715+	writel(bit_len - 1, otp->base + RV1126_OTP_NVM_PRLEN);
44716+	writel(data, otp->base + RV1126_OTP_NVM_PRDATA);
44717+	writel(1, otp->base + RV1126_OTP_NVM_PRSTART);
44718+	/* Wait max 100 ms */
44719+	ret = readl_poll_timeout_atomic(otp->base + RV1126_OTP_NVM_PRSTATE,
44720+					status, status == 0, 1,
44721+					OTPC_TIMEOUT_PROG);
44722+	if (ret < 0)
44723+		dev_err(otp->dev, "timeout during prog\n");
44724+
44725+	return ret;
44726+}
44727+
44728+static int rv1126_otp_write(void *context, unsigned int offset, void *val,
44729+			    size_t bytes)
44730+{
44731+	struct rockchip_otp *otp = context;
44732+	u8 *buf = val;
44733+	u8 val_r, val_w;
44734+	int ret = 0;
44735+
44736+	while (bytes--) {
44737+		ret = rv1126_otp_read(context, offset, &val_r, 1);
44738+		if (ret)
44739+			return ret;
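+		/* OTP bits can only go from 0 to 1, so only program the bits that are not already set */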
44740+		val_w = *buf & (~val_r);
44741+		ret = rv1126_otp_prog(otp, offset * 8, val_w, 8);
44742+		if (ret)
44743+			return ret;
44744+		buf++;
44745+		offset++;
44746+	}
44747+
44748+	return 0;
44749+}
44750+
44751+static int rv1126_otp_wp(void *context, unsigned int offset, size_t bytes)
44752+{
44753+	struct rockchip_otp *otp = context;
44754+
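+	/* mark the touched 32-bit words of the OEM area and rewrite the write-protect bitmap */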
44755+	bitmap_set(otp->wp_mask, (offset - RV1126_OTP_OEM_OFFSET) / 4, bytes / 4);
44756+
44757+	return rv1126_otp_write(context, RV1126_OTP_WP_OFFSET, otp->wp_mask,
44758+				RV1126_OTP_WP_SIZE);
44759+}
44760+
44761+static int rv1126_otp_oem_write(void *context, unsigned int offset, void *val,
44762+				size_t bytes)
44763+{
44764+	int ret = 0;
44765+
44766+	if (offset < RV1126_OTP_OEM_OFFSET ||
44767+	    offset > (RV1126_OTP_OEM_OFFSET + RV1126_OTP_OEM_SIZE - 1) ||
44768+	    bytes > RV1126_OTP_OEM_SIZE ||
44769+	    (offset + bytes) > (RV1126_OTP_OEM_OFFSET + RV1126_OTP_OEM_SIZE))
44770+		return -EINVAL;
44771+
44772+	if (!IS_ALIGNED(offset, 4) || !IS_ALIGNED(bytes, 4))
44773+		return -EINVAL;
44774+
44775+	ret = rv1126_otp_write(context, offset, val, bytes);
44776+	if (!ret)
44777+		ret = rv1126_otp_wp(context, offset, bytes);
44778+
44779+	return ret;
44780+}
44781+
44782+static int rockchip_otp_read(void *context, unsigned int offset, void *val,
44783+			     size_t bytes)
44784+{
44785+	struct rockchip_otp *otp = context;
44786+	int ret = -EINVAL;
44787+
44788+	mutex_lock(&otp->mutex);
44789+	if (otp->data && otp->data->reg_read)
44790+		ret = otp->data->reg_read(context, offset, val, bytes);
44791+	mutex_unlock(&otp->mutex);
44792+
44793+	return ret;
44794+}
44795+
44796+static int rockchip_otp_write(void *context, unsigned int offset, void *val,
44797+			      size_t bytes)
44798+{
44799+	struct rockchip_otp *otp = context;
44800+	int ret = -EINVAL;
44801+
44802+	mutex_lock(&otp->mutex);
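+	/* one-shot write gate: the magic value is presumably armed elsewhere in the driver and is cleared after each write */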
44803+	if (rockchip_otp_wr_magic == ROCKCHIP_OTP_WR_MAGIC &&
44804+	    otp->data && otp->data->reg_write) {
44805+		ret = otp->data->reg_write(context, offset, val, bytes);
44806+		rockchip_otp_wr_magic = 0;
44807+	}
44808+	mutex_unlock(&otp->mutex);
44809+
44810+	return ret;
44811+}
44812+
44813 static struct nvmem_config otp_config = {
44814 	.name = "rockchip-otp",
44815 	.owner = THIS_MODULE,
44816 	.read_only = true,
44817+	.reg_read = rockchip_otp_read,
44818+	.reg_write = rockchip_otp_write,
44819 	.stride = 1,
44820 	.word_size = 1,
44821-	.reg_read = rockchip_otp_read,
44822+};
44823+
44824+static const char * const px30_otp_clocks[] = {
44825+	"otp", "apb_pclk", "phy",
44826 };
44827 
44828 static const struct rockchip_data px30_data = {
44829 	.size = 0x40,
44830+	.clocks = px30_otp_clocks,
44831+	.num_clks = ARRAY_SIZE(px30_otp_clocks),
44832+	.reg_read = px30_otp_read,
44833+};
44834+
44835+static const char * const rk3568_otp_clocks[] = {
44836+	"usr", "sbpi", "apb", "phy",
44837+};
44838+
44839+static const struct rockchip_data rk3568_data = {
44840+	.size = 0x80,
44841+	.clocks = rk3568_otp_clocks,
44842+	.num_clks = ARRAY_SIZE(rk3568_otp_clocks),
44843+	.reg_read = rk3568_otp_read,
44844+};
44845+
44846+static const char * const rk3588_otp_clocks[] = {
44847+	"otpc", "apb", "arb", "phy",
44848+};
44849+
44850+static const struct rockchip_data rk3588_data = {
44851+	.size = 0x400,
44852+	.clocks = rk3588_otp_clocks,
44853+	.num_clks = ARRAY_SIZE(rk3588_otp_clocks),
44854+	.reg_read = rk3588_otp_read,
44855+};
44856+
44857+static const char * const rv1126_otp_clocks[] = {
44858+	"otp", "apb_pclk",
44859+};
44860+
44861+static const struct rockchip_data rv1126_data = {
44862+	.size = 0x200,
44863+	.clocks = rv1126_otp_clocks,
44864+	.num_clks = ARRAY_SIZE(rv1126_otp_clocks),
44865+	.init = rv1126_otp_init,
44866+	.reg_read = rv1126_otp_read,
44867+	.reg_write = rv1126_otp_oem_write,
44868 };
44869 
44870 static const struct of_device_id rockchip_otp_match[] = {
44871@@ -202,6 +634,18 @@ static const struct of_device_id rockchip_otp_match[] = {
44872 		.compatible = "rockchip,rk3308-otp",
44873 		.data = (void *)&px30_data,
44874 	},
44875+	{
44876+		.compatible = "rockchip,rk3568-otp",
44877+		.data = (void *)&rk3568_data,
44878+	},
44879+	{
44880+		.compatible = "rockchip,rk3588-otp",
44881+		.data = (void *)&rk3588_data,
44882+	},
44883+	{
44884+		.compatible = "rockchip,rv1126-otp",
44885+		.data = (void *)&rv1126_data,
44886+	},
44887 	{ /* sentinel */ },
44888 };
44889 MODULE_DEVICE_TABLE(of, rockchip_otp_match);
44890@@ -225,32 +669,42 @@ static int rockchip_otp_probe(struct platform_device *pdev)
44891 	if (!otp)
44892 		return -ENOMEM;
44893 
44894+	mutex_init(&otp->mutex);
44895+	otp->data = data;
44896 	otp->dev = dev;
44897 	otp->base = devm_platform_ioremap_resource(pdev, 0);
44898 	if (IS_ERR(otp->base))
44899 		return PTR_ERR(otp->base);
44900 
44901-	otp->num_clks = ARRAY_SIZE(rockchip_otp_clocks);
44902+	otp->num_clks = data->num_clks;
44903 	otp->clks = devm_kcalloc(dev, otp->num_clks,
44904 				     sizeof(*otp->clks), GFP_KERNEL);
44905 	if (!otp->clks)
44906 		return -ENOMEM;
44907 
44908 	for (i = 0; i < otp->num_clks; ++i)
44909-		otp->clks[i].id = rockchip_otp_clocks[i];
44910+		otp->clks[i].id = data->clocks[i];
44911 
44912 	ret = devm_clk_bulk_get(dev, otp->num_clks, otp->clks);
44913 	if (ret)
44914 		return ret;
44915 
44916-	otp->rst = devm_reset_control_get(dev, "phy");
44917+	otp->rst = devm_reset_control_array_get_optional_exclusive(dev);
44918 	if (IS_ERR(otp->rst))
44919 		return PTR_ERR(otp->rst);
44920 
44921-	otp_config.size = data->size;
44922-	otp_config.priv = otp;
44923-	otp_config.dev = dev;
44924-	nvmem = devm_nvmem_register(dev, &otp_config);
44925+	otp->config = &otp_config;
44926+	otp->config->size = data->size;
44927+	otp->config->priv = otp;
44928+	otp->config->dev = dev;
44929+
44930+	if (data->init) {
44931+		ret = data->init(otp);
44932+		if (ret)
44933+			return ret;
44934+	}
44935+
44936+	nvmem = devm_nvmem_register(dev, otp->config);
44937 
44938 	return PTR_ERR_OR_ZERO(nvmem);
44939 }
44940@@ -263,6 +717,26 @@ static struct platform_driver rockchip_otp_driver = {
44941 	},
44942 };
44943 
44944-module_platform_driver(rockchip_otp_driver);
44945+static int __init rockchip_otp_init(void)
44946+{
44947+	int ret;
44948+
44949+	ret = platform_driver_register(&rockchip_otp_driver);
44950+	if (ret) {
44951+		pr_err("failed to register otp driver\n");
44952+		return ret;
44953+	}
44954+
44955+	return 0;
44956+}
44957+
44958+static void __exit rockchip_otp_exit(void)
44959+{
44960+	platform_driver_unregister(&rockchip_otp_driver);
44961+}
44962+
44963+subsys_initcall(rockchip_otp_init);
44964+module_exit(rockchip_otp_exit);
44965+
44966 MODULE_DESCRIPTION("Rockchip OTP driver");
44967 MODULE_LICENSE("GPL v2");
44968diff --git a/drivers/opp/debugfs.c b/drivers/opp/debugfs.c
44969index 60f4ff8e0..ee20811cc 100644
44970--- a/drivers/opp/debugfs.c
44971+++ b/drivers/opp/debugfs.c
44972@@ -239,11 +239,60 @@ void opp_debug_unregister(struct opp_device *opp_dev,
44973 	opp_dev->dentry = NULL;
44974 }
44975 
44976+static int opp_summary_show(struct seq_file *s, void *data)
44977+{
44978+	struct list_head *lists = (struct list_head *)s->private;
44979+	struct opp_table *opp_table;
44980+	struct dev_pm_opp *opp;
44981+
44982+	mutex_lock(&opp_table_lock);
44983+
44984+	seq_puts(s, " device                rate(Hz)    target(uV)    min(uV)    max(uV)\n");
44985+	seq_puts(s, "-------------------------------------------------------------------\n");
44986+
44987+	list_for_each_entry(opp_table, lists, node) {
44988+		seq_printf(s, " %s\n", opp_table->dentry_name);
44989+		mutex_lock(&opp_table->lock);
44990+		list_for_each_entry(opp, &opp_table->opp_list, node) {
44991+			seq_printf(s, "%31lu %12lu %11lu %11lu\n",
44992+				   opp->rate,
44993+				   opp->supplies[0].u_volt,
44994+				   opp->supplies[0].u_volt_min,
44995+				   opp->supplies[0].u_volt_max);
44996+			if (opp_table->regulator_count > 1)
44997+				seq_printf(s, "%44lu %11lu %11lu\n",
44998+					   opp->supplies[1].u_volt,
44999+					   opp->supplies[1].u_volt_min,
45000+					   opp->supplies[1].u_volt_max);
45001+		}
45002+		mutex_unlock(&opp_table->lock);
45003+	}
45004+
45005+	mutex_unlock(&opp_table_lock);
45006+
45007+	return 0;
45008+}
45009+
45010+static int opp_summary_open(struct inode *inode, struct file *file)
45011+{
45012+	return single_open(file, opp_summary_show, inode->i_private);
45013+}
45014+
45015+static const struct file_operations opp_summary_fops = {
45016+	.open		= opp_summary_open,
45017+	.read		= seq_read,
45018+	.llseek		= seq_lseek,
45019+	.release	= single_release,
45020+};
45021+
45022 static int __init opp_debug_init(void)
45023 {
45024 	/* Create /sys/kernel/debug/opp directory */
45025 	rootdir = debugfs_create_dir("opp", NULL);
45026 
45027+	debugfs_create_file("opp_summary", 0444, rootdir, &opp_tables,
45028+			    &opp_summary_fops);
45029+
45030 	return 0;
45031 }
45032 core_initcall(opp_debug_init);
45033diff --git a/drivers/opp/of.c b/drivers/opp/of.c
45034index 3d7adc0de..b163f5d13 100644
45035--- a/drivers/opp/of.c
45036+++ b/drivers/opp/of.c
45037@@ -1328,7 +1328,7 @@ int dev_pm_opp_of_register_em(struct device *dev, struct cpumask *cpus)
45038 		goto failed;
45039 	}
45040 
45041-	ret = em_dev_register_perf_domain(dev, nr_opp, &em_cb, cpus);
45042+	ret = em_dev_register_perf_domain(dev, nr_opp, &em_cb, cpus, true);
45043 	if (ret)
45044 		goto failed;
45045 
45046diff --git a/drivers/pci/controller/Makefile b/drivers/pci/controller/Makefile
45047index 04c6edc28..d023ff1a1 100644
45048--- a/drivers/pci/controller/Makefile
45049+++ b/drivers/pci/controller/Makefile
45050@@ -23,9 +23,10 @@ obj-$(CONFIG_PCIE_IPROC_PLATFORM) += pcie-iproc-platform.o
45051 obj-$(CONFIG_PCIE_IPROC_BCMA) += pcie-iproc-bcma.o
45052 obj-$(CONFIG_PCIE_ALTERA) += pcie-altera.o
45053 obj-$(CONFIG_PCIE_ALTERA_MSI) += pcie-altera-msi.o
45054-obj-$(CONFIG_PCIE_ROCKCHIP) += pcie-rockchip.o
45055-obj-$(CONFIG_PCIE_ROCKCHIP_EP) += pcie-rockchip-ep.o
45056-obj-$(CONFIG_PCIE_ROCKCHIP_HOST) += pcie-rockchip-host.o
45057+obj-$(CONFIG_PCIE_ROCKCHIP_EP) += pcie-rockchip-ep.o pcie-rockchip.o
45058+pcierockchiphost-y := pcie-rockchip-host.o pcie-rockchip.o
45059+obj-$(CONFIG_PCIE_ROCKCHIP_HOST) += pcierockchiphost.o
45060+
45061 obj-$(CONFIG_PCIE_MEDIATEK) += pcie-mediatek.o
45062 obj-$(CONFIG_PCIE_TANGO_SMP8759) += pcie-tango.o
45063 obj-$(CONFIG_VMD) += vmd.o
45064diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c
45065index 28945351d..2df528706 100644
45066--- a/drivers/pci/controller/dwc/pcie-designware.c
45067+++ b/drivers/pci/controller/dwc/pcie-designware.c
45068@@ -496,6 +496,7 @@ int dw_pcie_link_up(struct dw_pcie *pci)
45069 	return ((val & PCIE_PORT_DEBUG1_LINK_UP) &&
45070 		(!(val & PCIE_PORT_DEBUG1_LINK_IN_TRAINING)));
45071 }
45072+EXPORT_SYMBOL_GPL(dw_pcie_link_up);
45073 
45074 void dw_pcie_upconfig_setup(struct dw_pcie *pci)
45075 {
45076diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h
45077index 9d2f511f1..08cd48487 100644
45078--- a/drivers/pci/controller/dwc/pcie-designware.h
45079+++ b/drivers/pci/controller/dwc/pcie-designware.h
45080@@ -42,6 +42,7 @@
45081 #define PORT_AFR_L1_ENTRANCE_LAT_MASK	GENMASK(29, 27)
45082 
45083 #define PCIE_PORT_LINK_CONTROL		0x710
45084+#define PORT_LINK_LPBK_ENABLE		BIT(2)
45085 #define PORT_LINK_DLL_LINK_EN		BIT(5)
45086 #define PORT_LINK_FAST_LINK_MODE	BIT(7)
45087 #define PORT_LINK_MODE_MASK		GENMASK(21, 16)
45088@@ -266,7 +267,6 @@ struct dw_pcie {
45089 	/* Used when iatu_unroll_enabled is true */
45090 	void __iomem		*atu_base;
45091 	u32			num_viewport;
45092-	u8			iatu_unroll_enabled;
45093 	struct pcie_port	pp;
45094 	struct dw_pcie_ep	ep;
45095 	const struct dw_pcie_ops *ops;
45096@@ -274,6 +274,8 @@ struct dw_pcie {
45097 	int			num_lanes;
45098 	int			link_gen;
45099 	u8			n_fts[2];
45100+	bool			iatu_unroll_enabled: 1;
45101+	bool			io_cfg_atu_shared: 1;
45102 };
45103 
45104 #define to_dw_pcie_from_pp(port) container_of((port), struct dw_pcie, pp)
45105diff --git a/drivers/pci/controller/pcie-rockchip-host.c b/drivers/pci/controller/pcie-rockchip-host.c
45106index 0d6df73bb..2645157f3 100644
45107--- a/drivers/pci/controller/pcie-rockchip-host.c
45108+++ b/drivers/pci/controller/pcie-rockchip-host.c
45109@@ -38,6 +38,37 @@
45110 
45111 #include "../pci.h"
45112 #include "pcie-rockchip.h"
45113+#include <linux/rockchip-pcie-dma.h>
45114+
45115+static void rk_pcie_start_dma_rk3399(struct dma_trx_obj *obj)
45116+{
45117+	struct rockchip_pcie *rockchip = dev_get_drvdata(obj->dev);
45118+	struct dma_table *tbl = obj->cur;
45119+	int chn = tbl->chn;
45120+
45121+	rockchip_pcie_write(rockchip, (u32)(tbl->phys_descs & 0xffffffff),
45122+			    PCIE_APB_CORE_UDMA_BASE + 0x14 * chn + 0x04);
45123+	rockchip_pcie_write(rockchip, (u32)(tbl->phys_descs >> 32),
45124+			    PCIE_APB_CORE_UDMA_BASE + 0x14 * chn + 0x08);
45125+	rockchip_pcie_write(rockchip, BIT(0) | (tbl->dir << 1),
45126+			    PCIE_APB_CORE_UDMA_BASE + 0x14 * chn + 0x00);
45127+}
45128+
45129+static void rk_pcie_config_dma_rk3399(struct dma_table *table)
45130+{
45131+	u32 *desc = table->descs;
45132+
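+	/*
+	 * Descriptor layout (assumed): local address lo/hi, bus address lo/hi,
+	 * two reserved words, transfer size, control. Bit 24 of the size word
+	 * appears to mark the descriptor as valid.
+	 */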
45133+	*(desc + 0) = (u32)(table->local & 0xffffffff);
45134+	*(desc + 1) = (u32)(table->local >> 32);
45135+	*(desc + 2) = (u32)(table->bus & 0xffffffff);
45136+	*(desc + 3) = (u32)(table->bus >> 32);
45137+	*(desc + 4) = 0;
45138+	*(desc + 5) = 0;
45139+	*(desc + 6) = table->buf_size;
45140+	*(desc + 7) = 0;
45141+	*(desc + 8) = 0;
45142+	*(desc + 6) |= 1 << 24;
45143+}
45144 
45145 static void rockchip_pcie_enable_bw_int(struct rockchip_pcie *rockchip)
45146 {
45147@@ -159,6 +190,9 @@ static int rockchip_pcie_rd_other_conf(struct rockchip_pcie *rockchip,
45148 {
45149 	u32 busdev;
45150 
45151+	if (rockchip->in_remove)
45152+		return PCIBIOS_SUCCESSFUL;
45153+
45154 	busdev = PCIE_ECAM_ADDR(bus->number, PCI_SLOT(devfn),
45155 				PCI_FUNC(devfn), where);
45156 
45157@@ -193,6 +227,9 @@ static int rockchip_pcie_wr_other_conf(struct rockchip_pcie *rockchip,
45158 {
45159 	u32 busdev;
45160 
45161+	if (rockchip->in_remove)
45162+		return PCIBIOS_SUCCESSFUL;
45163+
45164 	busdev = PCIE_ECAM_ADDR(bus->number, PCI_SLOT(devfn),
45165 				PCI_FUNC(devfn), where);
45166 	if (!IS_ALIGNED(busdev, size))
45167@@ -299,6 +336,7 @@ static int rockchip_pcie_host_init_port(struct rockchip_pcie *rockchip)
45168 	struct device *dev = rockchip->dev;
45169 	int err, i = MAX_LANE_NUM;
45170 	u32 status;
45171+	int timeouts = 500;
45172 
45173 	gpiod_set_value_cansleep(rockchip->ep_gpio, 0);
45174 
45175@@ -330,15 +368,26 @@ static int rockchip_pcie_host_init_port(struct rockchip_pcie *rockchip)
45176 
45177 	gpiod_set_value_cansleep(rockchip->ep_gpio, 1);
45178 
45179+	if (rockchip->wait_ep)
45180+		timeouts = 10000;
45181+
45182 	/* 500ms timeout value should be enough for Gen1/2 training */
45183 	err = readl_poll_timeout(rockchip->apb_base + PCIE_CLIENT_BASIC_STATUS1,
45184 				 status, PCIE_LINK_UP(status), 20,
45185-				 500 * USEC_PER_MSEC);
45186+				 timeouts * USEC_PER_MSEC);
45187 	if (err) {
45188 		dev_err(dev, "PCIe link training gen1 timeout!\n");
45189 		goto err_power_off_phy;
45190 	}
45191 
45192+	err = readl_poll_timeout(rockchip->apb_base + PCIE_CLIENT_DEBUG_OUT_0,
45193+				 status, PCIE_LINK_IS_L0(status), 20,
45194+				 timeouts * USEC_PER_MSEC);
45195+	if (err) {
45196+		dev_err(dev, "LTSSM is not L0!\n");
45197+		goto err_power_off_phy;
45198+	}
45199+
45200 	if (rockchip->link_gen == 2) {
45201 		/*
45202 		 * Enable retrain for gen2. This should be configured only after
45203@@ -370,6 +419,11 @@ static int rockchip_pcie_host_init_port(struct rockchip_pcie *rockchip)
45204 		}
45205 	}
45206 
45207+	/* disable ltssm */
45208+	if (rockchip->dma_trx_enabled)
45209+		rockchip_pcie_write(rockchip, PCIE_CLIENT_LINK_TRAIN_DISABLE,
45210+				    PCIE_CLIENT_CONFIG);
45211+
45212 	rockchip_pcie_write(rockchip, ROCKCHIP_VENDOR_ID,
45213 			    PCIE_CORE_CONFIG_VENDOR);
45214 	rockchip_pcie_write(rockchip,
45215@@ -403,6 +457,33 @@ static int rockchip_pcie_host_init_port(struct rockchip_pcie *rockchip)
45216 	return err;
45217 }
45218 
45219+static inline void
45220+rockchip_pcie_handle_dma_interrupt(struct rockchip_pcie *rockchip)
45221+{
45222+	u32 dma_status;
45223+	struct dma_trx_obj *obj = rockchip->dma_obj;
45224+
45225+	dma_status = rockchip_pcie_read(rockchip,
45226+			PCIE_APB_CORE_UDMA_BASE + PCIE_UDMA_INT_REG);
45227+
45228+	/* Core: clear dma interrupt */
45229+	rockchip_pcie_write(rockchip, dma_status,
45230+			PCIE_APB_CORE_UDMA_BASE + PCIE_UDMA_INT_REG);
45231+
45232+	WARN_ONCE(!(dma_status & 0x3), "dma_status 0x%x\n", dma_status);
45233+
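+	/* bit 0 reports a channel 0 completion (cf. PCIE_CH0_DONE_ENABLE) */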
45234+	if (dma_status & (1 << 0)) {
45235+		obj->irq_num++;
45236+		obj->dma_free = true;
45237+	}
45238+
45239+	if (list_empty(&obj->tbl_list)) {
45240+		if (obj->dma_free &&
45241+			obj->loop_count >= obj->loop_count_threshold)
45242+			complete(&obj->done);
45243+	}
45244+}
45245+
45246 static irqreturn_t rockchip_pcie_subsys_irq_handler(int irq, void *arg)
45247 {
45248 	struct rockchip_pcie *rockchip = arg;
45249@@ -411,9 +492,10 @@ static irqreturn_t rockchip_pcie_subsys_irq_handler(int irq, void *arg)
45250 	u32 sub_reg;
45251 
45252 	reg = rockchip_pcie_read(rockchip, PCIE_CLIENT_INT_STATUS);
45253+	sub_reg = rockchip_pcie_read(rockchip, PCIE_CORE_INT_STATUS);
45254+	dev_dbg(dev, "reg = 0x%x, sub_reg = 0x%x\n", reg, sub_reg);
45255 	if (reg & PCIE_CLIENT_INT_LOCAL) {
45256 		dev_dbg(dev, "local interrupt received\n");
45257-		sub_reg = rockchip_pcie_read(rockchip, PCIE_CORE_INT_STATUS);
45258 		if (sub_reg & PCIE_CORE_INT_PRFPE)
45259 			dev_dbg(dev, "parity error detected while reading from the PNP receive FIFO RAM\n");
45260 
45261@@ -463,6 +545,12 @@ static irqreturn_t rockchip_pcie_subsys_irq_handler(int irq, void *arg)
45262 		rockchip_pcie_clr_bw_int(rockchip);
45263 	}
45264 
45265+	if (reg & PCIE_CLIENT_INT_UDMA) {
45266+		rockchip_pcie_write(rockchip, sub_reg, PCIE_CLIENT_INT_STATUS);
45267+		rockchip_pcie_write(rockchip, reg, PCIE_CLIENT_INT_STATUS);
45268+		rockchip_pcie_handle_dma_interrupt(rockchip);
45269+	}
45270+
45271 	rockchip_pcie_write(rockchip, reg & PCIE_CLIENT_INT_LOCAL,
45272 			    PCIE_CLIENT_INT_STATUS);
45273 
45274@@ -673,6 +761,8 @@ static void rockchip_pcie_enable_interrupts(struct rockchip_pcie *rockchip)
45275 			    PCIE_CORE_INT_MASK);
45276 
45277 	rockchip_pcie_enable_bw_int(rockchip);
45278+	rockchip_pcie_write(rockchip, PCIE_UDMA_INT_ENABLE_MASK,
45279+			PCIE_APB_CORE_UDMA_BASE + PCIE_UDMA_INT_ENABLE_REG);
45280 }
45281 
45282 static int rockchip_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
45283@@ -811,6 +901,12 @@ static int rockchip_pcie_cfg_atu(struct rockchip_pcie *rockchip)
45284 		}
45285 	}
45286 
45287+	/* Workaround for PCIe DMA transfer */
45288+	if (rockchip->dma_trx_enabled) {
45289+		rockchip_pcie_prog_ob_atu(rockchip, 1, AXI_WRAPPER_MEM_WRITE,
45290+				32 - 1, rockchip->mem_reserve_start, 0x0);
45291+	}
45292+
45293 	err = rockchip_pcie_prog_ib_atu(rockchip, 2, 32 - 1, 0x0, 0);
45294 	if (err) {
45295 		dev_err(dev, "program RC mem inbound ATU failed\n");
45296@@ -846,6 +942,9 @@ static int rockchip_pcie_cfg_atu(struct rockchip_pcie *rockchip)
45297 				  20 - 1, 0, 0);
45298 
45299 	rockchip->msg_bus_addr += ((reg_no + offset) << 20);
45300+	rockchip->msg_region = devm_ioremap(dev, rockchip->msg_bus_addr, SZ_1M);
45301+	if (!rockchip->msg_region)
45302+		err = -ENOMEM;
45303 	return err;
45304 }
45305 
45306@@ -854,6 +953,10 @@ static int rockchip_pcie_wait_l2(struct rockchip_pcie *rockchip)
45307 	u32 value;
45308 	int err;
45309 
45310+	/* Don't enter L2 state when no EP is connected */
45311+	if (rockchip->dma_trx_enabled == 1)
45312+		return 0;
45313+
45314 	/* send PME_TURN_OFF message */
45315 	writel(0x0, rockchip->msg_region + PCIE_RC_SEND_PME_OFF);
45316 
45317@@ -869,7 +972,7 @@ static int rockchip_pcie_wait_l2(struct rockchip_pcie *rockchip)
45318 	return 0;
45319 }
45320 
45321-static int __maybe_unused rockchip_pcie_suspend_noirq(struct device *dev)
45322+static int rockchip_pcie_suspend_for_user(struct device *dev)
45323 {
45324 	struct rockchip_pcie *rockchip = dev_get_drvdata(dev);
45325 	int ret;
45326@@ -885,8 +988,43 @@ static int __maybe_unused rockchip_pcie_suspend_noirq(struct device *dev)
45327 		return ret;
45328 	}
45329 
45330+	/* disable ltssm */
45331+	rockchip_pcie_write(rockchip, PCIE_CLIENT_LINK_TRAIN_DISABLE,
45332+			    PCIE_CLIENT_CONFIG);
45333+
45334 	rockchip_pcie_deinit_phys(rockchip);
45335 
45336+	return ret;
45337+}
45338+
45339+static int rockchip_pcie_resume_for_user(struct device *dev)
45340+{
45341+	struct rockchip_pcie *rockchip = dev_get_drvdata(dev);
45342+	int err;
45343+
45344+	err = rockchip_pcie_host_init_port(rockchip);
45345+	if (err)
45346+		return err;
45347+
45348+	err = rockchip_pcie_cfg_atu(rockchip);
45349+	if (err)
45350+		return err;
45351+
45352+	/* Need this to enter L1 again */
45353+	rockchip_pcie_update_txcredit_mui(rockchip);
45354+	rockchip_pcie_enable_interrupts(rockchip);
45355+
45356+	return 0;
45357+}
45358+
45359+static int __maybe_unused rockchip_pcie_suspend_noirq(struct device *dev)
45360+{
45361+	struct rockchip_pcie *rockchip = dev_get_drvdata(dev);
45362+	int ret = 0;
45363+
45364+	if (!rockchip->dma_trx_enabled)
45365+		ret = rockchip_pcie_suspend_for_user(dev);
45366+
45367 	rockchip_pcie_disable_clocks(rockchip);
45368 
45369 	regulator_disable(rockchip->vpcie0v9);
45370@@ -909,29 +1047,105 @@ static int __maybe_unused rockchip_pcie_resume_noirq(struct device *dev)
45371 	if (err)
45372 		goto err_disable_0v9;
45373 
45374-	err = rockchip_pcie_host_init_port(rockchip);
45375+	if (!rockchip->dma_trx_enabled)
45376+		err = rockchip_pcie_resume_for_user(dev);
45377 	if (err)
45378-		goto err_pcie_resume;
45379-
45380-	err = rockchip_pcie_cfg_atu(rockchip);
45381-	if (err)
45382-		goto err_err_deinit_port;
45383-
45384-	/* Need this to enter L1 again */
45385-	rockchip_pcie_update_txcredit_mui(rockchip);
45386-	rockchip_pcie_enable_interrupts(rockchip);
45387+		goto err_disable_clocks;
45388 
45389 	return 0;
45390 
45391-err_err_deinit_port:
45392-	rockchip_pcie_deinit_phys(rockchip);
45393-err_pcie_resume:
45394+err_disable_clocks:
45395 	rockchip_pcie_disable_clocks(rockchip);
45396 err_disable_0v9:
45397 	regulator_disable(rockchip->vpcie0v9);
45398+
45399 	return err;
45400 }
45401 
45402+static int rockchip_pcie_really_probe(struct rockchip_pcie *rockchip)
45403+{
45404+	int err;
45405+
45406+	err = rockchip_pcie_host_init_port(rockchip);
45407+	if (err)
45408+		return err;
45409+
45410+	err = rockchip_pcie_setup_irq(rockchip);
45411+	if (err)
45412+		return err;
45413+
45414+	rockchip_pcie_enable_interrupts(rockchip);
45415+
45416+	err = rockchip_pcie_cfg_atu(rockchip);
45417+	if (err)
45418+		return err;
45419+
45420+	rockchip->bridge->sysdata = rockchip;
45421+	rockchip->bridge->ops = &rockchip_pcie_ops;
45422+
45423+	return pci_host_probe(rockchip->bridge);
45424+}
45425+
45426+static ssize_t pcie_deferred_store(struct device *dev,
45427+			   struct device_attribute *attr,
45428+			   const char *buf, size_t size)
45429+{
45430+	u32 val = 0;
45431+	int err;
45432+	struct rockchip_pcie *rockchip = dev_get_drvdata(dev);
45433+
45434+	err = kstrtou32(buf, 10, &val);
45435+	if (err)
45436+		return err;
45437+
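+	/* any non-zero value triggers the deferred bring-up; wait_ep widens the link-training timeout */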
45438+	if (val) {
45439+		rockchip->wait_ep = 1;
45440+		err = rockchip_pcie_really_probe(rockchip);
45441+		if (err)
45442+			return -EINVAL;
45443+	}
45444+
45445+	return size;
45446+}
45447+
45448+static ssize_t pcie_reset_ep_store(struct device *dev,
45449+			struct device_attribute *attr,
45450+			const char *buf, size_t size)
45451+{
45452+	u32 val = 0;
45453+	int err;
45454+	struct rockchip_pcie *rockchip = dev_get_drvdata(dev);
45455+	struct dma_trx_obj *obj = rockchip->dma_obj;
45456+
45457+	dev_info(dev, "loop_cout = %d\n", obj->loop_count);
45458+
45459+	err = kstrtou32(buf, 10, &val);
45460+	if (err)
45461+		return err;
45462+
45463+	if (val == PCIE_USER_UNLINK)
45464+		rockchip_pcie_suspend_for_user(rockchip->dev);
45465+	else if (val == PCIE_USER_RELINK)
45466+		rockchip_pcie_resume_for_user(rockchip->dev);
45467+	else
45468+		return -EINVAL;
45469+
45470+	return size;
45471+}
45472+
45473+static DEVICE_ATTR_WO(pcie_deferred);
45474+static DEVICE_ATTR_WO(pcie_reset_ep);
45475+
45476+static struct attribute *pcie_attrs[] = {
45477+	&dev_attr_pcie_deferred.attr,
45478+	&dev_attr_pcie_reset_ep.attr,
45479+	NULL
45480+};
45481+
45482+static const struct attribute_group pcie_attr_group = {
45483+	.attrs = pcie_attrs,
45484+};
45485+
45486 static int rockchip_pcie_probe(struct platform_device *pdev)
45487 {
45488 	struct rockchip_pcie *rockchip;
45489@@ -948,6 +1162,8 @@ static int rockchip_pcie_probe(struct platform_device *pdev)
45490 
45491 	rockchip = pci_host_bridge_priv(bridge);
45492 
45493+	rockchip->bridge = bridge;
45494+
45495 	platform_set_drvdata(pdev, rockchip);
45496 	rockchip->dev = dev;
45497 	rockchip->is_rc = true;
45498@@ -966,43 +1182,47 @@ static int rockchip_pcie_probe(struct platform_device *pdev)
45499 		goto err_set_vpcie;
45500 	}
45501 
45502-	err = rockchip_pcie_host_init_port(rockchip);
45503-	if (err)
45504-		goto err_vpcie;
45505-
45506 	err = rockchip_pcie_init_irq_domain(rockchip);
45507 	if (err < 0)
45508-		goto err_deinit_port;
45509-
45510-	err = rockchip_pcie_cfg_atu(rockchip);
45511-	if (err)
45512-		goto err_remove_irq_domain;
45513+		goto err_vpcie;
45514 
45515-	rockchip->msg_region = devm_ioremap(dev, rockchip->msg_bus_addr, SZ_1M);
45516-	if (!rockchip->msg_region) {
45517-		err = -ENOMEM;
45518-		goto err_remove_irq_domain;
45519+	if (rockchip->deferred) {
45520+		err = sysfs_create_group(&pdev->dev.kobj, &pcie_attr_group);
45521+		if (err) {
45522+			dev_err(&pdev->dev, "SysFS group creation failed\n");
45523+			goto err_remove_irq_domain;
45524+		}
45525+	} else {
45526+		err = rockchip_pcie_really_probe(rockchip);
45527+		if (err) {
45528+			dev_err(&pdev->dev, "deferred probe failed\n");
45529+			goto err_deinit_port;
45530+		}
45531 	}
45532 
45533-	bridge->sysdata = rockchip;
45534-	bridge->ops = &rockchip_pcie_ops;
45535+	if (rockchip->dma_trx_enabled == 0)
45536+		return 0;
45537 
45538-	err = rockchip_pcie_setup_irq(rockchip);
45539-	if (err)
45540-		goto err_remove_irq_domain;
45541-
45542-	rockchip_pcie_enable_interrupts(rockchip);
45543+	rockchip->dma_obj = rk_pcie_dma_obj_probe(dev);
45544+	if (IS_ERR(rockchip->dma_obj)) {
45545+		dev_err(dev, "failed to prepare dma object\n");
45546+		err = -EINVAL;
45547+		goto err_deinit_port;
45548+	}
45549 
45550-	err = pci_host_probe(bridge);
45551-	if (err < 0)
45552-		goto err_remove_irq_domain;
45553+	if (rockchip->dma_obj) {
45554+		rockchip->dma_obj->start_dma_func = rk_pcie_start_dma_rk3399;
45555+		rockchip->dma_obj->config_dma_func = rk_pcie_config_dma_rk3399;
45556+	}
45557 
45558 	return 0;
45559 
45560-err_remove_irq_domain:
45561-	irq_domain_remove(rockchip->irq_domain);
45562 err_deinit_port:
45563 	rockchip_pcie_deinit_phys(rockchip);
45564+	if (rockchip->deferred)
45565+		sysfs_remove_group(&pdev->dev.kobj, &pcie_attr_group);
45566+err_remove_irq_domain:
45567+	irq_domain_remove(rockchip->irq_domain);
45568 err_vpcie:
45569 	if (!IS_ERR(rockchip->vpcie12v))
45570 		regulator_disable(rockchip->vpcie12v);
45571@@ -1019,16 +1239,41 @@ static int rockchip_pcie_remove(struct platform_device *pdev)
45572 {
45573 	struct device *dev = &pdev->dev;
45574 	struct rockchip_pcie *rockchip = dev_get_drvdata(dev);
45575+	u32 status1, status2;
45576+	u32 status;
45577 	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(rockchip);
45578 
45579+	status1 = rockchip_pcie_read(rockchip, PCIE_CLIENT_BASIC_STATUS1);
45580+	status2 = rockchip_pcie_read(rockchip, PCIE_CLIENT_DEBUG_OUT_0);
45581+
45582+	if (!PCIE_LINK_UP(status1) || !PCIE_LINK_IS_L0(status2))
45583+		rockchip->in_remove = 1;
45584+
45585 	pci_stop_root_bus(bridge->bus);
45586 	pci_remove_root_bus(bridge->bus);
45587 	irq_domain_remove(rockchip->irq_domain);
45588 
45589+	/* pulse the Link Disable bit in the Link Control register to force the link down */
45590+	status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
45591+	status |= BIT(4);
45592+	rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
45593+
45594+	mdelay(1);
45595+
45596+	status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
45597+	status &= ~BIT(4);
45598+	rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
45599+
45600 	rockchip_pcie_deinit_phys(rockchip);
45601 
45602 	rockchip_pcie_disable_clocks(rockchip);
45603 
45604+	if (rockchip->dma_trx_enabled)
45605+		rk_pcie_dma_obj_remove(rockchip->dma_obj);
45606+
45607+	if (rockchip->deferred)
45608+		sysfs_remove_group(&pdev->dev.kobj, &pcie_attr_group);
45609+
45610 	if (!IS_ERR(rockchip->vpcie12v))
45611 		regulator_disable(rockchip->vpcie12v);
45612 	if (!IS_ERR(rockchip->vpcie3v3))
45613diff --git a/drivers/pci/controller/pcie-rockchip.c b/drivers/pci/controller/pcie-rockchip.c
45614index 990a00e08..92cced721 100644
45615--- a/drivers/pci/controller/pcie-rockchip.c
45616+++ b/drivers/pci/controller/pcie-rockchip.c
45617@@ -14,6 +14,7 @@
45618 #include <linux/clk.h>
45619 #include <linux/delay.h>
45620 #include <linux/gpio/consumer.h>
45621+#include <linux/module.h>
45622 #include <linux/of_pci.h>
45623 #include <linux/phy/phy.h>
45624 #include <linux/platform_device.h>
45625@@ -421,3 +422,7 @@ void rockchip_pcie_cfg_configuration_accesses(
45626 	rockchip_pcie_write(rockchip, 0x0, PCIE_CORE_OB_REGION_DESC1);
45627 }
45628 EXPORT_SYMBOL_GPL(rockchip_pcie_cfg_configuration_accesses);
45629+
45630+MODULE_AUTHOR("Rockchip Inc");
45631+MODULE_DESCRIPTION("Rockchip AXI PCIe driver");
45632+MODULE_LICENSE("GPL v2");
45633diff --git a/drivers/pci/controller/pcie-rockchip.h b/drivers/pci/controller/pcie-rockchip.h
45634index c7d0178fc..e34d33264 100644
45635--- a/drivers/pci/controller/pcie-rockchip.h
45636+++ b/drivers/pci/controller/pcie-rockchip.h
45637@@ -31,6 +31,7 @@
45638 #define   PCIE_CLIENT_CONF_ENABLE	  HIWORD_UPDATE_BIT(0x0001)
45639 #define   PCIE_CLIENT_CONF_DISABLE       HIWORD_UPDATE(0x0001, 0)
45640 #define   PCIE_CLIENT_LINK_TRAIN_ENABLE	  HIWORD_UPDATE_BIT(0x0002)
45641+#define   PCIE_CLIENT_LINK_TRAIN_DISABLE  HIWORD_UPDATE(0x0002, 0x0000)
45642 #define   PCIE_CLIENT_ARI_ENABLE	  HIWORD_UPDATE_BIT(0x0008)
45643 #define   PCIE_CLIENT_CONF_LANE_NUM(x)	  HIWORD_UPDATE(0x0030, ENCODE_LANES(x))
45644 #define   PCIE_CLIENT_MODE_RC		  HIWORD_UPDATE_BIT(0x0040)
45645@@ -39,6 +40,7 @@
45646 #define   PCIE_CLIENT_GEN_SEL_2		  HIWORD_UPDATE_BIT(0x0080)
45647 #define PCIE_CLIENT_DEBUG_OUT_0		(PCIE_CLIENT_BASE + 0x3c)
45648 #define   PCIE_CLIENT_DEBUG_LTSSM_MASK		GENMASK(5, 0)
45649+#define   PCIE_CLIENT_DEBUG_LTSSM_L0		0x10
45650 #define   PCIE_CLIENT_DEBUG_LTSSM_L1		0x18
45651 #define   PCIE_CLIENT_DEBUG_LTSSM_L2		0x19
45652 #define PCIE_CLIENT_BASIC_STATUS1	(PCIE_CLIENT_BASE + 0x48)
45653@@ -74,7 +76,20 @@
45654 	PCIE_CLIENT_INT_FATAL_ERR | PCIE_CLIENT_INT_DPA | \
45655 	PCIE_CLIENT_INT_HOT_RST | PCIE_CLIENT_INT_MSG | \
45656 	PCIE_CLIENT_INT_LEGACY_DONE | PCIE_CLIENT_INT_LEGACY | \
45657-	PCIE_CLIENT_INT_PHY)
45658+	PCIE_CLIENT_INT_PHY | PCIE_CLIENT_INT_UDMA)
45659+
45660+#define PCIE_APB_CORE_UDMA_BASE	(BIT(23) | BIT(22) | BIT(21))
45661+#define PCIE_CH0_DONE_ENABLE	BIT(0)
45662+#define PCIE_CH1_DONE_ENABLE	BIT(1)
45663+#define PCIE_CH0_ERR_ENABLE	BIT(8)
45664+#define PCIE_CH1_ERR_ENABLE	BIT(9)
45665+
45666+#define PCIE_UDMA_INT_REG			0xa0
45667+#define PCIE_UDMA_INT_ENABLE_REG	0xa4
45668+
45669+#define PCIE_UDMA_INT_ENABLE_MASK \
45670+	(PCIE_CH0_DONE_ENABLE | PCIE_CH1_DONE_ENABLE | \
45671+	PCIE_CH0_ERR_ENABLE | PCIE_CH1_ERR_ENABLE)
45672 
45673 #define PCIE_CORE_CTRL_MGMT_BASE	0x900000
45674 #define PCIE_CORE_CTRL			(PCIE_CORE_CTRL_MGMT_BASE + 0x000)
45675@@ -185,6 +200,8 @@
45676 #define PCIE_ECAM_ADDR(bus, dev, func, reg) \
45677 	  (PCIE_ECAM_BUS(bus) | PCIE_ECAM_DEV(dev) | \
45678 	   PCIE_ECAM_FUNC(func) | PCIE_ECAM_REG(reg))
45679+#define PCIE_LINK_IS_L0(x) \
45680+	(((x) & PCIE_CLIENT_DEBUG_LTSSM_MASK) == PCIE_CLIENT_DEBUG_LTSSM_L0)
45681 #define PCIE_LINK_IS_L2(x) \
45682 	(((x) & PCIE_CLIENT_DEBUG_LTSSM_MASK) == PCIE_CLIENT_DEBUG_LTSSM_L2)
45683 #define PCIE_LINK_UP(x) \
45684@@ -275,6 +292,9 @@
45685 		(((c) << ((b) * 8 + 5)) & \
45686 		 ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b))
45687 
45688+#define PCIE_USER_RELINK 0x1
45689+#define PCIE_USER_UNLINK 0x2
45690+
45691 struct rockchip_pcie {
45692 	void	__iomem *reg_base;		/* DT axi-base */
45693 	void	__iomem *apb_base;		/* DT apb-base */
45694@@ -306,6 +326,15 @@ struct rockchip_pcie {
45695 	phys_addr_t msg_bus_addr;
45696 	bool is_rc;
45697 	struct resource *mem_res;
45698+	phys_addr_t mem_reserve_start;
45699+	size_t mem_reserve_size;
45700+	int dma_trx_enabled;
45701+	int deferred;
45702+	int wait_ep;
45703+	struct dma_trx_obj *dma_obj;
45704+	struct list_head resources;
45705+	struct pci_host_bridge *bridge;
45706+	int in_remove;
45707 };
45708 
45709 static u32 rockchip_pcie_read(struct rockchip_pcie *rockchip, u32 reg)
45710diff --git a/drivers/phy/rockchip/Makefile b/drivers/phy/rockchip/Makefile
45711index c3cfc7f0a..d0dfcf9a8 100644
45712--- a/drivers/phy/rockchip/Makefile
45713+++ b/drivers/phy/rockchip/Makefile
45714@@ -3,7 +3,7 @@ obj-$(CONFIG_PHY_ROCKCHIP_DP)		+= phy-rockchip-dp.o
45715 obj-$(CONFIG_PHY_ROCKCHIP_DPHY_RX0)     += phy-rockchip-dphy-rx0.o
45716 obj-$(CONFIG_PHY_ROCKCHIP_EMMC)		+= phy-rockchip-emmc.o
45717 obj-$(CONFIG_PHY_ROCKCHIP_INNO_DSIDPHY)	+= phy-rockchip-inno-dsidphy.o
45718-obj-$(CONFIG_PHY_ROCKCHIP_INNO_HDMI)	+= phy-rockchip-inno-hdmi.o
45719+
45720 obj-$(CONFIG_PHY_ROCKCHIP_INNO_USB2)	+= phy-rockchip-inno-usb2.o
45721 obj-$(CONFIG_PHY_ROCKCHIP_PCIE)		+= phy-rockchip-pcie.o
45722 obj-$(CONFIG_PHY_ROCKCHIP_TYPEC)	+= phy-rockchip-typec.o
45723diff --git a/drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c b/drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c
45724index 8af8c6c5c..92462fee9 100644
45725--- a/drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c
45726+++ b/drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c
45727@@ -83,9 +83,31 @@
45728 #define DATA_LANE_0_SKEW_PHASE_MASK		GENMASK(2, 0)
45729 #define DATA_LANE_0_SKEW_PHASE(x)		UPDATE(x, 2, 0)
45730 /* Analog Register Part: reg08 */
45731+#define PRE_EMPHASIS_ENABLE_MASK		BIT(7)
45732+#define PRE_EMPHASIS_ENABLE			BIT(7)
45733+#define PRE_EMPHASIS_DISABLE			0
45734+#define PLL_POST_DIV_ENABLE_MASK		BIT(5)
45735+#define PLL_POST_DIV_ENABLE			BIT(5)
45736+#define PLL_POST_DIV_DISABLE			0
45737+#define DATA_LANE_VOD_RANGE_SET_MASK		GENMASK(3, 0)
45738+#define DATA_LANE_VOD_RANGE_SET(x)		UPDATE(x, 3, 0)
45739 #define SAMPLE_CLOCK_DIRECTION_MASK		BIT(4)
45740 #define SAMPLE_CLOCK_DIRECTION_REVERSE		BIT(4)
45741 #define SAMPLE_CLOCK_DIRECTION_FORWARD		0
45742+#define LOWFRE_EN_MASK                          BIT(5)
45743+#define PLL_OUTPUT_FREQUENCY_DIV_BY_1           0
45744+#define PLL_OUTPUT_FREQUENCY_DIV_BY_2           1
45745+/* Analog Register Part: reg1e */
45746+#define PLL_MODE_SEL_MASK			GENMASK(6, 5)
45747+#define PLL_MODE_SEL_LVDS_MODE			0
45748+#define PLL_MODE_SEL_MIPI_MODE			BIT(5)
45749+/* Analog Register Part: reg0b */
45750+#define CLOCK_LANE_VOD_RANGE_SET_MASK	GENMASK(3, 0)
45751+#define CLOCK_LANE_VOD_RANGE_SET(x)	UPDATE(x, 3, 0)
45752+#define VOD_MIN_RANGE			0x1
45753+#define VOD_MID_RANGE			0x3
45754+#define VOD_BIG_RANGE			0x7
45755+#define VOD_MAX_RANGE			0xf
45756 /* Digital Register Part: reg00 */
45757 #define REG_DIG_RSTN_MASK			BIT(0)
45758 #define REG_DIG_RSTN_NORMAL			BIT(0)
45759@@ -101,20 +123,22 @@
45760 #define T_LPX_CNT_MASK				GENMASK(5, 0)
45761 #define T_LPX_CNT(x)				UPDATE(x, 5, 0)
45762 /* Clock/Data0/Data1/Data2/Data3 Lane Register Part: reg06 */
45763+#define T_HS_ZERO_CNT_HI_MASK			BIT(7)
45764+#define T_HS_ZERO_CNT_HI(x)			UPDATE(x, 7, 7)
45765 #define T_HS_PREPARE_CNT_MASK			GENMASK(6, 0)
45766 #define T_HS_PREPARE_CNT(x)			UPDATE(x, 6, 0)
45767 /* Clock/Data0/Data1/Data2/Data3 Lane Register Part: reg07 */
45768-#define T_HS_ZERO_CNT_MASK			GENMASK(5, 0)
45769-#define T_HS_ZERO_CNT(x)			UPDATE(x, 5, 0)
45770+#define T_HS_ZERO_CNT_LO_MASK			GENMASK(5, 0)
45771+#define T_HS_ZERO_CNT_LO(x)			UPDATE(x, 5, 0)
45772 /* Clock/Data0/Data1/Data2/Data3 Lane Register Part: reg08 */
45773 #define T_HS_TRAIL_CNT_MASK			GENMASK(6, 0)
45774 #define T_HS_TRAIL_CNT(x)			UPDATE(x, 6, 0)
45775 /* Clock/Data0/Data1/Data2/Data3 Lane Register Part: reg09 */
45776-#define T_HS_EXIT_CNT_MASK			GENMASK(4, 0)
45777-#define T_HS_EXIT_CNT(x)			UPDATE(x, 4, 0)
45778+#define T_HS_EXIT_CNT_LO_MASK			GENMASK(4, 0)
45779+#define T_HS_EXIT_CNT_LO(x)			UPDATE(x, 4, 0)
45780 /* Clock/Data0/Data1/Data2/Data3 Lane Register Part: reg0a */
45781-#define T_CLK_POST_CNT_MASK			GENMASK(3, 0)
45782-#define T_CLK_POST_CNT(x)			UPDATE(x, 3, 0)
45783+#define T_CLK_POST_CNT_LO_MASK			GENMASK(3, 0)
45784+#define T_CLK_POST_CNT_LO(x)			UPDATE(x, 3, 0)
45785 /* Clock/Data0/Data1/Data2/Data3 Lane Register Part: reg0c */
45786 #define LPDT_TX_PPI_SYNC_MASK			BIT(2)
45787 #define LPDT_TX_PPI_SYNC_ENABLE			BIT(2)
45788@@ -128,9 +152,13 @@
45789 #define T_CLK_PRE_CNT_MASK			GENMASK(3, 0)
45790 #define T_CLK_PRE_CNT(x)			UPDATE(x, 3, 0)
45791 /* Clock/Data0/Data1/Data2/Data3 Lane Register Part: reg10 */
45792+#define T_CLK_POST_HI_MASK			GENMASK(7, 6)
45793+#define T_CLK_POST_HI(x)			UPDATE(x, 7, 6)
45794 #define T_TA_GO_CNT_MASK			GENMASK(5, 0)
45795 #define T_TA_GO_CNT(x)				UPDATE(x, 5, 0)
45796 /* Clock/Data0/Data1/Data2/Data3 Lane Register Part: reg11 */
45797+#define T_HS_EXIT_CNT_HI_MASK			BIT(6)
45798+#define T_HS_EXIT_CNT_HI(x)			UPDATE(x, 6, 6)
45799 #define T_TA_SURE_CNT_MASK			GENMASK(5, 0)
45800 #define T_TA_SURE_CNT(x)			UPDATE(x, 5, 0)
45801 /* Clock/Data0/Data1/Data2/Data3 Lane Register Part: reg12 */
45802@@ -168,6 +196,20 @@
45803 #define DSI_PHY_STATUS		0xb0
45804 #define PHY_LOCK		BIT(0)
45805 
45806+enum phy_max_rate {
45807+	MAX_1GHZ,
45808+	MAX_2_5GHZ,
45809+};
45810+
45811+struct inno_mipi_dphy_timing {
45812+	unsigned int max_lane_mbps;
45813+	u8 lpx;
45814+	u8 hs_prepare;
45815+	u8 clk_lane_hs_zero;
45816+	u8 data_lane_hs_zero;
45817+	u8 hs_trail;
45818+};
45819+
45820 struct inno_dsidphy {
45821 	struct device *dev;
45822 	struct clk *ref_clk;
45823@@ -176,8 +218,9 @@ struct inno_dsidphy {
45824 	void __iomem *phy_base;
45825 	void __iomem *host_base;
45826 	struct reset_control *rst;
45827-	enum phy_mode mode;
45828 	struct phy_configure_opts_mipi_dphy dphy_cfg;
45829+	unsigned int lanes;
45830+	const struct inno_dsidphy_plat_data *pdata;
45831 
45832 	struct clk *pll_clk;
45833 	struct {
45834@@ -188,6 +231,12 @@ struct inno_dsidphy {
45835 	} pll;
45836 };
45837 
45838+struct inno_dsidphy_plat_data {
45839+	const struct inno_mipi_dphy_timing *inno_mipi_dphy_timing_table;
45840+	const unsigned int num_timings;
45841+	enum phy_max_rate max_rate;
45842+};
45843+
45844 enum {
45845 	REGISTER_PART_ANALOG,
45846 	REGISTER_PART_DIGITAL,
45847@@ -199,6 +248,44 @@ enum {
45848 	REGISTER_PART_LVDS,
45849 };
45850 
45851+static const
45852+struct inno_mipi_dphy_timing inno_mipi_dphy_timing_table_max_1GHz[] = {
45853+	{ 110, 0x0, 0x20, 0x16, 0x02, 0x22},
45854+	{ 150, 0x0, 0x06, 0x16, 0x03, 0x45},
45855+	{ 200, 0x0, 0x18, 0x17, 0x04, 0x0b},
45856+	{ 250, 0x0, 0x05, 0x17, 0x05, 0x16},
45857+	{ 300, 0x0, 0x51, 0x18, 0x06, 0x2c},
45858+	{ 400, 0x0, 0x64, 0x19, 0x07, 0x33},
45859+	{ 500, 0x0, 0x20, 0x1b, 0x07, 0x4e},
45860+	{ 600, 0x0, 0x6a, 0x1d, 0x08, 0x3a},
45861+	{ 700, 0x0, 0x3e, 0x1e, 0x08, 0x6a},
45862+	{ 800, 0x0, 0x21, 0x1f, 0x09, 0x29},
45863+	{1000, 0x0, 0x09, 0x20, 0x09, 0x27},
45864+};
45865+
45866+static const
45867+struct inno_mipi_dphy_timing inno_mipi_dphy_timing_table_max_2_5GHz[] = {
45868+	{ 110, 0x02, 0x7f, 0x16, 0x02, 0x02},
45869+	{ 150, 0x02, 0x7f, 0x16, 0x03, 0x02},
45870+	{ 200, 0x02, 0x7f, 0x17, 0x04, 0x02},
45871+	{ 250, 0x02, 0x7f, 0x17, 0x05, 0x04},
45872+	{ 300, 0x02, 0x7f, 0x18, 0x06, 0x04},
45873+	{ 400, 0x03, 0x7e, 0x19, 0x07, 0x04},
45874+	{ 500, 0x03, 0x7c, 0x1b, 0x07, 0x08},
45875+	{ 600, 0x03, 0x70, 0x1d, 0x08, 0x10},
45876+	{ 700, 0x05, 0x40, 0x1e, 0x08, 0x30},
45877+	{ 800, 0x05, 0x02, 0x1f, 0x09, 0x30},
45878+	{1000, 0x05, 0x08, 0x20, 0x09, 0x30},
45879+	{1200, 0x06, 0x03, 0x32, 0x14, 0x0f},
45880+	{1400, 0x09, 0x03, 0x32, 0x14, 0x0f},
45881+	{1600, 0x0d, 0x42, 0x36, 0x0e, 0x0f},
45882+	{1800, 0x0e, 0x47, 0x7a, 0x0e, 0x0f},
45883+	{2000, 0x11, 0x64, 0x7a, 0x0e, 0x0b},
45884+	{2200, 0x13, 0x64, 0x7e, 0x15, 0x0b},
45885+	{2400, 0x13, 0x33, 0x7f, 0x15, 0x6a},
45886+	{2500, 0x15, 0x54, 0x7f, 0x15, 0x6a},
45887+};
45888+
45889 static inline struct inno_dsidphy *hw_to_inno(struct clk_hw *hw)
45890 {
45891 	return container_of(hw, struct inno_dsidphy, pll.hw);
45892@@ -216,6 +303,17 @@ static void phy_update_bits(struct inno_dsidphy *inno,
45893 	writel(tmp, inno->phy_base + reg);
45894 }
45895 
45896+static void host_update_bits(struct inno_dsidphy *inno,
45897+			     u32 reg, u32 mask, u32 val)
45898+{
45899+	unsigned int tmp, orig;
45900+
45901+	orig = readl(inno->host_base + reg);
45902+	tmp = orig & ~mask;
45903+	tmp |= val & mask;
45904+	writel(tmp, inno->host_base + reg);
45905+}
45906+
45907 static unsigned long inno_dsidphy_pll_calc_rate(struct inno_dsidphy *inno,
45908 						unsigned long rate)
45909 {
45910@@ -286,39 +384,48 @@ static unsigned long inno_dsidphy_pll_calc_rate(struct inno_dsidphy *inno,
45911 	return best_freq;
45912 }
45913 
45914-static void inno_dsidphy_mipi_mode_enable(struct inno_dsidphy *inno)
45915+static const struct inno_mipi_dphy_timing *
45916+inno_mipi_dphy_get_timing(struct inno_dsidphy *inno)
45917 {
45918-	struct phy_configure_opts_mipi_dphy *cfg = &inno->dphy_cfg;
45919-	const struct {
45920-		unsigned long rate;
45921-		u8 hs_prepare;
45922-		u8 clk_lane_hs_zero;
45923-		u8 data_lane_hs_zero;
45924-		u8 hs_trail;
45925-	} timings[] = {
45926-		{ 110000000, 0x20, 0x16, 0x02, 0x22},
45927-		{ 150000000, 0x06, 0x16, 0x03, 0x45},
45928-		{ 200000000, 0x18, 0x17, 0x04, 0x0b},
45929-		{ 250000000, 0x05, 0x17, 0x05, 0x16},
45930-		{ 300000000, 0x51, 0x18, 0x06, 0x2c},
45931-		{ 400000000, 0x64, 0x19, 0x07, 0x33},
45932-		{ 500000000, 0x20, 0x1b, 0x07, 0x4e},
45933-		{ 600000000, 0x6a, 0x1d, 0x08, 0x3a},
45934-		{ 700000000, 0x3e, 0x1e, 0x08, 0x6a},
45935-		{ 800000000, 0x21, 0x1f, 0x09, 0x29},
45936-		{1000000000, 0x09, 0x20, 0x09, 0x27},
45937-	};
45938-	u32 t_txbyteclkhs, t_txclkesc;
45939-	u32 txbyteclkhs, txclkesc, esc_clk_div;
45940-	u32 hs_exit, clk_post, clk_pre, wakeup, lpx, ta_go, ta_sure, ta_wait;
45941-	u32 hs_prepare, hs_trail, hs_zero, clk_lane_hs_zero, data_lane_hs_zero;
45942+	const struct inno_mipi_dphy_timing *timings;
45943+	unsigned int num_timings;
45944+	unsigned int lane_mbps = inno->pll.rate / USEC_PER_SEC;
45945 	unsigned int i;
45946 
45947-	inno_dsidphy_pll_calc_rate(inno, cfg->hs_clk_rate);
45948+	timings = inno->pdata->inno_mipi_dphy_timing_table;
45949+	num_timings = inno->pdata->num_timings;
45950 
45951-	/* Select MIPI mode */
45952-	phy_update_bits(inno, REGISTER_PART_LVDS, 0x03,
45953-			MODE_ENABLE_MASK, MIPI_MODE_ENABLE);
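+	/* pick the first entry rated for at least the requested lane rate; fall back to the fastest one */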
45954+	for (i = 0; i < num_timings; i++)
45955+		if (lane_mbps <= timings[i].max_lane_mbps)
45956+			break;
45957+
45958+	if (i == num_timings)
45959+		--i;
45960+
45961+	return &timings[i];
45962+}
45963+
45964+static void inno_mipi_dphy_max_2_5GHz_pll_enable(struct inno_dsidphy *inno)
45965+{
45967+	phy_update_bits(inno, REGISTER_PART_ANALOG, 0x03,
45968+			REG_PREDIV_MASK, REG_PREDIV(inno->pll.prediv));
45969+	phy_update_bits(inno, REGISTER_PART_ANALOG, 0x03,
45970+			REG_FBDIV_HI_MASK, REG_FBDIV_HI(inno->pll.fbdiv));
45971+	phy_update_bits(inno, REGISTER_PART_ANALOG, 0x04,
45972+			REG_FBDIV_LO_MASK, REG_FBDIV_LO(inno->pll.fbdiv));
45973+	phy_update_bits(inno, REGISTER_PART_ANALOG, 0x08,
45974+			PLL_POST_DIV_ENABLE_MASK, PLL_POST_DIV_ENABLE);
45975+	phy_update_bits(inno, REGISTER_PART_ANALOG, 0x0b,
45976+			CLOCK_LANE_VOD_RANGE_SET_MASK,
45977+			CLOCK_LANE_VOD_RANGE_SET(VOD_MAX_RANGE));
45978+	phy_update_bits(inno, REGISTER_PART_ANALOG, 0x01,
45979+			 REG_LDOPD_MASK | REG_PLLPD_MASK,
45980+			 REG_LDOPD_POWER_ON | REG_PLLPD_POWER_ON);
45981+}
45982+
45983+static void inno_mipi_dphy_max_1GHz_pll_enable(struct inno_dsidphy *inno)
45984+{
45985 	/* Configure PLL */
45986 	phy_update_bits(inno, REGISTER_PART_ANALOG, 0x03,
45987 			REG_PREDIV_MASK, REG_PREDIV(inno->pll.prediv));
45988@@ -330,6 +437,10 @@ static void inno_dsidphy_mipi_mode_enable(struct inno_dsidphy *inno)
45989 	phy_update_bits(inno, REGISTER_PART_ANALOG, 0x01,
45990 			REG_LDOPD_MASK | REG_PLLPD_MASK,
45991 			REG_LDOPD_POWER_ON | REG_PLLPD_POWER_ON);
45992+}
45993+
45994+static void inno_mipi_dphy_reset(struct inno_dsidphy *inno)
45995+{
45996 	/* Reset analog */
45997 	phy_update_bits(inno, REGISTER_PART_ANALOG, 0x01,
45998 			REG_SYNCRST_MASK, REG_SYNCRST_RESET);
45999@@ -342,6 +453,17 @@ static void inno_dsidphy_mipi_mode_enable(struct inno_dsidphy *inno)
46000 	udelay(1);
46001 	phy_update_bits(inno, REGISTER_PART_DIGITAL, 0x00,
46002 			REG_DIG_RSTN_MASK, REG_DIG_RSTN_NORMAL);
46003+}
46004+
46005+static void inno_mipi_dphy_timing_init(struct inno_dsidphy *inno)
46006+{
46007+	struct phy_configure_opts_mipi_dphy *cfg = &inno->dphy_cfg;
46008+	u32 t_txbyteclkhs, t_txclkesc;
46009+	u32 txbyteclkhs, txclkesc, esc_clk_div;
46010+	u32 hs_exit, clk_post, clk_pre, wakeup, lpx, ta_go, ta_sure, ta_wait;
46011+	u32 hs_prepare, hs_trail, hs_zero, clk_lane_hs_zero, data_lane_hs_zero;
46012+	const struct inno_mipi_dphy_timing *timing;
46013+	unsigned int i;
46014 
46015 	txbyteclkhs = inno->pll.rate / 8;
46016 	t_txbyteclkhs = div_u64(PSEC_PER_SEC, txbyteclkhs);
46017@@ -365,15 +487,6 @@ static void inno_dsidphy_mipi_mode_enable(struct inno_dsidphy *inno)
46018 	 * Tclk-pre = Tpin_txbyteclkhs * value
46019 	 */
46020 	clk_pre = DIV_ROUND_UP(cfg->clk_pre, t_txbyteclkhs);
46021-
46022-	/*
46023-	 * The value of counter for HS Tlpx Time
46024-	 * Tlpx = Tpin_txbyteclkhs * (2 + value)
46025-	 */
46026-	lpx = DIV_ROUND_UP(cfg->lpx, t_txbyteclkhs);
46027-	if (lpx >= 2)
46028-		lpx -= 2;
46029-
46030 	/*
46031 	 * The value of counter for HS Tta-go
46032 	 * Tta-go for turnaround
46033@@ -393,17 +506,22 @@ static void inno_dsidphy_mipi_mode_enable(struct inno_dsidphy *inno)
46034 	 */
46035 	ta_wait = DIV_ROUND_UP(cfg->ta_get, t_txclkesc);
46036 
46037-	for (i = 0; i < ARRAY_SIZE(timings); i++)
46038-		if (inno->pll.rate <= timings[i].rate)
46039-			break;
46040-
46041-	if (i == ARRAY_SIZE(timings))
46042-		--i;
46043-
46044-	hs_prepare = timings[i].hs_prepare;
46045-	hs_trail = timings[i].hs_trail;
46046-	clk_lane_hs_zero = timings[i].clk_lane_hs_zero;
46047-	data_lane_hs_zero = timings[i].data_lane_hs_zero;
46048+	timing = inno_mipi_dphy_get_timing(inno);
46049+	/*
46050+	 * The value of counter for HS Tlpx Time
46051+	 * Tlpx = Tpin_txbyteclkhs * (2 + value)
46052+	 */
46053+	if (inno->pdata->max_rate == MAX_1GHZ) {
46054+		lpx = DIV_ROUND_UP(cfg->lpx, t_txbyteclkhs);
46055+		if (lpx >= 2)
46056+			lpx -= 2;
46057+	} else {
46058+		lpx = timing->lpx;
+	}
46059+
46060+	hs_prepare = timing->hs_prepare;
46061+	hs_trail = timing->hs_trail;
46062+	clk_lane_hs_zero = timing->clk_lane_hs_zero;
46063+	data_lane_hs_zero = timing->data_lane_hs_zero;
46064 	wakeup = 0x3ff;
46065 
46066 	for (i = REGISTER_PART_CLOCK_LANE; i <= REGISTER_PART_DATA3_LANE; i++) {
46067@@ -416,14 +534,29 @@ static void inno_dsidphy_mipi_mode_enable(struct inno_dsidphy *inno)
46068 				T_LPX_CNT(lpx));
46069 		phy_update_bits(inno, i, 0x06, T_HS_PREPARE_CNT_MASK,
46070 				T_HS_PREPARE_CNT(hs_prepare));
46071-		phy_update_bits(inno, i, 0x07, T_HS_ZERO_CNT_MASK,
46072-				T_HS_ZERO_CNT(hs_zero));
46073+
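+		/* on the 2.5 GHz PHY the wider counters spill their upper bits into separate registers */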
46074+		if (inno->pdata->max_rate == MAX_2_5GHZ)
46075+			phy_update_bits(inno, i, 0x06, T_HS_ZERO_CNT_HI_MASK,
46076+					T_HS_ZERO_CNT_HI(hs_zero >> 6));
46077+
46078+		phy_update_bits(inno, i, 0x07, T_HS_ZERO_CNT_LO_MASK,
46079+				T_HS_ZERO_CNT_LO(hs_zero));
46080 		phy_update_bits(inno, i, 0x08, T_HS_TRAIL_CNT_MASK,
46081 				T_HS_TRAIL_CNT(hs_trail));
46082-		phy_update_bits(inno, i, 0x09, T_HS_EXIT_CNT_MASK,
46083-				T_HS_EXIT_CNT(hs_exit));
46084-		phy_update_bits(inno, i, 0x0a, T_CLK_POST_CNT_MASK,
46085-				T_CLK_POST_CNT(clk_post));
46086+
46087+		if (inno->pdata->max_rate == MAX_2_5GHZ)
46088+			phy_update_bits(inno, i, 0x11, T_HS_EXIT_CNT_HI_MASK,
46089+					T_HS_EXIT_CNT_HI(hs_exit >> 5));
46090+
46091+		phy_update_bits(inno, i, 0x09, T_HS_EXIT_CNT_LO_MASK,
46092+				T_HS_EXIT_CNT_LO(hs_exit));
46093+
46094+		if (inno->pdata->max_rate == MAX_2_5GHZ)
46095+			phy_update_bits(inno, i, 0x10, T_CLK_POST_HI_MASK,
46096+					T_CLK_POST_HI(clk_post >> 4));
46097+
46098+		phy_update_bits(inno, i, 0x0a, T_CLK_POST_CNT_LO_MASK,
46099+				T_CLK_POST_CNT_LO(clk_post));
46100 		phy_update_bits(inno, i, 0x0e, T_CLK_PRE_CNT_MASK,
46101 				T_CLK_PRE_CNT(clk_pre));
46102 		phy_update_bits(inno, i, 0x0c, T_WAKEUP_CNT_HI_MASK,
46103@@ -437,11 +570,46 @@ static void inno_dsidphy_mipi_mode_enable(struct inno_dsidphy *inno)
46104 		phy_update_bits(inno, i, 0x12, T_TA_WAIT_CNT_MASK,
46105 				T_TA_WAIT_CNT(ta_wait));
46106 	}
46107+}
46108 
46109-	/* Enable all lanes on analog part */
46110-	phy_update_bits(inno, REGISTER_PART_ANALOG, 0x00,
46111-			LANE_EN_MASK, LANE_EN_CK | LANE_EN_3 | LANE_EN_2 |
46112-			LANE_EN_1 | LANE_EN_0);
46113+static void inno_mipi_dphy_lane_enable(struct inno_dsidphy *inno)
46114+{
46115+	u8 val = LANE_EN_CK;
46116+
46117+	switch (inno->lanes) {
46118+	case 1:
46119+		val |= LANE_EN_0;
46120+		break;
46121+	case 2:
46122+		val |= LANE_EN_1 | LANE_EN_0;
46123+		break;
46124+	case 3:
46125+		val |= LANE_EN_2 | LANE_EN_1 | LANE_EN_0;
46126+		break;
46127+	case 4:
46128+	default:
46129+		val |= LANE_EN_3 | LANE_EN_2 | LANE_EN_1 | LANE_EN_0;
46130+		break;
46131+	}
46132+
46133+	phy_update_bits(inno, REGISTER_PART_ANALOG, 0x00, LANE_EN_MASK, val);
46134+}
46135+
46136+static void inno_dsidphy_mipi_mode_enable(struct inno_dsidphy *inno)
46137+{
46138+	/* Select MIPI mode */
46139+	phy_update_bits(inno, REGISTER_PART_LVDS, 0x03,
46140+			MODE_ENABLE_MASK, MIPI_MODE_ENABLE);
46141+
46142+	if (inno->pdata->max_rate == MAX_2_5GHZ)
46143+		inno_mipi_dphy_max_2_5GHz_pll_enable(inno);
46144+	else
46145+		inno_mipi_dphy_max_1GHz_pll_enable(inno);
46146+
46147+	inno_mipi_dphy_reset(inno);
46148+	inno_mipi_dphy_timing_init(inno);
46149+	inno_mipi_dphy_lane_enable(inno);
46151 }
46152 
46153 static void inno_dsidphy_lvds_mode_enable(struct inno_dsidphy *inno)
46154@@ -451,8 +619,9 @@ static void inno_dsidphy_lvds_mode_enable(struct inno_dsidphy *inno)
46155 
46156 	/* Sample clock reverse direction */
46157 	phy_update_bits(inno, REGISTER_PART_ANALOG, 0x08,
46158-			SAMPLE_CLOCK_DIRECTION_MASK,
46159-			SAMPLE_CLOCK_DIRECTION_REVERSE);
46160+			SAMPLE_CLOCK_DIRECTION_MASK | LOWFRE_EN_MASK,
46161+			SAMPLE_CLOCK_DIRECTION_REVERSE |
46162+			PLL_OUTPUT_FREQUENCY_DIV_BY_1);
46163 
46164 	/* Select LVDS mode */
46165 	phy_update_bits(inno, REGISTER_PART_LVDS, 0x03,
46166@@ -472,6 +641,10 @@ static void inno_dsidphy_lvds_mode_enable(struct inno_dsidphy *inno)
46167 
46168 	msleep(20);
46169 
46170+	/* Select PLL mode */
46171+	phy_update_bits(inno, REGISTER_PART_ANALOG, 0x1e,
46172+			PLL_MODE_SEL_MASK, PLL_MODE_SEL_LVDS_MODE);
46173+
46174 	/* Reset LVDS digital logic */
46175 	phy_update_bits(inno, REGISTER_PART_LVDS, 0x00,
46176 			LVDS_DIGITAL_INTERNAL_RESET_MASK,
46177@@ -491,9 +664,36 @@ static void inno_dsidphy_lvds_mode_enable(struct inno_dsidphy *inno)
46178 			LVDS_DATA_LANE2_EN | LVDS_DATA_LANE3_EN);
46179 }
46180 
46181+static void inno_dsidphy_phy_ttl_mode_enable(struct inno_dsidphy *inno)
46182+{
46183+	/* Select TTL mode */
46184+	phy_update_bits(inno, REGISTER_PART_LVDS, 0x03,
46185+			MODE_ENABLE_MASK, TTL_MODE_ENABLE);
46186+	/* Reset digital logic */
46187+	phy_update_bits(inno, REGISTER_PART_LVDS, 0x00,
46188+			LVDS_DIGITAL_INTERNAL_RESET_MASK,
46189+			LVDS_DIGITAL_INTERNAL_RESET_ENABLE);
46190+	udelay(1);
46191+	phy_update_bits(inno, REGISTER_PART_LVDS, 0x00,
46192+			LVDS_DIGITAL_INTERNAL_RESET_MASK,
46193+			LVDS_DIGITAL_INTERNAL_RESET_DISABLE);
46194+	/* Enable digital logic */
46195+	phy_update_bits(inno, REGISTER_PART_LVDS, 0x01,
46196+			LVDS_DIGITAL_INTERNAL_ENABLE_MASK,
46197+			LVDS_DIGITAL_INTERNAL_ENABLE);
46198+	/* Enable analog driver */
46199+	phy_update_bits(inno, REGISTER_PART_LVDS, 0x0b,
46200+			LVDS_LANE_EN_MASK, LVDS_CLK_LANE_EN |
46201+			LVDS_DATA_LANE0_EN | LVDS_DATA_LANE1_EN |
46202+			LVDS_DATA_LANE2_EN | LVDS_DATA_LANE3_EN);
46203+	/* Enable for clk lane in TTL mode */
46204+	host_update_bits(inno, DSI_PHY_RSTZ, PHY_ENABLECLK, PHY_ENABLECLK);
46205+}
46206+
46207 static int inno_dsidphy_power_on(struct phy *phy)
46208 {
46209 	struct inno_dsidphy *inno = phy_get_drvdata(phy);
46210+	enum phy_mode mode = phy_get_mode(phy);
46211 
46212 	clk_prepare_enable(inno->pclk_phy);
46213 	clk_prepare_enable(inno->ref_clk);
46214@@ -506,7 +706,7 @@ static int inno_dsidphy_power_on(struct phy *phy)
46215 	phy_update_bits(inno, REGISTER_PART_ANALOG, 0x00,
46216 			POWER_WORK_MASK, POWER_WORK_ENABLE);
46217 
46218-	switch (inno->mode) {
46219+	switch (mode) {
46220 	case PHY_MODE_MIPI_DPHY:
46221 		inno_dsidphy_mipi_mode_enable(inno);
46222 		break;
46223@@ -514,7 +714,7 @@ static int inno_dsidphy_power_on(struct phy *phy)
46224 		inno_dsidphy_lvds_mode_enable(inno);
46225 		break;
46226 	default:
46227-		return -EINVAL;
46228+		inno_dsidphy_phy_ttl_mode_enable(inno);
46229 	}
46230 
46231 	return 0;
46232@@ -551,17 +751,6 @@ static int inno_dsidphy_power_off(struct phy *phy)
46233 static int inno_dsidphy_set_mode(struct phy *phy, enum phy_mode mode,
46234 				   int submode)
46235 {
46236-	struct inno_dsidphy *inno = phy_get_drvdata(phy);
46237-
46238-	switch (mode) {
46239-	case PHY_MODE_MIPI_DPHY:
46240-	case PHY_MODE_LVDS:
46241-		inno->mode = mode;
46242-		break;
46243-	default:
46244-		return -EINVAL;
46245-	}
46246-
46247 	return 0;
46248 }
46249 
46250@@ -569,9 +758,11 @@ static int inno_dsidphy_configure(struct phy *phy,
46251 				  union phy_configure_opts *opts)
46252 {
46253 	struct inno_dsidphy *inno = phy_get_drvdata(phy);
46254+	struct phy_configure_opts_mipi_dphy *cfg = &inno->dphy_cfg;
46255+	enum phy_mode mode = phy_get_mode(phy);
46256 	int ret;
46257 
46258-	if (inno->mode != PHY_MODE_MIPI_DPHY)
46259+	if (mode != PHY_MODE_MIPI_DPHY)
46260 		return -EINVAL;
46261 
46262 	ret = phy_mipi_dphy_config_validate(&opts->mipi_dphy);
46263@@ -580,6 +771,32 @@ static int inno_dsidphy_configure(struct phy *phy,
46264 
46265 	memcpy(&inno->dphy_cfg, &opts->mipi_dphy, sizeof(inno->dphy_cfg));
46266 
46267+	inno_dsidphy_pll_calc_rate(inno, cfg->hs_clk_rate);
46268+	cfg->hs_clk_rate = inno->pll.rate;
46269+	opts->mipi_dphy.hs_clk_rate = inno->pll.rate;
46270+
46271+	return 0;
46272+}
46273+
46274+static int inno_dsidphy_init(struct phy *phy)
46275+{
46276+	struct inno_dsidphy *inno = phy_get_drvdata(phy);
46277+
46278+	clk_prepare_enable(inno->pclk_phy);
46279+	clk_prepare_enable(inno->ref_clk);
46280+	pm_runtime_get_sync(inno->dev);
46281+
46282+	return 0;
46283+}
46284+
46285+static int inno_dsidphy_exit(struct phy *phy)
46286+{
46287+	struct inno_dsidphy *inno = phy_get_drvdata(phy);
46288+
46289+	pm_runtime_put(inno->dev);
46290+	clk_disable_unprepare(inno->ref_clk);
46291+	clk_disable_unprepare(inno->pclk_phy);
46292+
46293 	return 0;
46294 }
46295 
46296@@ -588,6 +805,8 @@ static const struct phy_ops inno_dsidphy_ops = {
46297 	.set_mode = inno_dsidphy_set_mode,
46298 	.power_on = inno_dsidphy_power_on,
46299 	.power_off = inno_dsidphy_power_off,
46300+	.init = inno_dsidphy_init,
46301+	.exit = inno_dsidphy_exit,
46302 	.owner = THIS_MODULE,
46303 };
46304 
46305@@ -597,6 +816,7 @@ static int inno_dsidphy_probe(struct platform_device *pdev)
46306 	struct inno_dsidphy *inno;
46307 	struct phy_provider *phy_provider;
46308 	struct phy *phy;
46309+	struct resource *res;
46310 	int ret;
46311 
46312 	inno = devm_kzalloc(dev, sizeof(*inno), GFP_KERNEL);
46313@@ -604,12 +824,23 @@ static int inno_dsidphy_probe(struct platform_device *pdev)
46314 		return -ENOMEM;
46315 
46316 	inno->dev = dev;
46317+	inno->pdata = of_device_get_match_data(inno->dev);
46318 	platform_set_drvdata(pdev, inno);
46319 
46320-	inno->phy_base = devm_platform_ioremap_resource(pdev, 0);
46321+	inno->phy_base = devm_platform_ioremap_resource_byname(pdev, "phy");
46322 	if (IS_ERR(inno->phy_base))
46323 		return PTR_ERR(inno->phy_base);
46324 
46325+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "host");
46326+	if (!res) {
46327+		dev_err(dev, "invalid host resource\n");
46328+		return -EINVAL;
46329+	}
46330+
46331+	inno->host_base = devm_ioremap(dev, res->start, resource_size(res));
46332+	if (IS_ERR(inno->host_base))
46333+		return PTR_ERR(inno->host_base);
46334+
46335 	inno->ref_clk = devm_clk_get(dev, "ref");
46336 	if (IS_ERR(inno->ref_clk)) {
46337 		ret = PTR_ERR(inno->ref_clk);
46338@@ -624,6 +855,13 @@ static int inno_dsidphy_probe(struct platform_device *pdev)
46339 		return ret;
46340 	}
46341 
46342+	inno->pclk_host = devm_clk_get(dev, "pclk_host");
46343+	if (IS_ERR(inno->pclk_host)) {
46344+		ret = PTR_ERR(inno->pclk_host);
46345+		dev_err(dev, "failed to get host pclk: %d\n", ret);
46346+		return ret;
46347+	}
46348+
46349 	inno->rst = devm_reset_control_get(dev, "apb");
46350 	if (IS_ERR(inno->rst)) {
46351 		ret = PTR_ERR(inno->rst);
46352@@ -638,6 +876,9 @@ static int inno_dsidphy_probe(struct platform_device *pdev)
46353 		return ret;
46354 	}
46355 
46356+	if (of_property_read_u32(dev->of_node, "inno,lanes", &inno->lanes))
46357+		inno->lanes = 4;
46358+
46359 	phy_set_drvdata(phy, inno);
46360 
46361 	phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
46362@@ -652,6 +893,18 @@ static int inno_dsidphy_probe(struct platform_device *pdev)
46363 	return 0;
46364 }
46365 
46366+static const struct inno_dsidphy_plat_data px30_plat_data = {
46367+	.inno_mipi_dphy_timing_table = inno_mipi_dphy_timing_table_max_1GHz,
46368+	.num_timings = ARRAY_SIZE(inno_mipi_dphy_timing_table_max_1GHz),
46369+	.max_rate = MAX_1GHZ,
46370+};
46371+
46372+static const struct inno_dsidphy_plat_data rk3568_plat_data = {
46373+	.inno_mipi_dphy_timing_table = inno_mipi_dphy_timing_table_max_2_5GHz,
46374+	.num_timings = ARRAY_SIZE(inno_mipi_dphy_timing_table_max_2_5GHz),
46375+	.max_rate = MAX_2_5GHZ,
46376+};
46377+
46378 static int inno_dsidphy_remove(struct platform_device *pdev)
46379 {
46380 	struct inno_dsidphy *inno = platform_get_drvdata(pdev);
46381@@ -662,9 +915,18 @@ static int inno_dsidphy_remove(struct platform_device *pdev)
46382 }
46383 
46384 static const struct of_device_id inno_dsidphy_of_match[] = {
46385-	{ .compatible = "rockchip,px30-dsi-dphy", },
46386-	{ .compatible = "rockchip,rk3128-dsi-dphy", },
46387-	{ .compatible = "rockchip,rk3368-dsi-dphy", },
46388+	{ .compatible = "rockchip,px30-dsi-dphy",
46389+	  .data = &px30_plat_data,
46390+	}, {
46391+	  .compatible = "rockchip,rk3128-dsi-dphy",
46392+	  .data = &px30_plat_data,
46393+	}, {
46394+	  .compatible = "rockchip,rk3368-dsi-dphy",
46395+	  .data = &px30_plat_data,
46396+	}, {
46397+	  .compatible = "rockchip,rk3568-dsi-dphy",
46398+	  .data = &rk3568_plat_data,
46399+	},
46400 	{}
46401 };
46402 MODULE_DEVICE_TABLE(of, inno_dsidphy_of_match);
46403diff --git a/drivers/phy/rockchip/phy-rockchip-inno-usb2.c b/drivers/phy/rockchip/phy-rockchip-inno-usb2.c
46404index cab6a94bf..6b7d0c24a 100644
46405--- a/drivers/phy/rockchip/phy-rockchip-inno-usb2.c
46406+++ b/drivers/phy/rockchip/phy-rockchip-inno-usb2.c
46407@@ -22,15 +22,25 @@
46408 #include <linux/of_platform.h>
46409 #include <linux/phy/phy.h>
46410 #include <linux/platform_device.h>
46411+#include <linux/pm_runtime.h>
46412 #include <linux/power_supply.h>
46413 #include <linux/regmap.h>
46414+#include <linux/reset.h>
46415+#include <linux/rockchip/cpu.h>
46416 #include <linux/mfd/syscon.h>
46417 #include <linux/usb/of.h>
46418 #include <linux/usb/otg.h>
46419+#include <linux/usb/role.h>
46420+#include <linux/usb/typec_mux.h>
46421+#include <linux/wakelock.h>
46422 
46423 #define BIT_WRITEABLE_SHIFT	16
46424 #define SCHEDULE_DELAY		(60 * HZ)
46425-#define OTG_SCHEDULE_DELAY	(2 * HZ)
46426+#define OTG_SCHEDULE_DELAY	(1 * HZ)
46427+#define BYPASS_SCHEDULE_DELAY	(2 * HZ)
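+/* 0xF4240 == 1,000,000 */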
46428+#define FILTER_COUNTER		0xF4240
46429+
46430+struct rockchip_usb2phy;
46431 
46432 enum rockchip_usb2phy_port_id {
46433 	USB2PHY_PORT_OTG,
46434@@ -69,6 +79,7 @@ enum usb_chg_state {
46435 static const unsigned int rockchip_usb2phy_extcon_cable[] = {
46436 	EXTCON_USB,
46437 	EXTCON_USB_HOST,
46438+	EXTCON_USB_VBUS_EN,
46439 	EXTCON_CHG_USB_SDP,
46440 	EXTCON_CHG_USB_CDP,
46441 	EXTCON_CHG_USB_DCP,
46442@@ -95,7 +106,7 @@ struct usb2phy_reg {
46443  * @rdm_pdwn_en: open dm pull down resistor.
46444  * @vdm_src_en: open dm voltage source.
46445  * @vdp_src_en: open dp voltage source.
46446- * @opmode: utmi operational mode.
46447+ * @chg_mode: set phy in charge detection mode.
46448  */
46449 struct rockchip_chg_det_reg {
46450 	struct usb2phy_reg	cp_det;
46451@@ -107,49 +118,109 @@ struct rockchip_chg_det_reg {
46452 	struct usb2phy_reg	rdm_pdwn_en;
46453 	struct usb2phy_reg	vdm_src_en;
46454 	struct usb2phy_reg	vdp_src_en;
46455-	struct usb2phy_reg	opmode;
46456+	struct usb2phy_reg	chg_mode;
46457 };
46458 
46459 /**
46460  * struct rockchip_usb2phy_port_cfg - usb-phy port configuration.
46461  * @phy_sus: phy suspend register.
46462+ * @pipe_phystatus: select pipe phystatus from grf or phy.
46463  * @bvalid_det_en: vbus valid rise detection enable register.
46464  * @bvalid_det_st: vbus valid rise detection status register.
46465  * @bvalid_det_clr: vbus valid rise detection clear register.
46466+ * @bvalid_grf_con: vbus valid software control.
46467+ * @bvalid_grf_sel: vbus valid software control select.
46468+ * @bvalid_phy_con: vbus valid external select and enable.
46469+ * @bypass_dm_en: usb bypass uart DM enable register.
46470+ * @bypass_sel: usb bypass uart select register.
46471+ * @bypass_iomux: usb bypass uart GRF iomux register.
46472+ * @bypass_bc: bypass battery charging module.
46473+ * @bypass_otg: bypass otg module.
46474+ * @bypass_host: bypass host module.
46475+ * @disfall_en: host disconnect fall edge detection enable.
46476+ * @disfall_st: host disconnect fall edge detection state.
46477+ * @disfall_clr: host disconnect fall edge detection clear.
46478+ * @disrise_en: host disconnect rise edge detection enable.
46479+ * @disrise_st: host disconnect rise edge detection state.
46480+ * @disrise_clr: host disconnect rise edge detection clear.
46481  * @ls_det_en: linestate detection enable register.
46482  * @ls_det_st: linestate detection state register.
46483  * @ls_det_clr: linestate detection clear register.
46484+ * @iddig_output: iddig output from grf.
46485+ * @iddig_en: utmi iddig select between grf and phy,
46486+ *	      0: from phy; 1: from grf
46487+ * @idfall_det_en: id fall detection enable register.
46488+ * @idfall_det_st: id fall detection state register.
46489+ * @idfall_det_clr: id fall detection clear register.
46490+ * @idrise_det_en: id rise detection enable register.
46491+ * @idrise_det_st: id rise detection state register.
46492+ * @idrise_det_clr: id rise detection clear register.
46493  * @utmi_avalid: utmi vbus avalid status register.
46494  * @utmi_bvalid: utmi vbus bvalid status register.
46495+ * @utmi_iddig: otg port id pin status register.
46496  * @utmi_ls: utmi linestate state register.
46497  * @utmi_hstdet: utmi host disconnect register.
46498+ * @vbus_det_en: vbus detect function power down register.
46499  */
46500 struct rockchip_usb2phy_port_cfg {
46501 	struct usb2phy_reg	phy_sus;
46502+	struct usb2phy_reg	pipe_phystatus;
46503 	struct usb2phy_reg	bvalid_det_en;
46504 	struct usb2phy_reg	bvalid_det_st;
46505 	struct usb2phy_reg	bvalid_det_clr;
46506+	struct usb2phy_reg	bvalid_grf_con;
46507+	struct usb2phy_reg	bvalid_grf_sel;
46508+	struct usb2phy_reg	bvalid_phy_con;
46509+	struct usb2phy_reg	bypass_dm_en;
46510+	struct usb2phy_reg	bypass_sel;
46511+	struct usb2phy_reg	bypass_iomux;
46512+	struct usb2phy_reg	bypass_bc;
46513+	struct usb2phy_reg	bypass_otg;
46514+	struct usb2phy_reg	bypass_host;
46515+	struct usb2phy_reg	disfall_en;
46516+	struct usb2phy_reg	disfall_st;
46517+	struct usb2phy_reg	disfall_clr;
46518+	struct usb2phy_reg	disrise_en;
46519+	struct usb2phy_reg	disrise_st;
46520+	struct usb2phy_reg	disrise_clr;
46521 	struct usb2phy_reg	ls_det_en;
46522 	struct usb2phy_reg	ls_det_st;
46523 	struct usb2phy_reg	ls_det_clr;
46524+	struct usb2phy_reg	iddig_output;
46525+	struct usb2phy_reg	iddig_en;
46526+	struct usb2phy_reg	idfall_det_en;
46527+	struct usb2phy_reg	idfall_det_st;
46528+	struct usb2phy_reg	idfall_det_clr;
46529+	struct usb2phy_reg	idrise_det_en;
46530+	struct usb2phy_reg	idrise_det_st;
46531+	struct usb2phy_reg	idrise_det_clr;
46532 	struct usb2phy_reg	utmi_avalid;
46533 	struct usb2phy_reg	utmi_bvalid;
46534+	struct usb2phy_reg	utmi_iddig;
46535 	struct usb2phy_reg	utmi_ls;
46536 	struct usb2phy_reg	utmi_hstdet;
46537+	struct usb2phy_reg	vbus_det_en;
46538 };
46539 
46540 /**
46541  * struct rockchip_usb2phy_cfg - usb-phy configuration.
46542  * @reg: the address offset of grf for usb-phy config.
46543  * @num_ports: specify how many ports that the phy has.
46544+ * @phy_tuning: phy default parameters tuning.
46545+ * @vbus_detect: vbus voltage level detection function.
46546  * @clkout_ctl: keep on/turn off output clk of phy.
46547+ * @ls_filter_con: set linestate filter time.
46548  * @port_cfgs: usb-phy port configurations.
46550  * @chg_det: charger detection registers.
46551  */
46552 struct rockchip_usb2phy_cfg {
46553 	unsigned int	reg;
46554 	unsigned int	num_ports;
46555+	int (*phy_tuning)(struct rockchip_usb2phy *rphy);
46556+	int (*vbus_detect)(struct rockchip_usb2phy *rphy, bool en);
46557 	struct usb2phy_reg	clkout_ctl;
46558+	struct usb2phy_reg	ls_filter_con;
46559 	const struct rockchip_usb2phy_port_cfg	port_cfgs[USB2PHY_NUM_PORTS];
46560 	const struct rockchip_chg_det_reg	chg_det;
46561 };
46562@@ -158,16 +229,32 @@ struct rockchip_usb2phy_cfg {
46563  * struct rockchip_usb2phy_port - usb-phy port data.
46564  * @phy: generic phy.
46565  * @port_id: flag for otg port or host port.
+ * @low_power_en: enable entering low-power state on suspend.
+ * @perip_connected: flag for peripheral connect status.
46568+ * @prev_iddig: previous otg port id pin status.
46569+ * @sel_pipe_phystatus: select pipe phystatus from grf.
46570  * @suspended: phy suspended flag.
46571+ * @typec_vbus_det: Type-C otg vbus detect.
46572+ * @utmi_avalid: utmi avalid status usage flag.
46573+ *	true	- use avalid to get vbus status
46574+ *	false	- use bvalid to get vbus status
46575  * @vbus_attached: otg device vbus status.
46576+ * @vbus_always_on: otg vbus is always powered on.
46577+ * @vbus_enabled: vbus regulator status.
46578+ * @bypass_uart_en: usb bypass uart enable, passed from DT.
46579+ * @host_disconnect: usb host disconnect status.
46580  * @bvalid_irq: IRQ number assigned for vbus valid rise detection.
46581  * @ls_irq: IRQ number assigned for linestate detection.
46582+ * @id_irq: IRQ number assigned for id fall or rise detection.
46583  * @otg_mux_irq: IRQ number which multiplex otg-id/otg-bvalid/linestate
46584  *		 irqs to one irq in otg-port.
46585  * @mutex: for register updating in sm_work.
46586  * @chg_work: charge detect work.
46587+ * @bypass_uart_work: usb bypass uart work.
46588  * @otg_sm_work: OTG state machine work.
46589  * @sm_work: HOST state machine work.
+ * @vbus: vbus regulator supply on a few rockchip boards.
46591+ * @sw: orientation switch, communicate with TCPM (Type-C Port Manager).
46592  * @port_cfg: port register configuration, assigned by driver data.
46593  * @event_nb: hold event notification callback.
46594  * @state: define OTG enumeration states before device reset.
46595@@ -176,17 +263,32 @@ struct rockchip_usb2phy_cfg {
46596 struct rockchip_usb2phy_port {
46597 	struct phy	*phy;
46598 	unsigned int	port_id;
46599+	bool		low_power_en;
46600+	bool		perip_connected;
46601+	bool		prev_iddig;
46602+	bool		sel_pipe_phystatus;
46603 	bool		suspended;
46604+	bool		typec_vbus_det;
46605+	bool		utmi_avalid;
46606 	bool		vbus_attached;
46607+	bool		vbus_always_on;
46608+	bool		vbus_enabled;
46609+	bool		bypass_uart_en;
46610+	bool		host_disconnect;
46611 	int		bvalid_irq;
46612 	int		ls_irq;
46613+	int             id_irq;
46614 	int		otg_mux_irq;
46615 	struct mutex	mutex;
46616+	struct		delayed_work bypass_uart_work;
46617 	struct		delayed_work chg_work;
46618 	struct		delayed_work otg_sm_work;
46619 	struct		delayed_work sm_work;
46620+	struct		regulator *vbus;
46621+	struct		typec_switch *sw;
46622 	const struct	rockchip_usb2phy_port_cfg *port_cfg;
46623 	struct notifier_block	event_nb;
46624+	struct wake_lock	wakelock;
46625 	enum usb_otg_state	state;
46626 	enum usb_dr_mode	mode;
46627 };
46628@@ -196,13 +298,23 @@ struct rockchip_usb2phy_port {
46629  * @dev: pointer to device.
46630  * @grf: General Register Files regmap.
46631  * @usbgrf: USB General Register Files regmap.
46632- * @clk: clock struct of phy input clk.
46633+ * @usbctrl_grf: USB Controller General Register Files regmap.
+ * @phy_base: the base address of USB PHY.
46635+ * @phy_reset: phy reset control.
46636+ * @clks: array of phy input clocks.
46637  * @clk480m: clock struct of phy output clk.
46638  * @clk480m_hw: clock struct of phy output clk management.
46639+ * @num_clks: number of phy input clocks.
46640  * @chg_state: states involved in USB charger detection.
46641  * @chg_type: USB charger types.
46642  * @dcd_retries: The retry count used to track Data contact
46643  *		 detection process.
+ * @primary_retries: The retry count used for the primary
+ *		     phase of charger detection.
+ * @phy_sus_cfg: Store the phy's current suspend configuration.
+ * @edev_self: true if the extcon device is registered by the phy itself.
+ * @irq: IRQ number assigned for the phy, which combines the irqs of
+ *	 the otg port and host port.
46650  * @edev: extcon device for notification registration
46651  * @phy_cfg: phy register configuration, assigned by driver data.
46652  * @ports: phy port instance.
46653@@ -211,12 +323,20 @@ struct rockchip_usb2phy {
46654 	struct device	*dev;
46655 	struct regmap	*grf;
46656 	struct regmap	*usbgrf;
46657-	struct clk	*clk;
46658+	struct regmap	*usbctrl_grf;
46659+	void __iomem	*phy_base;
46660+	struct reset_control	*phy_reset;
46661+	struct clk_bulk_data	*clks;
46662 	struct clk	*clk480m;
46663 	struct clk_hw	clk480m_hw;
46664+	int		num_clks;
46665 	enum usb_chg_state	chg_state;
46666 	enum power_supply_type	chg_type;
46667 	u8			dcd_retries;
46668+	u8			primary_retries;
46669+	unsigned int		phy_sus_cfg;
46670+	bool			edev_self;
46671+	int			irq;
46672 	struct extcon_dev	*edev;
46673 	const struct rockchip_usb2phy_cfg	*phy_cfg;
46674 	struct rockchip_usb2phy_port	ports[USB2PHY_NUM_PORTS];
46675@@ -254,6 +374,53 @@ static inline bool property_enabled(struct regmap *base,
46676 	return tmp == reg->enable;
46677 }
46678 
46679+static inline void phy_clear_bits(void __iomem *reg, u32 bits)
46680+{
46681+	u32 tmp = readl(reg);
46682+
46683+	tmp &= ~bits;
46684+	writel(tmp, reg);
46685+}
46686+
46687+static inline void phy_set_bits(void __iomem *reg, u32 bits)
46688+{
46689+	u32 tmp = readl(reg);
46690+
46691+	tmp |= bits;
46692+	writel(tmp, reg);
46693+}
46694+
46695+static inline void phy_update_bits(void __iomem *reg, u32 mask, u32 val)
46696+{
46697+	u32 tmp = readl(reg);
46698+
46699+	tmp &= ~mask;
46700+	tmp |= val & mask;
46701+	writel(tmp, reg);
46702+}
46703+
46704+static int rockchip_usb2phy_reset(struct rockchip_usb2phy *rphy)
46705+{
46706+	int ret;
46707+
46708+	if (!rphy->phy_reset)
46709+		return 0;
46710+
46711+	ret = reset_control_assert(rphy->phy_reset);
46712+	if (ret)
46713+		return ret;
46714+
46715+	udelay(10);
46716+
46717+	ret = reset_control_deassert(rphy->phy_reset);
46718+	if (ret)
46719+		return ret;
46720+
46721+	usleep_range(100, 200);
46722+
46723+	return 0;
46724+}
46725+
46726 static int rockchip_usb2phy_clk480m_prepare(struct clk_hw *hw)
46727 {
46728 	struct rockchip_usb2phy *rphy =
46729@@ -319,7 +486,8 @@ static int
46730 rockchip_usb2phy_clk480m_register(struct rockchip_usb2phy *rphy)
46731 {
46732 	struct device_node *node = rphy->dev->of_node;
46733-	struct clk_init_data init;
46734+	struct clk_init_data init = {};
46735+	struct clk *refclk = of_clk_get_by_name(node, "phyclk");
46736 	const char *clk_name;
46737 	int ret;
46738 
46739@@ -330,8 +498,8 @@ rockchip_usb2phy_clk480m_register(struct rockchip_usb2phy *rphy)
46740 	/* optional override of the clockname */
46741 	of_property_read_string(node, "clock-output-names", &init.name);
46742 
46743-	if (rphy->clk) {
46744-		clk_name = __clk_get_name(rphy->clk);
46745+	if (!IS_ERR(refclk)) {
46746+		clk_name = __clk_get_name(refclk);
46747 		init.parent_names = &clk_name;
46748 		init.num_parents = 1;
46749 	} else {
46750@@ -393,6 +561,8 @@ static int rockchip_usb2phy_extcon_register(struct rockchip_usb2phy *rphy)
46751 			dev_err(rphy->dev, "failed to register extcon device\n");
46752 			return ret;
46753 		}
46754+
46755+		rphy->edev_self = true;
46756 	}
46757 
46758 	rphy->edev = edev;
46759@@ -400,47 +570,235 @@ static int rockchip_usb2phy_extcon_register(struct rockchip_usb2phy *rphy)
46760 	return 0;
46761 }
46762 
46763+/* The caller must hold rport->mutex lock */
46764+static int rockchip_usb2phy_enable_id_irq(struct rockchip_usb2phy *rphy,
46765+					  struct rockchip_usb2phy_port *rport,
46766+					  bool en)
46767+{
46768+	int ret;
46769+
46770+	ret = property_enable(rphy->grf, &rport->port_cfg->idfall_det_clr, true);
46771+	if (ret)
46772+		goto out;
46773+
46774+	ret = property_enable(rphy->grf, &rport->port_cfg->idfall_det_en, en);
46775+	if (ret)
46776+		goto out;
46777+
46778+	ret = property_enable(rphy->grf, &rport->port_cfg->idrise_det_clr, true);
46779+	if (ret)
46780+		goto out;
46781+
46782+	ret = property_enable(rphy->grf, &rport->port_cfg->idrise_det_en, en);
46783+out:
46784+	return ret;
46785+}
46786+
46787+/* The caller must hold rport->mutex lock */
46788+static int rockchip_usb2phy_enable_vbus_irq(struct rockchip_usb2phy *rphy,
46789+					    struct rockchip_usb2phy_port *rport,
46790+					    bool en)
46791+{
46792+	int ret;
46793+
46794+	ret = property_enable(rphy->grf, &rport->port_cfg->bvalid_det_clr, true);
46795+	if (ret)
46796+		goto out;
46797+
46798+	ret = property_enable(rphy->grf, &rport->port_cfg->bvalid_det_en, en);
46799+out:
46800+	return ret;
46801+}
46802+
46803+static int rockchip_usb2phy_enable_line_irq(struct rockchip_usb2phy *rphy,
46804+					    struct rockchip_usb2phy_port *rport,
46805+					    bool en)
46806+{
46807+	int ret;
46808+
46809+	ret = property_enable(rphy->grf, &rport->port_cfg->ls_det_clr, true);
46810+	if (ret)
46811+		goto out;
46812+
46813+	ret = property_enable(rphy->grf, &rport->port_cfg->ls_det_en, en);
46814+out:
46815+	return ret;
46816+}
46817+
46818+static int rockchip_usb2phy_enable_host_disc_irq(struct rockchip_usb2phy *rphy,
46819+						 struct rockchip_usb2phy_port *rport,
46820+						 bool en)
46821+{
46822+	int ret;
46823+
46824+	ret = property_enable(rphy->grf, &rport->port_cfg->disfall_clr, true);
46825+	if (ret)
46826+		goto out;
46827+
46828+	ret = property_enable(rphy->grf, &rport->port_cfg->disfall_en, en);
46829+	if (ret)
46830+		goto out;
46831+
46832+	ret = property_enable(rphy->grf, &rport->port_cfg->disrise_clr, true);
46833+	if (ret)
46834+		goto out;
46835+
46836+	ret = property_enable(rphy->grf, &rport->port_cfg->disrise_en, en);
46837+out:
46838+	return ret;
46839+}
46840+
46841+static int rockchip_usb_bypass_uart(struct rockchip_usb2phy_port *rport,
46842+				    bool en)
46843+{
46844+	struct rockchip_usb2phy *rphy = dev_get_drvdata(rport->phy->dev.parent);
46845+	const struct usb2phy_reg *iomux = &rport->port_cfg->bypass_iomux;
46846+	struct regmap *base = get_reg_base(rphy);
46847+	int ret = 0;
46848+
46849+	mutex_lock(&rport->mutex);
46850+
46851+	if (en == property_enabled(base, &rport->port_cfg->bypass_sel)) {
46852+		dev_info(&rport->phy->dev,
46853+			 "bypass uart %s is already set\n", en ? "on" : "off");
46854+		goto unlock;
46855+	}
46856+
46857+	dev_info(&rport->phy->dev, "bypass uart %s\n", en ? "on" : "off");
46858+
46859+	if (en) {
46860+		/*
46861+		 * To use UART function:
46862+		 * 1. Put the USB PHY in suspend mode and opmode is normal;
46863+		 * 2. Set bypasssel to 1'b1 and bypassdmen to 1'b1;
46864+		 *
46865+		 * Note: Although the datasheet requires that put USB PHY
46866+		 * in non-driving mode to disable resistance when use USB
46867+		 * bypass UART function, but actually we find that if we
46868+		 * set phy in non-driving mode, it will cause UART to print
46869+		 * random codes. So just put USB PHY in normal mode.
46870+		 */
46871+		ret |= property_enable(base, &rport->port_cfg->bypass_sel,
46872+				       true);
46873+		ret |= property_enable(base, &rport->port_cfg->bypass_dm_en,
46874+				       true);
46875+
46876+		/* Some platforms required to set iomux of bypass uart */
46877+		if (iomux->offset)
46878+			ret |= property_enable(rphy->grf, iomux, true);
46879+	} else {
46880+		/* just disable bypass, and resume phy in phy power_on later */
46881+		ret |= property_enable(base, &rport->port_cfg->bypass_sel,
46882+				       false);
46883+		ret |= property_enable(base, &rport->port_cfg->bypass_dm_en,
46884+				       false);
46885+
46886+		/* Some platforms required to set iomux of bypass uart */
46887+		if (iomux->offset)
46888+			ret |= property_enable(rphy->grf, iomux, false);
46889+	}
46890+
46891+unlock:
46892+	mutex_unlock(&rport->mutex);
46893+
46894+	return ret;
46895+}
46896+
46897+static void rockchip_usb_bypass_uart_work(struct work_struct *work)
46898+{
46899+	struct rockchip_usb2phy_port *rport =
46900+		container_of(work, struct rockchip_usb2phy_port,
46901+			     bypass_uart_work.work);
46902+	struct rockchip_usb2phy *rphy = dev_get_drvdata(rport->phy->dev.parent);
46903+	bool vbus, iddig;
46904+	int ret;
46905+
46906+	mutex_lock(&rport->mutex);
46907+
46908+	iddig = property_enabled(rphy->grf, &rport->port_cfg->utmi_iddig);
46909+
46910+	if (rport->utmi_avalid)
46911+		vbus = property_enabled(rphy->grf, &rport->port_cfg->utmi_avalid);
46912+	else
46913+		vbus = property_enabled(rphy->grf, &rport->port_cfg->utmi_bvalid);
46914+
46915+	mutex_unlock(&rport->mutex);
46916+
46917+	/*
46918+	 * If the vbus is low and iddig is high, it indicates that usb
46919+	 * otg is not working, then we can enable usb to bypass uart,
46920+	 * otherwise schedule the work until the conditions (vbus is low
46921+	 * and iddig is high) are matched.
46922+	 */
46923+	if (!vbus && iddig) {
46924+		ret = rockchip_usb_bypass_uart(rport, true);
46925+		if (ret)
46926+			dev_warn(&rport->phy->dev,
46927+				 "failed to enable bypass uart\n");
46928+	} else {
46929+		schedule_delayed_work(&rport->bypass_uart_work,
46930+				      BYPASS_SCHEDULE_DELAY);
46931+	}
46932+}
46933+
46934 static int rockchip_usb2phy_init(struct phy *phy)
46935 {
46936 	struct rockchip_usb2phy_port *rport = phy_get_drvdata(phy);
46937 	struct rockchip_usb2phy *rphy = dev_get_drvdata(phy->dev.parent);
46938 	int ret = 0;
46939+	unsigned int ul, ul_mask;
46940 
46941 	mutex_lock(&rport->mutex);
46942 
46943-	if (rport->port_id == USB2PHY_PORT_OTG) {
46944-		if (rport->mode != USB_DR_MODE_HOST &&
46945-		    rport->mode != USB_DR_MODE_UNKNOWN) {
46946-			/* clear bvalid status and enable bvalid detect irq */
46947-			ret = property_enable(rphy->grf,
46948-					      &rport->port_cfg->bvalid_det_clr,
46949-					      true);
46950-			if (ret)
46951+	if (rport->port_id == USB2PHY_PORT_OTG &&
46952+	    (rport->mode == USB_DR_MODE_PERIPHERAL ||
46953+	     rport->mode == USB_DR_MODE_OTG)) {
46954+		/* clear id status and enable id detect irq */
46955+		if (rport->id_irq > 0 || rport->otg_mux_irq > 0 ||
46956+		    rphy->irq > 0) {
46957+			ret = rockchip_usb2phy_enable_id_irq(rphy, rport,
46958+							     true);
46959+			if (ret) {
46960+				dev_err(rphy->dev,
46961+					"failed to enable id irq\n");
46962 				goto out;
46963+			}
46964+		}
46965 
46966-			ret = property_enable(rphy->grf,
46967-					      &rport->port_cfg->bvalid_det_en,
46968-					      true);
46969-			if (ret)
46970+		/* clear bvalid status and enable bvalid detect irq */
46971+		if ((rport->bvalid_irq > 0 || rport->otg_mux_irq > 0 ||
46972+		    rphy->irq > 0) && !rport->vbus_always_on) {
46973+			ret = rockchip_usb2phy_enable_vbus_irq(rphy, rport,
46974+							       true);
46975+			if (ret) {
46976+				dev_err(rphy->dev,
46977+					"failed to enable bvalid irq\n");
46978 				goto out;
46979-
46980+			}
46981 			schedule_delayed_work(&rport->otg_sm_work,
46982-					      OTG_SCHEDULE_DELAY * 3);
46983-		} else {
46984-			/* If OTG works in host only mode, do nothing. */
46985-			dev_dbg(&rport->phy->dev, "mode %d\n", rport->mode);
46986+					      rport->typec_vbus_det ? 0 : OTG_SCHEDULE_DELAY);
46987 		}
46988 	} else if (rport->port_id == USB2PHY_PORT_HOST) {
46989-		/* clear linestate and enable linestate detect irq */
46990-		ret = property_enable(rphy->grf,
46991-				      &rport->port_cfg->ls_det_clr, true);
46992-		if (ret)
46993-			goto out;
46994+		if (rport->port_cfg->disfall_en.offset) {
46995+			ret = regmap_read(rphy->grf, rport->port_cfg->utmi_ls.offset, &ul);
46996+			if (ret < 0)
46997+				goto out;
46998+			ul_mask = GENMASK(rport->port_cfg->utmi_ls.bitend,
46999+					  rport->port_cfg->utmi_ls.bitstart);
47000+			rport->host_disconnect = (ul & ul_mask) == 0 ? true : false;
47001+			ret = rockchip_usb2phy_enable_host_disc_irq(rphy, rport, true);
47002+			if (ret) {
47003+				dev_err(rphy->dev, "failed to enable disconnect irq\n");
47004+				goto out;
47005+			}
47006+		}
47007 
47008-		ret = property_enable(rphy->grf,
47009-				      &rport->port_cfg->ls_det_en, true);
47010-		if (ret)
47011+		/* clear linestate and enable linestate detect irq */
47012+		ret = rockchip_usb2phy_enable_line_irq(rphy, rport, true);
47013+		if (ret) {
47014+			dev_err(rphy->dev, "failed to enable linestate irq\n");
47015 			goto out;
47016+		}
47017 
47018 		schedule_delayed_work(&rport->sm_work, SCHEDULE_DELAY);
47019 	}
47020@@ -459,24 +817,60 @@ static int rockchip_usb2phy_power_on(struct phy *phy)
47021 
47022 	dev_dbg(&rport->phy->dev, "port power on\n");
47023 
47024-	if (!rport->suspended)
47025-		return 0;
47026+	if (rport->bypass_uart_en) {
47027+		ret = rockchip_usb_bypass_uart(rport, false);
47028+		if (ret) {
47029+			dev_warn(&rport->phy->dev,
47030+				 "failed to disable bypass uart\n");
47031+			goto exit;
47032+		}
47033+	}
47034+
47035+	mutex_lock(&rport->mutex);
47036+
47037+	if (!rport->suspended) {
47038+		ret = 0;
47039+		goto unlock;
47040+	}
47041 
47042 	ret = clk_prepare_enable(rphy->clk480m);
47043 	if (ret)
47044-		return ret;
47045+		goto unlock;
47046+
47047+	if (rport->sel_pipe_phystatus)
47048+		property_enable(rphy->usbctrl_grf,
47049+				&rport->port_cfg->pipe_phystatus, true);
47050 
47051 	ret = property_enable(base, &rport->port_cfg->phy_sus, false);
47052-	if (ret) {
47053-		clk_disable_unprepare(rphy->clk480m);
47054-		return ret;
47055-	}
47056+	if (ret)
47057+		goto unlock;
47058+
47059+	/*
47060+	 * For rk3588, it needs to reset phy when exit from
47061+	 * suspend mode with common_on_n 1'b1(aka REFCLK_LOGIC,
47062+	 * Bias, and PLL blocks are powered down) for lower
47063+	 * power consumption. If you don't want to reset phy,
47064+	 * please keep the common_on_n 1'b0 to set these blocks
47065+	 * remain powered.
47066+	 */
47067+	ret = rockchip_usb2phy_reset(rphy);
47068+	if (ret)
47069+		goto unlock;
47070 
47071 	/* waiting for the utmi_clk to become stable */
47072 	usleep_range(1500, 2000);
47073 
47074 	rport->suspended = false;
47075-	return 0;
47076+
47077+unlock:
47078+	mutex_unlock(&rport->mutex);
47079+
47080+	/* Enable bypass uart in the bypass_uart_work. */
47081+	if (rport->bypass_uart_en)
47082+		schedule_delayed_work(&rport->bypass_uart_work, 0);
47083+
47084+exit:
47085+	return ret;
47086 }
47087 
47088 static int rockchip_usb2phy_power_off(struct phy *phy)
47089@@ -488,103 +882,354 @@ static int rockchip_usb2phy_power_off(struct phy *phy)
47090 
47091 	dev_dbg(&rport->phy->dev, "port power off\n");
47092 
47093-	if (rport->suspended)
47094-		return 0;
47095+	mutex_lock(&rport->mutex);
47096+
47097+	if (rport->suspended) {
47098+		ret = 0;
47099+		goto unlock;
47100+	}
47101 
47102 	ret = property_enable(base, &rport->port_cfg->phy_sus, true);
47103 	if (ret)
47104-		return ret;
47105+		goto unlock;
47106 
47107 	rport->suspended = true;
47108 	clk_disable_unprepare(rphy->clk480m);
47109 
47110-	return 0;
47111+unlock:
47112+	mutex_unlock(&rport->mutex);
47113+
47114+	/* Enable bypass uart in the bypass_uart_work. */
47115+	if (rport->bypass_uart_en)
47116+		schedule_delayed_work(&rport->bypass_uart_work, 0);
47117+
47118+	return ret;
47119 }
47120 
47121 static int rockchip_usb2phy_exit(struct phy *phy)
47122 {
47123 	struct rockchip_usb2phy_port *rport = phy_get_drvdata(phy);
47124 
47125-	if (rport->port_id == USB2PHY_PORT_OTG &&
47126-	    rport->mode != USB_DR_MODE_HOST &&
47127-	    rport->mode != USB_DR_MODE_UNKNOWN) {
47128-		cancel_delayed_work_sync(&rport->otg_sm_work);
47129-		cancel_delayed_work_sync(&rport->chg_work);
47130-	} else if (rport->port_id == USB2PHY_PORT_HOST)
47131+	if (rport->port_id == USB2PHY_PORT_HOST)
47132 		cancel_delayed_work_sync(&rport->sm_work);
47133+	else if (rport->port_id == USB2PHY_PORT_OTG &&
47134+		 rport->otg_sm_work.work.func)
47135+		flush_delayed_work(&rport->otg_sm_work);
47136 
47137 	return 0;
47138 }
47139 
47140+static int rockchip_set_vbus_power(struct rockchip_usb2phy_port *rport,
47141+				   bool en)
47142+{
47143+	int ret = 0;
47144+
47145+	if (!rport->vbus)
47146+		return 0;
47147+
47148+	if (en && !rport->vbus_enabled) {
47149+		ret = regulator_enable(rport->vbus);
47150+		if (ret)
47151+			dev_err(&rport->phy->dev,
47152+				"Failed to enable VBUS supply\n");
47153+	} else if (!en && rport->vbus_enabled) {
47154+		ret = regulator_disable(rport->vbus);
47155+	}
47156+
47157+	if (ret == 0)
47158+		rport->vbus_enabled = en;
47159+
47160+	return ret;
47161+}
47162+
47163+static int rockchip_usb2phy_set_mode(struct phy *phy,
47164+				     enum phy_mode mode, int submode)
47165+{
47166+	struct rockchip_usb2phy_port *rport = phy_get_drvdata(phy);
47167+	struct rockchip_usb2phy *rphy = dev_get_drvdata(phy->dev.parent);
47168+	bool vbus_det_en;
47169+	int ret = 0;
47170+
47171+	if (rport->port_id != USB2PHY_PORT_OTG)
47172+		return ret;
47173+
47174+	switch (mode) {
47175+	case PHY_MODE_USB_OTG:
47176+		if (rphy->edev_self && submode) {
47177+			if (submode == USB_ROLE_HOST) {
47178+				extcon_set_state(rphy->edev, EXTCON_USB_HOST, true);
47179+				extcon_set_state(rphy->edev, EXTCON_USB, false);
47180+			} else if (submode == USB_ROLE_DEVICE) {
47181+				extcon_set_state(rphy->edev, EXTCON_USB_HOST, false);
47182+				extcon_set_state(rphy->edev, EXTCON_USB, true);
47183+			}
47184+
47185+			return ret;
47186+		}
47187+
47188+		/*
47189+		 * In case of using vbus to detect connect state by u2phy,
47190+		 * enable vbus detect on otg mode.
47191+		 */
47192+		fallthrough;
47193+	case PHY_MODE_USB_DEVICE:
47194+		/* Disable VBUS supply */
47195+		rockchip_set_vbus_power(rport, false);
47196+		extcon_set_state_sync(rphy->edev, EXTCON_USB_VBUS_EN, false);
47197+		/* For vbus always on, set EXTCON_USB to true. */
47198+		if (rport->vbus_always_on)
47199+			extcon_set_state(rphy->edev, EXTCON_USB, true);
47200+		rport->perip_connected = true;
47201+		vbus_det_en = true;
47202+		break;
47203+	case PHY_MODE_USB_HOST:
47204+		/* Enable VBUS supply */
47205+		ret = rockchip_set_vbus_power(rport, true);
47206+		if (ret) {
47207+			dev_err(&rport->phy->dev,
47208+				"Failed to set host mode\n");
47209+			return ret;
47210+		}
47211+
47212+		extcon_set_state_sync(rphy->edev, EXTCON_USB_VBUS_EN, true);
47213+		/* For vbus always on, deinit EXTCON_USB to false. */
47214+		if (rport->vbus_always_on)
47215+			extcon_set_state(rphy->edev, EXTCON_USB, false);
47216+		rport->perip_connected = false;
47217+		fallthrough;
47218+	case PHY_MODE_INVALID:
47219+		vbus_det_en = false;
47220+		break;
47221+	default:
47222+		dev_info(&rport->phy->dev, "illegal mode\n");
47223+		return ret;
47224+	}
47225+
47226+	if (rphy->phy_cfg->vbus_detect)
47227+		rphy->phy_cfg->vbus_detect(rphy, vbus_det_en);
47228+	else
47229+		ret = property_enable(rphy->grf, &rport->port_cfg->vbus_det_en,
47230+				      vbus_det_en);
47231+
47232+	return ret;
47233+}
47234+
47235 static const struct phy_ops rockchip_usb2phy_ops = {
47236 	.init		= rockchip_usb2phy_init,
47237 	.exit		= rockchip_usb2phy_exit,
47238 	.power_on	= rockchip_usb2phy_power_on,
47239 	.power_off	= rockchip_usb2phy_power_off,
47240+	.set_mode	= rockchip_usb2phy_set_mode,
47241 	.owner		= THIS_MODULE,
47242 };
47243 
47244-static void rockchip_usb2phy_otg_sm_work(struct work_struct *work)
47245+/* Show & store the current value of otg mode for otg port */
47246+static ssize_t otg_mode_show(struct device *device,
47247+			     struct device_attribute *attr,
47248+			     char *buf)
47249 {
47250-	struct rockchip_usb2phy_port *rport =
47251-		container_of(work, struct rockchip_usb2phy_port,
47252-			     otg_sm_work.work);
47253-	struct rockchip_usb2phy *rphy = dev_get_drvdata(rport->phy->dev.parent);
47254-	static unsigned int cable;
47255-	unsigned long delay;
47256-	bool vbus_attach, sch_work, notify_charger;
47257+	struct rockchip_usb2phy *rphy = dev_get_drvdata(device);
47258+	struct rockchip_usb2phy_port *rport = NULL;
47259+	unsigned int index;
47260 
47261-	vbus_attach = property_enabled(rphy->grf,
47262-				       &rport->port_cfg->utmi_bvalid);
47263+	for (index = 0; index < rphy->phy_cfg->num_ports; index++) {
47264+		rport = &rphy->ports[index];
47265+		if (rport->port_id == USB2PHY_PORT_OTG)
47266+			break;
47267+	}
47268 
47269-	sch_work = false;
47270-	notify_charger = false;
47271-	delay = OTG_SCHEDULE_DELAY;
47272-	dev_dbg(&rport->phy->dev, "%s otg sm work\n",
47273-		usb_otg_state_string(rport->state));
47274+	if (!rport) {
47275+		dev_err(rphy->dev, "Fail to get otg port\n");
47276+		return -EINVAL;
47277+	} else if (rport->port_id != USB2PHY_PORT_OTG) {
47278+		dev_err(rphy->dev, "No support otg\n");
47279+		return -EINVAL;
47280+	}
47281 
47282-	switch (rport->state) {
47283-	case OTG_STATE_UNDEFINED:
47284-		rport->state = OTG_STATE_B_IDLE;
47285-		if (!vbus_attach)
47286-			rockchip_usb2phy_power_off(rport->phy);
47287-		fallthrough;
47288-	case OTG_STATE_B_IDLE:
47289-		if (extcon_get_state(rphy->edev, EXTCON_USB_HOST) > 0) {
47290-			dev_dbg(&rport->phy->dev, "usb otg host connect\n");
47291-			rport->state = OTG_STATE_A_HOST;
47292-			rockchip_usb2phy_power_on(rport->phy);
47293-			return;
47294-		} else if (vbus_attach) {
47295-			dev_dbg(&rport->phy->dev, "vbus_attach\n");
47296+	switch (rport->mode) {
47297+	case USB_DR_MODE_HOST:
47298+		return sprintf(buf, "host\n");
47299+	case USB_DR_MODE_PERIPHERAL:
47300+		return sprintf(buf, "peripheral\n");
47301+	case USB_DR_MODE_OTG:
47302+		return sprintf(buf, "otg\n");
47303+	case USB_DR_MODE_UNKNOWN:
47304+		return sprintf(buf, "UNKNOWN\n");
47305+	}
47306+
47307+	return -EINVAL;
47308+}
47309+
47310+static ssize_t otg_mode_store(struct device *device,
47311+			      struct device_attribute *attr,
47312+			      const char *buf, size_t count)
47313+{
47314+	struct rockchip_usb2phy *rphy = dev_get_drvdata(device);
47315+	struct rockchip_usb2phy_port *rport = NULL;
47316+	struct regmap *base = get_reg_base(rphy);
47317+	enum usb_dr_mode new_dr_mode;
47318+	unsigned int index;
47319+	int rc = count;
47320+
47321+	for (index = 0; index < rphy->phy_cfg->num_ports; index++) {
47322+		rport = &rphy->ports[index];
47323+		if (rport->port_id == USB2PHY_PORT_OTG)
47324+			break;
47325+	}
47326+
47327+	if (!rport) {
47328+		dev_err(rphy->dev, "Fail to get otg port\n");
47329+		rc = -EINVAL;
47330+		goto err0;
47331+	} else if (rport->port_id != USB2PHY_PORT_OTG ||
47332+		   rport->mode == USB_DR_MODE_UNKNOWN) {
47333+		dev_err(rphy->dev, "No support otg\n");
47334+		rc = -EINVAL;
47335+		goto err0;
47336+	}
47337+
47338+	mutex_lock(&rport->mutex);
47339+
47340+	if (!strncmp(buf, "0", 1) || !strncmp(buf, "otg", 3)) {
47341+		new_dr_mode = USB_DR_MODE_OTG;
47342+	} else if (!strncmp(buf, "1", 1) || !strncmp(buf, "host", 4)) {
47343+		new_dr_mode = USB_DR_MODE_HOST;
47344+	} else if (!strncmp(buf, "2", 1) || !strncmp(buf, "peripheral", 10)) {
47345+		new_dr_mode = USB_DR_MODE_PERIPHERAL;
47346+	} else {
47347+		dev_err(rphy->dev, "Error mode! Input 'otg' or 'host' or 'peripheral'\n");
47348+		rc = -EINVAL;
47349+		goto err1;
47350+	}
47351+
47352+	if (rport->mode == new_dr_mode) {
47353+		dev_warn(rphy->dev, "Same as current mode\n");
47354+		goto err1;
47355+	}
47356+
47357+	rport->mode = new_dr_mode;
47358+
47359+	switch (rport->mode) {
47360+	case USB_DR_MODE_HOST:
47361+		rockchip_usb2phy_set_mode(rport->phy, PHY_MODE_USB_HOST, 0);
47362+		property_enable(base, &rport->port_cfg->iddig_output, false);
47363+		property_enable(base, &rport->port_cfg->iddig_en, true);
47364+		break;
47365+	case USB_DR_MODE_PERIPHERAL:
47366+		rockchip_usb2phy_set_mode(rport->phy, PHY_MODE_USB_DEVICE, 0);
47367+		property_enable(base, &rport->port_cfg->iddig_output, true);
47368+		property_enable(base, &rport->port_cfg->iddig_en, true);
47369+		break;
47370+	case USB_DR_MODE_OTG:
47371+		rockchip_usb2phy_set_mode(rport->phy, PHY_MODE_USB_OTG, 0);
47372+		property_enable(base, &rport->port_cfg->iddig_output, false);
47373+		property_enable(base, &rport->port_cfg->iddig_en, false);
47374+		break;
47375+	default:
47376+		break;
47377+	}
47378+
47379+err1:
47380+	mutex_unlock(&rport->mutex);
47381+
47382+err0:
47383+	return rc;
47384+}
47385+static DEVICE_ATTR_RW(otg_mode);
47386+
47387+/* Group all the usb2 phy attributes */
47388+static struct attribute *usb2_phy_attrs[] = {
47389+	&dev_attr_otg_mode.attr,
47390+	NULL,
47391+};
47392+
47393+static struct attribute_group usb2_phy_attr_group = {
47394+	.name = NULL,	/* we want them in the same directory */
47395+	.attrs = usb2_phy_attrs,
47396+};
47397+
47398+static void rockchip_usb2phy_otg_sm_work(struct work_struct *work)
47399+{
47400+	struct rockchip_usb2phy_port *rport =
47401+		container_of(work, struct rockchip_usb2phy_port,
47402+			     otg_sm_work.work);
47403+	struct rockchip_usb2phy *rphy = dev_get_drvdata(rport->phy->dev.parent);
47404+	static unsigned int cable;
47405+	unsigned long delay;
47406+	bool sch_work;
47407+
47408+	mutex_lock(&rport->mutex);
47409+
47410+	if (rport->port_cfg->bvalid_grf_con.enable && rport->typec_vbus_det)
47411+		rport->vbus_attached =
47412+			property_enabled(rphy->grf, &rport->port_cfg->bvalid_grf_con);
47413+	else if (rport->utmi_avalid)
47414+		rport->vbus_attached =
47415+			property_enabled(rphy->grf, &rport->port_cfg->utmi_avalid);
47416+	else
47417+		rport->vbus_attached =
47418+			property_enabled(rphy->grf, &rport->port_cfg->utmi_bvalid);
47419+
47420+	sch_work = false;
47421+	delay = OTG_SCHEDULE_DELAY;
47422+
47423+	dev_dbg(&rport->phy->dev, "%s otg sm work\n",
47424+		usb_otg_state_string(rport->state));
47425+
47426+	switch (rport->state) {
47427+	case OTG_STATE_UNDEFINED:
47428+		rport->state = OTG_STATE_B_IDLE;
47429+		if (!rport->vbus_attached) {
47430+			mutex_unlock(&rport->mutex);
47431+			rockchip_usb2phy_power_off(rport->phy);
47432+			mutex_lock(&rport->mutex);
47433+		}
47434+		fallthrough;
47435+	case OTG_STATE_B_IDLE:
47436+		if (extcon_get_state(rphy->edev, EXTCON_USB_HOST) > 0 ||
47437+		    extcon_get_state(rphy->edev, EXTCON_USB_VBUS_EN) > 0) {
47438+			dev_dbg(&rport->phy->dev, "usb otg host connect\n");
47439+			rport->state = OTG_STATE_A_HOST;
47440+			rphy->chg_state = USB_CHG_STATE_UNDEFINED;
47441+			rphy->chg_type = POWER_SUPPLY_TYPE_UNKNOWN;
47442+			mutex_unlock(&rport->mutex);
47443+			rockchip_usb2phy_power_on(rport->phy);
47444+			return;
47445+		} else if (rport->vbus_attached) {
47446+			dev_dbg(&rport->phy->dev, "vbus_attach\n");
47447 			switch (rphy->chg_state) {
47448 			case USB_CHG_STATE_UNDEFINED:
47449+				mutex_unlock(&rport->mutex);
47450 				schedule_delayed_work(&rport->chg_work, 0);
47451 				return;
47452 			case USB_CHG_STATE_DETECTED:
47453 				switch (rphy->chg_type) {
47454 				case POWER_SUPPLY_TYPE_USB:
47455 					dev_dbg(&rport->phy->dev, "sdp cable is connected\n");
47456+					wake_lock(&rport->wakelock);
47457+					cable = EXTCON_CHG_USB_SDP;
47458+					mutex_unlock(&rport->mutex);
47459 					rockchip_usb2phy_power_on(rport->phy);
47460+					mutex_lock(&rport->mutex);
47461 					rport->state = OTG_STATE_B_PERIPHERAL;
47462-					notify_charger = true;
47463+					rport->perip_connected = true;
47464 					sch_work = true;
47465-					cable = EXTCON_CHG_USB_SDP;
47466 					break;
47467 				case POWER_SUPPLY_TYPE_USB_DCP:
47468 					dev_dbg(&rport->phy->dev, "dcp cable is connected\n");
47469-					rockchip_usb2phy_power_off(rport->phy);
47470-					notify_charger = true;
47471-					sch_work = true;
47472 					cable = EXTCON_CHG_USB_DCP;
47473+					sch_work = true;
47474 					break;
47475 				case POWER_SUPPLY_TYPE_USB_CDP:
47476 					dev_dbg(&rport->phy->dev, "cdp cable is connected\n");
47477+					wake_lock(&rport->wakelock);
47478+					cable = EXTCON_CHG_USB_CDP;
47479+					mutex_unlock(&rport->mutex);
47480 					rockchip_usb2phy_power_on(rport->phy);
47481+					mutex_lock(&rport->mutex);
47482 					rport->state = OTG_STATE_B_PERIPHERAL;
47483-					notify_charger = true;
47484+					rport->perip_connected = true;
47485 					sch_work = true;
47486-					cable = EXTCON_CHG_USB_CDP;
47487 					break;
47488 				default:
47489 					break;
47490@@ -594,48 +1239,81 @@ static void rockchip_usb2phy_otg_sm_work(struct work_struct *work)
47491 				break;
47492 			}
47493 		} else {
47494-			notify_charger = true;
47495 			rphy->chg_state = USB_CHG_STATE_UNDEFINED;
47496 			rphy->chg_type = POWER_SUPPLY_TYPE_UNKNOWN;
47497-		}
47498-
47499-		if (rport->vbus_attached != vbus_attach) {
47500-			rport->vbus_attached = vbus_attach;
47501-
47502-			if (notify_charger && rphy->edev) {
47503-				extcon_set_state_sync(rphy->edev,
47504-							cable, vbus_attach);
47505-				if (cable == EXTCON_CHG_USB_SDP)
47506-					extcon_set_state_sync(rphy->edev,
47507-							      EXTCON_USB,
47508-							      vbus_attach);
47509-			}
47510+			mutex_unlock(&rport->mutex);
47511+			rockchip_usb2phy_power_off(rport->phy);
47512+			mutex_lock(&rport->mutex);
47513 		}
47514 		break;
47515 	case OTG_STATE_B_PERIPHERAL:
47516-		if (!vbus_attach) {
47517-			dev_dbg(&rport->phy->dev, "usb disconnect\n");
47518+		sch_work = true;
47519+
47520+		if (extcon_get_state(rphy->edev, EXTCON_USB_HOST) > 0 ||
47521+		    extcon_get_state(rphy->edev,
47522+					    EXTCON_USB_VBUS_EN) > 0) {
47523+			dev_dbg(&rport->phy->dev, "usb otg host connect\n");
47524+			rport->state = OTG_STATE_A_HOST;
47525 			rphy->chg_state = USB_CHG_STATE_UNDEFINED;
47526 			rphy->chg_type = POWER_SUPPLY_TYPE_UNKNOWN;
47527+			rport->perip_connected = false;
47528+			sch_work = false;
47529+			wake_unlock(&rport->wakelock);
47530+		} else if (!rport->vbus_attached) {
47531+			dev_dbg(&rport->phy->dev, "usb disconnect\n");
47532 			rport->state = OTG_STATE_B_IDLE;
47533-			delay = 0;
47534-			rockchip_usb2phy_power_off(rport->phy);
47535+			rport->perip_connected = false;
47536+			rphy->chg_state = USB_CHG_STATE_UNDEFINED;
47537+			rphy->chg_type = POWER_SUPPLY_TYPE_UNKNOWN;
47538+			delay = OTG_SCHEDULE_DELAY;
47539+			wake_unlock(&rport->wakelock);
47540 		}
47541-		sch_work = true;
47542 		break;
47543 	case OTG_STATE_A_HOST:
47544 		if (extcon_get_state(rphy->edev, EXTCON_USB_HOST) == 0) {
47545 			dev_dbg(&rport->phy->dev, "usb otg host disconnect\n");
47546 			rport->state = OTG_STATE_B_IDLE;
47547-			rockchip_usb2phy_power_off(rport->phy);
47548+			sch_work = true;
47549+		} else {
47550+			mutex_unlock(&rport->mutex);
47551+			return;
47552 		}
47553 		break;
47554 	default:
47555-		break;
47556+		mutex_unlock(&rport->mutex);
47557+		return;
47558 	}
47559 
47560+	if (extcon_get_state(rphy->edev, cable) != rport->vbus_attached) {
47561+		extcon_set_state_sync(rphy->edev,
47562+					cable, rport->vbus_attached);
47563+
47564+		if (!rport->vbus_attached)
47565+			cable = EXTCON_NONE;
47566+	} else if (rport->state == OTG_STATE_A_HOST &&
47567+		 extcon_get_state(rphy->edev, cable)) {
47568+		/*
47569+		 * If plug in OTG host cable when the rport state is
47570+		 * OTG_STATE_B_PERIPHERAL, the vbus voltage will stay
47571+		 * in high, so the rport->vbus_attached may not be
47572+		 * changed. We need to set cable state here.
47573+		 */
47574+		extcon_set_state_sync(rphy->edev, cable, false);
47575+		cable = EXTCON_NONE;
47576+	}
47577+
47578+	if (rphy->edev_self &&
47579+	    (extcon_get_state(rphy->edev, EXTCON_USB) !=
47580+	     rport->perip_connected)) {
47581+		extcon_set_state_sync(rphy->edev,
47582+					EXTCON_USB,
47583+					rport->perip_connected);
47584+		extcon_sync(rphy->edev, EXTCON_USB_HOST);
47585+	}
47586 	if (sch_work)
47587 		schedule_delayed_work(&rport->otg_sm_work, delay);
47588+
47589+	mutex_unlock(&rport->mutex);
47590 }
47591 
47592 static const char *chg_to_string(enum power_supply_type chg_type)
47593@@ -689,21 +1367,45 @@ static void rockchip_chg_detect_work(struct work_struct *work)
47594 		container_of(work, struct rockchip_usb2phy_port, chg_work.work);
47595 	struct rockchip_usb2phy *rphy = dev_get_drvdata(rport->phy->dev.parent);
47596 	struct regmap *base = get_reg_base(rphy);
47597+	const struct usb2phy_reg *phy_sus_reg;
47598 	bool is_dcd, tmout, vout;
47599 	unsigned long delay;
47600+	unsigned int mask;
47601+	int ret;
47602 
47603 	dev_dbg(&rport->phy->dev, "chg detection work state = %d\n",
47604 		rphy->chg_state);
47605+
47606+	/*
47607+	 * The conditions for charger detection:
47608+	 * 1. Set the PHY in normal mode to keep the UTMI_CLK on.
47609+	 * 2. Set the utmi_opmode in non-driving mode.
47610+	 * 3. Set the utmi_xcvrselect to FS speed.
47611+	 * 4. Set the utmi_termselect to FS speed.
47612+	 * 5. Enable the DP/DM pulldown resistor.
47613+	 */
47614 	switch (rphy->chg_state) {
47615 	case USB_CHG_STATE_UNDEFINED:
47616-		if (!rport->suspended)
47617-			rockchip_usb2phy_power_off(rport->phy);
47618-		/* put the controller in non-driving mode */
47619-		property_enable(base, &rphy->phy_cfg->chg_det.opmode, false);
47620+		mutex_lock(&rport->mutex);
47621+		/* Store the PHY current suspend configuration */
47622+		phy_sus_reg = &rport->port_cfg->phy_sus;
47623+		ret = regmap_read(base, phy_sus_reg->offset,
47624+				  &rphy->phy_sus_cfg);
47625+		if (ret) {
47626+			dev_err(&rport->phy->dev,
47627+				"Fail to read phy_sus reg offset 0x%x, ret %d\n",
47628+				phy_sus_reg->offset, ret);
47629+			mutex_unlock(&rport->mutex);
47630+			return;
47631+		}
47632+
47633+		/* Set the PHY in charger detection mode */
47634+		property_enable(base, &rphy->phy_cfg->chg_det.chg_mode, true);
47635 		/* Start DCD processing stage 1 */
47636 		rockchip_chg_enable_dcd(rphy, true);
47637 		rphy->chg_state = USB_CHG_STATE_WAIT_FOR_DCD;
47638 		rphy->dcd_retries = 0;
47639+		rphy->primary_retries = 0;
47640 		delay = CHG_DCD_POLL_TIME;
47641 		break;
47642 	case USB_CHG_STATE_WAIT_FOR_DCD:
47643@@ -741,6 +1443,19 @@ static void rockchip_chg_detect_work(struct work_struct *work)
47644 				rphy->chg_state = USB_CHG_STATE_DETECTED;
47645 				delay = 0;
47646 			} else {
47647+				if (rphy->primary_retries < 2) {
47648+					/* Turn off DCD circuitry */
47649+					rockchip_chg_enable_dcd(rphy, false);
47650+					/* Voltage Source on DP, Probe on DM */
47651+					rockchip_chg_enable_primary_det(rphy,
47652+									true);
47653+					delay = CHG_PRIMARY_DET_TIME;
47654+					rphy->chg_state =
47655+						USB_CHG_STATE_DCD_DONE;
47656+					rphy->primary_retries++;
47657+					/* break USB_CHG_STATE_DCD_DONE */
47658+					break;
47659+				}
47660 				rphy->chg_type = POWER_SUPPLY_TYPE_USB;
47661 				rphy->chg_state = USB_CHG_STATE_DETECTED;
47662 				delay = 0;
47663@@ -759,19 +1474,36 @@ static void rockchip_chg_detect_work(struct work_struct *work)
47664 		fallthrough;
47665 	case USB_CHG_STATE_SECONDARY_DONE:
47666 		rphy->chg_state = USB_CHG_STATE_DETECTED;
47667-		delay = 0;
47668 		fallthrough;
47669 	case USB_CHG_STATE_DETECTED:
47670-		/* put the controller in normal mode */
47671-		property_enable(base, &rphy->phy_cfg->chg_det.opmode, true);
47672+		if (rphy->phy_cfg->chg_det.chg_mode.offset !=
47673+		    rport->port_cfg->phy_sus.offset)
47674+			property_enable(base, &rphy->phy_cfg->chg_det.chg_mode, false);
47675+
47676+		/* Restore the PHY suspend configuration */
47677+		phy_sus_reg = &rport->port_cfg->phy_sus;
47678+		mask = GENMASK(phy_sus_reg->bitend, phy_sus_reg->bitstart);
47679+		ret = regmap_write(base, phy_sus_reg->offset,
47680+				   (rphy->phy_sus_cfg | (mask << BIT_WRITEABLE_SHIFT)));
47681+		if (ret)
47682+			dev_err(&rport->phy->dev,
47683+				"Fail to set phy_sus reg offset 0x%x, ret %d\n",
47684+				phy_sus_reg->offset, ret);
47685+		mutex_unlock(&rport->mutex);
47686 		rockchip_usb2phy_otg_sm_work(&rport->otg_sm_work.work);
47687 		dev_dbg(&rport->phy->dev, "charger = %s\n",
47688 			 chg_to_string(rphy->chg_type));
47689 		return;
47690 	default:
47691+		mutex_unlock(&rport->mutex);
47692 		return;
47693 	}
47694 
47695+	/*
47696+	 * Hold the mutex lock during the whole charger
47697+	 * detection stage, and release it after detect
47698+	 * the charger type.
47699+	 */
47700 	schedule_delayed_work(&rport->chg_work, delay);
47701 }
47702 
47703@@ -793,30 +1525,43 @@ static void rockchip_usb2phy_sm_work(struct work_struct *work)
47704 	struct rockchip_usb2phy_port *rport =
47705 		container_of(work, struct rockchip_usb2phy_port, sm_work.work);
47706 	struct rockchip_usb2phy *rphy = dev_get_drvdata(rport->phy->dev.parent);
47707-	unsigned int sh = rport->port_cfg->utmi_hstdet.bitend -
47708-			  rport->port_cfg->utmi_hstdet.bitstart + 1;
47709-	unsigned int ul, uhd, state;
47710+	unsigned int sh, ul, uhd, state;
47711 	unsigned int ul_mask, uhd_mask;
47712 	int ret;
47713 
47714+	if (!rport->port_cfg->utmi_ls.offset ||
47715+	    (!rport->port_cfg->utmi_hstdet.offset &&
47716+	     !rport->port_cfg->disfall_en.offset)) {
47717+		dev_dbg(&rport->phy->dev, "some property may not be specified\n");
47718+		return;
47719+	}
47720+
47721 	mutex_lock(&rport->mutex);
47722 
47723 	ret = regmap_read(rphy->grf, rport->port_cfg->utmi_ls.offset, &ul);
47724 	if (ret < 0)
47725 		goto next_schedule;
47726 
47727-	ret = regmap_read(rphy->grf, rport->port_cfg->utmi_hstdet.offset, &uhd);
47728-	if (ret < 0)
47729-		goto next_schedule;
47730-
47731-	uhd_mask = GENMASK(rport->port_cfg->utmi_hstdet.bitend,
47732-			   rport->port_cfg->utmi_hstdet.bitstart);
47733 	ul_mask = GENMASK(rport->port_cfg->utmi_ls.bitend,
47734 			  rport->port_cfg->utmi_ls.bitstart);
47735 
47736-	/* stitch on utmi_ls and utmi_hstdet as phy state */
47737-	state = ((uhd & uhd_mask) >> rport->port_cfg->utmi_hstdet.bitstart) |
47738-		(((ul & ul_mask) >> rport->port_cfg->utmi_ls.bitstart) << sh);
47739+	if (rport->port_cfg->utmi_hstdet.offset) {
47740+		ret = regmap_read(rphy->grf, rport->port_cfg->utmi_hstdet.offset, &uhd);
47741+		if (ret < 0)
47742+			goto next_schedule;
47743+
47744+		uhd_mask = GENMASK(rport->port_cfg->utmi_hstdet.bitend,
47745+				   rport->port_cfg->utmi_hstdet.bitstart);
47746+
47747+		sh = rport->port_cfg->utmi_hstdet.bitend -
47748+		     rport->port_cfg->utmi_hstdet.bitstart + 1;
47749+		/* stitch on utmi_ls and utmi_hstdet as phy state */
47750+		state = ((uhd & uhd_mask) >> rport->port_cfg->utmi_hstdet.bitstart) |
47751+			(((ul & ul_mask) >> rport->port_cfg->utmi_ls.bitstart) << sh);
47752+	} else {
47753+		state = ((ul & ul_mask) >> rport->port_cfg->utmi_ls.bitstart) << 1 |
47754+			rport->host_disconnect;
47755+	}
47756 
47757 	switch (state) {
47758 	case PHY_STATE_HS_ONLINE:
47759@@ -841,7 +1586,9 @@ static void rockchip_usb2phy_sm_work(struct work_struct *work)
47760 	case PHY_STATE_CONNECT:
47761 		if (rport->suspended) {
47762 			dev_dbg(&rport->phy->dev, "Connected\n");
47763+			mutex_unlock(&rport->mutex);
47764 			rockchip_usb2phy_power_on(rport->phy);
47765+			mutex_lock(&rport->mutex);
47766 			rport->suspended = false;
47767 		} else {
47768 			/* D+ line pull-up, D- line pull-down */
47769@@ -851,7 +1598,9 @@ static void rockchip_usb2phy_sm_work(struct work_struct *work)
47770 	case PHY_STATE_DISCONNECT:
47771 		if (!rport->suspended) {
47772 			dev_dbg(&rport->phy->dev, "Disconnected\n");
47773+			mutex_unlock(&rport->mutex);
47774 			rockchip_usb2phy_power_off(rport->phy);
47775+			mutex_lock(&rport->mutex);
47776 			rport->suspended = true;
47777 		}
47778 
47779@@ -859,8 +1608,7 @@ static void rockchip_usb2phy_sm_work(struct work_struct *work)
47780 		 * activate the linestate detection to get the next device
47781 		 * plug-in irq.
47782 		 */
47783-		property_enable(rphy->grf, &rport->port_cfg->ls_det_clr, true);
47784-		property_enable(rphy->grf, &rport->port_cfg->ls_det_en, true);
47785+		rockchip_usb2phy_enable_line_irq(rphy, rport, true);
47786 
47787 		/*
47788 		 * we don't need to rearm the delayed work when the phy port
47789@@ -869,7 +1617,7 @@ static void rockchip_usb2phy_sm_work(struct work_struct *work)
47790 		mutex_unlock(&rport->mutex);
47791 		return;
47792 	default:
47793-		dev_dbg(&rport->phy->dev, "unknown phy state\n");
+		dev_dbg(&rport->phy->dev, "unknown phy state %u\n", state);
47795 		break;
47796 	}
47797 
47798@@ -883,14 +1631,24 @@ static irqreturn_t rockchip_usb2phy_linestate_irq(int irq, void *data)
47799 	struct rockchip_usb2phy_port *rport = data;
47800 	struct rockchip_usb2phy *rphy = dev_get_drvdata(rport->phy->dev.parent);
47801 
47802-	if (!property_enabled(rphy->grf, &rport->port_cfg->ls_det_st))
47803+	if (!property_enabled(rphy->grf, &rport->port_cfg->ls_det_st) ||
47804+	    !property_enabled(rphy->grf, &rport->port_cfg->ls_det_en))
47805 		return IRQ_NONE;
47806 
47807+	dev_dbg(&rport->phy->dev, "linestate interrupt\n");
47808+
47809 	mutex_lock(&rport->mutex);
47810 
47811 	/* disable linestate detect irq and clear its status */
47812-	property_enable(rphy->grf, &rport->port_cfg->ls_det_en, false);
47813-	property_enable(rphy->grf, &rport->port_cfg->ls_det_clr, true);
47814+	rockchip_usb2phy_enable_line_irq(rphy, rport, false);
47815+
47816+	/*
+	 * For the host port, the disconnect irq may be missed when a device
+	 * is connected. In that case, clear the host_disconnect state based
+	 * on the linestate irq.
47820+	 */
47821+	if (rport->port_id == USB2PHY_PORT_HOST && rport->port_cfg->disfall_en.offset)
47822+		rport->host_disconnect = false;
47823 
47824 	mutex_unlock(&rport->mutex);
47825 
47826@@ -920,99 +1678,163 @@ static irqreturn_t rockchip_usb2phy_bvalid_irq(int irq, void *data)
47827 
47828 	mutex_unlock(&rport->mutex);
47829 
47830-	rockchip_usb2phy_otg_sm_work(&rport->otg_sm_work.work);
47831+	if (rport->bypass_uart_en)
47832+		rockchip_usb_bypass_uart(rport, false);
47833+
47834+	if (rport->otg_sm_work.work.func) {
47835+		cancel_delayed_work_sync(&rport->otg_sm_work);
47836+		rockchip_usb2phy_otg_sm_work(&rport->otg_sm_work.work);
47837+	}
47838 
47839 	return IRQ_HANDLED;
47840 }
47841 
47842-static irqreturn_t rockchip_usb2phy_otg_mux_irq(int irq, void *data)
47843+static irqreturn_t rockchip_usb2phy_id_irq(int irq, void *data)
47844 {
47845 	struct rockchip_usb2phy_port *rport = data;
47846 	struct rockchip_usb2phy *rphy = dev_get_drvdata(rport->phy->dev.parent);
47847+	bool cable_vbus_state = false;
47848 
47849-	if (property_enabled(rphy->grf, &rport->port_cfg->bvalid_det_st))
47850-		return rockchip_usb2phy_bvalid_irq(irq, data);
47851-	else
47852+	if (!property_enabled(rphy->grf, &rport->port_cfg->idfall_det_st) &&
47853+	    !property_enabled(rphy->grf, &rport->port_cfg->idrise_det_st))
47854 		return IRQ_NONE;
47855+
47856+	mutex_lock(&rport->mutex);
47857+
47858+	/* clear id fall or rise detect irq pending status */
47859+	if (property_enabled(rphy->grf, &rport->port_cfg->idfall_det_st)) {
47860+		property_enable(rphy->grf, &rport->port_cfg->idfall_det_clr,
47861+				true);
47862+		cable_vbus_state = true;
47863+	} else if (property_enabled(rphy->grf, &rport->port_cfg->idrise_det_st)) {
47864+		property_enable(rphy->grf, &rport->port_cfg->idrise_det_clr,
47865+				true);
47866+		cable_vbus_state = false;
47867+	}
47868+
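+	/* An id fall means a host cable is attached: report host role and enable VBUS */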
47869+	extcon_set_state(rphy->edev, EXTCON_USB_HOST, cable_vbus_state);
47870+	extcon_set_state(rphy->edev, EXTCON_USB_VBUS_EN, cable_vbus_state);
47871+
47872+	extcon_sync(rphy->edev, EXTCON_USB_HOST);
47873+	extcon_sync(rphy->edev, EXTCON_USB_VBUS_EN);
47874+
47875+	rockchip_set_vbus_power(rport, cable_vbus_state);
47876+
47877+	mutex_unlock(&rport->mutex);
47878+
47879+	return IRQ_HANDLED;
47880 }
47881 
47882-static int rockchip_usb2phy_host_port_init(struct rockchip_usb2phy *rphy,
47883-					   struct rockchip_usb2phy_port *rport,
47884-					   struct device_node *child_np)
47885+static irqreturn_t rockchip_usb2phy_host_disc_irq(int irq, void *data)
47886 {
47887-	int ret;
47888+	struct rockchip_usb2phy_port *rport = data;
47889+	struct rockchip_usb2phy *rphy = dev_get_drvdata(rport->phy->dev.parent);
47890 
47891-	rport->port_id = USB2PHY_PORT_HOST;
47892-	rport->port_cfg = &rphy->phy_cfg->port_cfgs[USB2PHY_PORT_HOST];
47893-	rport->suspended = true;
47894+	if (!property_enabled(rphy->grf, &rport->port_cfg->disfall_st) &&
47895+	    !property_enabled(rphy->grf, &rport->port_cfg->disrise_st))
47896+		return IRQ_NONE;
47897 
47898-	mutex_init(&rport->mutex);
47899-	INIT_DELAYED_WORK(&rport->sm_work, rockchip_usb2phy_sm_work);
47900+	mutex_lock(&rport->mutex);
47901 
47902-	rport->ls_irq = of_irq_get_byname(child_np, "linestate");
47903-	if (rport->ls_irq < 0) {
47904-		dev_err(rphy->dev, "no linestate irq provided\n");
47905-		return rport->ls_irq;
47906+	/* clear disconnect fall or rise detect irq pending status */
47907+	if (property_enabled(rphy->grf, &rport->port_cfg->disfall_st)) {
47908+		property_enable(rphy->grf, &rport->port_cfg->disfall_clr,
47909+				true);
47910+		rport->host_disconnect = false;
47911+	} else if (property_enabled(rphy->grf, &rport->port_cfg->disrise_st)) {
47912+		property_enable(rphy->grf, &rport->port_cfg->disrise_clr,
47913+				true);
47914+		rport->host_disconnect = true;
47915 	}
47916 
47917-	ret = devm_request_threaded_irq(rphy->dev, rport->ls_irq, NULL,
47918-					rockchip_usb2phy_linestate_irq,
47919-					IRQF_ONESHOT,
47920-					"rockchip_usb2phy", rport);
47921-	if (ret) {
47922-		dev_err(rphy->dev, "failed to request linestate irq handle\n");
47923-		return ret;
47924-	}
47925+	mutex_unlock(&rport->mutex);
47926 
47927-	return 0;
47928+	return IRQ_HANDLED;
47929 }
47930 
47931-static int rockchip_otg_event(struct notifier_block *nb,
47932-			      unsigned long event, void *ptr)
47933+static irqreturn_t rockchip_usb2phy_otg_mux_irq(int irq, void *data)
47934 {
47935-	struct rockchip_usb2phy_port *rport =
47936-		container_of(nb, struct rockchip_usb2phy_port, event_nb);
47937+	irqreturn_t ret = IRQ_NONE;
47938 
47939-	schedule_delayed_work(&rport->otg_sm_work, OTG_SCHEDULE_DELAY);
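+	/* The otg-mux line carries id, bvalid and linestate events; run each handler in turn */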
47940+	ret  = rockchip_usb2phy_id_irq(irq, data);
47941+	ret |= rockchip_usb2phy_bvalid_irq(irq, data);
47942+	ret |= rockchip_usb2phy_linestate_irq(irq, data);
47943 
47944-	return NOTIFY_DONE;
47945+	return ret;
47946 }
47947 
47948-static int rockchip_usb2phy_otg_port_init(struct rockchip_usb2phy *rphy,
47949-					  struct rockchip_usb2phy_port *rport,
47950-					  struct device_node *child_np)
47951+static irqreturn_t rockchip_usb2phy_irq(int irq, void *data)
47952 {
47953-	int ret;
47954+	struct rockchip_usb2phy *rphy = data;
47955+	struct rockchip_usb2phy_port *rport;
47956+	irqreturn_t ret = IRQ_NONE;
47957+	unsigned int index;
47958+	bool force_mode;
47959 
47960-	rport->port_id = USB2PHY_PORT_OTG;
47961-	rport->port_cfg = &rphy->phy_cfg->port_cfgs[USB2PHY_PORT_OTG];
47962-	rport->state = OTG_STATE_UNDEFINED;
47963+	for (index = 0; index < rphy->phy_cfg->num_ports; index++) {
47964+		rport = &rphy->ports[index];
47965+		if (!rport->phy)
47966+			continue;
47967 
47968-	/*
47969-	 * set suspended flag to true, but actually don't
47970-	 * put phy in suspend mode, it aims to enable usb
47971-	 * phy and clock in power_on() called by usb controller
47972-	 * driver during probe.
47973-	 */
47974-	rport->suspended = true;
47975-	rport->vbus_attached = false;
47976+		/*
+		 * Handle the disconnect irq before the linestate irq so that
+		 * the disconnect state is up to date for the sm work scheduled
+		 * by the linestate irq handler.
47979+		 */
47980+		if (rport->port_id == USB2PHY_PORT_HOST &&
47981+		    rport->port_cfg->disfall_en.offset)
47982+			ret |= rockchip_usb2phy_host_disc_irq(irq, rport);
47983 
47984-	mutex_init(&rport->mutex);
47985+		/* Handle linestate irq for both otg port and host port */
47986+		ret |= rockchip_usb2phy_linestate_irq(irq, rport);
47987 
47988-	rport->mode = of_usb_get_dr_mode_by_phy(child_np, -1);
47989-	if (rport->mode == USB_DR_MODE_HOST ||
47990-	    rport->mode == USB_DR_MODE_UNKNOWN) {
47991-		ret = 0;
47992-		goto out;
47993+		/*
47994+		 * Handle bvalid irq and id irq for otg port which
47995+		 * is assigned to otg controller.
47996+		 */
47997+		if (rport->port_id == USB2PHY_PORT_OTG &&
47998+		    rport->mode != USB_DR_MODE_UNKNOWN) {
47999+			if (rport->mode == USB_DR_MODE_HOST) {
48000+				/*
+				 * If the otg port works in usb host mode and
+				 * force_mode is true, the otg port has been
+				 * forced to host mode through the grf iddig
+				 * indicator via the sysfs interface "otg_mode".
+				 * The bvalid irq and id irq still need to be
+				 * handled in this case.
48007+				 */
48008+				force_mode = property_enabled(rphy->grf,
48009+						&rport->port_cfg->iddig_en);
48010+				if (!force_mode)
48011+					continue;
48012+			}
48013+
48014+			if (!rport->vbus_always_on)
48015+				ret |= rockchip_usb2phy_bvalid_irq(irq, rport);
48016+
48017+			ret |= rockchip_usb2phy_id_irq(irq, rport);
48018+		}
48019 	}
48020 
48021-	INIT_DELAYED_WORK(&rport->chg_work, rockchip_chg_detect_work);
48022-	INIT_DELAYED_WORK(&rport->otg_sm_work, rockchip_usb2phy_otg_sm_work);
48023+	return ret;
48024+}
48025+
48026+static int rockchip_usb2phy_port_irq_init(struct rockchip_usb2phy *rphy,
48027+					  struct rockchip_usb2phy_port *rport,
48028+					  struct device_node *child_np)
48029+{
48030+	int ret;
48031+
48032+	/*
+	 * If the usb2 phy uses a combined irq for the otg and host ports,
+	 * there is no need to init the otg and host port irqs separately.
48035+	 */
48036+	if (rphy->irq > 0)
48037+		return 0;
48038 
48039 	/*
48040-	 * Some SoCs use one interrupt with otg-id/otg-bvalid/linestate
48041-	 * interrupts muxed together, so probe the otg-mux interrupt first,
48042-	 * if not found, then look for the regular interrupts one by one.
+	 * Some SoCs (e.g. RV1108) use one combined irq for all of the
+	 * otg port irqs. Probe the otg-mux interrupt first; if it is
+	 * not found, init the regular irqs one by one.
48046 	 */
48047 	rport->otg_mux_irq = of_irq_get_byname(child_np, "otg-mux");
48048 	if (rport->otg_mux_irq > 0) {
48049@@ -1022,20 +1844,50 @@ static int rockchip_usb2phy_otg_port_init(struct rockchip_usb2phy *rphy,
48050 						IRQF_ONESHOT,
48051 						"rockchip_usb2phy_otg",
48052 						rport);
48053-		if (ret) {
48054+		if (ret)
48055 			dev_err(rphy->dev,
48056 				"failed to request otg-mux irq handle\n");
48057-			goto out;
48058-		}
48059-	} else {
48060-		rport->bvalid_irq = of_irq_get_byname(child_np, "otg-bvalid");
48061-		if (rport->bvalid_irq < 0) {
48062-			dev_err(rphy->dev, "no vbus valid irq provided\n");
48063-			ret = rport->bvalid_irq;
48064-			goto out;
48065+
48066+		return ret;
48067+	}
48068+
48069+	/* Init linestate irq for both otg port and host port */
48070+	rport->ls_irq = of_irq_get_byname(child_np, "linestate");
48071+	if (rport->ls_irq <= 0) {
48072+		dev_err(rphy->dev, "no linestate irq provided\n");
48073+		return -EINVAL;
48074+	}
48075+
48076+	ret = devm_request_threaded_irq(rphy->dev, rport->ls_irq, NULL,
48077+					rockchip_usb2phy_linestate_irq,
48078+					IRQF_ONESHOT,
48079+					"rockchip_usb2phy_ls", rport);
48080+	if (ret) {
48081+		dev_err(rphy->dev, "failed to request linestate irq handle\n");
48082+		return ret;
48083+	}
48084+
48085+	/*
+	 * If this is the host port, or an otg port that only supports
+	 * host mode, return immediately without initializing the bvalid
+	 * and id irqs.
48089+	 */
48090+	if (rport->port_id == USB2PHY_PORT_HOST ||
48091+	    rport->mode == USB_DR_MODE_HOST ||
48092+	    rport->mode == USB_DR_MODE_UNKNOWN)
48093+		return ret;
48094+
48095+	/* Init the bvalid irq for otg port */
48096+	if (!rport->vbus_always_on) {
48097+		rport->bvalid_irq = of_irq_get_byname(child_np,
48098+						      "otg-bvalid");
48099+		if (rport->bvalid_irq <= 0) {
48100+			dev_err(rphy->dev, "no bvalid irq provided\n");
48101+			return -EINVAL;
48102 		}
48103 
48104-		ret = devm_request_threaded_irq(rphy->dev, rport->bvalid_irq,
48105+		ret = devm_request_threaded_irq(rphy->dev,
48106+						rport->bvalid_irq,
48107 						NULL,
48108 						rockchip_usb2phy_bvalid_irq,
48109 						IRQF_ONESHOT,
48110@@ -1044,20 +1896,283 @@ static int rockchip_usb2phy_otg_port_init(struct rockchip_usb2phy *rphy,
48111 		if (ret) {
48112 			dev_err(rphy->dev,
48113 				"failed to request otg-bvalid irq handle\n");
48114-			goto out;
48115+			return ret;
48116 		}
48117 	}
48118 
48119-	if (!IS_ERR(rphy->edev)) {
48120-		rport->event_nb.notifier_call = rockchip_otg_event;
48121+	/* Init the id irq for otg port */
48122+	if (rphy->edev_self) {
48123+		rport->id_irq = of_irq_get_byname(child_np, "otg-id");
48124+		if (rport->id_irq <= 0) {
48125+			dev_err(rphy->dev, "no otg id irq provided\n");
48126+			return -EINVAL;
48127+		}
48128 
48129-		ret = devm_extcon_register_notifier(rphy->dev, rphy->edev,
48130-					EXTCON_USB_HOST, &rport->event_nb);
48131+		ret = devm_request_threaded_irq(rphy->dev,
48132+						rport->id_irq, NULL,
48133+						rockchip_usb2phy_id_irq,
48134+						IRQF_ONESHOT,
48135+						"rockchip_usb2phy_id",
48136+						rport);
48137+		if (ret) {
48138+			dev_err(rphy->dev,
48139+				"failed to request otg-id irq handle\n");
48140+			return ret;
48141+		}
48142+	}
48143+
48144+	return ret;
48145+}
48146+
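+/* Assert or deassert the phy/grf bvalid control bits (used on Type-C orientation changes) */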
48147+static void rockchip_usb2phy_usb_bvalid_enable(struct rockchip_usb2phy_port *rport,
48148+					       u8 enable)
48149+{
48150+	struct rockchip_usb2phy *rphy = dev_get_drvdata(rport->phy->dev.parent);
48151+	const struct rockchip_usb2phy_port_cfg *cfg = rport->port_cfg;
48152+
48153+	if (cfg->bvalid_phy_con.enable)
48154+		property_enable(rphy->grf, &cfg->bvalid_phy_con, enable);
48155+
48156+	if (cfg->bvalid_grf_con.enable)
48157+		property_enable(rphy->grf, &cfg->bvalid_grf_con, enable);
48158+}
48159+
48160+static int rockchip_usb2phy_orien_sw_set(struct typec_switch *sw,
48161+					 enum typec_orientation orien)
48162+{
48163+	struct rockchip_usb2phy_port *rport = typec_switch_get_drvdata(sw);
48164+
48165+	dev_dbg(&rport->phy->dev, "type-c orientation: %d\n", orien);
48166+
48167+	mutex_lock(&rport->mutex);
48168+	rockchip_usb2phy_usb_bvalid_enable(rport, orien != TYPEC_ORIENTATION_NONE);
48169+	mutex_unlock(&rport->mutex);
48170+
48171+	return 0;
48172+}
48173+
48174+static int
48175+rockchip_usb2phy_setup_orien_switch(struct rockchip_usb2phy *rphy,
48176+				    struct rockchip_usb2phy_port *rport)
48177+{
48178+	struct typec_switch_desc sw_desc = { };
48179+	struct device *dev = rphy->dev;
48180+
48181+	sw_desc.drvdata = rport;
48182+	sw_desc.fwnode = dev_fwnode(dev);
48183+	sw_desc.set = rockchip_usb2phy_orien_sw_set;
48184+
48185+	rport->sw = typec_switch_register(dev, &sw_desc);
48186+	if (IS_ERR(rport->sw)) {
+		dev_err(dev, "Failed to register typec orientation switch: %ld\n",
48188+			PTR_ERR(rport->sw));
48189+		return PTR_ERR(rport->sw);
48190+	}
48191+
48192+	return 0;
48193+}
48194+
48195+static void rockchip_usb2phy_orien_switch_unregister(void *data)
48196+{
48197+	struct rockchip_usb2phy_port *rport = data;
48198+
48199+	typec_switch_unregister(rport->sw);
48200+}
48201+
48202+static int rockchip_usb2phy_host_port_init(struct rockchip_usb2phy *rphy,
48203+					   struct rockchip_usb2phy_port *rport,
48204+					   struct device_node *child_np)
48205+{
48206+	int ret;
48207+	struct regmap *base = get_reg_base(rphy);
48208+
48209+	rport->port_id = USB2PHY_PORT_HOST;
48210+	rport->port_cfg = &rphy->phy_cfg->port_cfgs[USB2PHY_PORT_HOST];
48211+
+	/* enter a low power state on suspend */
48213+	rport->low_power_en =
48214+		of_property_read_bool(child_np, "rockchip,low-power-mode");
48215+
48216+	mutex_init(&rport->mutex);
48217+	INIT_DELAYED_WORK(&rport->sm_work, rockchip_usb2phy_sm_work);
48218+
48219+	ret = rockchip_usb2phy_port_irq_init(rphy, rport, child_np);
48220+	if (ret) {
48221+		dev_err(rphy->dev, "failed to init irq for host port\n");
48222+		return ret;
48223+	}
48224+
48225+	/*
+	 * Put the phy-port into suspend mode here to save power; the
+	 * usb controller will resume it during its probe if needed.
48229+	 */
48230+	ret = property_enable(base, &rport->port_cfg->phy_sus, true);
48231+	if (ret)
48232+		return ret;
48233+	rport->suspended = true;
48234+
48235+	return 0;
48236+}
48237+
48238+static int rockchip_otg_event(struct notifier_block *nb,
48239+			      unsigned long event, void *ptr)
48240+{
48241+	struct rockchip_usb2phy_port *rport =
48242+		container_of(nb, struct rockchip_usb2phy_port, event_nb);
48243+
48244+	schedule_delayed_work(&rport->otg_sm_work, OTG_SCHEDULE_DELAY);
48245+
48246+	return NOTIFY_DONE;
48247+}
48248+
48249+static int rockchip_usb2phy_otg_port_init(struct rockchip_usb2phy *rphy,
48250+					  struct rockchip_usb2phy_port *rport,
48251+					  struct device_node *child_np)
48252+{
48253+	int ret;
48254+	int iddig;
48255+	struct regmap *base = get_reg_base(rphy);
48256+
48257+	rport->port_id = USB2PHY_PORT_OTG;
48258+	rport->port_cfg = &rphy->phy_cfg->port_cfgs[USB2PHY_PORT_OTG];
48259+	rport->state = OTG_STATE_UNDEFINED;
48260+	rport->vbus_attached = false;
48261+	rport->vbus_enabled = false;
48262+	rport->perip_connected = false;
48263+	rport->prev_iddig = true;
48264+
48265+	mutex_init(&rport->mutex);
48266+
+	/* The bypass uart function is only used during the debug stage. */
48268+	rport->bypass_uart_en =
48269+		of_property_read_bool(child_np, "rockchip,bypass-uart");
48270+	rport->vbus_always_on =
48271+		of_property_read_bool(child_np, "rockchip,vbus-always-on");
48272+	rport->utmi_avalid =
48273+		of_property_read_bool(child_np, "rockchip,utmi-avalid");
48274+
+	/* enter a low power state on suspend */
48276+	rport->low_power_en =
48277+		of_property_read_bool(child_np, "rockchip,low-power-mode");
48278+
+	/* For type-c ports whose vbus_det signal is always pulled up */
48280+	rport->typec_vbus_det =
48281+		of_property_read_bool(child_np, "rockchip,typec-vbus-det");
48282+
48283+	rport->sel_pipe_phystatus =
48284+		of_property_read_bool(child_np, "rockchip,sel-pipe-phystatus");
48285+
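+	/* Selecting the pipe phystatus requires access to the usb controller grf */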
48286+	if (rport->sel_pipe_phystatus) {
48287+		rphy->usbctrl_grf =
48288+			syscon_regmap_lookup_by_phandle(rphy->dev->of_node,
48289+							"rockchip,usbctrl-grf");
48290+		if (IS_ERR(rphy->usbctrl_grf)) {
48291+			dev_err(rphy->dev, "Failed to map usbctrl-grf\n");
48292+			return PTR_ERR(rphy->usbctrl_grf);
48293+		}
48294+	}
48295+
+	/* Get the optional vbus regulator */
48297+	rport->vbus = devm_regulator_get_optional(&rport->phy->dev, "vbus");
48298+	if (IS_ERR(rport->vbus)) {
48299+		ret = PTR_ERR(rport->vbus);
48300+		if (ret == -EPROBE_DEFER)
48301+			return ret;
48302+
48303+		if (rport->mode == USB_DR_MODE_OTG)
48304+			dev_warn(&rport->phy->dev, "No vbus specified for otg port\n");
48305+		rport->vbus = NULL;
48306+	}
48307+
48308+	rport->mode = of_usb_get_dr_mode_by_phy(child_np, -1);
48309+	iddig = property_enabled(rphy->grf, &rport->port_cfg->utmi_iddig);
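+	/* Default the otg port to host role when dr_mode is host/unknown or iddig is low */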
48310+	if (rphy->edev_self && (rport->mode == USB_DR_MODE_HOST ||
48311+	    rport->mode == USB_DR_MODE_UNKNOWN || !iddig)) {
48312+		/* Enable VBUS supply for otg port */
48313+		extcon_set_state(rphy->edev, EXTCON_USB, false);
48314+		extcon_set_state(rphy->edev, EXTCON_USB_HOST, true);
48315+		extcon_set_state(rphy->edev, EXTCON_USB_VBUS_EN, true);
48316+		ret = rockchip_set_vbus_power(rport, true);
48317+		if (ret)
48318+			return ret;
48319+	}
48320+
48321+	ret = rockchip_usb2phy_port_irq_init(rphy, rport, child_np);
48322+	if (ret) {
48323+		dev_err(rphy->dev, "failed to init irq for otg port\n");
48324+		return ret;
48325+	}
48326+
48327+	if (IS_REACHABLE(CONFIG_TYPEC) &&
48328+	    device_property_present(rphy->dev, "orientation-switch")) {
48329+		ret = rockchip_usb2phy_setup_orien_switch(rphy, rport);
48330 		if (ret)
48331+			return ret;
48332+
48333+		ret = devm_add_action_or_reset(rphy->dev,
48334+					       rockchip_usb2phy_orien_switch_unregister,
48335+					       rport);
48336+		if (ret)
48337+			return ret;
48338+	}
48339+
48340+	/*
+	 * Select whether the utmi bvalid comes from the usb phy or the grf.
+	 * Most Rockchip SoCs have a VBUSDET pin, so the usb phy can detect
+	 * USB VBUS and drive the bvalid signal itself; select the bvalid
+	 * from the usb phy by default. SoCs without a VBUSDET pin
+	 * (e.g. RV1103) need to select the bvalid from the grf and set
+	 * bvalid to be valid (high) by default.
48349+	 */
48350+	if (rport->port_cfg->bvalid_grf_sel.enable != 0) {
48351+		if (of_machine_is_compatible("rockchip,rv1103"))
48352+			property_enable(base, &rport->port_cfg->bvalid_grf_sel, true);
48353+		else
48354+			property_enable(base, &rport->port_cfg->bvalid_grf_sel, false);
48355+	}
48356+
48357+	if (rport->vbus_always_on)
48358+		extcon_set_state(rphy->edev, EXTCON_USB, true);
48359+
48360+	if (rport->vbus_always_on || rport->mode == USB_DR_MODE_HOST ||
48361+	    rport->mode == USB_DR_MODE_UNKNOWN)
48362+		goto out;
48363+
48364+	wake_lock_init(&rport->wakelock, WAKE_LOCK_SUSPEND, "rockchip_otg");
48365+	INIT_DELAYED_WORK(&rport->bypass_uart_work,
48366+			  rockchip_usb_bypass_uart_work);
48367+	INIT_DELAYED_WORK(&rport->chg_work, rockchip_chg_detect_work);
48368+	INIT_DELAYED_WORK(&rport->otg_sm_work, rockchip_usb2phy_otg_sm_work);
48369+
48370+	if (!IS_ERR(rphy->edev)) {
48371+		rport->event_nb.notifier_call = rockchip_otg_event;
48372+
48373+		ret = devm_extcon_register_notifier(rphy->dev, rphy->edev,
48374+					EXTCON_USB_HOST, &rport->event_nb);
48375+		if (ret) {
48376 			dev_err(rphy->dev, "register USB HOST notifier failed\n");
48377+			goto err;
48378+		}
48379 	}
48380 
48381 out:
48382+	/*
+	 * Put the phy-port into suspend mode here to save power; the
+	 * usb controller will resume it during its probe if needed.
48386+	 */
48387+	ret = property_enable(base, &rport->port_cfg->phy_sus, true);
48388+	if (ret)
48389+		return ret;
48390+	rport->suspended = true;
48391+
48392+	return 0;
48393+
48394+err:
48395+	wake_lock_destroy(&rport->wakelock);
48396 	return ret;
48397 }
48398 
48399@@ -1068,10 +2183,12 @@ static int rockchip_usb2phy_probe(struct platform_device *pdev)
48400 	struct device_node *child_np;
48401 	struct phy_provider *provider;
48402 	struct rockchip_usb2phy *rphy;
48403+	struct resource *res;
48404 	const struct rockchip_usb2phy_cfg *phy_cfgs;
48405 	const struct of_device_id *match;
48406 	unsigned int reg;
48407-	int index, ret;
48408+	unsigned int index;
48409+	int ret;
48410 
48411 	rphy = devm_kzalloc(dev, sizeof(*rphy), GFP_KERNEL);
48412 	if (!rphy)
48413@@ -1083,33 +2200,51 @@ static int rockchip_usb2phy_probe(struct platform_device *pdev)
48414 		return -EINVAL;
48415 	}
48416 
48417-	if (!dev->parent || !dev->parent->of_node)
48418-		return -EINVAL;
48419+	if (!dev->parent || !dev->parent->of_node) {
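+		/*
+		 * Standalone phy node: map the phy registers and take
+		 * the grf from the "rockchip,usbgrf" phandle.
+		 */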
48420+		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
48421+		if (!res) {
48422+			dev_err(dev, "missing memory resource\n");
48423+			return -ENODEV;
48424+		}
48425 
48426-	rphy->grf = syscon_node_to_regmap(dev->parent->of_node);
48427-	if (IS_ERR(rphy->grf))
48428-		return PTR_ERR(rphy->grf);
48429+		rphy->phy_base = devm_ioremap_resource(dev, res);
48430+		if (IS_ERR(rphy->phy_base))
48431+			return PTR_ERR(rphy->phy_base);
48432 
48433-	if (of_device_is_compatible(np, "rockchip,rv1108-usb2phy")) {
48434-		rphy->usbgrf =
48435-			syscon_regmap_lookup_by_phandle(dev->of_node,
48436-							"rockchip,usbgrf");
48437-		if (IS_ERR(rphy->usbgrf))
48438-			return PTR_ERR(rphy->usbgrf);
48439+		rphy->grf = syscon_regmap_lookup_by_phandle(np,
48440+							    "rockchip,usbgrf");
48441+		if (IS_ERR(rphy->grf))
48442+			return PTR_ERR(rphy->grf);
48443+
48444+		reg = res->start;
48445 	} else {
48446-		rphy->usbgrf = NULL;
48447-	}
48448+		rphy->grf = syscon_node_to_regmap(dev->parent->of_node);
48449+		if (IS_ERR(rphy->grf))
48450+			return PTR_ERR(rphy->grf);
48451 
48452-	if (of_property_read_u32(np, "reg", &reg)) {
48453-		dev_err(dev, "the reg property is not assigned in %pOFn node\n",
48454-			np);
48455-		return -EINVAL;
48456+		if (of_device_is_compatible(np, "rockchip,rv1108-usb2phy")) {
48457+			rphy->usbgrf =
48458+				syscon_regmap_lookup_by_phandle(dev->of_node,
48459+							"rockchip,usbgrf");
48460+			if (IS_ERR(rphy->usbgrf))
48461+				return PTR_ERR(rphy->usbgrf);
48462+		} else {
48463+			rphy->usbgrf = NULL;
48464+		}
48465+
48466+		if (of_property_read_u32(np, "reg", &reg)) {
+			dev_err(dev, "missing reg property in %pOFn node\n",
+				np);
48469+			return -EINVAL;
48470+		}
48471 	}
48472 
48473 	rphy->dev = dev;
48474 	phy_cfgs = match->data;
48475 	rphy->chg_state = USB_CHG_STATE_UNDEFINED;
48476 	rphy->chg_type = POWER_SUPPLY_TYPE_UNKNOWN;
48477+	rphy->edev_self = false;
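+	/* Optional combined irq; when absent, per-port irqs are requested instead */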
48478+	rphy->irq = platform_get_irq(pdev, 0);
48479 	platform_set_drvdata(pdev, rphy);
48480 
48481 	ret = rockchip_usb2phy_extcon_register(rphy);
48482@@ -1118,113 +2253,778 @@ static int rockchip_usb2phy_probe(struct platform_device *pdev)
48483 
48484 	/* find out a proper config which can be matched with dt. */
48485 	index = 0;
48486-	while (phy_cfgs[index].reg) {
48487+	do {
48488 		if (phy_cfgs[index].reg == reg) {
48489 			rphy->phy_cfg = &phy_cfgs[index];
48490 			break;
48491 		}
48492 
48493-		++index;
48494-	}
48495+		++index;
48496+	} while (phy_cfgs[index].reg);
48497+
48498+	if (!rphy->phy_cfg) {
48499+		dev_err(dev, "no phy-config can be matched with %pOFn node\n",
48500+			np);
48501+		return -EINVAL;
48502+	}
48503+
48504+	pm_runtime_set_active(dev);
48505+	pm_runtime_enable(dev);
48506+	pm_runtime_get_sync(dev);
+
48509+	rphy->phy_reset = devm_reset_control_get_optional(dev, "phy");
48510+	if (IS_ERR(rphy->phy_reset))
48511+		return PTR_ERR(rphy->phy_reset);
48512+
48513+	ret = devm_clk_bulk_get_all(dev, &rphy->clks);
48514+	if (ret == -EPROBE_DEFER)
48515+		return ret;
48516+
48517+	/* Clocks are optional */
48518+	if (ret < 0)
48519+		rphy->num_clks = 0;
48520+	else
48521+		rphy->num_clks = ret;
48522+
48523+	ret = clk_bulk_prepare_enable(rphy->num_clks, rphy->clks);
48524+	if (ret)
48525+		return ret;
48526+
48527+	if (rphy->phy_cfg->phy_tuning) {
48528+		ret = rphy->phy_cfg->phy_tuning(rphy);
48529+		if (ret)
48530+			goto disable_clks;
48531+	}
48532+
48533+	index = 0;
48534+	for_each_available_child_of_node(np, child_np) {
48535+		struct rockchip_usb2phy_port *rport = &rphy->ports[index];
48536+		struct phy *phy;
48537+
48538+		/* This driver aims to support both otg-port and host-port */
48539+		if (!of_node_name_eq(child_np, "host-port") &&
48540+		    !of_node_name_eq(child_np, "otg-port"))
48541+			goto next_child;
48542+
48543+		phy = devm_phy_create(dev, child_np, &rockchip_usb2phy_ops);
48544+		if (IS_ERR(phy)) {
48545+			dev_err(dev, "failed to create phy\n");
48546+			ret = PTR_ERR(phy);
48547+			goto put_child;
48548+		}
48549+
48550+		rport->phy = phy;
48551+		phy_set_drvdata(rport->phy, rport);
48552+
48553+		/* initialize otg/host port separately */
48554+		if (of_node_name_eq(child_np, "host-port")) {
48555+			ret = rockchip_usb2phy_host_port_init(rphy, rport,
48556+							      child_np);
48557+			if (ret)
48558+				goto put_child;
48559+		} else {
48560+			ret = rockchip_usb2phy_otg_port_init(rphy, rport,
48561+							     child_np);
48562+			if (ret)
48563+				goto put_child;
48564+		}
48565+
48566+next_child:
48567+		/* to prevent out of boundary */
48568+		if (++index >= rphy->phy_cfg->num_ports)
48569+			break;
48570+	}
48571+
48572+	provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
48573+	if (IS_ERR(provider)) {
48574+		dev_err(dev, "Failed to register phy provider\n");
48575+		ret = PTR_ERR(provider);
48576+		goto put_child;
48577+	}
48578+
48579+	/* Attributes */
48580+	ret = sysfs_create_group(&dev->kobj, &usb2_phy_attr_group);
48581+	if (ret) {
48582+		dev_err(dev, "Cannot create sysfs group: %d\n", ret);
48583+		goto put_child;
48584+	}
48585+
48586+	ret = rockchip_usb2phy_clk480m_register(rphy);
48587+	if (ret) {
48588+		dev_err(dev, "failed to register 480m output clock\n");
48589+		goto put_child;
48590+	}
48591+
48592+	if (rphy->irq > 0) {
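+		/* The combined irq handler demuxes the per-port status bits itself */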
48593+		ret = devm_request_threaded_irq(rphy->dev, rphy->irq, NULL,
48594+						rockchip_usb2phy_irq,
48595+						IRQF_ONESHOT,
48596+						"rockchip_usb2phy",
48597+						rphy);
48598+		if (ret) {
48599+			dev_err(rphy->dev,
48600+				"failed to request usb2 phy irq handle\n");
48601+			goto put_child;
48602+		}
48603+	}
48604+
+	device_init_wakeup(rphy->dev,
+			   of_property_read_bool(np, "wakeup-source"));
48609+
48610+	return 0;
48611+
48612+put_child:
48613+	of_node_put(child_np);
48614+disable_clks:
48615+	pm_runtime_put_sync(dev);
48616+	pm_runtime_disable(dev);
48617+	clk_bulk_disable_unprepare(rphy->num_clks, rphy->clks);
48618+	return ret;
48619+}
48620+
48621+static int __maybe_unused
48622+rockchip_usb2phy_low_power_enable(struct rockchip_usb2phy *rphy,
48623+				  struct rockchip_usb2phy_port *rport,
48624+				  bool value)
48625+{
48626+	int ret = 0;
48627+
48628+	if (!rport->low_power_en)
48629+		return ret;
48630+
48631+	if (rport->port_id == USB2PHY_PORT_OTG) {
48632+		dev_info(&rport->phy->dev, "set otg port low power state %d\n",
48633+			 value);
48634+		ret = property_enable(rphy->grf, &rport->port_cfg->bypass_bc,
48635+				      value);
48636+		if (ret)
48637+			return ret;
48638+
48639+		ret = property_enable(rphy->grf, &rport->port_cfg->bypass_otg,
48640+				      value);
48641+		if (ret)
48642+			return ret;
48643+
48644+		ret = property_enable(rphy->grf, &rport->port_cfg->vbus_det_en,
48645+				      !value);
48646+	} else if (rport->port_id == USB2PHY_PORT_HOST) {
48647+		dev_info(&rport->phy->dev, "set host port low power state %d\n",
48648+			 value);
48649+
48650+		ret = property_enable(rphy->grf, &rport->port_cfg->bypass_host,
48651+				      value);
48652+	}
48653+
48654+	return ret;
48655+}
48656+
48657+static int rk312x_usb2phy_tuning(struct rockchip_usb2phy *rphy)
48658+{
48659+	int ret;
48660+
48661+	/* Turn off differential receiver in suspend mode */
48662+	ret = regmap_write(rphy->grf, 0x298, 0x00040000);
48663+	if (ret)
48664+		return ret;
48665+
48666+	return 0;
48667+}
48668+
48669+static int rk3228_usb2phy_tuning(struct rockchip_usb2phy *rphy)
48670+{
48671+	int ret = 0;
48672+
+	/* Enable pre-emphasis in the non-chirp state for the PHY0 otg port */
48674+	if (rphy->phy_cfg->reg == 0x760)
48675+		ret = regmap_write(rphy->grf, 0x76c, 0x00070004);
48676+
48677+	return ret;
48678+}
48679+
48680+static int rk3366_usb2phy_tuning(struct rockchip_usb2phy *rphy)
48681+{
48682+	unsigned int open_pre_emphasize = 0xffff851f;
48683+	unsigned int eye_height_tuning = 0xffff68c8;
48684+	unsigned int compensation_tuning = 0xffff026e;
48685+	int ret = 0;
48686+
+	/* Enable HS pre-emphasis to expand the HS slew rate for each port. */
48688+	ret |= regmap_write(rphy->grf, 0x0780, open_pre_emphasize);
48689+	ret |= regmap_write(rphy->grf, 0x079c, eye_height_tuning);
48690+	ret |= regmap_write(rphy->grf, 0x07b0, open_pre_emphasize);
48691+	ret |= regmap_write(rphy->grf, 0x07cc, eye_height_tuning);
48692+
+	/* Compensate the default tuning reference related to ODT, etc. */
48694+	ret |= regmap_write(rphy->grf, 0x078c, compensation_tuning);
48695+
48696+	return ret;
48697+}
48698+
48699+static int rk3399_usb2phy_tuning(struct rockchip_usb2phy *rphy)
48700+{
48701+	struct device_node *node = rphy->dev->of_node;
48702+	int ret = 0;
48703+
48704+	if (rphy->phy_cfg->reg == 0xe450) {
48705+		/*
+		 * Disable the pre-emphasis in the eop and chirp
+		 * states to avoid mis-triggering the disconnect
+		 * detection and to avoid hs handshake failures
+		 * for PHY0.
48710+		 */
48711+		ret |= regmap_write(rphy->grf, 0x4480,
48712+				    GENMASK(17, 16) | 0x0);
48713+		ret |= regmap_write(rphy->grf, 0x44b4,
48714+				    GENMASK(17, 16) | 0x0);
48715+	} else {
48716+		/*
+		 * Disable the pre-emphasis in the eop and chirp
+		 * states to avoid mis-triggering the disconnect
+		 * detection and to avoid hs handshake failures
+		 * for PHY1.
48721+		 */
48722+		ret |= regmap_write(rphy->grf, 0x4500,
48723+				    GENMASK(17, 16) | 0x0);
48724+		ret |= regmap_write(rphy->grf, 0x4534,
48725+				    GENMASK(17, 16) | 0x0);
48726+	}
48727+
48728+	if (!of_property_read_bool(node, "rockchip,u2phy-tuning"))
48729+		return ret;
48730+
48731+	if (rphy->phy_cfg->reg == 0xe450) {
48732+		/*
48733+		 * Set max ODT compensation voltage and
48734+		 * current tuning reference for PHY0.
48735+		 */
48736+		ret |= regmap_write(rphy->grf, 0x448c,
48737+				    GENMASK(23, 16) | 0xe3);
48738+
48739+		/* Set max pre-emphasis level for PHY0 */
48740+		ret |= regmap_write(rphy->grf, 0x44b0,
48741+				    GENMASK(18, 16) | 0x07);
48742+
48743+		/*
48744+		 * Set PHY0 A port squelch trigger point to 125mv
48745+		 */
48746+		ret |= regmap_write(rphy->grf, 0x4480,
48747+				    GENMASK(30, 30) | 0x4000);
48748+	} else {
48749+		/*
48750+		 * Set max ODT compensation voltage and
48751+		 * current tuning reference for PHY1.
48752+		 */
48753+		ret |= regmap_write(rphy->grf, 0x450c,
48754+				    GENMASK(23, 16) | 0xe3);
48755+
48756+		/* Set max pre-emphasis level for PHY1 */
48757+		ret |= regmap_write(rphy->grf, 0x4530,
48758+				    GENMASK(18, 16) | 0x07);
48759+
48760+		/*
48761+		 * Set PHY1 A port squelch trigger point to 125mv
48762+		 */
48763+		ret |= regmap_write(rphy->grf, 0x4500,
48764+				    GENMASK(30, 30) | 0x4000);
48765+	}
48766+
48767+	return ret;
48768+}
48769+
48770+static int rk3568_usb2phy_tuning(struct rockchip_usb2phy *rphy)
48771+{
48772+	int ret = 0;
48773+
48774+	/* Turn off differential receiver by default to save power */
48775+	phy_clear_bits(rphy->phy_base + 0x30, BIT(2));
48776+
48777+	/* Enable otg port pre-emphasis during non-chirp phase */
48778+	phy_update_bits(rphy->phy_base, GENMASK(2, 0), 0x04);
48779+
48780+	/* Enable host port pre-emphasis during non-chirp phase */
48781+	phy_update_bits(rphy->phy_base + 0x0400, GENMASK(2, 0), 0x04);
48782+
48783+	if (rphy->phy_cfg->reg == 0xfe8a0000) {
48784+		/* Set otg port HS eye height to 437.5mv(default is 400mv) */
48785+		phy_update_bits(rphy->phy_base + 0x30, GENMASK(6, 4), (0x06 << 4));
48786+
48787+		/*
48788+		 * Set the bvalid filter time to 10ms
48789+		 * based on the usb2 phy grf pclk 100MHz.
48790+		 */
48791+		ret |= regmap_write(rphy->grf, 0x0048, FILTER_COUNTER);
48792+
48793+		/*
48794+		 * Set the id filter time to 10ms based
48795+		 * on the usb2 phy grf pclk 100MHz.
48796+		 */
48797+		ret |= regmap_write(rphy->grf, 0x004c, FILTER_COUNTER);
48798+	}
48799+
48800+	/* Enable host port (usb3 host1 and usb2 host1) wakeup irq */
48801+	ret |= regmap_write(rphy->grf, 0x000c, 0x80008000);
48802+
48803+	return ret;
48804+}
48805+
48806+static int rv1106_usb2phy_tuning(struct rockchip_usb2phy *rphy)
48807+{
48808+	/* Always enable pre-emphasis in SOF & EOP & chirp & non-chirp state */
48809+	phy_update_bits(rphy->phy_base + 0x30, GENMASK(2, 0), 0x07);
48810+
48811+	if (rockchip_get_cpu_version()) {
48812+		/* Set Tx HS pre_emphasize strength to 3'b001 */
48813+		phy_update_bits(rphy->phy_base + 0x40, GENMASK(5, 3), (0x01 << 3));
48814+	} else {
48815+		/* Set Tx HS pre_emphasize strength to 3'b011 */
48816+		phy_update_bits(rphy->phy_base + 0x40, GENMASK(5, 3), (0x03 << 3));
48817+	}
48818+
48819+	/* Set RX Squelch trigger point configure to 4'b0000(112.5 mV) */
48820+	phy_update_bits(rphy->phy_base + 0x64, GENMASK(6, 3), (0x00 << 3));
48821+
48822+	/* Turn off differential receiver by default to save power */
48823+	phy_clear_bits(rphy->phy_base + 0x100, BIT(6));
48824+
48825+	/* Set 45ohm HS ODT value to 5'b10111 to increase driver strength */
48826+	phy_update_bits(rphy->phy_base + 0x11c, GENMASK(4, 0), 0x17);
48827+
+	/* Set Tx HS eye height tuning to 3'b011 (462 mV) */
48829+	phy_update_bits(rphy->phy_base + 0x124, GENMASK(4, 2), (0x03 << 2));
48830+
48831+	/* Bypass Squelch detector calibration */
48832+	phy_update_bits(rphy->phy_base + 0x1a4, GENMASK(7, 4), (0x01 << 4));
48833+	phy_update_bits(rphy->phy_base + 0x1b4, GENMASK(7, 4), (0x01 << 4));
48834+
48835+	return 0;
48836+}
48837+
48838+static int rk3568_vbus_detect_control(struct rockchip_usb2phy *rphy, bool en)
48839+{
48840+	if (en) {
48841+		/* Enable vbus voltage level detection function */
48842+		phy_clear_bits(rphy->phy_base + 0x3c, BIT(7));
48843+	} else {
48844+		/* Disable vbus voltage level detection function */
48845+		phy_set_bits(rphy->phy_base + 0x3c, BIT(7));
48846+	}
48847+
48848+	return 0;
48849+}
48850+
48851+static int rk3588_usb2phy_tuning(struct rockchip_usb2phy *rphy)
48852+{
48853+	unsigned int reg;
48854+	int ret = 0;
48855+
48856+	/* Read the SIDDQ control register */
48857+	ret = regmap_read(rphy->grf, 0x0008, &reg);
48858+	if (ret)
48859+		return ret;
48860+
48861+	if (reg & BIT(13)) {
48862+		/* Deassert SIDDQ to power on analog block */
48863+		ret = regmap_write(rphy->grf, 0x0008,
48864+				   GENMASK(29, 29) | 0x0000);
48865+		if (ret)
48866+			return ret;
48867+
48868+		/* Do reset after exit IDDQ mode */
48869+		ret = rockchip_usb2phy_reset(rphy);
48870+		if (ret)
48871+			return ret;
48872+	}
48873+
48874+	if (rphy->phy_cfg->reg == 0x0000) {
48875+		/*
48876+		 * Set USB2 PHY0 suspend configuration for USB3_0
48877+		 * 1. Set utmi_termselect to 1'b1 (en FS terminations)
48878+		 * 2. Set utmi_xcvrselect to 2'b01 (FS transceiver)
48879+		 * 3. Set utmi_opmode to 2'b01 (no-driving)
48880+		 */
48881+		ret |= regmap_write(rphy->grf, 0x000c,
48882+				    GENMASK(20, 16) | 0x0015);
48883+
48884+		/* HS DC Voltage Level Adjustment 4'b1001 : +5.89% */
48885+		ret |= regmap_write(rphy->grf, 0x0004,
48886+				   GENMASK(27, 24) | 0x0900);
48887+
48888+		/* HS Transmitter Pre-Emphasis Current Control 2'b10 : 2x */
48889+		ret |= regmap_write(rphy->grf, 0x0008,
48890+				   GENMASK(20, 19) | 0x0010);
48891+
48892+		/* Pullup iddig pin for USB3_0 OTG mode */
48893+		ret |= regmap_write(rphy->grf, 0x0010,
48894+				    GENMASK(17, 16) | 0x0003);
48895+	} else if (rphy->phy_cfg->reg == 0x4000) {
48896+		/*
48897+		 * Set USB2 PHY1 suspend configuration for USB3_1
48898+		 * 1. Set utmi_termselect to 1'b1 (en FS terminations)
48899+		 * 2. Set utmi_xcvrselect to 2'b01(FS transceiver)
48900+		 * 3. Set utmi_opmode to 2'b01 (no-driving)
48901+		 */
48902+		ret |= regmap_write(rphy->grf, 0x000c,
48903+				    GENMASK(20, 16) | 0x0015);
48904+
48905+		/* HS DC Voltage Level Adjustment 4'b1001 : +5.89% */
48906+		ret |= regmap_write(rphy->grf, 0x0004,
48907+				   GENMASK(27, 24) | 0x0900);
48908+
48909+		/* HS Transmitter Pre-Emphasis Current Control 2'b10 : 2x */
48910+		ret |= regmap_write(rphy->grf, 0x0008,
48911+				   GENMASK(20, 19) | 0x0010);
48912+
48913+		/* Pullup iddig pin for USB3_1 OTG mode */
48914+		ret |= regmap_write(rphy->grf, 0x0010,
48915+				    GENMASK(17, 16) | 0x0003);
48916+	} else if (rphy->phy_cfg->reg == 0x8000) {
48917+		/*
48918+		 * Set USB2 PHY2 suspend configuration for USB2_0
48919+		 * 1. Set utmi_termselect to 1'b1 (en FS terminations)
48920+		 * 2. Set utmi_xcvrselect to 2'b01(FS transceiver)
48921+		 * 3. Set utmi_opmode to 2'b00 (normal)
48922+		 */
48923+		ret |= regmap_write(rphy->grf, 0x000c,
48924+				    GENMASK(20, 16) | 0x0014);
48925+
48926+		/* HS DC Voltage Level Adjustment 4'b1001 : +5.89% */
48927+		ret |= regmap_write(rphy->grf, 0x0004,
48928+				   GENMASK(27, 24) | 0x0900);
48929+
48930+		/* HS Transmitter Pre-Emphasis Current Control 2'b10 : 2x */
48931+		ret |= regmap_write(rphy->grf, 0x0008,
48932+				   GENMASK(20, 19) | 0x0010);
48933+	} else if (rphy->phy_cfg->reg == 0xc000) {
48934+		/*
48935+		 * Set USB2 PHY3 suspend configuration for USB2_1
48936+		 * 1. Set utmi_termselect to 1'b1 (en FS terminations)
48937+		 * 2. Set utmi_xcvrselect to 2'b01(FS transceiver)
48938+		 * 3. Set utmi_opmode to 2'b00 (normal)
48939+		 */
48940+		ret |= regmap_write(rphy->grf, 0x000c,
48941+				    GENMASK(20, 16) | 0x0014);
48942+
48943+		/* HS DC Voltage Level Adjustment 4'b1001 : +5.89% */
48944+		ret |= regmap_write(rphy->grf, 0x0004,
48945+				   GENMASK(27, 24) | 0x0900);
48946+
48947+		/* HS Transmitter Pre-Emphasis Current Control 2'b10 : 2x */
48948+		ret |= regmap_write(rphy->grf, 0x0008,
48949+				   GENMASK(20, 19) | 0x0010);
48950+	}
48951+
48952+	return ret;
48953+}
48954+
48955+#ifdef CONFIG_PM_SLEEP
48956+static int rockchip_usb2phy_pm_suspend(struct device *dev)
48957+{
48958+	struct rockchip_usb2phy *rphy = dev_get_drvdata(dev);
48959+	const struct rockchip_usb2phy_cfg *phy_cfg = rphy->phy_cfg;
48960+	struct rockchip_usb2phy_port *rport;
48961+	unsigned int index;
48962+	int ret = 0;
48963+	bool wakeup_enable = false;
48964+
48965+	if (device_may_wakeup(rphy->dev))
48966+		wakeup_enable = true;
48967+
48968+	/*
+	 * Set the linestate filter time to 1 ms, based on the
+	 * 32 kHz usb2 phy grf pclk used during suspend.
48971+	 */
48972+	if (phy_cfg->ls_filter_con.enable) {
48973+		ret = regmap_write(rphy->grf, phy_cfg->ls_filter_con.offset,
48974+				   phy_cfg->ls_filter_con.enable);
48975+		if (ret)
48976+			dev_err(rphy->dev, "failed to set ls filter %d\n", ret);
48977+	}
48978+
48979+	for (index = 0; index < phy_cfg->num_ports; index++) {
48980+		rport = &rphy->ports[index];
48981+		if (!rport->phy)
48982+			continue;
48983+
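+		/*
+		 * Save the iddig state and mask the id irq; any role
+		 * change is re-evaluated on resume.
+		 */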
48984+		if (rport->port_id == USB2PHY_PORT_OTG &&
48985+		    (rport->id_irq > 0 || rphy->irq > 0)) {
48986+			mutex_lock(&rport->mutex);
48987+			rport->prev_iddig = property_enabled(rphy->grf,
48988+						&rport->port_cfg->utmi_iddig);
48989+			ret = rockchip_usb2phy_enable_id_irq(rphy, rport,
48990+							     false);
48991+			mutex_unlock(&rport->mutex);
48992+			if (ret) {
48993+				dev_err(rphy->dev,
48994+					"failed to disable id irq\n");
48995+				return ret;
48996+			}
48997+		}
48998+
48999+		if (rport->port_id == USB2PHY_PORT_OTG && wakeup_enable &&
49000+		    rport->bvalid_irq > 0)
49001+			enable_irq_wake(rport->bvalid_irq);
49002 
49003-	if (!rphy->phy_cfg) {
49004-		dev_err(dev, "no phy-config can be matched with %pOFn node\n",
49005-			np);
49006-		return -EINVAL;
49007-	}
+		/* activate linestate detection to catch the next interrupt. */
49009+		mutex_lock(&rport->mutex);
49010+		ret = rockchip_usb2phy_enable_line_irq(rphy, rport, true);
49011+		mutex_unlock(&rport->mutex);
49012+		if (ret) {
49013+			dev_err(rphy->dev, "failed to enable linestate irq\n");
49014+			return ret;
49015+		}
49016 
49017-	rphy->clk = of_clk_get_by_name(np, "phyclk");
49018-	if (!IS_ERR(rphy->clk)) {
49019-		clk_prepare_enable(rphy->clk);
49020-	} else {
49021-		dev_info(&pdev->dev, "no phyclk specified\n");
49022-		rphy->clk = NULL;
49023-	}
49024+		if (wakeup_enable && rport->ls_irq > 0)
49025+			enable_irq_wake(rport->ls_irq);
49026 
49027-	ret = rockchip_usb2phy_clk480m_register(rphy);
49028-	if (ret) {
49029-		dev_err(dev, "failed to register 480m output clock\n");
49030-		goto disable_clks;
49031+		/* enter low power state */
49032+		rockchip_usb2phy_low_power_enable(rphy, rport, true);
49033 	}
49034 
49035-	index = 0;
49036-	for_each_available_child_of_node(np, child_np) {
49037-		struct rockchip_usb2phy_port *rport = &rphy->ports[index];
49038-		struct phy *phy;
49039+	if (wakeup_enable && rphy->irq > 0)
49040+		enable_irq_wake(rphy->irq);
49041 
49042-		/* This driver aims to support both otg-port and host-port */
49043-		if (!of_node_name_eq(child_np, "host-port") &&
49044-		    !of_node_name_eq(child_np, "otg-port"))
49045-			goto next_child;
49046+	return ret;
49047+}
49048 
49049-		phy = devm_phy_create(dev, child_np, &rockchip_usb2phy_ops);
49050-		if (IS_ERR(phy)) {
49051-			dev_err(dev, "failed to create phy\n");
49052-			ret = PTR_ERR(phy);
49053-			goto put_child;
49054-		}
49055+static int rockchip_usb2phy_pm_resume(struct device *dev)
49056+{
49057+	struct rockchip_usb2phy *rphy = dev_get_drvdata(dev);
49058+	const struct rockchip_usb2phy_cfg *phy_cfg = rphy->phy_cfg;
49059+	struct rockchip_usb2phy_port *rport;
49060+	unsigned int index;
49061+	bool iddig;
49062+	int ret = 0;
49063+	bool wakeup_enable = false;
49064 
49065-		rport->phy = phy;
49066-		phy_set_drvdata(rport->phy, rport);
49067+	if (device_may_wakeup(rphy->dev))
49068+		wakeup_enable = true;
49069 
49070-		/* initialize otg/host port separately */
49071-		if (of_node_name_eq(child_np, "host-port")) {
49072-			ret = rockchip_usb2phy_host_port_init(rphy, rport,
49073-							      child_np);
49074-			if (ret)
49075-				goto put_child;
49076-		} else {
49077-			ret = rockchip_usb2phy_otg_port_init(rphy, rport,
49078-							     child_np);
49079-			if (ret)
49080-				goto put_child;
49081-		}
49082+	/*
+	 * The PHY may have lost power during suspend; reset it to
+	 * recover the clock supplied to the usb controller.
49085+	 */
49086+	if (!wakeup_enable)
49087+		rockchip_usb2phy_reset(rphy);
49088 
49089-next_child:
49090-		/* to prevent out of boundary */
49091-		if (++index >= rphy->phy_cfg->num_ports)
49092-			break;
49093+	if (phy_cfg->phy_tuning)
49094+		ret = phy_cfg->phy_tuning(rphy);
49095+
49096+	if (phy_cfg->ls_filter_con.disable) {
49097+		ret = regmap_write(rphy->grf, phy_cfg->ls_filter_con.offset,
49098+				   phy_cfg->ls_filter_con.disable);
49099+		if (ret)
49100+			dev_err(rphy->dev, "failed to set ls filter %d\n", ret);
49101 	}
49102 
49103-	provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
49104-	return PTR_ERR_OR_ZERO(provider);
49105+	for (index = 0; index < phy_cfg->num_ports; index++) {
49106+		rport = &rphy->ports[index];
49107+		if (!rport->phy)
49108+			continue;
49109+
49110+		if (rport->port_id == USB2PHY_PORT_OTG &&
49111+		    (rport->id_irq > 0 || rphy->irq > 0)) {
49112+			mutex_lock(&rport->mutex);
49113+			iddig = property_enabled(rphy->grf,
49114+						 &rport->port_cfg->utmi_iddig);
49115+			ret = rockchip_usb2phy_enable_id_irq(rphy, rport,
49116+							     true);
49117+			mutex_unlock(&rport->mutex);
49118+			if (ret) {
49119+				dev_err(rphy->dev,
49120+					"failed to enable id irq\n");
49121+				return ret;
49122+			}
49123 
49124-put_child:
49125-	of_node_put(child_np);
49126-disable_clks:
49127-	if (rphy->clk) {
49128-		clk_disable_unprepare(rphy->clk);
49129-		clk_put(rphy->clk);
49130+			if (iddig != rport->prev_iddig) {
49131+				dev_dbg(&rport->phy->dev,
49132+					"iddig changed during resume\n");
49133+				rport->prev_iddig = iddig;
49134+				extcon_set_state_sync(rphy->edev,
49135+						      EXTCON_USB_HOST,
49136+						      !iddig);
49137+				extcon_set_state_sync(rphy->edev,
49138+						      EXTCON_USB_VBUS_EN,
49139+						      !iddig);
49140+				ret = rockchip_set_vbus_power(rport, !iddig);
49141+				if (ret)
49142+					return ret;
49143+			}
49144+		}
49145+
49146+		if (rport->port_id == USB2PHY_PORT_OTG && wakeup_enable &&
49147+		    rport->bvalid_irq > 0)
49148+			disable_irq_wake(rport->bvalid_irq);
49149+
49150+		if (wakeup_enable && rport->ls_irq > 0)
49151+			disable_irq_wake(rport->ls_irq);
49152+
49153+		/* exit low power state */
49154+		rockchip_usb2phy_low_power_enable(rphy, rport, false);
49155 	}
49156+
49157+	if (wakeup_enable && rphy->irq > 0)
49158+		disable_irq_wake(rphy->irq);
49159+
49160 	return ret;
49161 }
49162 
49163+static const struct dev_pm_ops rockchip_usb2phy_dev_pm_ops = {
49164+	SET_SYSTEM_SLEEP_PM_OPS(rockchip_usb2phy_pm_suspend,
49165+				rockchip_usb2phy_pm_resume)
49166+};
49167+
49168+#define ROCKCHIP_USB2PHY_DEV_PM	(&rockchip_usb2phy_dev_pm_ops)
49169+#else
49170+#define ROCKCHIP_USB2PHY_DEV_PM	NULL
49171+#endif /* CONFIG_PM_SLEEP */
49172+
49173+static const struct rockchip_usb2phy_cfg rk1808_phy_cfgs[] = {
49174+	{
49175+		.reg = 0x100,
49176+		.num_ports	= 2,
49177+		.clkout_ctl	= { 0x108, 4, 4, 1, 0 },
49178+		.port_cfgs	= {
49179+			[USB2PHY_PORT_OTG] = {
49180+				.phy_sus	= { 0x0100, 8, 0, 0, 0x1d1 },
49181+				.bvalid_det_en	= { 0x0110, 2, 2, 0, 1 },
49182+				.bvalid_det_st	= { 0x0114, 2, 2, 0, 1 },
49183+				.bvalid_det_clr = { 0x0118, 2, 2, 0, 1 },
49184+				.bypass_dm_en	= { 0x0108, 2, 2, 0, 1},
49185+				.bypass_sel	= { 0x0108, 3, 3, 0, 1},
49186+				.iddig_output	= { 0x0100, 10, 10, 0, 1 },
49187+				.iddig_en	= { 0x0100, 9, 9, 0, 1 },
49188+				.idfall_det_en	= { 0x0110, 5, 5, 0, 1 },
49189+				.idfall_det_st	= { 0x0114, 5, 5, 0, 1 },
49190+				.idfall_det_clr = { 0x0118, 5, 5, 0, 1 },
49191+				.idrise_det_en	= { 0x0110, 4, 4, 0, 1 },
49192+				.idrise_det_st	= { 0x0114, 4, 4, 0, 1 },
49193+				.idrise_det_clr = { 0x0118, 4, 4, 0, 1 },
49194+				.ls_det_en	= { 0x0110, 0, 0, 0, 1 },
49195+				.ls_det_st	= { 0x0114, 0, 0, 0, 1 },
49196+				.ls_det_clr	= { 0x0118, 0, 0, 0, 1 },
49197+				.utmi_avalid	= { 0x0120, 10, 10, 0, 1 },
49198+				.utmi_bvalid	= { 0x0120, 9, 9, 0, 1 },
49199+				.utmi_iddig	= { 0x0120, 6, 6, 0, 1 },
49200+				.utmi_ls	= { 0x0120, 5, 4, 0, 1 },
49201+				.vbus_det_en	= { 0x001c, 15, 15, 1, 0 },
49202+			},
49203+			[USB2PHY_PORT_HOST] = {
49204+				.phy_sus	= { 0x104, 8, 0, 0, 0x1d1 },
49205+				.ls_det_en	= { 0x110, 1, 1, 0, 1 },
49206+				.ls_det_st	= { 0x114, 1, 1, 0, 1 },
49207+				.ls_det_clr	= { 0x118, 1, 1, 0, 1 },
49208+				.utmi_ls	= { 0x120, 17, 16, 0, 1 },
49209+				.utmi_hstdet	= { 0x120, 19, 19, 0, 1 }
49210+			}
49211+		},
49212+		.chg_det = {
49213+			.chg_mode	= { 0x0100, 8, 0, 0, 0x1d7 },
49214+			.cp_det		= { 0x0120, 24, 24, 0, 1 },
49215+			.dcp_det	= { 0x0120, 23, 23, 0, 1 },
49216+			.dp_det		= { 0x0120, 25, 25, 0, 1 },
49217+			.idm_sink_en	= { 0x0108, 8, 8, 0, 1 },
49218+			.idp_sink_en	= { 0x0108, 7, 7, 0, 1 },
49219+			.idp_src_en	= { 0x0108, 9, 9, 0, 1 },
49220+			.rdm_pdwn_en	= { 0x0108, 10, 10, 0, 1 },
49221+			.vdm_src_en	= { 0x0108, 12, 12, 0, 1 },
49222+			.vdp_src_en	= { 0x0108, 11, 11, 0, 1 },
49223+		},
49224+	},
49225+	{ /* sentinel */ }
49226+};
49227+
49228+static const struct rockchip_usb2phy_cfg rk312x_phy_cfgs[] = {
49229+	{
49230+		.reg = 0x17c,
49231+		.num_ports	= 2,
49232+		.phy_tuning	= rk312x_usb2phy_tuning,
49233+		.clkout_ctl	= { 0x0190, 15, 15, 1, 0 },
49234+		.port_cfgs	= {
49235+			[USB2PHY_PORT_OTG] = {
49236+				.phy_sus	= { 0x017c, 8, 0, 0, 0x1d1 },
49237+				.bvalid_det_en	= { 0x017c, 14, 14, 0, 1 },
49238+				.bvalid_det_st	= { 0x017c, 15, 15, 0, 1 },
49239+				.bvalid_det_clr	= { 0x017c, 15, 15, 0, 1 },
49240+				.bypass_dm_en	= { 0x0190, 12, 12, 0, 1},
49241+				.bypass_sel	= { 0x0190, 13, 13, 0, 1},
49242+				.iddig_output	= { 0x017c, 10, 10, 0, 1 },
49243+				.iddig_en	= { 0x017c, 9, 9, 0, 1 },
49244+				.idfall_det_en  = { 0x01a0, 2, 2, 0, 1 },
49245+				.idfall_det_st  = { 0x01a0, 3, 3, 0, 1 },
49246+				.idfall_det_clr = { 0x01a0, 3, 3, 0, 1 },
49247+				.idrise_det_en  = { 0x01a0, 0, 0, 0, 1 },
49248+				.idrise_det_st  = { 0x01a0, 1, 1, 0, 1 },
49249+				.idrise_det_clr = { 0x01a0, 1, 1, 0, 1 },
49250+				.ls_det_en	= { 0x017c, 12, 12, 0, 1 },
49251+				.ls_det_st	= { 0x017c, 13, 13, 0, 1 },
49252+				.ls_det_clr	= { 0x017c, 13, 13, 0, 1 },
49253+				.utmi_bvalid	= { 0x014c, 5, 5, 0, 1 },
49254+				.utmi_iddig	= { 0x014c, 8, 8, 0, 1 },
49255+				.utmi_ls	= { 0x014c, 7, 6, 0, 1 },
49256+			},
49257+			[USB2PHY_PORT_HOST] = {
49258+				.phy_sus	= { 0x0194, 8, 0, 0, 0x1d1 },
49259+				.ls_det_en	= { 0x0194, 14, 14, 0, 1 },
49260+				.ls_det_st	= { 0x0194, 15, 15, 0, 1 },
49261+				.ls_det_clr	= { 0x0194, 15, 15, 0, 1 }
49262+			}
49263+		},
49264+		.chg_det = {
49265+			.chg_mode	= { 0x017c, 8, 0, 0, 0x1d7 },
49266+			.cp_det		= { 0x02c0, 6, 6, 0, 1 },
49267+			.dcp_det	= { 0x02c0, 5, 5, 0, 1 },
49268+			.dp_det		= { 0x02c0, 7, 7, 0, 1 },
49269+			.idm_sink_en	= { 0x0184, 8, 8, 0, 1 },
49270+			.idp_sink_en	= { 0x0184, 7, 7, 0, 1 },
49271+			.idp_src_en	= { 0x0184, 9, 9, 0, 1 },
49272+			.rdm_pdwn_en	= { 0x0184, 10, 10, 0, 1 },
49273+			.vdm_src_en	= { 0x0184, 12, 12, 0, 1 },
49274+			.vdp_src_en	= { 0x0184, 11, 11, 0, 1 },
49275+		},
49276+	},
49277+	{ /* sentinel */ }
49278+};
49279+
49280 static const struct rockchip_usb2phy_cfg rk3228_phy_cfgs[] = {
49281 	{
49282 		.reg = 0x760,
49283 		.num_ports	= 2,
49284+		.phy_tuning	= rk3228_usb2phy_tuning,
49285 		.clkout_ctl	= { 0x0768, 4, 4, 1, 0 },
49286 		.port_cfgs	= {
49287 			[USB2PHY_PORT_OTG] = {
49288-				.phy_sus	= { 0x0760, 15, 0, 0, 0x1d1 },
49289+				.phy_sus	= { 0x0760, 8, 0, 0, 0x1d1 },
49290 				.bvalid_det_en	= { 0x0680, 3, 3, 0, 1 },
49291 				.bvalid_det_st	= { 0x0690, 3, 3, 0, 1 },
49292 				.bvalid_det_clr	= { 0x06a0, 3, 3, 0, 1 },
49293+				.iddig_output   = { 0x0760, 10, 10, 0, 1 },
49294+				.iddig_en       = { 0x0760, 9, 9, 0, 1 },
49295+				.idfall_det_en	= { 0x0680, 6, 6, 0, 1 },
49296+				.idfall_det_st	= { 0x0690, 6, 6, 0, 1 },
49297+				.idfall_det_clr	= { 0x06a0, 6, 6, 0, 1 },
49298+				.idrise_det_en	= { 0x0680, 5, 5, 0, 1 },
49299+				.idrise_det_st	= { 0x0690, 5, 5, 0, 1 },
49300+				.idrise_det_clr	= { 0x06a0, 5, 5, 0, 1 },
49301 				.ls_det_en	= { 0x0680, 2, 2, 0, 1 },
49302 				.ls_det_st	= { 0x0690, 2, 2, 0, 1 },
49303 				.ls_det_clr	= { 0x06a0, 2, 2, 0, 1 },
49304 				.utmi_bvalid	= { 0x0480, 4, 4, 0, 1 },
49305+				.utmi_iddig	= { 0x0480, 1, 1, 0, 1 },
49306 				.utmi_ls	= { 0x0480, 3, 2, 0, 1 },
49307+				.vbus_det_en	= { 0x0788, 15, 15, 1, 0 },
49308 			},
49309 			[USB2PHY_PORT_HOST] = {
49310-				.phy_sus	= { 0x0764, 15, 0, 0, 0x1d1 },
49311+				.phy_sus	= { 0x0764, 8, 0, 0, 0x1d1 },
49312 				.ls_det_en	= { 0x0680, 4, 4, 0, 1 },
49313 				.ls_det_st	= { 0x0690, 4, 4, 0, 1 },
49314 				.ls_det_clr	= { 0x06a0, 4, 4, 0, 1 }
49315 			}
49316 		},
49317 		.chg_det = {
49318-			.opmode		= { 0x0760, 3, 0, 5, 1 },
49319+			.chg_mode	= { 0x0760, 8, 0, 0, 0x1d7 },
49320 			.cp_det		= { 0x0884, 4, 4, 0, 1 },
49321 			.dcp_det	= { 0x0884, 3, 3, 0, 1 },
49322 			.dp_det		= { 0x0884, 5, 5, 0, 1 },
49323@@ -1242,80 +3042,88 @@ static const struct rockchip_usb2phy_cfg rk3228_phy_cfgs[] = {
49324 		.clkout_ctl	= { 0x0808, 4, 4, 1, 0 },
49325 		.port_cfgs	= {
49326 			[USB2PHY_PORT_OTG] = {
49327-				.phy_sus	= { 0x800, 15, 0, 0, 0x1d1 },
49328-				.ls_det_en	= { 0x0684, 0, 0, 0, 1 },
49329-				.ls_det_st	= { 0x0694, 0, 0, 0, 1 },
49330-				.ls_det_clr	= { 0x06a4, 0, 0, 0, 1 }
49331-			},
49332-			[USB2PHY_PORT_HOST] = {
49333-				.phy_sus	= { 0x804, 15, 0, 0, 0x1d1 },
49334+				.phy_sus	= { 0x804, 8, 0, 0, 0x1d1 },
49335 				.ls_det_en	= { 0x0684, 1, 1, 0, 1 },
49336 				.ls_det_st	= { 0x0694, 1, 1, 0, 1 },
49337 				.ls_det_clr	= { 0x06a4, 1, 1, 0, 1 }
49338+			},
49339+			[USB2PHY_PORT_HOST] = {
49340+				.phy_sus	= { 0x800, 8, 0, 0, 0x1d1 },
49341+				.ls_det_en	= { 0x0684, 0, 0, 0, 1 },
49342+				.ls_det_st	= { 0x0694, 0, 0, 0, 1 },
49343+				.ls_det_clr	= { 0x06a4, 0, 0, 0, 1 }
49344 			}
49345 		},
49346 	},
49347 	{ /* sentinel */ }
49348 };
49349 
49350-static const struct rockchip_usb2phy_cfg rk3328_phy_cfgs[] = {
49351+static const struct rockchip_usb2phy_cfg rk3366_phy_cfgs[] = {
49352 	{
49353-		.reg = 0x100,
49354+		.reg = 0x700,
49355 		.num_ports	= 2,
49356-		.clkout_ctl	= { 0x108, 4, 4, 1, 0 },
49357+		.phy_tuning	= rk3366_usb2phy_tuning,
49358+		.clkout_ctl	= { 0x0724, 15, 15, 1, 0 },
49359 		.port_cfgs	= {
49360-			[USB2PHY_PORT_OTG] = {
49361-				.phy_sus	= { 0x0100, 15, 0, 0, 0x1d1 },
49362-				.bvalid_det_en	= { 0x0110, 2, 2, 0, 1 },
49363-				.bvalid_det_st	= { 0x0114, 2, 2, 0, 1 },
49364-				.bvalid_det_clr = { 0x0118, 2, 2, 0, 1 },
49365-				.ls_det_en	= { 0x0110, 0, 0, 0, 1 },
49366-				.ls_det_st	= { 0x0114, 0, 0, 0, 1 },
49367-				.ls_det_clr	= { 0x0118, 0, 0, 0, 1 },
49368-				.utmi_avalid	= { 0x0120, 10, 10, 0, 1 },
49369-				.utmi_bvalid	= { 0x0120, 9, 9, 0, 1 },
49370-				.utmi_ls	= { 0x0120, 5, 4, 0, 1 },
49371-			},
49372 			[USB2PHY_PORT_HOST] = {
49373-				.phy_sus	= { 0x104, 15, 0, 0, 0x1d1 },
49374-				.ls_det_en	= { 0x110, 1, 1, 0, 1 },
49375-				.ls_det_st	= { 0x114, 1, 1, 0, 1 },
49376-				.ls_det_clr	= { 0x118, 1, 1, 0, 1 },
49377-				.utmi_ls	= { 0x120, 17, 16, 0, 1 },
49378-				.utmi_hstdet	= { 0x120, 19, 19, 0, 1 }
49379+				.phy_sus	= { 0x0728, 8, 0, 0, 0x1d1 },
49380+				.ls_det_en	= { 0x0680, 4, 4, 0, 1 },
49381+				.ls_det_st	= { 0x0690, 4, 4, 0, 1 },
49382+				.ls_det_clr	= { 0x06a0, 4, 4, 0, 1 },
49383+				.utmi_ls	= { 0x049c, 14, 13, 0, 1 },
49384+				.utmi_hstdet	= { 0x049c, 12, 12, 0, 1 }
49385 			}
49386 		},
49387-		.chg_det = {
49388-			.opmode		= { 0x0100, 3, 0, 5, 1 },
49389-			.cp_det		= { 0x0120, 24, 24, 0, 1 },
49390-			.dcp_det	= { 0x0120, 23, 23, 0, 1 },
49391-			.dp_det		= { 0x0120, 25, 25, 0, 1 },
49392-			.idm_sink_en	= { 0x0108, 8, 8, 0, 1 },
49393-			.idp_sink_en	= { 0x0108, 7, 7, 0, 1 },
49394-			.idp_src_en	= { 0x0108, 9, 9, 0, 1 },
49395-			.rdm_pdwn_en	= { 0x0108, 10, 10, 0, 1 },
49396-			.vdm_src_en	= { 0x0108, 12, 12, 0, 1 },
49397-			.vdp_src_en	= { 0x0108, 11, 11, 0, 1 },
49398-		},
49399 	},
49400 	{ /* sentinel */ }
49401 };
49402 
49403-static const struct rockchip_usb2phy_cfg rk3366_phy_cfgs[] = {
49404+static const struct rockchip_usb2phy_cfg rk3368_phy_cfgs[] = {
49405 	{
49406 		.reg = 0x700,
49407 		.num_ports	= 2,
49408 		.clkout_ctl	= { 0x0724, 15, 15, 1, 0 },
49409 		.port_cfgs	= {
49410+			[USB2PHY_PORT_OTG] = {
49411+				.phy_sus	= { 0x0700, 8, 0, 0, 0x1d1 },
49412+				.bvalid_det_en	= { 0x0680, 3, 3, 0, 1 },
49413+				.bvalid_det_st	= { 0x0690, 3, 3, 0, 1 },
49414+				.bvalid_det_clr = { 0x06a0, 3, 3, 0, 1 },
49415+				.iddig_output	= { 0x0700, 10, 10, 0, 1 },
49416+				.iddig_en	= { 0x0700, 9, 9, 0, 1 },
49417+				.idfall_det_en	= { 0x0680, 6, 6, 0, 1 },
49418+				.idfall_det_st	= { 0x0690, 6, 6, 0, 1 },
49419+				.idfall_det_clr	= { 0x06a0, 6, 6, 0, 1 },
49420+				.idrise_det_en	= { 0x0680, 5, 5, 0, 1 },
49421+				.idrise_det_st	= { 0x0690, 5, 5, 0, 1 },
49422+				.idrise_det_clr	= { 0x06a0, 5, 5, 0, 1 },
49423+				.ls_det_en	= { 0x0680, 2, 2, 0, 1 },
49424+				.ls_det_st	= { 0x0690, 2, 2, 0, 1 },
49425+				.ls_det_clr	= { 0x06a0, 2, 2, 0, 1 },
49426+				.utmi_bvalid	= { 0x04bc, 23, 23, 0, 1 },
49427+				.utmi_iddig     = { 0x04bc, 26, 26, 0, 1 },
49428+				.utmi_ls	= { 0x04bc, 25, 24, 0, 1 },
49429+				.vbus_det_en    = { 0x079c, 15, 15, 1, 0 },
49430+			},
49431 			[USB2PHY_PORT_HOST] = {
49432 				.phy_sus	= { 0x0728, 15, 0, 0, 0x1d1 },
49433 				.ls_det_en	= { 0x0680, 4, 4, 0, 1 },
49434 				.ls_det_st	= { 0x0690, 4, 4, 0, 1 },
49435-				.ls_det_clr	= { 0x06a0, 4, 4, 0, 1 },
49436-				.utmi_ls	= { 0x049c, 14, 13, 0, 1 },
49437-				.utmi_hstdet	= { 0x049c, 12, 12, 0, 1 }
49438+				.ls_det_clr	= { 0x06a0, 4, 4, 0, 1 }
49439 			}
49440 		},
49441+		.chg_det = {
49442+			.chg_mode	= { 0x0700, 8, 0, 0, 0x1d7 },
49443+			.cp_det		= { 0x04b8, 30, 30, 0, 1 },
49444+			.dcp_det	= { 0x04b8, 29, 29, 0, 1 },
49445+			.dp_det		= { 0x04b8, 31, 31, 0, 1 },
49446+			.idm_sink_en	= { 0x0718, 8, 8, 0, 1 },
49447+			.idp_sink_en	= { 0x0718, 7, 7, 0, 1 },
49448+			.idp_src_en	= { 0x0718, 9, 9, 0, 1 },
49449+			.rdm_pdwn_en	= { 0x0718, 10, 10, 0, 1 },
49450+			.vdm_src_en	= { 0x0718, 12, 12, 0, 1 },
49451+			.vdp_src_en	= { 0x0718, 11, 11, 0, 1 },
49452+		},
49453 	},
49454 	{ /* sentinel */ }
49455 };
49456@@ -1324,15 +3132,32 @@ static const struct rockchip_usb2phy_cfg rk3399_phy_cfgs[] = {
49457 	{
49458 		.reg		= 0xe450,
49459 		.num_ports	= 2,
49460+		.phy_tuning	= rk3399_usb2phy_tuning,
49461 		.clkout_ctl	= { 0xe450, 4, 4, 1, 0 },
49462 		.port_cfgs	= {
49463 			[USB2PHY_PORT_OTG] = {
49464-				.phy_sus	= { 0xe454, 1, 0, 2, 1 },
49465+				.phy_sus = { 0xe454, 8, 0, 0x052, 0x1d1 },
49466 				.bvalid_det_en	= { 0xe3c0, 3, 3, 0, 1 },
49467 				.bvalid_det_st	= { 0xe3e0, 3, 3, 0, 1 },
49468 				.bvalid_det_clr	= { 0xe3d0, 3, 3, 0, 1 },
49469+				.bypass_dm_en   = { 0xe450, 2, 2, 0, 1 },
49470+				.bypass_sel     = { 0xe450, 3, 3, 0, 1 },
49471+				.iddig_output	= { 0xe454, 10, 10, 0, 1 },
49472+				.iddig_en	= { 0xe454, 9, 9, 0, 1 },
49473+				.idfall_det_en	= { 0xe3c0, 5, 5, 0, 1 },
49474+				.idfall_det_st	= { 0xe3e0, 5, 5, 0, 1 },
49475+				.idfall_det_clr	= { 0xe3d0, 5, 5, 0, 1 },
49476+				.idrise_det_en	= { 0xe3c0, 4, 4, 0, 1 },
49477+				.idrise_det_st	= { 0xe3e0, 4, 4, 0, 1 },
49478+				.idrise_det_clr	= { 0xe3d0, 4, 4, 0, 1 },
49479+				.ls_det_en	= { 0xe3c0, 2, 2, 0, 1 },
49480+				.ls_det_st	= { 0xe3e0, 2, 2, 0, 1 },
49481+				.ls_det_clr	= { 0xe3d0, 2, 2, 0, 1 },
49482 				.utmi_avalid	= { 0xe2ac, 7, 7, 0, 1 },
49483 				.utmi_bvalid	= { 0xe2ac, 12, 12, 0, 1 },
49484+				.utmi_iddig     = { 0xe2ac, 8, 8, 0, 1 },
49485+				.utmi_ls	= { 0xe2ac, 14, 13, 0, 1 },
49486+				.vbus_det_en    = { 0x449c, 15, 15, 1, 0 },
49487 			},
49488 			[USB2PHY_PORT_HOST] = {
49489 				.phy_sus	= { 0xe458, 1, 0, 0x2, 0x1 },
49490@@ -1344,7 +3169,7 @@ static const struct rockchip_usb2phy_cfg rk3399_phy_cfgs[] = {
49491 			}
49492 		},
49493 		.chg_det = {
49494-			.opmode		= { 0xe454, 3, 0, 5, 1 },
49495+			.chg_mode	= { 0xe454, 8, 0, 0, 0x1d7 },
49496 			.cp_det		= { 0xe2ac, 2, 2, 0, 1 },
49497 			.dcp_det	= { 0xe2ac, 1, 1, 0, 1 },
49498 			.dp_det		= { 0xe2ac, 0, 0, 0, 1 },
49499@@ -1359,15 +3184,30 @@ static const struct rockchip_usb2phy_cfg rk3399_phy_cfgs[] = {
49500 	{
49501 		.reg		= 0xe460,
49502 		.num_ports	= 2,
49503+		.phy_tuning	= rk3399_usb2phy_tuning,
49504 		.clkout_ctl	= { 0xe460, 4, 4, 1, 0 },
49505 		.port_cfgs	= {
49506 			[USB2PHY_PORT_OTG] = {
49507-				.phy_sus        = { 0xe464, 1, 0, 2, 1 },
49508+				.phy_sus = { 0xe464, 8, 0, 0x052, 0x1d1 },
49509 				.bvalid_det_en  = { 0xe3c0, 8, 8, 0, 1 },
49510 				.bvalid_det_st  = { 0xe3e0, 8, 8, 0, 1 },
49511 				.bvalid_det_clr = { 0xe3d0, 8, 8, 0, 1 },
49512+				.iddig_output	= { 0xe464, 10, 10, 0, 1 },
49513+				.iddig_en	= { 0xe464, 9, 9, 0, 1 },
49514+				.idfall_det_en	= { 0xe3c0, 10, 10, 0, 1 },
49515+				.idfall_det_st	= { 0xe3e0, 10, 10, 0, 1 },
49516+				.idfall_det_clr	= { 0xe3d0, 10, 10, 0, 1 },
49517+				.idrise_det_en	= { 0xe3c0, 9, 9, 0, 1 },
49518+				.idrise_det_st	= { 0xe3e0, 9, 9, 0, 1 },
49519+				.idrise_det_clr	= { 0xe3d0, 9, 9, 0, 1 },
49520+				.ls_det_en	= { 0xe3c0, 7, 7, 0, 1 },
49521+				.ls_det_st	= { 0xe3e0, 7, 7, 0, 1 },
49522+				.ls_det_clr	= { 0xe3d0, 7, 7, 0, 1 },
49523 				.utmi_avalid	= { 0xe2ac, 10, 10, 0, 1 },
49524 				.utmi_bvalid    = { 0xe2ac, 16, 16, 0, 1 },
49525+				.utmi_iddig     = { 0xe2ac, 11, 11, 0, 1 },
49526+				.utmi_ls	= { 0xe2ac, 18, 17, 0, 1 },
49527+				.vbus_det_en    = { 0x451c, 15, 15, 1, 0 },
49528 			},
49529 			[USB2PHY_PORT_HOST] = {
49530 				.phy_sus	= { 0xe468, 1, 0, 0x2, 0x1 },
49531@@ -1378,6 +3218,304 @@ static const struct rockchip_usb2phy_cfg rk3399_phy_cfgs[] = {
49532 				.utmi_hstdet	= { 0xe2ac, 27, 27, 0, 1 }
49533 			}
49534 		},
49535+		.chg_det = {
49536+			.chg_mode	= { 0xe464, 8, 0, 0, 0x1d7 },
49537+			.cp_det		= { 0xe2ac, 5, 5, 0, 1 },
49538+			.dcp_det	= { 0xe2ac, 4, 4, 0, 1 },
49539+			.dp_det		= { 0xe2ac, 3, 3, 0, 1 },
49540+			.idm_sink_en	= { 0xe460, 8, 8, 0, 1 },
49541+			.idp_sink_en	= { 0xe460, 7, 7, 0, 1 },
49542+			.idp_src_en	= { 0xe460, 9, 9, 0, 1 },
49543+			.rdm_pdwn_en	= { 0xe460, 10, 10, 0, 1 },
49544+			.vdm_src_en	= { 0xe460, 12, 12, 0, 1 },
49545+			.vdp_src_en	= { 0xe460, 11, 11, 0, 1 },
49546+		},
49547+	},
49548+	{ /* sentinel */ }
49549+};
49550+
49551+static const struct rockchip_usb2phy_cfg rk3568_phy_cfgs[] = {
49552+	{
49553+		.reg = 0xfe8a0000,
49554+		.num_ports	= 2,
49555+		.phy_tuning	= rk3568_usb2phy_tuning,
49556+		.vbus_detect	= rk3568_vbus_detect_control,
49557+		.clkout_ctl	= { 0x0008, 4, 4, 1, 0 },
49558+		.ls_filter_con	= { 0x0040, 19, 0, 0x30100, 0x00020 },
49559+		.port_cfgs	= {
49560+			[USB2PHY_PORT_OTG] = {
49561+				.phy_sus	= { 0x0000, 8, 0, 0, 0x1d1 },
49562+				.bvalid_det_en	= { 0x0080, 2, 2, 0, 1 },
49563+				.bvalid_det_st	= { 0x0084, 2, 2, 0, 1 },
49564+				.bvalid_det_clr = { 0x0088, 2, 2, 0, 1 },
49565+				.bvalid_grf_sel	= { 0x0008, 15, 14, 0, 3 },
49566+				.bypass_dm_en	= { 0x0008, 2, 2, 0, 1},
49567+				.bypass_sel	= { 0x0008, 3, 3, 0, 1},
49568+				.iddig_output	= { 0x0000, 10, 10, 0, 1 },
49569+				.iddig_en	= { 0x0000, 9, 9, 0, 1 },
49570+				.idfall_det_en	= { 0x0080, 5, 5, 0, 1 },
49571+				.idfall_det_st	= { 0x0084, 5, 5, 0, 1 },
49572+				.idfall_det_clr = { 0x0088, 5, 5, 0, 1 },
49573+				.idrise_det_en	= { 0x0080, 4, 4, 0, 1 },
49574+				.idrise_det_st	= { 0x0084, 4, 4, 0, 1 },
49575+				.idrise_det_clr = { 0x0088, 4, 4, 0, 1 },
49576+				.ls_det_en	= { 0x0080, 0, 0, 0, 1 },
49577+				.ls_det_st	= { 0x0084, 0, 0, 0, 1 },
49578+				.ls_det_clr	= { 0x0088, 0, 0, 0, 1 },
49579+				.utmi_avalid	= { 0x00c0, 10, 10, 0, 1 },
49580+				.utmi_bvalid	= { 0x00c0, 9, 9, 0, 1 },
49581+				.utmi_iddig	= { 0x00c0, 6, 6, 0, 1 },
49582+				.utmi_ls	= { 0x00c0, 5, 4, 0, 1 },
49583+			},
49584+			[USB2PHY_PORT_HOST] = {
49585+				/* Select suspend control from controller */
49586+				.phy_sus	= { 0x0004, 8, 0, 0x1d2, 0x1d2 },
49587+				.ls_det_en	= { 0x0080, 1, 1, 0, 1 },
49588+				.ls_det_st	= { 0x0084, 1, 1, 0, 1 },
49589+				.ls_det_clr	= { 0x0088, 1, 1, 0, 1 },
49590+				.utmi_ls	= { 0x00c0, 17, 16, 0, 1 },
49591+				.utmi_hstdet	= { 0x00c0, 19, 19, 0, 1 }
49592+			}
49593+		},
49594+		.chg_det = {
49595+			.chg_mode	= { 0x0000, 8, 0, 0, 0x1d7 },
49596+			.cp_det		= { 0x00c0, 24, 24, 0, 1 },
49597+			.dcp_det	= { 0x00c0, 23, 23, 0, 1 },
49598+			.dp_det		= { 0x00c0, 25, 25, 0, 1 },
49599+			.idm_sink_en	= { 0x0008, 8, 8, 0, 1 },
49600+			.idp_sink_en	= { 0x0008, 7, 7, 0, 1 },
49601+			.idp_src_en	= { 0x0008, 9, 9, 0, 1 },
49602+			.rdm_pdwn_en	= { 0x0008, 10, 10, 0, 1 },
49603+			.vdm_src_en	= { 0x0008, 12, 12, 0, 1 },
49604+			.vdp_src_en	= { 0x0008, 11, 11, 0, 1 },
49605+		},
49606+	},
49607+	{
49608+		.reg = 0xfe8b0000,
49609+		.num_ports	= 2,
49610+		.phy_tuning	= rk3568_usb2phy_tuning,
49611+		.clkout_ctl	= { 0x0008, 4, 4, 1, 0 },
49612+		.ls_filter_con	= { 0x0040, 19, 0, 0x30100, 0x00020 },
49613+		.port_cfgs	= {
49614+			[USB2PHY_PORT_OTG] = {
49615+				.phy_sus	= { 0x0000, 8, 0, 0x1d2, 0x1d1 },
49616+				.ls_det_en	= { 0x0080, 0, 0, 0, 1 },
49617+				.ls_det_st	= { 0x0084, 0, 0, 0, 1 },
49618+				.ls_det_clr	= { 0x0088, 0, 0, 0, 1 },
49619+				.utmi_ls	= { 0x00c0, 5, 4, 0, 1 },
49620+				.utmi_hstdet	= { 0x00c0, 7, 7, 0, 1 }
49621+			},
49622+			[USB2PHY_PORT_HOST] = {
49623+				.phy_sus	= { 0x0004, 8, 0, 0x1d2, 0x1d1 },
49624+				.ls_det_en	= { 0x0080, 1, 1, 0, 1 },
49625+				.ls_det_st	= { 0x0084, 1, 1, 0, 1 },
49626+				.ls_det_clr	= { 0x0088, 1, 1, 0, 1 },
49627+				.utmi_ls	= { 0x00c0, 17, 16, 0, 1 },
49628+				.utmi_hstdet	= { 0x00c0, 19, 19, 0, 1 }
49629+			}
49630+		},
49631+	},
49632+	{ /* sentinel */ }
49633+};
49634+
49635+static const struct rockchip_usb2phy_cfg rk3588_phy_cfgs[] = {
49636+	{
49637+		.reg = 0x0000,
49638+		.num_ports	= 1,
49639+		.phy_tuning	= rk3588_usb2phy_tuning,
49640+		.clkout_ctl	= { 0x0000, 0, 0, 1, 0 },
49641+		.ls_filter_con	= { 0x0040, 19, 0, 0x30100, 0x00020 },
49642+		.port_cfgs	= {
49643+			[USB2PHY_PORT_OTG] = {
49644+				.phy_sus	= { 0x000c, 11, 11, 0, 1 },
49645+				.pipe_phystatus	= { 0x001c, 3, 2, 0, 2 },
49646+				.bvalid_det_en	= { 0x0080, 1, 1, 0, 1 },
49647+				.bvalid_det_st	= { 0x0084, 1, 1, 0, 1 },
49648+				.bvalid_det_clr = { 0x0088, 1, 1, 0, 1 },
49649+				.bvalid_grf_sel	= { 0x0010, 3, 3, 0, 1 },
49650+				.bvalid_grf_con	= { 0x0010, 3, 2, 2, 3 },
49651+				.bvalid_phy_con	= { 0x0008, 1, 0, 2, 3 },
49652+				.bypass_dm_en	= { 0x000c, 5, 5, 0, 1 },
49653+				.bypass_sel	= { 0x000c, 6, 6, 0, 1 },
49654+				.iddig_output	= { 0x0010, 0, 0, 0, 1 },
49655+				.iddig_en	= { 0x0010, 1, 1, 0, 1 },
49656+				.idfall_det_en	= { 0x0080, 4, 4, 0, 1 },
49657+				.idfall_det_st	= { 0x0084, 4, 4, 0, 1 },
49658+				.idfall_det_clr = { 0x0088, 4, 4, 0, 1 },
49659+				.idrise_det_en	= { 0x0080, 3, 3, 0, 1 },
49660+				.idrise_det_st	= { 0x0084, 3, 3, 0, 1 },
49661+				.idrise_det_clr = { 0x0088, 3, 3, 0, 1 },
49662+				.ls_det_en	= { 0x0080, 0, 0, 0, 1 },
49663+				.ls_det_st	= { 0x0084, 0, 0, 0, 1 },
49664+				.ls_det_clr	= { 0x0088, 0, 0, 0, 1 },
49665+				.disfall_en	= { 0x0080, 6, 6, 0, 1 },
49666+				.disfall_st	= { 0x0084, 6, 6, 0, 1 },
49667+				.disfall_clr	= { 0x0088, 6, 6, 0, 1 },
49668+				.disrise_en	= { 0x0080, 5, 5, 0, 1 },
49669+				.disrise_st	= { 0x0084, 5, 5, 0, 1 },
49670+				.disrise_clr	= { 0x0088, 5, 5, 0, 1 },
49671+				.utmi_avalid	= { 0x00c0, 7, 7, 0, 1 },
49672+				.utmi_bvalid	= { 0x00c0, 6, 6, 0, 1 },
49673+				.utmi_iddig	= { 0x00c0, 5, 5, 0, 1 },
49674+				.utmi_ls	= { 0x00c0, 10, 9, 0, 1 },
49675+			}
49676+		},
49677+		.chg_det = {
49678+			.chg_mode	= { 0x0008, 2, 2, 0, 1 },
49679+			.cp_det		= { 0x00c0, 0, 0, 0, 1 },
49680+			.dcp_det	= { 0x00c0, 0, 0, 0, 1 },
49681+			.dp_det		= { 0x00c0, 1, 1, 1, 0 },
49682+			.idm_sink_en	= { 0x0008, 5, 5, 1, 0 },
49683+			.idp_sink_en	= { 0x0008, 5, 5, 0, 1 },
49684+			.idp_src_en	= { 0x0008, 14, 14, 0, 1 },
49685+			.rdm_pdwn_en	= { 0x0008, 14, 14, 0, 1 },
49686+			.vdm_src_en	= { 0x0008, 7, 6, 0, 3 },
49687+			.vdp_src_en	= { 0x0008, 7, 6, 0, 3 },
49688+		},
49689+	},
49690+	{
49691+		.reg = 0x4000,
49692+		.num_ports	= 1,
49693+		.phy_tuning	= rk3588_usb2phy_tuning,
49694+		.clkout_ctl	= { 0x0000, 0, 0, 1, 0 },
49695+		.ls_filter_con	= { 0x0040, 19, 0, 0x30100, 0x00020 },
49696+		.port_cfgs	= {
49697+			[USB2PHY_PORT_OTG] = {
49698+				.phy_sus	= { 0x000c, 11, 11, 0, 1 },
49699+				.pipe_phystatus	= { 0x0034, 3, 2, 0, 2 },
49700+				.bvalid_det_en	= { 0x0080, 1, 1, 0, 1 },
49701+				.bvalid_det_st	= { 0x0084, 1, 1, 0, 1 },
49702+				.bvalid_det_clr = { 0x0088, 1, 1, 0, 1 },
49703+				.bvalid_grf_sel	= { 0x0010, 3, 3, 0, 1 },
49704+				.bvalid_grf_con	= { 0x0010, 3, 2, 2, 3 },
49705+				.bvalid_phy_con = { 0x0008, 1, 0, 2, 3 },
49706+				.bypass_dm_en	= { 0x000c, 5, 5, 0, 1 },
49707+				.bypass_sel	= { 0x000c, 6, 6, 0, 1 },
49708+				.iddig_output	= { 0x0010, 0, 0, 0, 1 },
49709+				.iddig_en	= { 0x0010, 1, 1, 0, 1 },
49710+				.idfall_det_en	= { 0x0080, 4, 4, 0, 1 },
49711+				.idfall_det_st	= { 0x0084, 4, 4, 0, 1 },
49712+				.idfall_det_clr = { 0x0088, 4, 4, 0, 1 },
49713+				.idrise_det_en	= { 0x0080, 3, 3, 0, 1 },
49714+				.idrise_det_st	= { 0x0084, 3, 3, 0, 1 },
49715+				.idrise_det_clr = { 0x0088, 3, 3, 0, 1 },
49716+				.ls_det_en	= { 0x0080, 0, 0, 0, 1 },
49717+				.ls_det_st	= { 0x0084, 0, 0, 0, 1 },
49718+				.ls_det_clr	= { 0x0088, 0, 0, 0, 1 },
49719+				.disfall_en	= { 0x0080, 6, 6, 0, 1 },
49720+				.disfall_st	= { 0x0084, 6, 6, 0, 1 },
49721+				.disfall_clr	= { 0x0088, 6, 6, 0, 1 },
49722+				.disrise_en	= { 0x0080, 5, 5, 0, 1 },
49723+				.disrise_st	= { 0x0084, 5, 5, 0, 1 },
49724+				.disrise_clr	= { 0x0088, 5, 5, 0, 1 },
49725+				.utmi_avalid	= { 0x00c0, 7, 7, 0, 1 },
49726+				.utmi_bvalid	= { 0x00c0, 6, 6, 0, 1 },
49727+				.utmi_iddig	= { 0x00c0, 5, 5, 0, 1 },
49728+				.utmi_ls	= { 0x00c0, 10, 9, 0, 1 },
49729+			}
49730+		},
49731+		.chg_det = {
49732+			.chg_mode	= { 0x0008, 2, 2, 0, 1 },
49733+			.cp_det		= { 0x00c0, 0, 0, 0, 1 },
49734+			.dcp_det	= { 0x00c0, 0, 0, 0, 1 },
49735+			.dp_det		= { 0x00c0, 1, 1, 1, 0 },
49736+			.idm_sink_en	= { 0x0008, 5, 5, 1, 0 },
49737+			.idp_sink_en	= { 0x0008, 5, 5, 0, 1 },
49738+			.idp_src_en	= { 0x0008, 14, 14, 0, 1 },
49739+			.rdm_pdwn_en	= { 0x0008, 14, 14, 0, 1 },
49740+			.vdm_src_en	= { 0x0008, 7, 6, 0, 3 },
49741+			.vdp_src_en	= { 0x0008, 7, 6, 0, 3 },
49742+		},
49743+	},
49744+	{
49745+		.reg = 0x8000,
49746+		.num_ports	= 1,
49747+		.phy_tuning	= rk3588_usb2phy_tuning,
49748+		.clkout_ctl	= { 0x0000, 0, 0, 1, 0 },
49749+		.ls_filter_con	= { 0x0040, 19, 0, 0x30100, 0x00020 },
49750+		.port_cfgs	= {
49751+			[USB2PHY_PORT_HOST] = {
49752+				.phy_sus	= { 0x0008, 2, 2, 0, 1 },
49753+				.ls_det_en	= { 0x0080, 0, 0, 0, 1 },
49754+				.ls_det_st	= { 0x0084, 0, 0, 0, 1 },
49755+				.ls_det_clr	= { 0x0088, 0, 0, 0, 1 },
49756+				.disfall_en	= { 0x0080, 6, 6, 0, 1 },
49757+				.disfall_st	= { 0x0084, 6, 6, 0, 1 },
49758+				.disfall_clr	= { 0x0088, 6, 6, 0, 1 },
49759+				.disrise_en	= { 0x0080, 5, 5, 0, 1 },
49760+				.disrise_st	= { 0x0084, 5, 5, 0, 1 },
49761+				.disrise_clr	= { 0x0088, 5, 5, 0, 1 },
49762+				.utmi_ls	= { 0x00c0, 10, 9, 0, 1 },
49763+			}
49764+		},
49765+	},
49766+	{
49767+		.reg = 0xc000,
49768+		.num_ports	= 1,
49769+		.phy_tuning	= rk3588_usb2phy_tuning,
49770+		.clkout_ctl	= { 0x0000, 0, 0, 1, 0 },
49771+		.ls_filter_con	= { 0x0040, 19, 0, 0x30100, 0x00020 },
49772+		.port_cfgs	= {
49773+			[USB2PHY_PORT_HOST] = {
49774+				.phy_sus	= { 0x0008, 2, 2, 0, 1 },
49775+				.ls_det_en	= { 0x0080, 0, 0, 0, 1 },
49776+				.ls_det_st	= { 0x0084, 0, 0, 0, 1 },
49777+				.ls_det_clr	= { 0x0088, 0, 0, 0, 1 },
49778+				.disfall_en	= { 0x0080, 6, 6, 0, 1 },
49779+				.disfall_st	= { 0x0084, 6, 6, 0, 1 },
49780+				.disfall_clr	= { 0x0088, 6, 6, 0, 1 },
49781+				.disrise_en	= { 0x0080, 5, 5, 0, 1 },
49782+				.disrise_st	= { 0x0084, 5, 5, 0, 1 },
49783+				.disrise_clr	= { 0x0088, 5, 5, 0, 1 },
49784+				.utmi_ls	= { 0x00c0, 10, 9, 0, 1 },
49785+			}
49786+		},
49787+	},
49788+	{ /* sentinel */ }
49789+};
49790+
49791+static const struct rockchip_usb2phy_cfg rv1106_phy_cfgs[] = {
49792+	{
49793+		.reg = 0xff3e0000,
49794+		.num_ports	= 1,
49795+		.phy_tuning	= rv1106_usb2phy_tuning,
49796+		.clkout_ctl	= { 0x0058, 4, 4, 1, 0 },
49797+		.port_cfgs	= {
49798+			[USB2PHY_PORT_OTG] = {
49799+				.phy_sus	= { 0x0050, 8, 0, 0, 0x1d1 },
49800+				.bvalid_det_en	= { 0x0100, 2, 2, 0, 1 },
49801+				.bvalid_det_st	= { 0x0104, 2, 2, 0, 1 },
49802+				.bvalid_det_clr = { 0x0108, 2, 2, 0, 1 },
49803+				.bvalid_grf_sel	= { 0x0058, 15, 14, 0, 3 },
49804+				.iddig_output	= { 0x0050, 10, 10, 0, 1 },
49805+				.iddig_en	= { 0x0050, 9, 9, 0, 1 },
49806+				.idfall_det_en	= { 0x0100, 5, 5, 0, 1 },
49807+				.idfall_det_st	= { 0x0104, 5, 5, 0, 1 },
49808+				.idfall_det_clr = { 0x0108, 5, 5, 0, 1 },
49809+				.idrise_det_en	= { 0x0100, 4, 4, 0, 1 },
49810+				.idrise_det_st	= { 0x0104, 4, 4, 0, 1 },
49811+				.idrise_det_clr = { 0x0108, 4, 4, 0, 1 },
49812+				.ls_det_en	= { 0x0100, 0, 0, 0, 1 },
49813+				.ls_det_st	= { 0x0104, 0, 0, 0, 1 },
49814+				.ls_det_clr	= { 0x0108, 0, 0, 0, 1 },
49815+				.utmi_avalid	= { 0x0060, 10, 10, 0, 1 },
49816+				.utmi_bvalid	= { 0x0060, 9, 9, 0, 1 },
49817+				.utmi_iddig	= { 0x0060, 6, 6, 0, 1 },
49818+				.utmi_ls	= { 0x0060, 5, 4, 0, 1 },
49819+			},
49820+		},
49821+		.chg_det = {
49822+			.chg_mode	= { 0x0050, 8, 0, 0, 0x1d7 },
49823+			.cp_det		= { 0x0060, 13, 13, 0, 1 },
49824+			.dcp_det	= { 0x0060, 12, 12, 0, 1 },
49825+			.dp_det		= { 0x0060, 14, 14, 0, 1 },
49826+			.idm_sink_en	= { 0x0058, 8, 8, 0, 1 },
49827+			.idp_sink_en	= { 0x0058, 7, 7, 0, 1 },
49828+			.idp_src_en	= { 0x0058, 9, 9, 0, 1 },
49829+			.rdm_pdwn_en	= { 0x0058, 10, 10, 0, 1 },
49830+			.vdm_src_en	= { 0x0058, 12, 12, 0, 1 },
49831+			.vdp_src_en	= { 0x0058, 11, 11, 0, 1 },
49832+		},
49833 	},
49834 	{ /* sentinel */ }
49835 };
49836@@ -1409,7 +3547,7 @@ static const struct rockchip_usb2phy_cfg rv1108_phy_cfgs[] = {
49837 			}
49838 		},
49839 		.chg_det = {
49840-			.opmode		= { 0x0100, 3, 0, 5, 1 },
49841+			.chg_mode	= { 0x0100, 8, 0, 0, 0x1d7 },
49842 			.cp_det		= { 0x0804, 1, 1, 0, 1 },
49843 			.dcp_det	= { 0x0804, 0, 0, 0, 1 },
49844 			.dp_det		= { 0x0804, 2, 2, 0, 1 },
49845@@ -1425,12 +3563,36 @@ static const struct rockchip_usb2phy_cfg rv1108_phy_cfgs[] = {
49846 };
49847 
49848 static const struct of_device_id rockchip_usb2phy_dt_match[] = {
49849-	{ .compatible = "rockchip,px30-usb2phy", .data = &rk3328_phy_cfgs },
49850+#ifdef CONFIG_CPU_RK1808
49851+	{ .compatible = "rockchip,rk1808-usb2phy", .data = &rk1808_phy_cfgs },
49852+#endif
49853+#ifdef CONFIG_CPU_RK312X
49854+	{ .compatible = "rockchip,rk3128-usb2phy", .data = &rk312x_phy_cfgs },
49855+#endif
49856+#ifdef CONFIG_CPU_RK322X
49857 	{ .compatible = "rockchip,rk3228-usb2phy", .data = &rk3228_phy_cfgs },
49858-	{ .compatible = "rockchip,rk3328-usb2phy", .data = &rk3328_phy_cfgs },
49859+#endif
49860+#ifdef CONFIG_CPU_RK3366
49861 	{ .compatible = "rockchip,rk3366-usb2phy", .data = &rk3366_phy_cfgs },
49862+#endif
49863+#ifdef CONFIG_CPU_RK3368
49864+	{ .compatible = "rockchip,rk3368-usb2phy", .data = &rk3368_phy_cfgs },
49865+#endif
49866+#ifdef CONFIG_CPU_RK3399
49867 	{ .compatible = "rockchip,rk3399-usb2phy", .data = &rk3399_phy_cfgs },
49868+#endif
49869+#ifdef CONFIG_CPU_RK3568
49870+	{ .compatible = "rockchip,rk3568-usb2phy", .data = &rk3568_phy_cfgs },
49871+#endif
49872+#ifdef CONFIG_CPU_RK3588
49873+	{ .compatible = "rockchip,rk3588-usb2phy", .data = &rk3588_phy_cfgs },
49874+#endif
49875+#ifdef CONFIG_CPU_RV1106
49876+	{ .compatible = "rockchip,rv1106-usb2phy", .data = &rv1106_phy_cfgs },
49877+#endif
49878+#ifdef CONFIG_CPU_RV1108
49879 	{ .compatible = "rockchip,rv1108-usb2phy", .data = &rv1108_phy_cfgs },
49880+#endif
49881 	{}
49882 };
49883 MODULE_DEVICE_TABLE(of, rockchip_usb2phy_dt_match);
49884@@ -1439,6 +3601,7 @@ static struct platform_driver rockchip_usb2phy_driver = {
49885 	.probe		= rockchip_usb2phy_probe,
49886 	.driver		= {
49887 		.name	= "rockchip-usb2phy",
49888+		.pm	= ROCKCHIP_USB2PHY_DEV_PM,
49889 		.of_match_table = rockchip_usb2phy_dt_match,
49890 	},
49891 };
49892diff --git a/drivers/phy/rockchip/phy-rockchip-pcie.c b/drivers/phy/rockchip/phy-rockchip-pcie.c
49893index 75216091d..2f47a3e7f 100644
49894--- a/drivers/phy/rockchip/phy-rockchip-pcie.c
49895+++ b/drivers/phy/rockchip/phy-rockchip-pcie.c
49896@@ -182,6 +182,12 @@ static int rockchip_pcie_phy_power_on(struct phy *phy)
49897 
49898 	mutex_lock(&rk_phy->pcie_mutex);
49899 
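+	/*
+	 * Deassert the lane-idle bit for this lane before the power refcount
+	 * check, so it is presumably cleared on every power_on() call rather
+	 * than only when the first lane brings the PHY up.
+	 */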
49900+	regmap_write(rk_phy->reg_base,
49901+		     rk_phy->phy_data->pcie_laneoff,
49902+		     HIWORD_UPDATE(!PHY_LANE_IDLE_OFF,
49903+				   PHY_LANE_IDLE_MASK,
49904+				   PHY_LANE_IDLE_A_SHIFT + inst->index));
49905+
49906 	if (rk_phy->pwr_cnt++)
49907 		goto err_out;
49908 
49909@@ -196,12 +202,6 @@ static int rockchip_pcie_phy_power_on(struct phy *phy)
49910 				   PHY_CFG_ADDR_MASK,
49911 				   PHY_CFG_ADDR_SHIFT));
49912 
49913-	regmap_write(rk_phy->reg_base,
49914-		     rk_phy->phy_data->pcie_laneoff,
49915-		     HIWORD_UPDATE(!PHY_LANE_IDLE_OFF,
49916-				   PHY_LANE_IDLE_MASK,
49917-				   PHY_LANE_IDLE_A_SHIFT + inst->index));
49918-
49919 	/*
49920 	 * No documented timeout value for phy operation below,
49921 	 * so we make it large enough here. And we use loop-break
49922diff --git a/drivers/phy/rockchip/phy-rockchip-typec.c b/drivers/phy/rockchip/phy-rockchip-typec.c
49923index 70a31251b..676c213ce 100644
49924--- a/drivers/phy/rockchip/phy-rockchip-typec.c
49925+++ b/drivers/phy/rockchip/phy-rockchip-typec.c
49926@@ -54,6 +54,7 @@
49927 
49928 #include <linux/mfd/syscon.h>
49929 #include <linux/phy/phy.h>
49930+#include <linux/phy/phy-rockchip-typec.h>
49931 
49932 #define CMN_SSM_BANDGAP			(0x21 << 2)
49933 #define CMN_SSM_BIAS			(0x22 << 2)
49934@@ -285,13 +286,37 @@
49935 #define RX_DIAG_SIGDET_TUNE(n)		((0x81dc | ((n) << 9)) << 2)
49936 #define RX_DIAG_SC2C_DELAY		(0x81e1 << 2)
49937 
49938-#define PMA_LANE_CFG			(0xc000 << 2)
49939+#define PHY_PMA_LANE_CFG		(0xc000 << 2)
49940+#define PMA_LANE3_DP_LANE_SEL(x)	(((x) & 0x3) << 14)
49941+#define PMA_LANE3_INTERFACE_SEL(x)	(((x) & 0x1) << 12)
49942+#define PMA_LANE2_DP_LANE_SEL(x)	(((x) & 0x3) << 10)
49943+#define PMA_LANE2_INTERFACE_SEL(x)	(((x) & 0x1) << 8)
49944+#define PMA_LANE1_DP_LANE_SEL(x)	(((x) & 0x3) << 6)
49945+#define PMA_LANE1_INTERFACE_SEL(x)	(((x) & 0x1) << 4)
49946+#define PMA_LANE0_DP_LANE_SEL(x)	(((x) & 0x3) << 2)
49947+#define PMA_LANE0_INTERFACE_SEL(x)	(((x) & 0x1) << 0)
49948 #define PIPE_CMN_CTRL1			(0xc001 << 2)
49949 #define PIPE_CMN_CTRL2			(0xc002 << 2)
49950 #define PIPE_COM_LOCK_CFG1		(0xc003 << 2)
49951 #define PIPE_COM_LOCK_CFG2		(0xc004 << 2)
49952 #define PIPE_RCV_DET_INH		(0xc005 << 2)
49953-#define DP_MODE_CTL			(0xc008 << 2)
49954+#define PHY_DP_MODE_CTL			(0xc008 << 2)
49955+#define PHY_DP_LANE_DISABLE		GENMASK(15, 12)
49956+#define PHY_DP_LANE_3_DISABLE		BIT(15)
49957+#define PHY_DP_LANE_2_DISABLE		BIT(14)
49958+#define PHY_DP_LANE_1_DISABLE		BIT(13)
49959+#define PHY_DP_LANE_0_DISABLE		BIT(12)
49960+#define PHY_DP_POWER_STATE_ACK_MASK	GENMASK(7, 4)
49961+#define PHY_DP_POWER_STATE_ACK_SHIFT	4
49962+#define PHY_DP_POWER_STATE_MASK		GENMASK(3, 0)
49963+#define PHY_DP_CLK_CTL			(0xc009 << 2)
49964+#define DP_PLL_CLOCK_ENABLE_ACK		BIT(3)
49965+#define DP_PLL_CLOCK_ENABLE_MASK	BIT(2)
49966+#define DP_PLL_CLOCK_DISABLE		0
49967+#define DP_PLL_READY			BIT(1)
49968+#define DP_PLL_ENABLE_MASK		BIT(0)
49969+#define DP_PLL_ENABLE			BIT(0)
49970+#define DP_PLL_DISABLE			0
49971 #define DP_CLK_CTL			(0xc009 << 2)
49972 #define STS				(0xc00F << 2)
49973 #define PHY_ISO_CMN_CTRL		(0xc010 << 2)
49974@@ -314,21 +339,29 @@
49975  * clock 0: PLL 0 div 1
49976  * clock 1: PLL 1 div 2
49977  */
49978-#define CLK_PLL_CONFIG			0X30
49979+#define CLK_PLL1_DIV1			0x20
49980+#define CLK_PLL1_DIV2			0x30
49981 #define CLK_PLL_MASK			0x33
49982 
49983 #define CMN_READY			BIT(0)
49984 
49985+#define DP_PLL_CLOCK_ENABLE_ACK		BIT(3)
49986 #define DP_PLL_CLOCK_ENABLE		BIT(2)
49987+#define DP_PLL_ENABLE_ACK		BIT(1)
49988 #define DP_PLL_ENABLE			BIT(0)
49989 #define DP_PLL_DATA_RATE_RBR		((2 << 12) | (4 << 8))
49990 #define DP_PLL_DATA_RATE_HBR		((2 << 12) | (4 << 8))
49991 #define DP_PLL_DATA_RATE_HBR2		((1 << 12) | (2 << 8))
49992+#define DP_PLL_DATA_RATE_MASK		0xff00
49993 
49994-#define DP_MODE_A0			BIT(4)
49995-#define DP_MODE_A2			BIT(6)
49996-#define DP_MODE_ENTER_A0		0xc101
49997-#define DP_MODE_ENTER_A2		0xc104
49998+#define DP_MODE_MASK			0xf
49999+#define DP_MODE_ENTER_A0		BIT(0)
50000+#define DP_MODE_ENTER_A2		BIT(2)
50001+#define DP_MODE_ENTER_A3		BIT(3)
50002+#define DP_MODE_A0_ACK			BIT(4)
50003+#define DP_MODE_A2_ACK			BIT(6)
50004+#define DP_MODE_A3_ACK			BIT(7)
50005+#define DP_LINK_RESET_DEASSERTED	BIT(8)
50006 
50007 #define PHY_MODE_SET_TIMEOUT		100000
50008 
50009@@ -340,6 +373,10 @@
50010 #define MODE_DFP_USB			BIT(1)
50011 #define MODE_DFP_DP			BIT(2)
50012 
50013+#define DP_DEFAULT_RATE			162000
50014+
50015+#define POWER_ON_TRIES			5
50016+
50017 struct usb3phy_reg {
50018 	u32 offset;
50019 	u32 enable_bit;
50020@@ -368,6 +405,11 @@ struct rockchip_usb3phy_port_cfg {
50021 	struct usb3phy_reg uphy_dp_sel;
50022 };
50023 
50024+struct phy_config {
50025+	int swing;
50026+	int pe;
50027+};
50028+
50029 struct rockchip_typec_phy {
50030 	struct device *dev;
50031 	void __iomem *base;
50032@@ -384,6 +426,7 @@ struct rockchip_typec_phy {
50033 
50034 	bool flip;
50035 	u8 mode;
50036+	struct phy_config config[3][4];
50037 };
50038 
50039 struct phy_reg {
50040@@ -408,26 +451,136 @@ static struct phy_reg usb3_pll_cfg[] = {
50041 	{ 0x8,		CMN_DIAG_PLL0_LF_PROG },
50042 };
50043 
50044-static struct phy_reg dp_pll_cfg[] = {
50045-	{ 0xf0,		CMN_PLL1_VCOCAL_INIT },
50046-	{ 0x18,		CMN_PLL1_VCOCAL_ITER },
50047-	{ 0x30b9,	CMN_PLL1_VCOCAL_START },
50048-	{ 0x21c,	CMN_PLL1_INTDIV },
50049-	{ 0,		CMN_PLL1_FRACDIV },
50050-	{ 0x5,		CMN_PLL1_HIGH_THR },
50051-	{ 0x35,		CMN_PLL1_SS_CTRL1 },
50052-	{ 0x7f1e,	CMN_PLL1_SS_CTRL2 },
50053-	{ 0x20,		CMN_PLL1_DSM_DIAG },
50054-	{ 0,		CMN_PLLSM1_USER_DEF_CTRL },
50055-	{ 0,		CMN_DIAG_PLL1_OVRD },
50056-	{ 0,		CMN_DIAG_PLL1_FBH_OVRD },
50057-	{ 0,		CMN_DIAG_PLL1_FBL_OVRD },
50058-	{ 0x6,		CMN_DIAG_PLL1_V2I_TUNE },
50059-	{ 0x45,		CMN_DIAG_PLL1_CP_TUNE },
50060-	{ 0x8,		CMN_DIAG_PLL1_LF_PROG },
50061-	{ 0x100,	CMN_DIAG_PLL1_PTATIS_TUNE1 },
50062-	{ 0x7,		CMN_DIAG_PLL1_PTATIS_TUNE2 },
50063-	{ 0x4,		CMN_DIAG_PLL1_INCLK_CTRL },
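+/*
+ * One PLL1 table per DP link rate (RBR/HBR/HBR2), each in a plain and a
+ * spread-spectrum (SSC) variant: tcphy_cfg_dp_pll() loads the plain table
+ * for the initial rate, and tcphy_dp_set_link_rate() reloads the matching
+ * table (SSC or not) when the link rate is changed.
+ */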
50064+static const struct phy_reg dp_pll_rbr_cfg[] = {
50065+	{ 0x00f0, CMN_PLL1_VCOCAL_INIT },
50066+	{ 0x0018, CMN_PLL1_VCOCAL_ITER },
50067+	{ 0x30b9, CMN_PLL1_VCOCAL_START },
50068+	{ 0x0087, CMN_PLL1_INTDIV },
50069+	{ 0x0000, CMN_PLL1_FRACDIV },
50070+	{ 0x0022, CMN_PLL1_HIGH_THR },
50071+	{ 0x8000, CMN_PLL1_SS_CTRL1 },
50072+	{ 0x0000, CMN_PLL1_SS_CTRL2 },
50073+	{ 0x0020, CMN_PLL1_DSM_DIAG },
50074+	{ 0x0000, CMN_PLLSM1_USER_DEF_CTRL },
50075+	{ 0x0000, CMN_DIAG_PLL1_OVRD },
50076+	{ 0x0000, CMN_DIAG_PLL1_FBH_OVRD },
50077+	{ 0x0000, CMN_DIAG_PLL1_FBL_OVRD },
50078+	{ 0x0006, CMN_DIAG_PLL1_V2I_TUNE },
50079+	{ 0x0045, CMN_DIAG_PLL1_CP_TUNE },
50080+	{ 0x0008, CMN_DIAG_PLL1_LF_PROG },
50081+	{ 0x0100, CMN_DIAG_PLL1_PTATIS_TUNE1 },
50082+	{ 0x0007, CMN_DIAG_PLL1_PTATIS_TUNE2 },
50083+	{ 0x0001, CMN_DIAG_PLL1_INCLK_CTRL },
50084+};
50085+
50086+static const struct phy_reg dp_pll_rbr_ssc_cfg[] = {
50087+	{ 0x00f0, CMN_PLL1_VCOCAL_INIT },
50088+	{ 0x0018, CMN_PLL1_VCOCAL_ITER },
50089+	{ 0x30b9, CMN_PLL1_VCOCAL_START },
50090+	{ 0x0086, CMN_PLL1_INTDIV },
50091+	{ 0xf915, CMN_PLL1_FRACDIV },
50092+	{ 0x0022, CMN_PLL1_HIGH_THR },
50093+	{ 0x0140, CMN_PLL1_SS_CTRL1 },
50094+	{ 0x7f03, CMN_PLL1_SS_CTRL2 },
50095+	{ 0x0020, CMN_PLL1_DSM_DIAG },
50096+	{ 0x0000, CMN_PLLSM1_USER_DEF_CTRL },
50097+	{ 0x0000, CMN_DIAG_PLL1_OVRD },
50098+	{ 0x0000, CMN_DIAG_PLL1_FBH_OVRD },
50099+	{ 0x0000, CMN_DIAG_PLL1_FBL_OVRD },
50100+	{ 0x0006, CMN_DIAG_PLL1_V2I_TUNE },
50101+	{ 0x0045, CMN_DIAG_PLL1_CP_TUNE },
50102+	{ 0x0008, CMN_DIAG_PLL1_LF_PROG },
50103+	{ 0x0100, CMN_DIAG_PLL1_PTATIS_TUNE1 },
50104+	{ 0x0007, CMN_DIAG_PLL1_PTATIS_TUNE2 },
50105+	{ 0x0001, CMN_DIAG_PLL1_INCLK_CTRL },
50106+};
50107+
50108+static const struct phy_reg dp_pll_hbr_cfg[] = {
50109+	{ 0x00f0, CMN_PLL1_VCOCAL_INIT },
50110+	{ 0x0018, CMN_PLL1_VCOCAL_ITER },
50111+	{ 0x30b4, CMN_PLL1_VCOCAL_START },
50112+	{ 0x00e1, CMN_PLL1_INTDIV },
50113+	{ 0x0000, CMN_PLL1_FRACDIV },
50114+	{ 0x0005, CMN_PLL1_HIGH_THR },
50115+	{ 0x8000, CMN_PLL1_SS_CTRL1 },
50116+	{ 0x0000, CMN_PLL1_SS_CTRL2 },
50117+	{ 0x0020, CMN_PLL1_DSM_DIAG },
50118+	{ 0x1000, CMN_PLLSM1_USER_DEF_CTRL },
50119+	{ 0x0000, CMN_DIAG_PLL1_OVRD },
50120+	{ 0x0000, CMN_DIAG_PLL1_FBH_OVRD },
50121+	{ 0x0000, CMN_DIAG_PLL1_FBL_OVRD },
50122+	{ 0x0007, CMN_DIAG_PLL1_V2I_TUNE },
50123+	{ 0x0045, CMN_DIAG_PLL1_CP_TUNE },
50124+	{ 0x0008, CMN_DIAG_PLL1_LF_PROG },
50125+	{ 0x0001, CMN_DIAG_PLL1_PTATIS_TUNE1 },
50126+	{ 0x0001, CMN_DIAG_PLL1_PTATIS_TUNE2 },
50127+	{ 0x0001, CMN_DIAG_PLL1_INCLK_CTRL },
50128+};
50129+
50130+static const struct phy_reg dp_pll_hbr_ssc_cfg[] = {
50131+	{ 0x00f0, CMN_PLL1_VCOCAL_INIT },
50132+	{ 0x0018, CMN_PLL1_VCOCAL_ITER },
50133+	{ 0x30b4, CMN_PLL1_VCOCAL_START },
50134+	{ 0x00e0, CMN_PLL1_INTDIV },
50135+	{ 0xf479, CMN_PLL1_FRACDIV },
50136+	{ 0x0038, CMN_PLL1_HIGH_THR },
50137+	{ 0x0204, CMN_PLL1_SS_CTRL1 },
50138+	{ 0x7f03, CMN_PLL1_SS_CTRL2 },
50139+	{ 0x0020, CMN_PLL1_DSM_DIAG },
50140+	{ 0x1000, CMN_PLLSM1_USER_DEF_CTRL },
50141+	{ 0x0000, CMN_DIAG_PLL1_OVRD },
50142+	{ 0x0000, CMN_DIAG_PLL1_FBH_OVRD },
50143+	{ 0x0000, CMN_DIAG_PLL1_FBL_OVRD },
50144+	{ 0x0007, CMN_DIAG_PLL1_V2I_TUNE },
50145+	{ 0x0045, CMN_DIAG_PLL1_CP_TUNE },
50146+	{ 0x0008, CMN_DIAG_PLL1_LF_PROG },
50147+	{ 0x0001, CMN_DIAG_PLL1_PTATIS_TUNE1 },
50148+	{ 0x0001, CMN_DIAG_PLL1_PTATIS_TUNE2 },
50149+	{ 0x0001, CMN_DIAG_PLL1_INCLK_CTRL },
50150+};
50151+
50152+static const struct phy_reg dp_pll_hbr2_cfg[] = {
50153+	{ 0x00f0, CMN_PLL1_VCOCAL_INIT },
50154+	{ 0x0018, CMN_PLL1_VCOCAL_ITER },
50155+	{ 0x30b4, CMN_PLL1_VCOCAL_START },
50156+	{ 0x00e1, CMN_PLL1_INTDIV },
50157+	{ 0x0000, CMN_PLL1_FRACDIV },
50158+	{ 0x0005, CMN_PLL1_HIGH_THR },
50159+	{ 0x8000, CMN_PLL1_SS_CTRL1 },
50160+	{ 0x0000, CMN_PLL1_SS_CTRL2 },
50161+	{ 0x0020, CMN_PLL1_DSM_DIAG },
50162+	{ 0x1000, CMN_PLLSM1_USER_DEF_CTRL },
50163+	{ 0x0000, CMN_DIAG_PLL1_OVRD },
50164+	{ 0x0000, CMN_DIAG_PLL1_FBH_OVRD },
50165+	{ 0x0000, CMN_DIAG_PLL1_FBL_OVRD },
50166+	{ 0x0007, CMN_DIAG_PLL1_V2I_TUNE },
50167+	{ 0x0045, CMN_DIAG_PLL1_CP_TUNE },
50168+	{ 0x0008, CMN_DIAG_PLL1_LF_PROG },
50169+	{ 0x0001, CMN_DIAG_PLL1_PTATIS_TUNE1 },
50170+	{ 0x0001, CMN_DIAG_PLL1_PTATIS_TUNE2 },
50171+	{ 0x0001, CMN_DIAG_PLL1_INCLK_CTRL },
50172+};
50173+
50174+static const struct phy_reg dp_pll_hbr2_ssc_cfg[] = {
50175+	{ 0x00f0, CMN_PLL1_VCOCAL_INIT },
50176+	{ 0x0018, CMN_PLL1_VCOCAL_ITER },
50177+	{ 0x30b4, CMN_PLL1_VCOCAL_START },
50178+	{ 0x00e0, CMN_PLL1_INTDIV },
50179+	{ 0xf479, CMN_PLL1_FRACDIV },
50180+	{ 0x0038, CMN_PLL1_HIGH_THR },
50181+	{ 0x0204, CMN_PLL1_SS_CTRL1 },
50182+	{ 0x7f03, CMN_PLL1_SS_CTRL2 },
50183+	{ 0x0020, CMN_PLL1_DSM_DIAG },
50184+	{ 0x1000, CMN_PLLSM1_USER_DEF_CTRL },
50185+	{ 0x0000, CMN_DIAG_PLL1_OVRD },
50186+	{ 0x0000, CMN_DIAG_PLL1_FBH_OVRD },
50187+	{ 0x0000, CMN_DIAG_PLL1_FBL_OVRD },
50188+	{ 0x0007, CMN_DIAG_PLL1_V2I_TUNE },
50189+	{ 0x0045, CMN_DIAG_PLL1_CP_TUNE },
50190+	{ 0x0008, CMN_DIAG_PLL1_LF_PROG },
50191+	{ 0x0001, CMN_DIAG_PLL1_PTATIS_TUNE1 },
50192+	{ 0x0001, CMN_DIAG_PLL1_PTATIS_TUNE2 },
50193+	{ 0x0001, CMN_DIAG_PLL1_INCLK_CTRL },
50194 };
50195 
50196 static const struct rockchip_usb3phy_port_cfg rk3399_usb3phy_port_cfgs[] = {
50197@@ -454,6 +607,134 @@ static const struct rockchip_usb3phy_port_cfg rk3399_usb3phy_port_cfgs[] = {
50198 	{ /* sentinel */ }
50199 };
50200 
50201+/* default phy config */
50202+static const struct phy_config tcphy_default_config[3][4] = {
50203+	{{ .swing = 0x2a, .pe = 0x00 },
50204+	 { .swing = 0x1f, .pe = 0x15 },
50205+	 { .swing = 0x14, .pe = 0x22 },
50206+	 { .swing = 0x02, .pe = 0x2b } },
50207+
50208+	{{ .swing = 0x21, .pe = 0x00 },
50209+	 { .swing = 0x12, .pe = 0x15 },
50210+	 { .swing = 0x02, .pe = 0x22 },
50211+	 { .swing = 0,    .pe = 0 } },
50212+
50213+	{{ .swing = 0x15, .pe = 0x00 },
50214+	 { .swing = 0x00, .pe = 0x15 },
50215+	 { .swing = 0,    .pe = 0 },
50216+	 { .swing = 0,    .pe = 0 } },
50217+};
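+/*
+ * The table above is indexed as [voltage swing level][pre-emphasis level]
+ * (three swing levels, four pre-emphasis levels), matching the
+ * config[swing][pre_emp] lookup in tcphy_dp_cfg_lane(). Boards can
+ * presumably override these values through the "rockchip,phy-config" DT
+ * property parsed in tcphy_parse_dt().
+ */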
50218+
50219+enum phy_dp_power_state {
50220+	PHY_DP_POWER_STATE_DISABLED = -1,
50221+	PHY_DP_POWER_STATE_A0,
50222+	PHY_DP_POWER_STATE_A1,
50223+	PHY_DP_POWER_STATE_A2,
50224+	PHY_DP_POWER_STATE_A3,
50225+};
50226+
50227+static int tcphy_dp_set_power_state(struct rockchip_typec_phy *tcphy,
50228+				    enum phy_dp_power_state state)
50229+{
50230+	u32 ack, reg, sts = BIT(state);
50231+	int ret;
50232+
50233+	/*
50234+	 * Power state changes must not be requested until after the cmn_ready
50235+	 * signal has gone active.
50236+	 */
50237+	reg = readl(tcphy->base + PMA_CMN_CTRL1);
50238+	if (!(reg & CMN_READY)) {
50239+		dev_err(tcphy->dev, "cmn_ready in the inactive state\n");
50240+		return -EINVAL;
50241+	}
50242+
50243+	reg = readl(tcphy->base + PHY_DP_MODE_CTL);
50244+	reg &= ~PHY_DP_POWER_STATE_MASK;
50245+	reg |= sts;
50246+	writel(reg, tcphy->base + PHY_DP_MODE_CTL);
50247+
50248+	ret = readl_poll_timeout(tcphy->base + PHY_DP_MODE_CTL,
50249+				 ack, (((ack & PHY_DP_POWER_STATE_ACK_MASK) >>
50250+				 PHY_DP_POWER_STATE_ACK_SHIFT) == sts), 10,
50251+				 PHY_MODE_SET_TIMEOUT);
50252+	if (ret < 0) {
50253+		dev_err(tcphy->dev, "failed to enter power state %d\n", state);
50254+		return ret;
50255+	}
50256+
50257+	return 0;
50258+}
50259+
50260+enum {
50261+	PHY_DP_LANE_0,
50262+	PHY_DP_LANE_1,
50263+	PHY_DP_LANE_2,
50264+	PHY_DP_LANE_3,
50265+};
50266+
50267+enum {
50268+	PMA_IF_PIPE_PCS,
50269+	PMA_IF_PHY_DP,
50270+};
50271+
50272+/*
50273+ * For the TypeC PHY, the 4 lanes map to the USB TypeC receptacle pins
50274+ * as follows:
50275+ *   -------------------------------------------------------------------
50276+ *	PHY Lanes/Module Pins			TypeC Receptacle Pins
50277+ *   -------------------------------------------------------------------
50278+ *	Lane0 (tx_p/m_ln_0)			TX1+/TX1- (pins A2/A3)
50279+ *	Lane1 (tx_rx_p/m_ln_1)			RX1+/RX1- (pins B11/B10)
50280+ *	Lane2 (tx_rx_p/m_ln_2)			RX2+/RX2- (pins A11/A10)
50281+ *	Lane3 (tx_p/m_ln_3)			TX2+/TX2- (pins B2/B3)
50282+ *   -------------------------------------------------------------------
50283+ *
50284+ * The USB and DP lane mapping to TypeC PHY lanes, for each of the pin
50285+ * assignment options (normal connector orientation) described in the VESA
50286+ * DisplayPort Alt Mode on USB TypeC Standard, is as follows:
50287+ *
50288+ * ----------------------------------------------------------------------
50289+ *	PHY Lanes	A	B	C	D	E	F
50290+ * ----------------------------------------------------------------------
50291+ *	  0	       ML1     SSTX    ML2     SSTX    ML2     SSTX
50292+ *	  1	       ML3     SSRX    ML3     SSRX    ML3     SSRX
50293+ *	  2	       ML2     ML1     ML0     ML0     ML0     ML0
50294+ *	  3	       ML0     ML0     ML1     ML1     ML1     ML1
50295+ * ----------------------------------------------------------------------
50296+ */
50297+static void tcphy_set_lane_mapping(struct rockchip_typec_phy *tcphy, u8 mode)
50298+{
50299+	/*
50300+	 * The PHY_PMA_LANE_CFG register selects whether each PMA lane is
50301+	 * mapped to USB or to PHY DP. It is configured for the normal
50302+	 * connector orientation; logic in the PHY automatically handles the
50303+	 * flipped-connector case based on the TypeC PHY's orientation
50304+	 * setting.
50305+	 */
50306+	if (mode == MODE_DFP_DP) {
50307+		/* This maps to VESA DP Alt Mode pin assignments C and E. */
50308+		writel(PMA_LANE3_DP_LANE_SEL(PHY_DP_LANE_1) |
50309+		       PMA_LANE3_INTERFACE_SEL(PMA_IF_PHY_DP) |
50310+		       PMA_LANE2_DP_LANE_SEL(PHY_DP_LANE_0) |
50311+		       PMA_LANE2_INTERFACE_SEL(PMA_IF_PHY_DP) |
50312+		       PMA_LANE1_DP_LANE_SEL(PHY_DP_LANE_3) |
50313+		       PMA_LANE1_INTERFACE_SEL(PMA_IF_PHY_DP) |
50314+		       PMA_LANE0_DP_LANE_SEL(PHY_DP_LANE_2) |
50315+		       PMA_LANE0_INTERFACE_SEL(PMA_IF_PHY_DP),
50316+		       tcphy->base + PHY_PMA_LANE_CFG);
50317+	} else {
50318+		/* This maps to VESA DP Alt Mode pin assignments D and F. */
50319+		writel(PMA_LANE3_DP_LANE_SEL(PHY_DP_LANE_1) |
50320+		       PMA_LANE3_INTERFACE_SEL(PMA_IF_PHY_DP) |
50321+		       PMA_LANE2_DP_LANE_SEL(PHY_DP_LANE_0) |
50322+		       PMA_LANE2_INTERFACE_SEL(PMA_IF_PHY_DP) |
50323+		       PMA_LANE1_INTERFACE_SEL(PMA_IF_PIPE_PCS) |
50324+		       PMA_LANE0_INTERFACE_SEL(PMA_IF_PIPE_PCS),
50325+		       tcphy->base + PHY_PMA_LANE_CFG);
50326+	}
50327+}
50328+
50329 static void tcphy_cfg_24m(struct rockchip_typec_phy *tcphy)
50330 {
50331 	u32 i, rdata;
50332@@ -475,7 +756,7 @@ static void tcphy_cfg_24m(struct rockchip_typec_phy *tcphy)
50333 
50334 	rdata = readl(tcphy->base + CMN_DIAG_HSCLK_SEL);
50335 	rdata &= ~CLK_PLL_MASK;
50336-	rdata |= CLK_PLL_CONFIG;
50337+	rdata |= CLK_PLL1_DIV2;
50338 	writel(rdata, tcphy->base + CMN_DIAG_HSCLK_SEL);
50339 }
50340 
50341@@ -489,17 +770,44 @@ static void tcphy_cfg_usb3_pll(struct rockchip_typec_phy *tcphy)
50342 		       tcphy->base + usb3_pll_cfg[i].addr);
50343 }
50344 
50345-static void tcphy_cfg_dp_pll(struct rockchip_typec_phy *tcphy)
50346+static void tcphy_cfg_dp_pll(struct rockchip_typec_phy *tcphy, int link_rate)
50347 {
50348-	u32 i;
50349+	const struct phy_reg *phy_cfg;
50350+	u32 clk_ctrl;
50351+	u32 i, cfg_size, hsclk_sel;
50352+
50353+	hsclk_sel = readl(tcphy->base + CMN_DIAG_HSCLK_SEL);
50354+	hsclk_sel &= ~CLK_PLL_MASK;
50355+
50356+	switch (link_rate) {
50357+	case 540000:
50358+		clk_ctrl = DP_PLL_DATA_RATE_HBR2;
50359+		hsclk_sel |= CLK_PLL1_DIV1;
50360+		phy_cfg = dp_pll_hbr2_cfg;
50361+		cfg_size = ARRAY_SIZE(dp_pll_hbr2_cfg);
50362+		break;
50363+	case 270000:
50364+		clk_ctrl = DP_PLL_DATA_RATE_HBR;
50365+		hsclk_sel |= CLK_PLL1_DIV2;
50366+		phy_cfg = dp_pll_hbr_cfg;
50367+		cfg_size = ARRAY_SIZE(dp_pll_hbr_cfg);
50368+		break;
50369+	case 162000:
50370+	default:
50371+		clk_ctrl = DP_PLL_DATA_RATE_RBR;
50372+		hsclk_sel |= CLK_PLL1_DIV2;
50373+		phy_cfg = dp_pll_rbr_cfg;
50374+		cfg_size = ARRAY_SIZE(dp_pll_rbr_cfg);
50375+		break;
50376+	}
50377 
50378-	/* set the default mode to RBR */
50379-	writel(DP_PLL_CLOCK_ENABLE | DP_PLL_ENABLE | DP_PLL_DATA_RATE_RBR,
50380-	       tcphy->base + DP_CLK_CTL);
50381+	clk_ctrl |= DP_PLL_CLOCK_ENABLE | DP_PLL_ENABLE;
50382+	writel(clk_ctrl, tcphy->base + PHY_DP_CLK_CTL);
50383+	writel(hsclk_sel, tcphy->base + CMN_DIAG_HSCLK_SEL);
50384 
50385 	/* load the configuration of PLL1 */
50386-	for (i = 0; i < ARRAY_SIZE(dp_pll_cfg); i++)
50387-		writel(dp_pll_cfg[i].value, tcphy->base + dp_pll_cfg[i].addr);
50388+	for (i = 0; i < cfg_size; i++)
50389+		writel(phy_cfg[i].value, tcphy->base + phy_cfg[i].addr);
50390 }
50391 
50392 static void tcphy_tx_usb3_cfg_lane(struct rockchip_typec_phy *tcphy, u32 lane)
50393@@ -526,9 +834,10 @@ static void tcphy_rx_usb3_cfg_lane(struct rockchip_typec_phy *tcphy, u32 lane)
50394 	writel(0xfb, tcphy->base + XCVR_DIAG_BIDI_CTRL(lane));
50395 }
50396 
50397-static void tcphy_dp_cfg_lane(struct rockchip_typec_phy *tcphy, u32 lane)
50398+static void tcphy_dp_cfg_lane(struct rockchip_typec_phy *tcphy, int link_rate,
50399+			      u8 swing, u8 pre_emp, u32 lane)
50400 {
50401-	u16 rdata;
50402+	u16 val;
50403 
50404 	writel(0xbefc, tcphy->base + XCVR_PSM_RCTRL(lane));
50405 	writel(0x6799, tcphy->base + TX_PSC_A0(lane));
50406@@ -536,27 +845,234 @@ static void tcphy_dp_cfg_lane(struct rockchip_typec_phy *tcphy, u32 lane)
50407 	writel(0x98, tcphy->base + TX_PSC_A2(lane));
50408 	writel(0x98, tcphy->base + TX_PSC_A3(lane));
50409 
50410-	writel(0, tcphy->base + TX_TXCC_MGNFS_MULT_000(lane));
50411-	writel(0, tcphy->base + TX_TXCC_MGNFS_MULT_001(lane));
50412-	writel(0, tcphy->base + TX_TXCC_MGNFS_MULT_010(lane));
50413-	writel(0, tcphy->base + TX_TXCC_MGNFS_MULT_011(lane));
50414-	writel(0, tcphy->base + TX_TXCC_MGNFS_MULT_100(lane));
50415-	writel(0, tcphy->base + TX_TXCC_MGNFS_MULT_101(lane));
50416-	writel(0, tcphy->base + TX_TXCC_MGNFS_MULT_110(lane));
50417-	writel(0, tcphy->base + TX_TXCC_MGNFS_MULT_111(lane));
50418-	writel(0, tcphy->base + TX_TXCC_CPOST_MULT_10(lane));
50419-	writel(0, tcphy->base + TX_TXCC_CPOST_MULT_01(lane));
50420-	writel(0, tcphy->base + TX_TXCC_CPOST_MULT_00(lane));
50421-	writel(0, tcphy->base + TX_TXCC_CPOST_MULT_11(lane));
50422-
50423-	writel(0x128, tcphy->base + TX_TXCC_CAL_SCLR_MULT(lane));
50424-	writel(0x400, tcphy->base + TX_DIAG_TX_DRV(lane));
50425-
50426-	rdata = readl(tcphy->base + XCVR_DIAG_PLLDRC_CTRL(lane));
50427-	rdata = (rdata & 0x8fff) | 0x6000;
50428-	writel(rdata, tcphy->base + XCVR_DIAG_PLLDRC_CTRL(lane));
50429+	writel(tcphy->config[swing][pre_emp].swing,
50430+	       tcphy->base + TX_TXCC_MGNFS_MULT_000(lane));
50431+	writel(tcphy->config[swing][pre_emp].pe,
50432+	       tcphy->base + TX_TXCC_CPOST_MULT_00(lane));
50433+
50434+	if (swing == 2 && pre_emp == 0 && link_rate != 540000) {
50435+		writel(0x700, tcphy->base + TX_DIAG_TX_DRV(lane));
50436+		writel(0x13c, tcphy->base + TX_TXCC_CAL_SCLR_MULT(lane));
50437+	} else {
50438+		writel(0x128, tcphy->base + TX_TXCC_CAL_SCLR_MULT(lane));
50439+		writel(0x0400, tcphy->base + TX_DIAG_TX_DRV(lane));
50440+	}
50441+
50442+	val = readl(tcphy->base + XCVR_DIAG_PLLDRC_CTRL(lane));
50443+	val = val & 0x8fff;
50444+	switch (link_rate) {
50445+	case 540000:
50446+		val |= (5 << 12);
50447+		break;
50448+	case 162000:
50449+	case 270000:
50450+	default:
50451+		val |= (6 << 12);
50452+		break;
50453+	}
50454+	writel(val, tcphy->base + XCVR_DIAG_PLLDRC_CTRL(lane));
50455 }
50456 
50457+int tcphy_dp_set_phy_config(struct phy *phy, int link_rate,
50458+			    int lane_count, u8 swing, u8 pre_emp)
50459+{
50460+	struct rockchip_typec_phy *tcphy = phy_get_drvdata(phy);
50461+	u8 i;
50462+
50463+	if (!phy->power_count)
50464+		return -EPERM;
50465+
50466+	if (tcphy->mode == MODE_DFP_DP) {
50467+		for (i = 0; i < 4; i++)
50468+			tcphy_dp_cfg_lane(tcphy, link_rate, swing, pre_emp, i);
50469+	} else {
50470+		if (tcphy->flip) {
50471+			tcphy_dp_cfg_lane(tcphy, link_rate, swing, pre_emp, 0);
50472+			tcphy_dp_cfg_lane(tcphy, link_rate, swing, pre_emp, 1);
50473+		} else {
50474+			tcphy_dp_cfg_lane(tcphy, link_rate, swing, pre_emp, 2);
50475+			tcphy_dp_cfg_lane(tcphy, link_rate, swing, pre_emp, 3);
50476+		}
50477+	}
50478+
50479+	return 0;
50480+}
50481+EXPORT_SYMBOL(tcphy_dp_set_phy_config);
50482+
50483+int tcphy_dp_set_lane_count(struct phy *phy, u8 lane_count)
50484+{
50485+	struct rockchip_typec_phy *tcphy = phy_get_drvdata(phy);
50486+	u32 reg;
50487+
50488+	if (!phy->power_count)
50489+		return -EPERM;
50490+
50491+	/*
50492+	 * When fewer than the configured number of DP lanes are in use,
50493+	 * PHY_DP_MODE_CTL[15:12] must be set to disable and power down the
50494+	 * unused PHY DP lanes (and their mapped PMA lanes). Set the bit in
50495+	 * [15:12] associated with each DP PHY lane that is to be
50496+	 * disabled.
50497+	 */
50498+	reg = readl(tcphy->base + PHY_DP_MODE_CTL);
50499+	reg |= PHY_DP_LANE_DISABLE;
50500+
50501+	switch (lane_count) {
50502+	case 4:
50503+		reg &= ~(PHY_DP_LANE_3_DISABLE | PHY_DP_LANE_2_DISABLE |
50504+			 PHY_DP_LANE_1_DISABLE | PHY_DP_LANE_0_DISABLE);
50505+		break;
50506+	case 2:
50507+		reg &= ~(PHY_DP_LANE_1_DISABLE | PHY_DP_LANE_0_DISABLE);
50508+		break;
50509+	case 1:
50510+		reg &= ~PHY_DP_LANE_0_DISABLE;
50511+		break;
50512+	default:
50513+		return -EINVAL;
50514+	}
50515+
50516+	writel(reg, tcphy->base + PHY_DP_MODE_CTL);
50517+
50518+	return 0;
50519+}
50520+EXPORT_SYMBOL(tcphy_dp_set_lane_count);
50521+
50522+int tcphy_dp_set_link_rate(struct phy *phy, int link_rate, bool ssc_on)
50523+{
50524+	struct rockchip_typec_phy *tcphy = phy_get_drvdata(phy);
50525+	const struct phy_reg *phy_cfg;
50526+	u32 cmn_diag_hsclk_sel, phy_dp_clk_ctl, reg;
50527+	u32 i, cfg_size;
50528+	int ret;
50529+
50530+	if (!phy->power_count)
50531+		return -EPERM;
50532+
50533+	/* Place the PHY lanes in the A3 power state. */
50534+	ret = tcphy_dp_set_power_state(tcphy, PHY_DP_POWER_STATE_A3);
50535+	if (ret) {
50536+		dev_err(tcphy->dev, "failed to enter A3 state: %d\n", ret);
50537+		return ret;
50538+	}
50539+
50540+	/* Gate the PLL clocks from PMA */
50541+	reg = readl(tcphy->base + PHY_DP_CLK_CTL);
50542+	reg &= ~DP_PLL_CLOCK_ENABLE_MASK;
50543+	reg |= DP_PLL_CLOCK_DISABLE;
50544+	writel(reg, tcphy->base + PHY_DP_CLK_CTL);
50545+
50546+	ret = readl_poll_timeout(tcphy->base + PHY_DP_CLK_CTL, reg,
50547+				 !(reg & DP_PLL_CLOCK_ENABLE_ACK),
50548+				 10, PHY_MODE_SET_TIMEOUT);
50549+	if (ret) {
50550+		dev_err(tcphy->dev, "wait DP PLL clock disabled timeout\n");
50551+		return ret;
50552+	}
50553+
50554+	/* Disable the PLL */
50555+	reg = readl(tcphy->base + PHY_DP_CLK_CTL);
50556+	reg &= ~DP_PLL_ENABLE_MASK;
50557+	reg |= DP_PLL_DISABLE;
50558+	writel(reg, tcphy->base + PHY_DP_CLK_CTL);
50559+
50560+	ret = readl_poll_timeout(tcphy->base + PHY_DP_CLK_CTL, reg,
50561+				 !(reg & DP_PLL_READY),
50562+				 10, PHY_MODE_SET_TIMEOUT);
50563+	if (ret) {
50564+		dev_err(tcphy->dev, "wait DP PLL not ready timeout\n");
50565+		return ret;
50566+	}
50567+
50568+	/* Re-configure PHY registers for the new data rate */
50569+	cmn_diag_hsclk_sel = readl(tcphy->base + CMN_DIAG_HSCLK_SEL);
50570+	cmn_diag_hsclk_sel &= ~(GENMASK(5, 4) | GENMASK(1, 0));
50571+
50572+	phy_dp_clk_ctl = readl(tcphy->base + PHY_DP_CLK_CTL);
50573+	phy_dp_clk_ctl &= ~(GENMASK(15, 12) | GENMASK(11, 8));
50574+
50575+	switch (link_rate) {
50576+	case 162000:
50577+		cmn_diag_hsclk_sel |= (3 << 4) | (0 << 0);
50578+		phy_dp_clk_ctl |= (2 << 12) | (4 << 8);
50579+
50580+		phy_cfg = ssc_on ? dp_pll_rbr_ssc_cfg : dp_pll_rbr_cfg;
50581+		cfg_size = ssc_on ? ARRAY_SIZE(dp_pll_rbr_ssc_cfg) :
50582+				    ARRAY_SIZE(dp_pll_rbr_cfg);
50583+		break;
50584+	case 270000:
50585+		cmn_diag_hsclk_sel |= (3 << 4) | (0 << 0);
50586+		phy_dp_clk_ctl |= (2 << 12) | (4 << 8);
50587+
50588+		phy_cfg = ssc_on ? dp_pll_hbr_ssc_cfg : dp_pll_hbr_cfg;
50589+		cfg_size = ssc_on ? ARRAY_SIZE(dp_pll_hbr_ssc_cfg) :
50590+				    ARRAY_SIZE(dp_pll_hbr_cfg);
50591+		break;
50592+	case 540000:
50593+		cmn_diag_hsclk_sel |= (2 << 4) | (0 << 0);
50594+		phy_dp_clk_ctl |= (1 << 12) | (2 << 8);
50595+
50596+		phy_cfg = ssc_on ? dp_pll_hbr2_ssc_cfg : dp_pll_hbr2_cfg;
50597+		cfg_size = ssc_on ? ARRAY_SIZE(dp_pll_hbr2_ssc_cfg) :
50598+				    ARRAY_SIZE(dp_pll_hbr2_cfg);
50599+		break;
50600+	default:
50601+		return -EINVAL;
50602+	}
50603+
50604+	writel(cmn_diag_hsclk_sel, tcphy->base + CMN_DIAG_HSCLK_SEL);
50605+	writel(phy_dp_clk_ctl, tcphy->base + PHY_DP_CLK_CTL);
50606+
50607+	/* load the configuration of PLL1 */
50608+	for (i = 0; i < cfg_size; i++)
50609+		writel(phy_cfg[i].value, tcphy->base + phy_cfg[i].addr);
50610+
50611+	/* Enable the PLL */
50612+	reg = readl(tcphy->base + PHY_DP_CLK_CTL);
50613+	reg &= ~DP_PLL_ENABLE_MASK;
50614+	reg |= DP_PLL_ENABLE;
50615+	writel(reg, tcphy->base + PHY_DP_CLK_CTL);
50616+
50617+	ret = readl_poll_timeout(tcphy->base + PHY_DP_CLK_CTL, reg,
50618+				 reg & DP_PLL_READY,
50619+				 10, PHY_MODE_SET_TIMEOUT);
50620+	if (ret < 0) {
50621+		dev_err(tcphy->dev, "wait DP PLL ready timeout\n");
50622+		return ret;
50623+	}
50624+
50625+	/* Enable PMA PLL clocks */
50626+	reg = readl(tcphy->base + PHY_DP_CLK_CTL);
50627+	reg &= ~DP_PLL_CLOCK_ENABLE_MASK;
50628+	reg |= DP_PLL_CLOCK_ENABLE;
50629+	writel(reg, tcphy->base + PHY_DP_CLK_CTL);
50630+
50631+	ret = readl_poll_timeout(tcphy->base + PHY_DP_CLK_CTL, reg,
50632+				 reg & DP_PLL_CLOCK_ENABLE_ACK,
50633+				 10, PHY_MODE_SET_TIMEOUT);
50634+	if (ret) {
50635+		dev_err(tcphy->dev, "wait DP PLL clock enabled timeout\n");
50636+		return ret;
50637+	}
50638+
50639+	/* The PMA must go through the A2 power state upon a data rate change */
50640+	ret = tcphy_dp_set_power_state(tcphy, PHY_DP_POWER_STATE_A2);
50641+	if (ret) {
50642+		dev_err(tcphy->dev, "failed to enter A2 state: %d\n", ret);
50643+		return ret;
50644+	}
50645+
50646+	/* change the PHY power state to A0 */
50647+	ret = tcphy_dp_set_power_state(tcphy, PHY_DP_POWER_STATE_A0);
50648+	if (ret) {
50649+		dev_err(tcphy->dev, "failed to enter A0 state: %d\n", ret);
50650+		return ret;
50651+	}
50652+
50653+	return 0;
50654+}
50655+EXPORT_SYMBOL(tcphy_dp_set_link_rate);
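+
+/*
+ * tcphy_dp_set_link_rate(), tcphy_dp_set_lane_count() and
+ * tcphy_dp_set_phy_config() are exported for use by the DP controller
+ * driver (presumably declared in the new phy-rockchip-typec.h header) and
+ * return -EPERM unless the DP PHY has already been powered on; the
+ * expected call order during link training is link rate, then lane count,
+ * then per-lane swing/pre-emphasis.
+ */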
50656+
50657 static inline int property_enable(struct rockchip_typec_phy *tcphy,
50658 				  const struct usb3phy_reg *reg, bool en)
50659 {
50660@@ -719,6 +1235,18 @@ static void tcphy_dp_aux_calibration(struct rockchip_typec_phy *tcphy)
50661 	writel(val, tcphy->base + TX_DIG_CTRL_REG_2);
50662 }
50663 
50664+static int tcphy_cfg_usb3_to_usb2_only(struct rockchip_typec_phy *tcphy,
50665+				       bool value)
50666+{
50667+	const struct rockchip_usb3phy_port_cfg *cfg = tcphy->port_cfgs;
50668+
50669+	property_enable(tcphy, &cfg->usb3tousb2_en, value);
50670+	property_enable(tcphy, &cfg->usb3_host_disable, value);
50671+	property_enable(tcphy, &cfg->usb3_host_port, !value);
50672+
50673+	return 0;
50674+}
50675+
50676 static int tcphy_phy_init(struct rockchip_typec_phy *tcphy, u8 mode)
50677 {
50678 	const struct rockchip_usb3phy_port_cfg *cfg = tcphy->port_cfgs;
50679@@ -743,32 +1271,33 @@ static int tcphy_phy_init(struct rockchip_typec_phy *tcphy, u8 mode)
50680 	tcphy_dp_aux_set_flip(tcphy);
50681 
50682 	tcphy_cfg_24m(tcphy);
50683+	tcphy_set_lane_mapping(tcphy, mode);
50684 
50685 	if (mode == MODE_DFP_DP) {
50686-		tcphy_cfg_dp_pll(tcphy);
50687+		tcphy_cfg_usb3_to_usb2_only(tcphy, true);
50688+		tcphy_cfg_dp_pll(tcphy, DP_DEFAULT_RATE);
50689 		for (i = 0; i < 4; i++)
50690-			tcphy_dp_cfg_lane(tcphy, i);
50691-
50692-		writel(PIN_ASSIGN_C_E, tcphy->base + PMA_LANE_CFG);
50693+			tcphy_dp_cfg_lane(tcphy, DP_DEFAULT_RATE, 0, 0, i);
50694 	} else {
50695 		tcphy_cfg_usb3_pll(tcphy);
50696-		tcphy_cfg_dp_pll(tcphy);
50697+		tcphy_cfg_dp_pll(tcphy, DP_DEFAULT_RATE);
50698 		if (tcphy->flip) {
50699 			tcphy_tx_usb3_cfg_lane(tcphy, 3);
50700 			tcphy_rx_usb3_cfg_lane(tcphy, 2);
50701-			tcphy_dp_cfg_lane(tcphy, 0);
50702-			tcphy_dp_cfg_lane(tcphy, 1);
50703+			tcphy_dp_cfg_lane(tcphy, DP_DEFAULT_RATE, 0, 0, 0);
50704+			tcphy_dp_cfg_lane(tcphy, DP_DEFAULT_RATE, 0, 0, 1);
50705 		} else {
50706 			tcphy_tx_usb3_cfg_lane(tcphy, 0);
50707 			tcphy_rx_usb3_cfg_lane(tcphy, 1);
50708-			tcphy_dp_cfg_lane(tcphy, 2);
50709-			tcphy_dp_cfg_lane(tcphy, 3);
50710+			tcphy_dp_cfg_lane(tcphy, DP_DEFAULT_RATE, 0, 0, 2);
50711+			tcphy_dp_cfg_lane(tcphy, DP_DEFAULT_RATE, 0, 0, 3);
50712 		}
50713-
50714-		writel(PIN_ASSIGN_D_F, tcphy->base + PMA_LANE_CFG);
50715 	}
50716 
50717-	writel(DP_MODE_ENTER_A2, tcphy->base + DP_MODE_CTL);
50718+	val = readl(tcphy->base + PHY_DP_MODE_CTL);
50719+	val &= ~DP_MODE_MASK;
50720+	val |= DP_MODE_ENTER_A2 | DP_LINK_RESET_DEASSERTED;
50721+	writel(val, tcphy->base + PHY_DP_MODE_CTL);
50722 
50723 	reset_control_deassert(tcphy->uphy_rst);
50724 
50725@@ -851,22 +1380,9 @@ static int tcphy_get_mode(struct rockchip_typec_phy *tcphy)
50726 	return mode;
50727 }
50728 
50729-static int tcphy_cfg_usb3_to_usb2_only(struct rockchip_typec_phy *tcphy,
50730-				       bool value)
50731+static int _rockchip_usb3_phy_power_on(struct rockchip_typec_phy *tcphy)
50732 {
50733 	const struct rockchip_usb3phy_port_cfg *cfg = tcphy->port_cfgs;
50734-
50735-	property_enable(tcphy, &cfg->usb3tousb2_en, value);
50736-	property_enable(tcphy, &cfg->usb3_host_disable, value);
50737-	property_enable(tcphy, &cfg->usb3_host_port, !value);
50738-
50739-	return 0;
50740-}
50741-
50742-static int rockchip_usb3_phy_power_on(struct phy *phy)
50743-{
50744-	struct rockchip_typec_phy *tcphy = phy_get_drvdata(phy);
50745-	const struct rockchip_usb3phy_port_cfg *cfg = tcphy->port_cfgs;
50746 	const struct usb3phy_reg *reg = &cfg->pipe_status;
50747 	int timeout, new_mode, ret = 0;
50748 	u32 val;
50749@@ -917,6 +1433,24 @@ static int rockchip_usb3_phy_power_on(struct phy *phy)
50750 	return ret;
50751 }
50752 
50753+static int rockchip_usb3_phy_power_on(struct phy *phy)
50754+{
50755+	struct rockchip_typec_phy *tcphy = phy_get_drvdata(phy);
50756+	int ret;
50757+	int tries;
50758+
50759+	for (tries = 0; tries < POWER_ON_TRIES; tries++) {
50760+		ret = _rockchip_usb3_phy_power_on(tcphy);
50761+		if (!ret)
50762+			break;
50763+	}
50764+
50765+	if (tries && !ret)
50766+		dev_info(tcphy->dev, "Needed %d loops to turn on\n", tries);
50767+
50768+	return ret;
50769+}
50770+
50771 static int rockchip_usb3_phy_power_off(struct phy *phy)
50772 {
50773 	struct rockchip_typec_phy *tcphy = phy_get_drvdata(phy);
50774@@ -980,8 +1514,8 @@ static int rockchip_dp_phy_power_on(struct phy *phy)
50775 
50776 	property_enable(tcphy, &cfg->uphy_dp_sel, 1);
50777 
50778-	ret = readx_poll_timeout(readl, tcphy->base + DP_MODE_CTL,
50779-				 val, val & DP_MODE_A2, 1000,
50780+	ret = readx_poll_timeout(readl, tcphy->base + PHY_DP_MODE_CTL,
50781+				 val, val & DP_MODE_A2_ACK, 1000,
50782 				 PHY_MODE_SET_TIMEOUT);
50783 	if (ret < 0) {
50784 		dev_err(tcphy->dev, "failed to wait TCPHY enter A2\n");
50785@@ -990,14 +1524,10 @@ static int rockchip_dp_phy_power_on(struct phy *phy)
50786 
50787 	tcphy_dp_aux_calibration(tcphy);
50788 
50789-	writel(DP_MODE_ENTER_A0, tcphy->base + DP_MODE_CTL);
50790-
50791-	ret = readx_poll_timeout(readl, tcphy->base + DP_MODE_CTL,
50792-				 val, val & DP_MODE_A0, 1000,
50793-				 PHY_MODE_SET_TIMEOUT);
50794-	if (ret < 0) {
50795-		writel(DP_MODE_ENTER_A2, tcphy->base + DP_MODE_CTL);
50796-		dev_err(tcphy->dev, "failed to wait TCPHY enter A0\n");
50797+	/* enter A0 mode */
50798+	ret = tcphy_dp_set_power_state(tcphy, PHY_DP_POWER_STATE_A0);
50799+	if (ret) {
50800+		dev_err(tcphy->dev, "failed to enter A0 power state\n");
50801 		goto power_on_finish;
50802 	}
50803 
50804@@ -1014,6 +1544,7 @@ static int rockchip_dp_phy_power_on(struct phy *phy)
50805 static int rockchip_dp_phy_power_off(struct phy *phy)
50806 {
50807 	struct rockchip_typec_phy *tcphy = phy_get_drvdata(phy);
50808+	int ret;
50809 
50810 	mutex_lock(&tcphy->lock);
50811 
50812@@ -1022,7 +1553,11 @@ static int rockchip_dp_phy_power_off(struct phy *phy)
50813 
50814 	tcphy->mode &= ~MODE_DFP_DP;
50815 
50816-	writel(DP_MODE_ENTER_A2, tcphy->base + DP_MODE_CTL);
50817+	ret = tcphy_dp_set_power_state(tcphy, PHY_DP_POWER_STATE_A2);
50818+	if (ret) {
50819+		dev_err(tcphy->dev, "failed to enter A2 power state\n");
50820+		goto unlock;
50821+	}
50822 
50823 	if (tcphy->mode == MODE_DISCONNECT)
50824 		tcphy_phy_deinit(tcphy);
50825@@ -1041,6 +1576,8 @@ static const struct phy_ops rockchip_dp_phy_ops = {
50826 static int tcphy_parse_dt(struct rockchip_typec_phy *tcphy,
50827 			  struct device *dev)
50828 {
50829+	int ret;
50830+
50831 	tcphy->grf_regs = syscon_regmap_lookup_by_phandle(dev->of_node,
50832 							  "rockchip,grf");
50833 	if (IS_ERR(tcphy->grf_regs)) {
50834@@ -1078,6 +1615,16 @@ static int tcphy_parse_dt(struct rockchip_typec_phy *tcphy,
50835 		return PTR_ERR(tcphy->tcphy_rst);
50836 	}
50837 
50838+	/*
50839+	 * Check whether a phy config is passed from the DTS; if not,
50840+	 * fall back to the default phy config values.
50841+	 */
50842+	ret = of_property_read_u32_array(dev->of_node, "rockchip,phy-config",
50843+		(u32 *)tcphy->config, sizeof(tcphy->config) / sizeof(u32));
50844+	if (ret)
50845+		memcpy(tcphy->config, tcphy_default_config,
50846+		       sizeof(tcphy->config));
50847+
50848 	return 0;
50849 }
50850 
50851diff --git a/drivers/phy/rockchip/phy-rockchip-usb.c b/drivers/phy/rockchip/phy-rockchip-usb.c
50852index 845428597..a891018fe 100644
50853--- a/drivers/phy/rockchip/phy-rockchip-usb.c
50854+++ b/drivers/phy/rockchip/phy-rockchip-usb.c
50855@@ -8,20 +8,26 @@
50856 
50857 #include <linux/clk.h>
50858 #include <linux/clk-provider.h>
50859+#include <linux/delay.h>
50860+#include <linux/extcon-provider.h>
50861+#include <linux/interrupt.h>
50862 #include <linux/io.h>
50863 #include <linux/kernel.h>
50864+#include <linux/mfd/syscon.h>
50865 #include <linux/module.h>
50866 #include <linux/mutex.h>
50867 #include <linux/of.h>
50868 #include <linux/of_address.h>
50869+#include <linux/of_irq.h>
50870 #include <linux/of_platform.h>
50871 #include <linux/phy/phy.h>
50872 #include <linux/platform_device.h>
50873+#include <linux/power_supply.h>
50874 #include <linux/regulator/consumer.h>
50875 #include <linux/reset.h>
50876 #include <linux/regmap.h>
50877-#include <linux/mfd/syscon.h>
50878-#include <linux/delay.h>
50879+#include <linux/usb/of.h>
50880+#include <linux/wakelock.h>
50881 
50882 static int enable_usb_uart;
50883 
50884@@ -45,6 +51,69 @@ static int enable_usb_uart;
50885 #define UOC_CON3_UTMI_OPMODE_MASK			(3 << 1)
50886 #define UOC_CON3_UTMI_SUSPENDN				BIT(0)
50887 
50888+#define RK3288_UOC0_CON0				0x320
50889+#define RK3288_UOC0_CON0_COMMON_ON_N			BIT(0)
50890+#define RK3288_UOC0_CON0_DISABLE			BIT(4)
50891+
50892+#define RK3288_UOC0_CON2				0x328
50893+#define RK3288_UOC0_CON2_SOFT_CON_SEL			BIT(2)
50894+#define RK3288_UOC0_CON2_CHRGSEL			BIT(5)
50895+#define RK3288_UOC0_CON2_VDATDETENB			BIT(6)
50896+#define RK3288_UOC0_CON2_VDATSRCENB			BIT(7)
50897+#define RK3288_UOC0_CON2_DCDENB				BIT(14)
50898+
50899+#define RK3288_UOC0_CON3				0x32c
50900+#define RK3288_UOC0_CON3_UTMI_SUSPENDN			BIT(0)
50901+#define RK3288_UOC0_CON3_UTMI_OPMODE_NODRIVING		BIT(1)
50902+#define RK3288_UOC0_CON3_UTMI_OPMODE_MASK		(3 << 1)
50903+#define RK3288_UOC0_CON3_UTMI_XCVRSEELCT_FSTRANSC	BIT(3)
50904+#define RK3288_UOC0_CON3_UTMI_XCVRSEELCT_MASK		(3 << 3)
50905+#define RK3288_UOC0_CON3_UTMI_TERMSEL_FULLSPEED		BIT(5)
50906+#define RK3288_UOC0_CON3_BYPASSDMEN			BIT(6)
50907+#define RK3288_UOC0_CON3_BYPASSSEL			BIT(7)
50908+#define RK3288_UOC0_CON3_IDDIG_SET_OTG			(0 << 12)
50909+#define RK3288_UOC0_CON3_IDDIG_SET_HOST			(2 << 12)
50910+#define RK3288_UOC0_CON3_IDDIG_SET_PERIPHERAL		(3 << 12)
50911+#define RK3288_UOC0_CON3_IDDIG_SET_MASK			(3 << 12)
50912+
50913+#define RK3288_UOC0_CON4				0x330
50914+#define RK3288_UOC0_CON4_BVALID_IRQ_EN			BIT(2)
50915+#define RK3288_UOC0_CON4_BVALID_IRQ_PD			BIT(3)
50916+
50917+#define RK3288_SOC_STATUS2				0x288
50918+#define RK3288_SOC_STATUS2_UTMISRP_BVALID		BIT(14)
50919+#define RK3288_SOC_STATUS2_UTMIOTG_IDDIG		BIT(17)
50920+
50921+#define RK3288_SOC_STATUS19				0x2cc
50922+#define RK3288_SOC_STATUS19_CHGDET			BIT(23)
50923+#define RK3288_SOC_STATUS19_FSVPLUS			BIT(24)
50924+#define RK3288_SOC_STATUS19_FSVMINUS			BIT(25)
50925+
50926+#define OTG_SCHEDULE_DELAY				(1 * HZ)
50927+#define CHG_DCD_POLL_TIME				(100 * HZ / 1000)
50928+#define CHG_DCD_MAX_RETRIES				6
50929+#define CHG_PRIMARY_DET_TIME				(40 * HZ / 1000)
50930+#define CHG_SECONDARY_DET_TIME				(40 * HZ / 1000)
50931+
50932+enum usb_chg_state {
50933+	USB_CHG_STATE_UNDEFINED = 0,
50934+	USB_CHG_STATE_WAIT_FOR_DCD,
50935+	USB_CHG_STATE_DCD_DONE,
50936+	USB_CHG_STATE_PRIMARY_DONE,
50937+	USB_CHG_STATE_SECONDARY_DONE,
50938+	USB_CHG_STATE_DETECTED,
50939+};
50940+
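Note: these states follow the USB Battery Charging 1.2 detection sequence that rk3288_chg_detect_work() below steps through. The mapping here is illustrative only and is not part of the patch; it just assumes the enum values as declared above.

static const char * const usb_chg_state_desc[] = {
	[USB_CHG_STATE_UNDEFINED]	= "detection not started",
	[USB_CHG_STATE_WAIT_FOR_DCD]	= "Data Contact Detect (DCD) polling",
	[USB_CHG_STATE_DCD_DONE]	= "primary detection: VDP_SRC on D+, probe D-",
	[USB_CHG_STATE_PRIMARY_DONE]	= "secondary detection: VDM_SRC on D-, probe D+",
	[USB_CHG_STATE_SECONDARY_DONE]	= "secondary detection finished",
	[USB_CHG_STATE_DETECTED]	= "charger type resolved (SDP/CDP/DCP)",
};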
50941+static const unsigned int rockchip_usb_phy_extcon_cable[] = {
50942+	EXTCON_USB,
50943+	EXTCON_USB_HOST,
50944+	EXTCON_USB_VBUS_EN,
50945+	EXTCON_CHG_USB_SDP,
50946+	EXTCON_CHG_USB_CDP,
50947+	EXTCON_CHG_USB_DCP,
50948+	EXTCON_NONE,
50949+};
50950+
50951 struct rockchip_usb_phys {
50952 	int reg;
50953 	const char *pll_name;
50954@@ -61,20 +130,127 @@ struct rockchip_usb_phy_pdata {
50955 struct rockchip_usb_phy_base {
50956 	struct device *dev;
50957 	struct regmap *reg_base;
50958+	struct extcon_dev *edev;
50959 	const struct rockchip_usb_phy_pdata *pdata;
50960 };
50961 
50962 struct rockchip_usb_phy {
50963 	struct rockchip_usb_phy_base *base;
50964-	struct device_node *np;
50965-	unsigned int	reg_offset;
50966-	struct clk	*clk;
50967-	struct clk      *clk480m;
50968-	struct clk_hw	clk480m_hw;
50969-	struct phy	*phy;
50970-	bool		uart_enabled;
50971-	struct reset_control *reset;
50972-	struct regulator *vbus;
50973+	struct device_node	*np;
50974+	unsigned int		reg_offset;
50975+	struct clk		*clk;
50976+	struct clk		*clk480m;
50977+	struct clk_hw		clk480m_hw;
50978+	struct phy		*phy;
50979+	bool			uart_enabled;
50980+	int			bvalid_irq;
50981+	struct reset_control	*reset;
50982+	struct regulator	*vbus;
50983+	struct mutex		mutex; /* protects registers of phy */
50984+	struct delayed_work	chg_work;
50985+	struct delayed_work	otg_sm_work;
50986+	struct wake_lock	wakelock;
50987+	enum usb_chg_state	chg_state;
50988+	enum power_supply_type	chg_type;
50989+	enum usb_dr_mode	mode;
50990+};
50991+
50992+static ssize_t otg_mode_show(struct device *dev,
50993+			     struct device_attribute *attr, char *buf)
50994+{
50995+	struct rockchip_usb_phy *rk_phy = dev_get_drvdata(dev);
50996+
50997+	if (!rk_phy) {
50998+		dev_err(dev, "Failed to get otg phy.\n");
50999+		return -EINVAL;
51000+	}
51001+
51002+	switch (rk_phy->mode) {
51003+	case USB_DR_MODE_HOST:
51004+		return sprintf(buf, "host\n");
51005+	case USB_DR_MODE_PERIPHERAL:
51006+		return sprintf(buf, "peripheral\n");
51007+	case USB_DR_MODE_OTG:
51008+		return sprintf(buf, "otg\n");
51009+	case USB_DR_MODE_UNKNOWN:
51010+		return sprintf(buf, "UNKNOWN\n");
51011+	default:
51012+		break;
51013+	}
51014+
51015+	return -EINVAL;
51016+}
51017+
51018+static ssize_t otg_mode_store(struct device *dev, struct device_attribute *attr,
51019+			      const char *buf, size_t count)
51020+{
51021+	struct rockchip_usb_phy *rk_phy = dev_get_drvdata(dev);
51022+	enum usb_dr_mode new_dr_mode;
51023+	int ret = count;
51024+	int val = 0;
51025+
51026+	if (!rk_phy) {
51027+		dev_err(dev, "Failed to get otg phy.\n");
51028+		return -EINVAL;
51029+	}
51030+
51031+	mutex_lock(&rk_phy->mutex);
51032+
51033+	if (!strncmp(buf, "0", 1) || !strncmp(buf, "otg", 3)) {
51034+		new_dr_mode = USB_DR_MODE_OTG;
51035+	} else if (!strncmp(buf, "1", 1) || !strncmp(buf, "host", 4)) {
51036+		new_dr_mode = USB_DR_MODE_HOST;
51037+	} else if (!strncmp(buf, "2", 1) || !strncmp(buf, "peripheral", 10)) {
51038+		new_dr_mode = USB_DR_MODE_PERIPHERAL;
51039+	} else {
51040+		dev_err(&rk_phy->phy->dev, "Invalid mode! Input 'otg', 'host' or 'peripheral'\n");
51041+		ret = -EINVAL;
51042+		goto out_unlock;
51043+	}
51044+
51045+	if (rk_phy->mode == new_dr_mode) {
51046+		dev_warn(&rk_phy->phy->dev, "Same as current mode.\n");
51047+		goto out_unlock;
51048+	}
51049+
51050+	rk_phy->mode = new_dr_mode;
51051+
51052+	switch (rk_phy->mode) {
51053+	case USB_DR_MODE_HOST:
51054+		val = HIWORD_UPDATE(RK3288_UOC0_CON3_IDDIG_SET_HOST,
51055+				    RK3288_UOC0_CON3_IDDIG_SET_MASK);
51056+		break;
51057+	case USB_DR_MODE_PERIPHERAL:
51058+		val = HIWORD_UPDATE(RK3288_UOC0_CON3_IDDIG_SET_PERIPHERAL,
51059+				    RK3288_UOC0_CON3_IDDIG_SET_MASK);
51060+		break;
51061+	case USB_DR_MODE_OTG:
51062+		val = HIWORD_UPDATE(RK3288_UOC0_CON3_IDDIG_SET_OTG,
51063+				    RK3288_UOC0_CON3_IDDIG_SET_MASK);
51064+		break;
51065+	default:
51066+		break;
51067+	}
51068+
51069+	regmap_write(rk_phy->base->reg_base, RK3288_UOC0_CON3, val);
51070+
51071+out_unlock:
51072+	mutex_unlock(&rk_phy->mutex);
51073+
51074+	return ret;
51075+}
51076+
51077+static DEVICE_ATTR_RW(otg_mode);
51078+
51079+/* Group all the usb2 phy attributes */
51080+static struct attribute *usb2_phy_attrs[] = {
51081+	&dev_attr_otg_mode.attr,
51082+	NULL,
51083+};
51084+
51085+static struct attribute_group usb2_phy_attr_group = {
51086+	.name = NULL, /* we want them in the same directory */
51087+	.attrs = usb2_phy_attrs,
51088 };
51089 
51090 static int rockchip_usb_phy_power(struct rockchip_usb_phy *phy,
51091@@ -136,6 +312,46 @@ static const struct clk_ops rockchip_usb_phy480m_ops = {
51092 	.recalc_rate = rockchip_usb_phy480m_recalc_rate,
51093 };
51094 
51095+static int rk3288_usb_phy_init(struct phy *_phy)
51096+{
51097+	struct rockchip_usb_phy *phy = phy_get_drvdata(_phy);
51098+	int ret = 0;
51099+	unsigned int val;
51100+
51101+	if (phy->bvalid_irq > 0) {
51102+		mutex_lock(&phy->mutex);
51103+
51104+		/* clear bvalid status and enable bvalid detect irq */
51105+		val = HIWORD_UPDATE(RK3288_UOC0_CON4_BVALID_IRQ_EN
51106+					| RK3288_UOC0_CON4_BVALID_IRQ_PD,
51107+				    RK3288_UOC0_CON4_BVALID_IRQ_EN
51108+					| RK3288_UOC0_CON4_BVALID_IRQ_PD);
51109+		ret = regmap_write(phy->base->reg_base, RK3288_UOC0_CON4, val);
51110+		if (ret) {
51111+			dev_err(phy->base->dev,
51112+				"failed to enable bvalid irq\n");
51113+			goto out;
51114+		}
51115+
51116+		schedule_delayed_work(&phy->otg_sm_work, OTG_SCHEDULE_DELAY);
51117+
51118+out:
51119+		mutex_unlock(&phy->mutex);
51120+	}
51121+
51122+	return ret;
51123+}
51124+
51125+static int rk3288_usb_phy_exit(struct phy *_phy)
51126+{
51127+	struct rockchip_usb_phy *phy = phy_get_drvdata(_phy);
51128+
51129+	if (phy->bvalid_irq > 0)
51130+		flush_delayed_work(&phy->otg_sm_work);
51131+
51132+	return 0;
51133+}
51134+
51135 static int rockchip_usb_phy_power_off(struct phy *_phy)
51136 {
51137 	struct rockchip_usb_phy *phy = phy_get_drvdata(_phy);
51138@@ -179,7 +395,7 @@ static int rockchip_usb_phy_reset(struct phy *_phy)
51139 	return 0;
51140 }
51141 
51142-static const struct phy_ops ops = {
51143+static struct phy_ops ops = {
51144 	.power_on	= rockchip_usb_phy_power_on,
51145 	.power_off	= rockchip_usb_phy_power_off,
51146 	.reset		= rockchip_usb_phy_reset,
51147@@ -199,13 +415,383 @@ static void rockchip_usb_phy_action(void *data)
51148 		clk_put(rk_phy->clk);
51149 }
51150 
51151+static int rockchip_usb_phy_extcon_register(struct rockchip_usb_phy_base *base)
51152+{
51153+	int ret;
51154+	struct device_node *node = base->dev->of_node;
51155+	struct extcon_dev *edev;
51156+
51157+	if (of_property_read_bool(node, "extcon")) {
51158+		edev = extcon_get_edev_by_phandle(base->dev, 0);
51159+		if (IS_ERR(edev)) {
51160+			if (PTR_ERR(edev) != -EPROBE_DEFER)
51161+				dev_err(base->dev,
51162+					"Invalid or missing extcon\n");
51163+			return PTR_ERR(edev);
51164+		}
51165+	} else {
51166+		/* Initialize extcon device */
51167+		edev = devm_extcon_dev_allocate(base->dev,
51168+						rockchip_usb_phy_extcon_cable);
51169+
51170+		if (IS_ERR(edev))
51171+			return -ENOMEM;
51172+
51173+		ret = devm_extcon_dev_register(base->dev, edev);
51174+		if (ret) {
51175+			dev_err(base->dev,
51176+				"failed to register extcon device\n");
51177+			return ret;
51178+		}
51179+	}
51180+
51181+	base->edev = edev;
51182+
51183+	return 0;
51184+}
51185+
51186+static void rk3288_usb_phy_otg_sm_work(struct work_struct *work)
51187+{
51188+	struct rockchip_usb_phy *rk_phy = container_of(work,
51189+						       struct rockchip_usb_phy,
51190+						       otg_sm_work.work);
51191+	unsigned int val;
51192+	static unsigned int cable;
51193+	static bool chg_det_completed;
51194+	bool sch_work;
51195+	bool vbus_attached;
51196+	bool id;
51197+
51198+	mutex_lock(&rk_phy->mutex);
51199+
51200+	sch_work = false;
51201+
51202+	regmap_read(rk_phy->base->reg_base, RK3288_SOC_STATUS2, &val);
51203+	id = (val & RK3288_SOC_STATUS2_UTMIOTG_IDDIG) ? true : false;
51204+
51205+	regmap_read(rk_phy->base->reg_base, RK3288_SOC_STATUS2, &val);
51206+	vbus_attached =
51207+		(val & RK3288_SOC_STATUS2_UTMISRP_BVALID) ? true : false;
51208+
51209+	if (!vbus_attached || !id || rk_phy->mode == USB_DR_MODE_HOST) {
51210+		dev_dbg(&rk_phy->phy->dev, "peripheral disconnected\n");
51211+		wake_unlock(&rk_phy->wakelock);
51212+		extcon_set_state_sync(rk_phy->base->edev, cable, false);
51213+		rk_phy->chg_state = USB_CHG_STATE_UNDEFINED;
51214+		chg_det_completed = false;
51215+		goto out;
51216+	}
51217+
51218+	if (chg_det_completed) {
51219+		sch_work = true;
51220+		goto out;
51221+	}
51222+
51223+	switch (rk_phy->chg_state) {
51224+	case USB_CHG_STATE_UNDEFINED:
51225+		mutex_unlock(&rk_phy->mutex);
51226+		schedule_delayed_work(&rk_phy->chg_work, 0);
51227+		return;
51228+	case USB_CHG_STATE_DETECTED:
51229+		switch (rk_phy->chg_type) {
51230+		case POWER_SUPPLY_TYPE_USB:
51231+			dev_dbg(&rk_phy->phy->dev, "sdp cable is connected\n");
51232+			wake_lock(&rk_phy->wakelock);
51233+			cable = EXTCON_CHG_USB_SDP;
51234+			sch_work = true;
51235+			break;
51236+		case POWER_SUPPLY_TYPE_USB_DCP:
51237+			dev_dbg(&rk_phy->phy->dev, "dcp cable is connected\n");
51238+			cable = EXTCON_CHG_USB_DCP;
51239+			sch_work = true;
51240+			break;
51241+		case POWER_SUPPLY_TYPE_USB_CDP:
51242+			dev_dbg(&rk_phy->phy->dev, "cdp cable is connected\n");
51243+			wake_lock(&rk_phy->wakelock);
51244+			cable = EXTCON_CHG_USB_CDP;
51245+			sch_work = true;
51246+			break;
51247+		default:
51248+			break;
51249+		}
51250+		chg_det_completed = true;
51251+		break;
51252+	default:
51253+		break;
51254+	}
51255+
51256+	if (extcon_get_state(rk_phy->base->edev, cable) != vbus_attached)
51257+		extcon_set_state_sync(rk_phy->base->edev, cable,
51258+				      vbus_attached);
51259+
51260+out:
51261+	if (sch_work)
51262+		schedule_delayed_work(&rk_phy->otg_sm_work, OTG_SCHEDULE_DELAY);
51263+
51264+	mutex_unlock(&rk_phy->mutex);
51265+}
51266+
51267+static const char *chg_to_string(enum power_supply_type chg_type)
51268+{
51269+	switch (chg_type) {
51270+	case POWER_SUPPLY_TYPE_USB:
51271+		return "USB_SDP_CHARGER";
51272+	case POWER_SUPPLY_TYPE_USB_DCP:
51273+		return "USB_DCP_CHARGER";
51274+	case POWER_SUPPLY_TYPE_USB_CDP:
51275+		return "USB_CDP_CHARGER";
51276+	default:
51277+		return "INVALID_CHARGER";
51278+	}
51279+}
51280+
51281+static void rk3288_chg_detect_work(struct work_struct *work)
51282+{
51283+	struct rockchip_usb_phy *rk_phy =
51284+		container_of(work, struct rockchip_usb_phy, chg_work.work);
51285+	unsigned int val;
51286+	static int dcd_retries;
51287+	static int primary_retries;
51288+	unsigned long delay;
51289+	bool fsvplus;
51290+	bool vout;
51291+	bool tmout;
51292+
51293+	dev_dbg(&rk_phy->phy->dev, "chg detection work state = %d\n",
51294+		rk_phy->chg_state);
51295+
51296+	switch (rk_phy->chg_state) {
51297+	case USB_CHG_STATE_UNDEFINED:
51298+		mutex_lock(&rk_phy->mutex);
51299+		/* put the controller in non-driving mode */
51300+		val = HIWORD_UPDATE(RK3288_UOC0_CON2_SOFT_CON_SEL,
51301+				    RK3288_UOC0_CON2_SOFT_CON_SEL);
51302+		regmap_write(rk_phy->base->reg_base, RK3288_UOC0_CON2, val);
51303+		val = HIWORD_UPDATE(RK3288_UOC0_CON3_UTMI_OPMODE_NODRIVING,
51304+				    RK3288_UOC0_CON3_UTMI_SUSPENDN
51305+					| RK3288_UOC0_CON3_UTMI_OPMODE_MASK);
51306+		regmap_write(rk_phy->base->reg_base, RK3288_UOC0_CON3, val);
51307+		/* Start DCD processing stage 1 */
51308+		val = HIWORD_UPDATE(RK3288_UOC0_CON2_DCDENB,
51309+				    RK3288_UOC0_CON2_DCDENB);
51310+		regmap_write(rk_phy->base->reg_base, RK3288_UOC0_CON2, val);
51311+		rk_phy->chg_state = USB_CHG_STATE_WAIT_FOR_DCD;
51312+		dcd_retries = 0;
51313+		primary_retries = 0;
51314+		delay = CHG_DCD_POLL_TIME;
51315+		break;
51316+	case USB_CHG_STATE_WAIT_FOR_DCD:
51317+		/* get data contact detection status */
51318+		regmap_read(rk_phy->base->reg_base, RK3288_SOC_STATUS19, &val);
51319+		fsvplus = (val & RK3288_SOC_STATUS19_FSVPLUS) ? true : false;
51320+		tmout = ++dcd_retries == CHG_DCD_MAX_RETRIES;
51321+		/* stage 2 */
51322+		if (!fsvplus || tmout) {
51323+vdpsrc:
51324+			/* stage 4 */
51325+			/* Turn off DCD circuitry */
51326+			val = HIWORD_UPDATE(0, RK3288_UOC0_CON2_DCDENB);
51327+			regmap_write(rk_phy->base->reg_base,
51328+				     RK3288_UOC0_CON2, val);
51329+			/* Voltage Source on DP, Probe on DM */
51330+			val = HIWORD_UPDATE(RK3288_UOC0_CON2_VDATSRCENB
51331+						| RK3288_UOC0_CON2_VDATDETENB,
51332+					    RK3288_UOC0_CON2_VDATSRCENB
51333+						| RK3288_UOC0_CON2_VDATDETENB
51334+						| RK3288_UOC0_CON2_CHRGSEL);
51335+			regmap_write(rk_phy->base->reg_base,
51336+				     RK3288_UOC0_CON2, val);
51337+			delay = CHG_PRIMARY_DET_TIME;
51338+			rk_phy->chg_state = USB_CHG_STATE_DCD_DONE;
51339+		} else {
51340+			/* stage 3 */
51341+			delay = CHG_DCD_POLL_TIME;
51342+		}
51343+		break;
51344+	case USB_CHG_STATE_DCD_DONE:
51345+		regmap_read(rk_phy->base->reg_base, RK3288_SOC_STATUS19, &val);
51346+		vout = (val & RK3288_SOC_STATUS19_CHGDET) ? true : false;
51347+
51348+		val = HIWORD_UPDATE(0, RK3288_UOC0_CON2_VDATSRCENB
51349+					| RK3288_UOC0_CON2_VDATDETENB);
51350+		regmap_write(rk_phy->base->reg_base, RK3288_UOC0_CON2, val);
51351+		if (vout) {
51352+			/* Voltage Source on DM, Probe on DP  */
51353+			val = HIWORD_UPDATE(RK3288_UOC0_CON2_VDATSRCENB
51354+						| RK3288_UOC0_CON2_VDATDETENB
51355+						| RK3288_UOC0_CON2_CHRGSEL,
51356+					    RK3288_UOC0_CON2_VDATSRCENB
51357+						| RK3288_UOC0_CON2_VDATDETENB
51358+						| RK3288_UOC0_CON2_CHRGSEL);
51359+			regmap_write(rk_phy->base->reg_base,
51360+				     RK3288_UOC0_CON2, val);
51361+			delay = CHG_SECONDARY_DET_TIME;
51362+			rk_phy->chg_state = USB_CHG_STATE_PRIMARY_DONE;
51363+		} else {
51364+			if (dcd_retries == CHG_DCD_MAX_RETRIES) {
51365+				/* floating charger found */
51366+				rk_phy->chg_type = POWER_SUPPLY_TYPE_USB_DCP;
51367+				rk_phy->chg_state = USB_CHG_STATE_DETECTED;
51368+				delay = 0;
51369+			} else if (primary_retries < 2) {
51370+				primary_retries++;
51371+				goto vdpsrc;
51372+			} else {
51373+				rk_phy->chg_type = POWER_SUPPLY_TYPE_USB;
51374+				rk_phy->chg_state = USB_CHG_STATE_DETECTED;
51375+				delay = 0;
51376+			}
51377+		}
51378+		break;
51379+	case USB_CHG_STATE_PRIMARY_DONE:
51380+		regmap_read(rk_phy->base->reg_base, RK3288_SOC_STATUS19, &val);
51381+		vout = (val & RK3288_SOC_STATUS19_CHGDET) ? true : false;
51382+
51383+		/* Turn off voltage source */
51384+		val = HIWORD_UPDATE(0, RK3288_UOC0_CON2_VDATSRCENB
51385+					| RK3288_UOC0_CON2_VDATDETENB
51386+					| RK3288_UOC0_CON2_CHRGSEL);
51387+		regmap_write(rk_phy->base->reg_base, RK3288_UOC0_CON2, val);
51388+		if (vout)
51389+			rk_phy->chg_type = POWER_SUPPLY_TYPE_USB_DCP;
51390+		else
51391+			rk_phy->chg_type = POWER_SUPPLY_TYPE_USB_CDP;
51392+		fallthrough;
51393+	case USB_CHG_STATE_SECONDARY_DONE:
51394+		rk_phy->chg_state = USB_CHG_STATE_DETECTED;
51395+		fallthrough;
51396+	case USB_CHG_STATE_DETECTED:
51397+		/* put the controller in normal mode */
51398+		val = HIWORD_UPDATE(0, RK3288_UOC0_CON2_SOFT_CON_SEL);
51399+		regmap_write(rk_phy->base->reg_base, RK3288_UOC0_CON2, val);
51400+		val = HIWORD_UPDATE(RK3288_UOC0_CON3_UTMI_SUSPENDN,
51401+				    RK3288_UOC0_CON3_UTMI_SUSPENDN
51402+					| RK3288_UOC0_CON3_UTMI_OPMODE_MASK);
51403+		regmap_write(rk_phy->base->reg_base, RK3288_UOC0_CON3, val);
51404+		mutex_unlock(&rk_phy->mutex);
51405+		rk3288_usb_phy_otg_sm_work(&rk_phy->otg_sm_work.work);
51406+		dev_info(&rk_phy->phy->dev, "charger = %s\n",
51407+			 chg_to_string(rk_phy->chg_type));
51408+		return;
51409+	default:
51410+		mutex_unlock(&rk_phy->mutex);
51411+		return;
51412+	}
51413+
51414+	/*
51415+	 * Hold the mutex lock during the whole charger
51416+	 * detection stage, and release it after detecting
51417+	 * the charger type.
51418+	 */
51419+	schedule_delayed_work(&rk_phy->chg_work, delay);
51420+}
51421+
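Note: the locking in rk3288_chg_detect_work() above is deliberately asymmetric, as its trailing comment explains: the mutex is taken once when detection starts and is held across the self-rescheduled work items, being released only from the terminal or default states. A condensed sketch of that pattern, with purely illustrative names:

#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>

struct chg_sm_sketch {
	struct mutex		lock;
	struct delayed_work	work;
	int			state;
};

static void chg_sm_work_sketch(struct work_struct *work)
{
	struct chg_sm_sketch *sm =
		container_of(work, struct chg_sm_sketch, work.work);

	switch (sm->state) {
	case 0:				/* detection starts */
		mutex_lock(&sm->lock);	/* taken once, kept across reschedules */
		sm->state = 1;
		break;
	case 1:				/* intermediate polling state(s) */
		sm->state = 2;
		break;
	default:			/* charger type resolved */
		mutex_unlock(&sm->lock);
		return;
	}

	/* still holding the mutex while waiting for the next poll */
	schedule_delayed_work(&sm->work, msecs_to_jiffies(100));
}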
51422+static irqreturn_t rk3288_usb_phy_bvalid_irq(int irq, void *data)
51423+{
51424+	struct rockchip_usb_phy *rk_phy = data;
51425+	int ret;
51426+	unsigned int val;
51427+
51428+	ret = regmap_read(rk_phy->base->reg_base, RK3288_UOC0_CON4, &val);
51429+	if (ret < 0 || !(val & RK3288_UOC0_CON4_BVALID_IRQ_PD))
51430+		return IRQ_NONE;
51431+
51432+	mutex_lock(&rk_phy->mutex);
51433+
51434+	/* clear bvalid detect irq pending status */
51435+	val = HIWORD_UPDATE(RK3288_UOC0_CON4_BVALID_IRQ_PD,
51436+			    RK3288_UOC0_CON4_BVALID_IRQ_PD);
51437+	regmap_write(rk_phy->base->reg_base, RK3288_UOC0_CON4, val);
51438+
51439+	mutex_unlock(&rk_phy->mutex);
51440+
51441+	if (rk_phy->uart_enabled)
51442+		goto out;
51443+
51444+	cancel_delayed_work_sync(&rk_phy->otg_sm_work);
51445+	rk3288_usb_phy_otg_sm_work(&rk_phy->otg_sm_work.work);
51446+out:
51447+	return IRQ_HANDLED;
51448+}
51449+
51450+static int rk3288_usb_phy_probe_init(struct rockchip_usb_phy *rk_phy)
51451+{
51452+	int ret = 0;
51453+	unsigned int val;
51454+
51455+	if (rk_phy->reg_offset == 0x320) {
51456+		/* Enable Bvalid interrupt and charge detection */
51457+		ops.init = rk3288_usb_phy_init;
51458+		ops.exit = rk3288_usb_phy_exit;
51459+		rk_phy->bvalid_irq = of_irq_get_byname(rk_phy->np,
51460+						       "otg-bvalid");
51461+		regmap_read(rk_phy->base->reg_base, RK3288_UOC0_CON4, &val);
51462+		if (rk_phy->bvalid_irq <= 0) {
51463+			dev_err(&rk_phy->phy->dev,
51464+				"no vbus valid irq provided\n");
51465+			ret = -EINVAL;
51466+			goto out;
51467+		}
51468+
51469+		ret = devm_request_threaded_irq(rk_phy->base->dev,
51470+						rk_phy->bvalid_irq,
51471+						NULL,
51472+						rk3288_usb_phy_bvalid_irq,
51473+						IRQF_ONESHOT,
51474+						"rockchip_usb_phy_bvalid",
51475+						rk_phy);
51476+		if (ret) {
51477+			dev_err(&rk_phy->phy->dev,
51478+				"failed to request otg-bvalid irq\n");
51479+			goto out;
51480+		}
51481+
51482+		rk_phy->chg_state = USB_CHG_STATE_UNDEFINED;
51483+		wake_lock_init(&rk_phy->wakelock, WAKE_LOCK_SUSPEND,
51484+			       "rockchip_otg");
51485+		INIT_DELAYED_WORK(&rk_phy->chg_work, rk3288_chg_detect_work);
51486+		INIT_DELAYED_WORK(&rk_phy->otg_sm_work,
51487+				  rk3288_usb_phy_otg_sm_work);
51488+
51489+		rk_phy->mode = of_usb_get_dr_mode_by_phy(rk_phy->np, -1);
51490+		if (rk_phy->mode == USB_DR_MODE_OTG ||
51491+		    rk_phy->mode == USB_DR_MODE_UNKNOWN) {
51492+			ret = sysfs_create_group(&rk_phy->phy->dev.kobj,
51493+						 &usb2_phy_attr_group);
51494+			if (ret) {
51495+				dev_err(&rk_phy->phy->dev,
51496+					"Cannot create sysfs group\n");
51497+				goto out;
51498+			}
51499+		}
51500+	} else if (rk_phy->reg_offset == 0x334) {
51501+		/*
51502+		 * Set COMMONONN to 1'b0 for the EHCI PHY on the RK3288 SoC.
51503+		 *
51504+		 * EHCI (auto) suspend puts the corresponding usb-phy into
51505+		 * suspend mode, which would power down the inner PLL blocks
51506+		 * of the usb-phy if COMMONONN were set to 1'b1. The PLL
51507+		 * output clocks include CLK480M, CLK12MOHCI, CLK48MOHCI,
51508+		 * PHYCLOCK0 and so on; these clocks are supplied not only to
51509+		 * EHCI and OHCI but also to the GPU and other external
51510+		 * modules, so set COMMONONN to 1'b0 to keep the inner PLL
51511+		 * blocks in the usb-phy always powered.
51512+		 */
51513+		regmap_write(rk_phy->base->reg_base, rk_phy->reg_offset,
51514+			     BIT(16));
51515+	}
51516+out:
51517+	return ret;
51518+}
51519+
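Note: the GRF accesses above (the HIWORD_UPDATE() writes and the bare BIT(16) for the 0x334 register) rely on the Rockchip "hiword mask" convention: bits [31:16] of a register act as per-bit write enables for bits [15:0], so selected bits can be changed with a single 32-bit write and no read-modify-write. The macro below shows a common form of that helper for illustration only; the driver's own definition may differ slightly.

/* Illustrative only; see the driver header for the authoritative macro. */
#define HIWORD_UPDATE_SKETCH(val, mask)	(((val) & (mask)) | ((mask) << 16))

/*
 * Example: assuming COMMONONN is bit 0 of the 0x334 register (as it is
 * for RK3288_UOC0_CON0 above), clearing it is a write of value 0 with
 * mask BIT(0): HIWORD_UPDATE_SKETCH(0, BIT(0)) == 0x00010000 == BIT(16),
 * which is exactly the literal used in rk3288_usb_phy_probe_init().
 */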
51520 static int rockchip_usb_phy_init(struct rockchip_usb_phy_base *base,
51521 				 struct device_node *child)
51522 {
51523+	struct device_node *np = base->dev->of_node;
51524 	struct rockchip_usb_phy *rk_phy;
51525 	unsigned int reg_offset;
51526 	const char *clk_name;
51527-	struct clk_init_data init;
51528+	struct clk_init_data init = {};
51529 	int err, i;
51530 
51531 	rk_phy = devm_kzalloc(base->dev, sizeof(*rk_phy), GFP_KERNEL);
51532@@ -214,6 +800,7 @@ static int rockchip_usb_phy_init(struct rockchip_usb_phy_base *base,
51533 
51534 	rk_phy->base = base;
51535 	rk_phy->np = child;
51536+	mutex_init(&rk_phy->mutex);
51537 
51538 	if (of_property_read_u32(child, "reg", &reg_offset)) {
51539 		dev_err(base->dev, "missing reg property in node %pOFn\n",
51540@@ -288,6 +875,12 @@ static int rockchip_usb_phy_init(struct rockchip_usb_phy_base *base,
51541 	}
51542 	phy_set_drvdata(rk_phy->phy, rk_phy);
51543 
51544+	if (of_device_is_compatible(np, "rockchip,rk3288-usb-phy")) {
51545+		err = rk3288_usb_phy_probe_init(rk_phy);
51546+		if (err)
51547+			return err;
51548+	}
51549+
51550 	rk_phy->vbus = devm_regulator_get_optional(&rk_phy->phy->dev, "vbus");
51551 	if (IS_ERR(rk_phy->vbus)) {
51552 		if (PTR_ERR(rk_phy->vbus) == -EPROBE_DEFER)
51553@@ -402,10 +995,6 @@ static const struct rockchip_usb_phy_pdata rk3188_pdata = {
51554 	.usb_uart_phy = 0,
51555 };
51556 
51557-#define RK3288_UOC0_CON3				0x32c
51558-#define RK3288_UOC0_CON3_BYPASSDMEN			BIT(6)
51559-#define RK3288_UOC0_CON3_BYPASSSEL			BIT(7)
51560-
51561 /*
51562  * Enable the bypass of uart2 data through the otg usb phy.
51563  * Original description in the TRM.
51564@@ -487,6 +1076,10 @@ static int rockchip_usb_phy_probe(struct platform_device *pdev)
51565 		return PTR_ERR(phy_base->reg_base);
51566 	}
51567 
51568+	err = rockchip_usb_phy_extcon_register(phy_base);
51569+	if (err)
51570+		return err;
51571+
51572 	for_each_available_child_of_node(dev->of_node, child) {
51573 		err = rockchip_usb_phy_init(phy_base, child);
51574 		if (err) {
51575@@ -496,6 +1089,7 @@ static int rockchip_usb_phy_probe(struct platform_device *pdev)
51576 	}
51577 
51578 	phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
51579+
51580 	return PTR_ERR_OR_ZERO(phy_provider);
51581 }
51582 
51583diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
51584index 815095326..6782f1ae2 100644
51585--- a/drivers/pinctrl/Kconfig
51586+++ b/drivers/pinctrl/Kconfig
51587@@ -207,13 +207,18 @@ config PINCTRL_OXNAS
51588 	select MFD_SYSCON
51589 
51590 config PINCTRL_ROCKCHIP
51591-	bool
51592-	depends on OF
51593+	tristate "Rockchip gpio and pinctrl driver"
51594+	depends on ARCH_ROCKCHIP || COMPILE_TEST
51595+	depends on OF || ACPI
51596+	select GPIOLIB
51597 	select PINMUX
51598 	select GENERIC_PINCONF
51599 	select GENERIC_IRQ_CHIP
51600 	select MFD_SYSCON
51601 	select OF_GPIO
51602+	default ARCH_ROCKCHIP
51603+	help
51604+	  This supports the pinctrl and GPIO driver for Rockchip SoCs.
51605 
51606 config PINCTRL_SINGLE
51607 	tristate "One-register-per-pin type device tree based pinctrl driver"
51608diff --git a/drivers/pinctrl/pinctrl-rk805.c b/drivers/pinctrl/pinctrl-rk805.c
51609index c6f4229eb..46e426cf4 100644
51610--- a/drivers/pinctrl/pinctrl-rk805.c
51611+++ b/drivers/pinctrl/pinctrl-rk805.c
51612@@ -78,6 +78,7 @@ struct rk805_pctrl_info {
51613 
51614 enum rk805_pinmux_option {
51615 	RK805_PINMUX_GPIO,
51616+	RK805_PINMUX_TS,
51617 };
51618 
51619 enum {
51620@@ -132,12 +133,167 @@ static const struct rk805_pin_config rk805_gpio_cfgs[] = {
51621 	},
51622 };
51623 
51624+#define RK816_FUN_MASK		BIT(2)
51625+#define RK816_VAL_MASK		BIT(3)
51626+#define RK816_DIR_MASK		BIT(4)
51627+
51628+enum {
51629+	RK816_GPIO0,
51630+};
51631+
51632+/* RK816: gpio/ts */
51633+static const char *const rk816_gpio_groups[] = {
51634+	"gpio0",
51635+};
51636+
51637+static const struct pinctrl_pin_desc rk816_pins_desc[] = {
51638+	PINCTRL_PIN(RK816_GPIO0, "gpio0"),
51639+};
51640+
51641+static const struct rk805_pin_function rk816_pin_functions[] = {
51642+	{
51643+		.name = "gpio",
51644+		.groups = rk816_gpio_groups,
51645+		.ngroups = ARRAY_SIZE(rk816_gpio_groups),
51646+		.mux_option = RK805_PINMUX_GPIO,
51647+	},
51648+	{
51649+		.name = "ts",
51650+		.groups = rk816_gpio_groups,
51651+		.ngroups = ARRAY_SIZE(rk816_gpio_groups),
51652+		.mux_option = RK805_PINMUX_TS,
51653+	},
51654+};
51655+
51656+static const struct rk805_pin_group rk816_pin_groups[] = {
51657+	{
51658+		.name = "gpio0",
51659+		.pins = { RK816_GPIO0 },
51660+		.npins = 1,
51661+	},
51662+};
51663+
51664+static struct rk805_pin_config rk816_gpio_cfgs[] = {
51665+	{
51666+		.reg = RK816_GPIO_IO_POL_REG,
51667+		.val_msk = RK816_VAL_MASK,
51668+		.fun_msk = RK816_FUN_MASK,
51669+		.dir_msk = RK816_DIR_MASK,
51670+	},
51671+};
51672+
51673+enum rk817_pinmux_option {
51674+	RK817_PINMUX_FUN0 = 0,
51675+	RK817_PINMUX_FUN1,
51676+	RK817_PINMUX_FUN2,
51677+	RK817_PINMUX_FUN3
51678+};
51679+
51680+enum {
51681+	RK817_GPIO_SLP,
51682+	RK817_GPIO_TS,
51683+	RK817_GPIO_GT
51684+};
51685+
51686+/* RK809 has only the sleep pin */
51687+static const char *const rk817_gpio_groups[] = {
51688+	"gpio_slp",
51689+	"gpio_ts",
51690+	"gpio_gt",
51691+};
51692+
51693+static const struct pinctrl_pin_desc rk817_pins_desc[] = {
51694+	PINCTRL_PIN(RK817_GPIO_SLP, "gpio_slp"), /* sleep pin */
51695+	PINCTRL_PIN(RK817_GPIO_TS, "gpio_ts"), /* ts pin */
51696+	PINCTRL_PIN(RK817_GPIO_GT, "gpio_gt")/* gate pin */
51697+};
51698+
51699+static const struct rk805_pin_function rk817_pin_functions[] = {
51700+	{
51701+		.name = "pin_fun0",
51702+		.groups = rk817_gpio_groups,
51703+		.ngroups = ARRAY_SIZE(rk817_gpio_groups),
51704+		.mux_option = RK817_PINMUX_FUN0,
51705+	},
51706+	{
51707+		.name = "pin_fun1",
51708+		.groups = rk817_gpio_groups,
51709+		.ngroups = ARRAY_SIZE(rk817_gpio_groups),
51710+		.mux_option = RK817_PINMUX_FUN1,
51711+	},
51712+	{
51713+		.name = "pin_fun2",
51714+		.groups = rk817_gpio_groups,
51715+		.ngroups = ARRAY_SIZE(rk817_gpio_groups),
51716+		.mux_option = RK817_PINMUX_FUN2,
51717+	},
51718+	{
51719+		.name = "pin_fun3",
51720+		.groups = rk817_gpio_groups,
51721+		.ngroups = ARRAY_SIZE(rk817_gpio_groups),
51722+		.mux_option = RK817_PINMUX_FUN3,
51723+	},
51724+};
51725+
51726+/* RK809 has only the sleep pin */
51727+static const struct rk805_pin_group rk817_pin_groups[] = {
51728+	{
51729+		.name = "gpio_slp",
51730+		.pins = { RK817_GPIO_SLP },
51731+		.npins = 1,
51732+	},
51733+	{
51734+		.name = "gpio_ts",
51735+		.pins = { RK817_GPIO_TS },
51736+		.npins = 1,
51737+	},
51738+	{
51739+		.name = "gpio_gt",
51740+		.pins = { RK817_GPIO_GT },
51741+		.npins = 1,
51742+	}
51743+};
51744+
51745+#define RK817_GPIOTS_VAL_MSK	BIT(3)
51746+#define RK817_GPIOGT_VAL_MSK	BIT(6)
51747+#define RK817_GPIOTS_FUNC_MSK	BIT(2)
51748+#define RK817_GPIOGT_FUNC_MSK	BIT(5)
51749+#define RK817_GPIOTS_DIR_MSK	BIT(4)
51750+#define RK817_GPIOGT_DIR_MSK	BIT(7)
51751+
51752+static struct rk805_pin_config rk817_gpio_cfgs[] = {
51753+	{
51754+		.reg = RK817_SYS_CFG(3),
51755+		.val_msk = 0,
51756+		.fun_msk = RK817_SLPPIN_FUNC_MSK,
51757+		.dir_msk = 0
51758+	},
51759+	{
51760+		.reg = RK817_GPIO_INT_CFG,
51761+		.val_msk = RK817_GPIOTS_VAL_MSK,
51762+		.fun_msk = RK817_GPIOTS_FUNC_MSK,
51763+		.dir_msk = RK817_GPIOTS_DIR_MSK
51764+	},
51765+	{
51766+		.reg = RK817_GPIO_INT_CFG,
51767+		.val_msk = RK817_GPIOGT_VAL_MSK,
51768+		.fun_msk = RK817_GPIOGT_FUNC_MSK,
51769+		.dir_msk = RK817_GPIOGT_DIR_MSK
51770+	}
51771+};
51772+
51773 /* generic gpio chip */
51774 static int rk805_gpio_get(struct gpio_chip *chip, unsigned int offset)
51775 {
51776 	struct rk805_pctrl_info *pci = gpiochip_get_data(chip);
51777 	int ret, val;
51778 
51779+	if (!pci->pin_cfg[offset].val_msk) {
51780+		dev_dbg(pci->dev, "getting gpio%d value is not supported\n",
51781+			offset);
51782+		return -1;
51783+	}
51784+
51785 	ret = regmap_read(pci->rk808->regmap, pci->pin_cfg[offset].reg, &val);
51786 	if (ret) {
51787 		dev_err(pci->dev, "get gpio%d value failed\n", offset);
51788@@ -154,6 +310,9 @@ static void rk805_gpio_set(struct gpio_chip *chip,
51789 	struct rk805_pctrl_info *pci = gpiochip_get_data(chip);
51790 	int ret;
51791 
51792+	if (!pci->pin_cfg[offset].val_msk)
51793+		return;
51794+
51795 	ret = regmap_update_bits(pci->rk808->regmap,
51796 				 pci->pin_cfg[offset].reg,
51797 				 pci->pin_cfg[offset].val_msk,
51798@@ -214,6 +373,34 @@ static const struct gpio_chip rk805_gpio_chip = {
51799 	.owner			= THIS_MODULE,
51800 };
51801 
51802+static struct gpio_chip rk816_gpio_chip = {
51803+	.label			= "rk816-gpio",
51804+	.request		= gpiochip_generic_request,
51805+	.free			= gpiochip_generic_free,
51806+	.get_direction		= rk805_gpio_get_direction,
51807+	.get			= rk805_gpio_get,
51808+	.set			= rk805_gpio_set,
51809+	.direction_input	= rk805_gpio_direction_input,
51810+	.direction_output	= rk805_gpio_direction_output,
51811+	.can_sleep		= true,
51812+	.base			= -1,
51813+	.owner			= THIS_MODULE,
51814+};
51815+
51816+static struct gpio_chip rk817_gpio_chip = {
51817+	.label			= "rk817-gpio",
51818+	.request		= gpiochip_generic_request,
51819+	.free			= gpiochip_generic_free,
51820+	.get_direction		= rk805_gpio_get_direction,
51821+	.get			= rk805_gpio_get,
51822+	.set			= rk805_gpio_set,
51823+	.direction_input	= rk805_gpio_direction_input,
51824+	.direction_output	= rk805_gpio_direction_output,
51825+	.can_sleep		= true,
51826+	.base			= -1,
51827+	.owner			= THIS_MODULE,
51828+};
51829+
51830 /* generic pinctrl */
51831 static int rk805_pinctrl_get_groups_count(struct pinctrl_dev *pctldev)
51832 {
51833@@ -289,7 +476,7 @@ static int _rk805_pinctrl_set_mux(struct pinctrl_dev *pctldev,
51834 	if (!pci->pin_cfg[offset].fun_msk)
51835 		return 0;
51836 
51837 	if (mux == RK805_PINMUX_GPIO) {
51839 		ret = regmap_update_bits(pci->rk808->regmap,
51840 					 pci->pin_cfg[offset].reg,
51841 					 pci->pin_cfg[offset].fun_msk,
51842@@ -298,6 +485,15 @@ static int _rk805_pinctrl_set_mux(struct pinctrl_dev *pctldev,
51843 			dev_err(pci->dev, "set gpio%d GPIO failed\n", offset);
51844 			return ret;
51845 		}
51846+	} else if (mux == RK805_PINMUX_TS) {
51847+		ret = regmap_update_bits(pci->rk808->regmap,
51848+					 pci->pin_cfg[offset].reg,
51849+					 pci->pin_cfg[offset].fun_msk,
51850+					 0);
51851+		if (ret) {
51852+			dev_err(pci->dev, "set gpio%d TS failed\n", offset);
51853+			return ret;
51854+		}
51855 	} else {
51856 		dev_err(pci->dev, "Couldn't find function mux %d\n", mux);
51857 		return -EINVAL;
51858@@ -306,6 +502,27 @@ static int _rk805_pinctrl_set_mux(struct pinctrl_dev *pctldev,
51859 	return 0;
51860 }
51861 
51862+static int _rk817_pinctrl_set_mux(struct pinctrl_dev *pctldev,
51863+				  unsigned int offset,
51864+				  int mux)
51865+{
51866+	struct rk805_pctrl_info *pci = pinctrl_dev_get_drvdata(pctldev);
51867+	int ret;
51868+
51869+	if (!pci->pin_cfg[offset].fun_msk)
51870+		return 0;
51871+
51872+	mux <<= ffs(pci->pin_cfg[offset].fun_msk) - 1;
51873+	ret = regmap_update_bits(pci->rk808->regmap,
51874+				 pci->pin_cfg[offset].reg,
51875+				 pci->pin_cfg[offset].fun_msk, mux);
51876+
51877+	if (ret)
51878+		dev_err(pci->dev, "set gpio%d func%d failed\n", offset, mux);
51879+
51880+	return ret;
51881+}
51882+
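Note: the mux <<= ffs(...) - 1 line in _rk817_pinctrl_set_mux() above positions the function-select value at the lowest set bit of the pin's fun_msk before regmap_update_bits() masks it in. A worked example with the masks defined earlier in this patch (the helper itself is illustrative):

#include <linux/bitops.h>

/* Place a function-select value at the lowest set bit of fun_msk. */
static inline unsigned int rk817_mux_to_field_sketch(unsigned int mux,
						     unsigned int fun_msk)
{
	return mux << (ffs(fun_msk) - 1);
}

/*
 * For the TS pin, fun_msk = RK817_GPIOTS_FUNC_MSK = BIT(2), so
 * ffs(BIT(2)) - 1 = 2 and mux = RK817_PINMUX_FUN1 (1) becomes
 * 1 << 2 = 0x4 in RK817_GPIO_INT_CFG.
 */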
51883 static int rk805_pinctrl_set_mux(struct pinctrl_dev *pctldev,
51884 				 unsigned int function,
51885 				 unsigned int group)
51886@@ -314,7 +531,18 @@ static int rk805_pinctrl_set_mux(struct pinctrl_dev *pctldev,
51887 	int mux = pci->functions[function].mux_option;
51888 	int offset = group;
51889 
51890-	return _rk805_pinctrl_set_mux(pctldev, offset, mux);
51891+	switch (pci->rk808->variant) {
51892+	case RK805_ID:
51893+	case RK816_ID:
51894+		return _rk805_pinctrl_set_mux(pctldev, offset, mux);
51895+
51896+	case RK809_ID:
51897+	case RK817_ID:
51898+		return _rk817_pinctrl_set_mux(pctldev, offset, mux);
51899+	default:
51900+		dev_err(pci->dev, "Couldn't find the variant id\n");
51901+		return -EINVAL;
51902+	}
51903 }
51904 
51905 static int rk805_pmx_gpio_set_direction(struct pinctrl_dev *pctldev,
51906@@ -324,13 +552,6 @@ static int rk805_pmx_gpio_set_direction(struct pinctrl_dev *pctldev,
51907 	struct rk805_pctrl_info *pci = pinctrl_dev_get_drvdata(pctldev);
51908 	int ret;
51909 
51910-	/* switch to gpio function */
51911-	ret = _rk805_pinctrl_set_mux(pctldev, offset, RK805_PINMUX_GPIO);
51912-	if (ret) {
51913-		dev_err(pci->dev, "set gpio%d mux failed\n", offset);
51914-		return ret;
51915-	}
51916-
51917 	/* set direction */
51918 	if (!pci->pin_cfg[offset].dir_msk)
51919 		return 0;
51920@@ -347,7 +568,25 @@ static int rk805_pmx_gpio_set_direction(struct pinctrl_dev *pctldev,
51921 	return ret;
51922 }
51923 
51924+static int rk805_pinctrl_gpio_request_enable(struct pinctrl_dev *pctldev,
51925+					     struct pinctrl_gpio_range *range,
51926+					     unsigned int offset)
51927+{
51928+	struct rk805_pctrl_info *pci = pinctrl_dev_get_drvdata(pctldev);
51929+
51930+	/* switch to gpio function */
51931+	switch (pci->rk808->variant) {
51932+	case RK805_ID:
51933+	case RK816_ID:
51934+		return _rk805_pinctrl_set_mux(pctldev, offset,
51935+					      RK805_PINMUX_GPIO);
51936+	default:
51937+		return 0;
51938+	}
51939+}
51940+
51941 static const struct pinmux_ops rk805_pinmux_ops = {
51942+	.gpio_request_enable	= rk805_pinctrl_gpio_request_enable,
51943 	.get_functions_count	= rk805_pinctrl_get_funcs_count,
51944 	.get_function_name	= rk805_pinctrl_get_func_name,
51945 	.get_function_groups	= rk805_pinctrl_get_func_groups,
51946@@ -364,6 +603,7 @@ static int rk805_pinconf_get(struct pinctrl_dev *pctldev,
51947 
51948 	switch (param) {
51949 	case PIN_CONFIG_OUTPUT:
51950+	case PIN_CONFIG_INPUT_ENABLE:
51951 		arg = rk805_gpio_get(&pci->gpio_chip, pin);
51952 		break;
51953 	default:
51954@@ -390,8 +630,13 @@ static int rk805_pinconf_set(struct pinctrl_dev *pctldev,
51955 
51956 		switch (param) {
51957 		case PIN_CONFIG_OUTPUT:
51958-			rk805_gpio_set(&pci->gpio_chip, pin, arg);
51959 			rk805_pmx_gpio_set_direction(pctldev, NULL, pin, false);
51960+			rk805_gpio_set(&pci->gpio_chip, pin, arg);
51961+			break;
51962+		case PIN_CONFIG_INPUT_ENABLE:
51963+			if (arg)
51964+				rk805_pmx_gpio_set_direction(pctldev, NULL,
51965+							     pin, true);
51966 			break;
51967 		default:
51968 			dev_err(pci->dev, "Properties not supported\n");
51969@@ -415,9 +660,26 @@ static const struct pinctrl_desc rk805_pinctrl_desc = {
51970 	.owner = THIS_MODULE,
51971 };
51972 
51973+static struct pinctrl_desc rk816_pinctrl_desc = {
51974+	.name = "rk816-pinctrl",
51975+	.pctlops = &rk805_pinctrl_ops,
51976+	.pmxops = &rk805_pinmux_ops,
51977+	.confops = &rk805_pinconf_ops,
51978+	.owner = THIS_MODULE,
51979+};
51980+
51981+static struct pinctrl_desc rk817_pinctrl_desc = {
51982+	.name = "rk817-pinctrl",
51983+	.pctlops = &rk805_pinctrl_ops,
51984+	.pmxops = &rk805_pinmux_ops,
51985+	.confops = &rk805_pinconf_ops,
51986+	.owner = THIS_MODULE,
51987+};
51988+
51989 static int rk805_pinctrl_probe(struct platform_device *pdev)
51990 {
51991 	struct rk805_pctrl_info *pci;
51992+	struct device_node *np;
51993 	int ret;
51994 
51995 	pci = devm_kzalloc(&pdev->dev, sizeof(*pci), GFP_KERNEL);
51996@@ -425,18 +687,19 @@ static int rk805_pinctrl_probe(struct platform_device *pdev)
51997 		return -ENOMEM;
51998 
51999 	pci->dev = &pdev->dev;
52000-	pci->dev->of_node = pdev->dev.parent->of_node;
52001+	np = of_get_child_by_name(pdev->dev.parent->of_node, "pinctrl_rk8xx");
52002+	if (np)
52003+		pci->dev->of_node = np;
52004+	else
52005+		pci->dev->of_node = pdev->dev.parent->of_node;
52006 	pci->rk808 = dev_get_drvdata(pdev->dev.parent);
52007 
52008-	pci->pinctrl_desc = rk805_pinctrl_desc;
52009-	pci->gpio_chip = rk805_gpio_chip;
52010-	pci->gpio_chip.parent = &pdev->dev;
52011-	pci->gpio_chip.of_node = pdev->dev.parent->of_node;
52012-
52013 	platform_set_drvdata(pdev, pci);
52014 
52015 	switch (pci->rk808->variant) {
52016 	case RK805_ID:
52017+		pci->pinctrl_desc = rk805_pinctrl_desc;
52018+		pci->gpio_chip = rk805_gpio_chip;
52019 		pci->pins = rk805_pins_desc;
52020 		pci->num_pins = ARRAY_SIZE(rk805_pins_desc);
52021 		pci->functions = rk805_pin_functions;
52022@@ -448,13 +711,59 @@ static int rk805_pinctrl_probe(struct platform_device *pdev)
52023 		pci->pin_cfg = rk805_gpio_cfgs;
52024 		pci->gpio_chip.ngpio = ARRAY_SIZE(rk805_gpio_cfgs);
52025 		break;
52026+
52027+	case RK816_ID:
52028+		pci->pinctrl_desc = rk816_pinctrl_desc;
52029+		pci->gpio_chip = rk816_gpio_chip;
52030+		pci->pins = rk816_pins_desc;
52031+		pci->num_pins = ARRAY_SIZE(rk816_pins_desc);
52032+		pci->functions = rk816_pin_functions;
52033+		pci->num_functions = ARRAY_SIZE(rk816_pin_functions);
52034+		pci->groups = rk816_pin_groups;
52035+		pci->num_pin_groups = ARRAY_SIZE(rk816_pin_groups);
52036+		pci->pinctrl_desc.pins = rk816_pins_desc;
52037+		pci->pinctrl_desc.npins = ARRAY_SIZE(rk816_pins_desc);
52038+		pci->pin_cfg = rk816_gpio_cfgs;
52039+		pci->gpio_chip.ngpio = ARRAY_SIZE(rk816_gpio_cfgs);
52040+		break;
52041+
52042+	case RK809_ID:
52043+	case RK817_ID:
52044+		pci->pinctrl_desc = rk817_pinctrl_desc;
52045+		pci->gpio_chip = rk817_gpio_chip;
52046+		pci->pins = rk817_pins_desc;
52047+		pci->num_pins = ARRAY_SIZE(rk817_pins_desc);
52048+		pci->functions = rk817_pin_functions;
52049+		pci->num_functions = ARRAY_SIZE(rk817_pin_functions);
52050+		pci->groups = rk817_pin_groups;
52051+		pci->num_pin_groups = ARRAY_SIZE(rk817_pin_groups);
52052+		pci->pinctrl_desc.pins = rk817_pins_desc;
52053+		pci->pinctrl_desc.npins = ARRAY_SIZE(rk817_pins_desc);
52054+		pci->pin_cfg = rk817_gpio_cfgs;
52055+		pci->gpio_chip.ngpio = ARRAY_SIZE(rk817_gpio_cfgs);
52056+		/* RK809 has only the sleep pin */
52057+		if (pci->rk808->variant == RK809_ID) {
52058+			pci->pinctrl_desc.npins = 1;
52059+			pci->num_pin_groups = 1;
52060+			pci->num_pins = 1;
52061+			pci->gpio_chip.ngpio = 1;
52062+		}
52063+		break;
52064+
52065 	default:
52066 		dev_err(&pdev->dev, "unsupported RK805 ID %lu\n",
52067 			pci->rk808->variant);
52068 		return -EINVAL;
52069 	}
52070 
52071-	/* Add gpio chip */
52072+	pci->gpio_chip.parent = &pdev->dev;
52073+
52074+	if (np)
52075+		pci->gpio_chip.of_node = np;
52076+	else
52077+		pci->gpio_chip.of_node = pdev->dev.parent->of_node;
52078+
52079+	/* Add gpiochip */
52080 	ret = devm_gpiochip_add_data(&pdev->dev, &pci->gpio_chip, pci);
52081 	if (ret < 0) {
52082 		dev_err(&pdev->dev, "Couldn't add gpiochip\n");
52083@@ -485,7 +794,12 @@ static struct platform_driver rk805_pinctrl_driver = {
52084 		.name = "rk805-pinctrl",
52085 	},
52086 };
52087-module_platform_driver(rk805_pinctrl_driver);
52088+
52089+static int __init rk805_pinctrl_driver_register(void)
52090+{
52091+	return platform_driver_register(&rk805_pinctrl_driver);
52092+}
52093+fs_initcall_sync(rk805_pinctrl_driver_register);
52094 
52095 MODULE_DESCRIPTION("RK805 pin control and GPIO driver");
52096 MODULE_AUTHOR("Joseph Chen <chenjh@rock-chips.com>");
52097diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c
52098index 764c96ddf..1e78070ab 100644
52099--- a/drivers/pinctrl/pinctrl-rockchip.c
52100+++ b/drivers/pinctrl/pinctrl-rockchip.c
52101@@ -16,11 +16,13 @@
52102  */
52103 
52104 #include <linux/init.h>
52105+#include <linux/module.h>
52106 #include <linux/platform_device.h>
52107 #include <linux/io.h>
52108 #include <linux/bitops.h>
52109 #include <linux/gpio/driver.h>
52110 #include <linux/of_address.h>
52111+#include <linux/of_device.h>
52112 #include <linux/of_irq.h>
52113 #include <linux/pinctrl/machine.h>
52114 #include <linux/pinctrl/pinconf.h>
52115@@ -31,39 +33,12 @@
52116 #include <linux/clk.h>
52117 #include <linux/regmap.h>
52118 #include <linux/mfd/syscon.h>
52119+#include <linux/rockchip/cpu.h>
52120 #include <dt-bindings/pinctrl/rockchip.h>
52121 
52122 #include "core.h"
52123 #include "pinconf.h"
52124-
52125-/* GPIO control registers */
52126-#define GPIO_SWPORT_DR		0x00
52127-#define GPIO_SWPORT_DDR		0x04
52128-#define GPIO_INTEN		0x30
52129-#define GPIO_INTMASK		0x34
52130-#define GPIO_INTTYPE_LEVEL	0x38
52131-#define GPIO_INT_POLARITY	0x3c
52132-#define GPIO_INT_STATUS		0x40
52133-#define GPIO_INT_RAWSTATUS	0x44
52134-#define GPIO_DEBOUNCE		0x48
52135-#define GPIO_PORTS_EOI		0x4c
52136-#define GPIO_EXT_PORT		0x50
52137-#define GPIO_LS_SYNC		0x60
52138-
52139-enum rockchip_pinctrl_type {
52140-	PX30,
52141-	RV1108,
52142-	RK2928,
52143-	RK3066B,
52144-	RK3128,
52145-	RK3188,
52146-	RK3288,
52147-	RK3308,
52148-	RK3368,
52149-	RK3399,
52150-	RK3568,
52151-};
52152-
52153+#include <linux/pinctrl-rockchip.h>
52154 
52155 /**
52156  * Generate a bitmask for setting a value (v) with a write mask bit in hiword
52157@@ -81,103 +56,8 @@ enum rockchip_pinctrl_type {
52158 #define IOMUX_UNROUTED		BIT(3)
52159 #define IOMUX_WIDTH_3BIT	BIT(4)
52160 #define IOMUX_WIDTH_2BIT	BIT(5)
52161-
52162-/**
52163- * struct rockchip_iomux
52164- * @type: iomux variant using IOMUX_* constants
52165- * @offset: if initialized to -1 it will be autocalculated, by specifying
52166- *	    an initial offset value the relevant source offset can be reset
52167- *	    to a new value for autocalculating the following iomux registers.
52168- */
52169-struct rockchip_iomux {
52170-	int				type;
52171-	int				offset;
52172-};
52173-
52174-/*
52175- * enum type index corresponding to rockchip_perpin_drv_list arrays index.
52176- */
52177-enum rockchip_pin_drv_type {
52178-	DRV_TYPE_IO_DEFAULT = 0,
52179-	DRV_TYPE_IO_1V8_OR_3V0,
52180-	DRV_TYPE_IO_1V8_ONLY,
52181-	DRV_TYPE_IO_1V8_3V0_AUTO,
52182-	DRV_TYPE_IO_3V3_ONLY,
52183-	DRV_TYPE_MAX
52184-};
52185-
52186-/*
52187- * enum type index corresponding to rockchip_pull_list arrays index.
52188- */
52189-enum rockchip_pin_pull_type {
52190-	PULL_TYPE_IO_DEFAULT = 0,
52191-	PULL_TYPE_IO_1V8_ONLY,
52192-	PULL_TYPE_MAX
52193-};
52194-
52195-/**
52196- * struct rockchip_drv
52197- * @drv_type: drive strength variant using rockchip_perpin_drv_type
52198- * @offset: if initialized to -1 it will be autocalculated, by specifying
52199- *	    an initial offset value the relevant source offset can be reset
52200- *	    to a new value for autocalculating the following drive strength
52201- *	    registers. if used chips own cal_drv func instead to calculate
52202- *	    registers offset, the variant could be ignored.
52203- */
52204-struct rockchip_drv {
52205-	enum rockchip_pin_drv_type	drv_type;
52206-	int				offset;
52207-};
52208-
52209-/**
52210- * struct rockchip_pin_bank
52211- * @reg_base: register base of the gpio bank
52212- * @regmap_pull: optional separate register for additional pull settings
52213- * @clk: clock of the gpio bank
52214- * @irq: interrupt of the gpio bank
52215- * @saved_masks: Saved content of GPIO_INTEN at suspend time.
52216- * @pin_base: first pin number
52217- * @nr_pins: number of pins in this bank
52218- * @name: name of the bank
52219- * @bank_num: number of the bank, to account for holes
52220- * @iomux: array describing the 4 iomux sources of the bank
52221- * @drv: array describing the 4 drive strength sources of the bank
52222- * @pull_type: array describing the 4 pull type sources of the bank
52223- * @valid: is all necessary information present
52224- * @of_node: dt node of this bank
52225- * @drvdata: common pinctrl basedata
52226- * @domain: irqdomain of the gpio bank
52227- * @gpio_chip: gpiolib chip
52228- * @grange: gpio range
52229- * @slock: spinlock for the gpio bank
52230- * @toggle_edge_mode: bit mask to toggle (falling/rising) edge mode
52231- * @recalced_mask: bit mask to indicate a need to recalulate the mask
52232- * @route_mask: bits describing the routing pins of per bank
52233- */
52234-struct rockchip_pin_bank {
52235-	void __iomem			*reg_base;
52236-	struct regmap			*regmap_pull;
52237-	struct clk			*clk;
52238-	int				irq;
52239-	u32				saved_masks;
52240-	u32				pin_base;
52241-	u8				nr_pins;
52242-	char				*name;
52243-	u8				bank_num;
52244-	struct rockchip_iomux		iomux[4];
52245-	struct rockchip_drv		drv[4];
52246-	enum rockchip_pin_pull_type	pull_type[4];
52247-	bool				valid;
52248-	struct device_node		*of_node;
52249-	struct rockchip_pinctrl		*drvdata;
52250-	struct irq_domain		*domain;
52251-	struct gpio_chip		gpio_chip;
52252-	struct pinctrl_gpio_range	grange;
52253-	raw_spinlock_t			slock;
52254-	u32				toggle_edge_mode;
52255-	u32				recalced_mask;
52256-	u32				route_mask;
52257-};
52258+#define IOMUX_WRITABLE_32BIT	BIT(6)
52259+#define IOMUX_L_SOURCE_PMU	BIT(7)
52260 
52261 #define PIN_BANK(id, pins, label)			\
52262 	{						\
52263@@ -205,6 +85,21 @@ struct rockchip_pin_bank {
52264 		},							\
52265 	}
52266 
52267+#define PIN_BANK_IOMUX_FLAGS_OFFSET(id, pins, label, iom0, iom1, iom2,	\
52268+				    iom3, offset0, offset1, offset2,	\
52269+				    offset3)				\
52270+	{								\
52271+		.bank_num	= id,					\
52272+		.nr_pins	= pins,					\
52273+		.name		= label,				\
52274+		.iomux		= {					\
52275+			{ .type = iom0, .offset = offset0 },		\
52276+			{ .type = iom1, .offset = offset1 },		\
52277+			{ .type = iom2, .offset = offset2 },		\
52278+			{ .type = iom3, .offset = offset3 },		\
52279+		},							\
52280+	}
52281+
52282 #define PIN_BANK_DRV_FLAGS(id, pins, label, type0, type1, type2, type3) \
52283 	{								\
52284 		.bank_num	= id,					\
52285@@ -224,6 +119,25 @@ struct rockchip_pin_bank {
52286 		},							\
52287 	}
52288 
52289+#define PIN_BANK_IOMUX_FLAGS_PULL_FLAGS(id, pins, label, iom0, iom1,	\
52290+					iom2, iom3, pull0, pull1,	\
52291+					pull2, pull3)			\
52292+	{								\
52293+		.bank_num	= id,					\
52294+		.nr_pins	= pins,					\
52295+		.name		= label,				\
52296+		.iomux		= {					\
52297+			{ .type = iom0, .offset = -1 },			\
52298+			{ .type = iom1, .offset = -1 },			\
52299+			{ .type = iom2, .offset = -1 },			\
52300+			{ .type = iom3, .offset = -1 },			\
52301+		},							\
52302+		.pull_type[0] = pull0,					\
52303+		.pull_type[1] = pull1,					\
52304+		.pull_type[2] = pull2,					\
52305+		.pull_type[3] = pull3,					\
52306+	}
52307+
52308 #define PIN_BANK_DRV_FLAGS_PULL_FLAGS(id, pins, label, drv0, drv1,	\
52309 				      drv2, drv3, pull0, pull1,		\
52310 				      pull2, pull3)			\
52311@@ -318,118 +232,8 @@ struct rockchip_pin_bank {
52312 #define RK_MUXROUTE_PMU(ID, PIN, FUNC, REG, VAL)	\
52313 	PIN_BANK_MUX_ROUTE_FLAGS(ID, PIN, FUNC, REG, VAL, ROCKCHIP_ROUTE_PMU)
52314 
52315-/**
52316- * struct rockchip_mux_recalced_data: represent a pin iomux data.
52317- * @num: bank number.
52318- * @pin: pin number.
52319- * @bit: index at register.
52320- * @reg: register offset.
52321- * @mask: mask bit
52322- */
52323-struct rockchip_mux_recalced_data {
52324-	u8 num;
52325-	u8 pin;
52326-	u32 reg;
52327-	u8 bit;
52328-	u8 mask;
52329-};
52330-
52331-enum rockchip_mux_route_location {
52332-	ROCKCHIP_ROUTE_SAME = 0,
52333-	ROCKCHIP_ROUTE_PMU,
52334-	ROCKCHIP_ROUTE_GRF,
52335-};
52336-
52337-/**
52338- * struct rockchip_mux_recalced_data: represent a pin iomux data.
52339- * @bank_num: bank number.
52340- * @pin: index at register or used to calc index.
52341- * @func: the min pin.
52342- * @route_location: the mux route location (same, pmu, grf).
52343- * @route_offset: the max pin.
52344- * @route_val: the register offset.
52345- */
52346-struct rockchip_mux_route_data {
52347-	u8 bank_num;
52348-	u8 pin;
52349-	u8 func;
52350-	enum rockchip_mux_route_location route_location;
52351-	u32 route_offset;
52352-	u32 route_val;
52353-};
52354-
52355-struct rockchip_pin_ctrl {
52356-	struct rockchip_pin_bank	*pin_banks;
52357-	u32				nr_banks;
52358-	u32				nr_pins;
52359-	char				*label;
52360-	enum rockchip_pinctrl_type	type;
52361-	int				grf_mux_offset;
52362-	int				pmu_mux_offset;
52363-	int				grf_drv_offset;
52364-	int				pmu_drv_offset;
52365-	struct rockchip_mux_recalced_data *iomux_recalced;
52366-	u32				niomux_recalced;
52367-	struct rockchip_mux_route_data *iomux_routes;
52368-	u32				niomux_routes;
52369-
52370-	void	(*pull_calc_reg)(struct rockchip_pin_bank *bank,
52371-				    int pin_num, struct regmap **regmap,
52372-				    int *reg, u8 *bit);
52373-	void	(*drv_calc_reg)(struct rockchip_pin_bank *bank,
52374-				    int pin_num, struct regmap **regmap,
52375-				    int *reg, u8 *bit);
52376-	int	(*schmitt_calc_reg)(struct rockchip_pin_bank *bank,
52377-				    int pin_num, struct regmap **regmap,
52378-				    int *reg, u8 *bit);
52379-};
52380-
52381-struct rockchip_pin_config {
52382-	unsigned int		func;
52383-	unsigned long		*configs;
52384-	unsigned int		nconfigs;
52385-};
52386-
52387-/**
52388- * struct rockchip_pin_group: represent group of pins of a pinmux function.
52389- * @name: name of the pin group, used to lookup the group.
52390- * @pins: the pins included in this group.
52391- * @npins: number of pins included in this group.
52392- * @data: local pin configuration
52393- */
52394-struct rockchip_pin_group {
52395-	const char			*name;
52396-	unsigned int			npins;
52397-	unsigned int			*pins;
52398-	struct rockchip_pin_config	*data;
52399-};
52400-
52401-/**
52402- * struct rockchip_pmx_func: represent a pin function.
52403- * @name: name of the pin function, used to lookup the function.
52404- * @groups: one or more names of pin groups that provide this function.
52405- * @ngroups: number of groups included in @groups.
52406- */
52407-struct rockchip_pmx_func {
52408-	const char		*name;
52409-	const char		**groups;
52410-	u8			ngroups;
52411-};
52412-
52413-struct rockchip_pinctrl {
52414-	struct regmap			*regmap_base;
52415-	int				reg_size;
52416-	struct regmap			*regmap_pull;
52417-	struct regmap			*regmap_pmu;
52418-	struct device			*dev;
52419-	struct rockchip_pin_ctrl	*ctrl;
52420-	struct pinctrl_desc		pctl;
52421-	struct pinctrl_dev		*pctl_dev;
52422-	struct rockchip_pin_group	*groups;
52423-	unsigned int			ngroups;
52424-	struct rockchip_pmx_func	*functions;
52425-	unsigned int			nfunctions;
52426-};
52427+#define RK3588_PIN_BANK_FLAGS(ID, PIN, LABEL, M, P)			\
52428+	PIN_BANK_IOMUX_FLAGS_PULL_FLAGS(ID, PIN, LABEL, M, M, M, M, P, P, P, P)
52429 
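Note: RK3588_PIN_BANK_FLAGS() above simply applies one iomux type and one pull type to all four sub-bank slots. A hypothetical bank entry is sketched below; IOMUX_WIDTH_4BIT and the chosen pull type are assumed names from the full driver, not taken from this hunk.

/* Hypothetical use of the helper; not part of this patch. */
static struct rockchip_pin_bank rk3588_sketch_banks[] = {
	RK3588_PIN_BANK_FLAGS(0, 32, "gpio0",
			      IOMUX_WIDTH_4BIT, PULL_TYPE_IO_1V8_ONLY),
};

/*
 * This expands to PIN_BANK_IOMUX_FLAGS_PULL_FLAGS(0, 32, "gpio0",
 * IOMUX_WIDTH_4BIT, IOMUX_WIDTH_4BIT, IOMUX_WIDTH_4BIT, IOMUX_WIDTH_4BIT,
 * PULL_TYPE_IO_1V8_ONLY, PULL_TYPE_IO_1V8_ONLY, PULL_TYPE_IO_1V8_ONLY,
 * PULL_TYPE_IO_1V8_ONLY), with every iomux offset left at -1 for
 * auto-calculation.
 */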
52430 static struct regmap_config rockchip_regmap_config = {
52431 	.reg_bits = 32,
52432@@ -655,6 +459,37 @@ static struct rockchip_mux_recalced_data rv1108_mux_recalced_data[] = {
52433 	},
52434 };
52435 
52436+static struct rockchip_mux_recalced_data rv1126_mux_recalced_data[] = {
52437+	{
52438+		.num = 0,
52439+		.pin = 20,
52440+		.reg = 0x10000,
52441+		.bit = 0,
52442+		.mask = 0xf
52443+	},
52444+	{
52445+		.num = 0,
52446+		.pin = 21,
52447+		.reg = 0x10000,
52448+		.bit = 4,
52449+		.mask = 0xf
52450+	},
52451+	{
52452+		.num = 0,
52453+		.pin = 22,
52454+		.reg = 0x10000,
52455+		.bit = 8,
52456+		.mask = 0xf
52457+	},
52458+	{
52459+		.num = 0,
52460+		.pin = 23,
52461+		.reg = 0x10000,
52462+		.bit = 12,
52463+		.mask = 0xf
52464+	},
52465+};
52466+
52467 static  struct rockchip_mux_recalced_data rk3128_mux_recalced_data[] = {
52468 	{
52469 		.num = 2,
52470@@ -800,11 +635,47 @@ static struct rockchip_mux_recalced_data rk3308_mux_recalced_data[] = {
52471 
52472 static struct rockchip_mux_recalced_data rk3328_mux_recalced_data[] = {
52473 	{
52474+		.num = 2,
52475+		.pin = 8,
52476+		.reg = 0x24,
52477+		.bit = 0,
52478+		.mask = 0x3
52479+	}, {
52480+		.num = 2,
52481+		.pin = 9,
52482+		.reg = 0x24,
52483+		.bit = 2,
52484+		.mask = 0x3
52485+	}, {
52486+		.num = 2,
52487+		.pin = 10,
52488+		.reg = 0x24,
52489+		.bit = 4,
52490+		.mask = 0x3
52491+	}, {
52492+		.num = 2,
52493+		.pin = 11,
52494+		.reg = 0x24,
52495+		.bit = 6,
52496+		.mask = 0x3
52497+	}, {
52498 		.num = 2,
52499 		.pin = 12,
52500 		.reg = 0x24,
52501 		.bit = 8,
52502 		.mask = 0x3
52503+	}, {
52504+		.num = 2,
52505+		.pin = 13,
52506+		.reg = 0x24,
52507+		.bit = 10,
52508+		.mask = 0x3
52509+	}, {
52510+		.num = 2,
52511+		.pin = 14,
52512+		.reg = 0x24,
52513+		.bit = 12,
52514+		.mask = 0x3
52515 	}, {
52516 		.num = 2,
52517 		.pin = 15,
52518@@ -820,6 +691,103 @@ static struct rockchip_mux_recalced_data rk3328_mux_recalced_data[] = {
52519 	},
52520 };
52521 
52522+static struct rockchip_mux_route_data rv1126_mux_route_data[] = {
52523+	RK_MUXROUTE_GRF(3, RK_PD2, 1, 0x10260, WRITE_MASK_VAL(0, 0, 0)), /* I2S0_MCLK_M0 */
52524+	RK_MUXROUTE_GRF(3, RK_PB0, 3, 0x10260, WRITE_MASK_VAL(0, 0, 1)), /* I2S0_MCLK_M1 */
52525+
52526+	RK_MUXROUTE_GRF(0, RK_PD4, 4, 0x10260, WRITE_MASK_VAL(3, 2, 0)), /* I2S1_MCLK_M0 */
52527+	RK_MUXROUTE_GRF(1, RK_PD5, 2, 0x10260, WRITE_MASK_VAL(3, 2, 1)), /* I2S1_MCLK_M1 */
52528+	RK_MUXROUTE_GRF(2, RK_PC7, 6, 0x10260, WRITE_MASK_VAL(3, 2, 2)), /* I2S1_MCLK_M2 */
52529+
52530+	RK_MUXROUTE_GRF(1, RK_PD0, 1, 0x10260, WRITE_MASK_VAL(4, 4, 0)), /* I2S2_MCLK_M0 */
52531+	RK_MUXROUTE_GRF(2, RK_PB3, 2, 0x10260, WRITE_MASK_VAL(4, 4, 1)), /* I2S2_MCLK_M1 */
52532+
52533+	RK_MUXROUTE_GRF(3, RK_PD4, 2, 0x10260, WRITE_MASK_VAL(12, 12, 0)), /* PDM_CLK0_M0 */
52534+	RK_MUXROUTE_GRF(3, RK_PC0, 3, 0x10260, WRITE_MASK_VAL(12, 12, 1)), /* PDM_CLK0_M1 */
52535+
52536+	RK_MUXROUTE_GRF(3, RK_PC6, 1, 0x10264, WRITE_MASK_VAL(0, 0, 0)), /* CIF_CLKOUT_M0 */
52537+	RK_MUXROUTE_GRF(2, RK_PD1, 3, 0x10264, WRITE_MASK_VAL(0, 0, 1)), /* CIF_CLKOUT_M1 */
52538+
52539+	RK_MUXROUTE_GRF(3, RK_PA4, 5, 0x10264, WRITE_MASK_VAL(5, 4, 0)), /* I2C3_SCL_M0 */
52540+	RK_MUXROUTE_GRF(2, RK_PD4, 7, 0x10264, WRITE_MASK_VAL(5, 4, 1)), /* I2C3_SCL_M1 */
52541+	RK_MUXROUTE_GRF(1, RK_PD6, 3, 0x10264, WRITE_MASK_VAL(5, 4, 2)), /* I2C3_SCL_M2 */
52542+
52543+	RK_MUXROUTE_GRF(3, RK_PA0, 7, 0x10264, WRITE_MASK_VAL(6, 6, 0)), /* I2C4_SCL_M0 */
52544+	RK_MUXROUTE_GRF(4, RK_PA0, 4, 0x10264, WRITE_MASK_VAL(6, 6, 1)), /* I2C4_SCL_M1 */
52545+
52546+	RK_MUXROUTE_GRF(2, RK_PA5, 7, 0x10264, WRITE_MASK_VAL(9, 8, 0)), /* I2C5_SCL_M0 */
52547+	RK_MUXROUTE_GRF(3, RK_PB0, 5, 0x10264, WRITE_MASK_VAL(9, 8, 1)), /* I2C5_SCL_M1 */
52548+	RK_MUXROUTE_GRF(1, RK_PD0, 4, 0x10264, WRITE_MASK_VAL(9, 8, 2)), /* I2C5_SCL_M2 */
52549+
52550+	RK_MUXROUTE_GRF(3, RK_PC0, 5, 0x10264, WRITE_MASK_VAL(11, 10, 0)), /* SPI1_CLK_M0 */
52551+	RK_MUXROUTE_GRF(1, RK_PC6, 3, 0x10264, WRITE_MASK_VAL(11, 10, 1)), /* SPI1_CLK_M1 */
52552+	RK_MUXROUTE_GRF(2, RK_PD5, 6, 0x10264, WRITE_MASK_VAL(11, 10, 2)), /* SPI1_CLK_M2 */
52553+
52554+	RK_MUXROUTE_GRF(3, RK_PC0, 2, 0x10264, WRITE_MASK_VAL(12, 12, 0)), /* RGMII_CLK_M0 */
52555+	RK_MUXROUTE_GRF(2, RK_PB7, 2, 0x10264, WRITE_MASK_VAL(12, 12, 1)), /* RGMII_CLK_M1 */
52556+
52557+	RK_MUXROUTE_GRF(3, RK_PA1, 3, 0x10264, WRITE_MASK_VAL(13, 13, 0)), /* CAN_TXD_M0 */
52558+	RK_MUXROUTE_GRF(3, RK_PA7, 5, 0x10264, WRITE_MASK_VAL(13, 13, 1)), /* CAN_TXD_M1 */
52559+
52560+	RK_MUXROUTE_GRF(3, RK_PA4, 6, 0x10268, WRITE_MASK_VAL(0, 0, 0)), /* PWM8_M0 */
52561+	RK_MUXROUTE_GRF(2, RK_PD7, 5, 0x10268, WRITE_MASK_VAL(0, 0, 1)), /* PWM8_M1 */
52562+
52563+	RK_MUXROUTE_GRF(3, RK_PA5, 6, 0x10268, WRITE_MASK_VAL(2, 2, 0)), /* PWM9_M0 */
52564+	RK_MUXROUTE_GRF(2, RK_PD6, 5, 0x10268, WRITE_MASK_VAL(2, 2, 1)), /* PWM9_M1 */
52565+
52566+	RK_MUXROUTE_GRF(3, RK_PA6, 6, 0x10268, WRITE_MASK_VAL(4, 4, 0)), /* PWM10_M0 */
52567+	RK_MUXROUTE_GRF(2, RK_PD5, 5, 0x10268, WRITE_MASK_VAL(4, 4, 1)), /* PWM10_M1 */
52568+
52569+	RK_MUXROUTE_GRF(3, RK_PA7, 6, 0x10268, WRITE_MASK_VAL(6, 6, 0)), /* PWM11_IR_M0 */
52570+	RK_MUXROUTE_GRF(3, RK_PA1, 5, 0x10268, WRITE_MASK_VAL(6, 6, 1)), /* PWM11_IR_M1 */
52571+
52572+	RK_MUXROUTE_GRF(1, RK_PA5, 3, 0x10268, WRITE_MASK_VAL(8, 8, 0)), /* UART2_TX_M0 */
52573+	RK_MUXROUTE_GRF(3, RK_PA2, 1, 0x10268, WRITE_MASK_VAL(8, 8, 1)), /* UART2_TX_M1 */
52574+
52575+	RK_MUXROUTE_GRF(3, RK_PC6, 3, 0x10268, WRITE_MASK_VAL(11, 10, 0)), /* UART3_TX_M0 */
52576+	RK_MUXROUTE_GRF(1, RK_PA7, 2, 0x10268, WRITE_MASK_VAL(11, 10, 1)), /* UART3_TX_M1 */
52577+	RK_MUXROUTE_GRF(3, RK_PA0, 4, 0x10268, WRITE_MASK_VAL(11, 10, 2)), /* UART3_TX_M2 */
52578+
52579+	RK_MUXROUTE_GRF(3, RK_PA4, 4, 0x10268, WRITE_MASK_VAL(13, 12, 0)), /* UART4_TX_M0 */
52580+	RK_MUXROUTE_GRF(2, RK_PA6, 4, 0x10268, WRITE_MASK_VAL(13, 12, 1)), /* UART4_TX_M1 */
52581+	RK_MUXROUTE_GRF(1, RK_PD5, 3, 0x10268, WRITE_MASK_VAL(13, 12, 2)), /* UART4_TX_M2 */
52582+
52583+	RK_MUXROUTE_GRF(3, RK_PA6, 4, 0x10268, WRITE_MASK_VAL(15, 14, 0)), /* UART5_TX_M0 */
52584+	RK_MUXROUTE_GRF(2, RK_PB0, 4, 0x10268, WRITE_MASK_VAL(15, 14, 1)), /* UART5_TX_M1 */
52585+	RK_MUXROUTE_GRF(2, RK_PA0, 3, 0x10268, WRITE_MASK_VAL(15, 14, 2)), /* UART5_TX_M2 */
52586+
52587+	RK_MUXROUTE_PMU(0, RK_PB6, 3, 0x0114, WRITE_MASK_VAL(0, 0, 0)), /* PWM0_M0 */
52588+	RK_MUXROUTE_PMU(2, RK_PB3, 5, 0x0114, WRITE_MASK_VAL(0, 0, 1)), /* PWM0_M1 */
52589+
52590+	RK_MUXROUTE_PMU(0, RK_PB7, 3, 0x0114, WRITE_MASK_VAL(2, 2, 0)), /* PWM1_M0 */
52591+	RK_MUXROUTE_PMU(2, RK_PB2, 5, 0x0114, WRITE_MASK_VAL(2, 2, 1)), /* PWM1_M1 */
52592+
52593+	RK_MUXROUTE_PMU(0, RK_PC0, 3, 0x0114, WRITE_MASK_VAL(4, 4, 0)), /* PWM2_M0 */
52594+	RK_MUXROUTE_PMU(2, RK_PB1, 5, 0x0114, WRITE_MASK_VAL(4, 4, 1)), /* PWM2_M1 */
52595+
52596+	RK_MUXROUTE_PMU(0, RK_PC1, 3, 0x0114, WRITE_MASK_VAL(6, 6, 0)), /* PWM3_IR_M0 */
52597+	RK_MUXROUTE_PMU(2, RK_PB0, 5, 0x0114, WRITE_MASK_VAL(6, 6, 1)), /* PWM3_IR_M1 */
52598+
52599+	RK_MUXROUTE_PMU(0, RK_PC2, 3, 0x0114, WRITE_MASK_VAL(8, 8, 0)), /* PWM4_M0 */
52600+	RK_MUXROUTE_PMU(2, RK_PA7, 5, 0x0114, WRITE_MASK_VAL(8, 8, 1)), /* PWM4_M1 */
52601+
52602+	RK_MUXROUTE_PMU(0, RK_PC3, 3, 0x0114, WRITE_MASK_VAL(10, 10, 0)), /* PWM5_M0 */
52603+	RK_MUXROUTE_PMU(2, RK_PA6, 5, 0x0114, WRITE_MASK_VAL(10, 10, 1)), /* PWM5_M1 */
52604+
52605+	RK_MUXROUTE_PMU(0, RK_PB2, 3, 0x0114, WRITE_MASK_VAL(12, 12, 0)), /* PWM6_M0 */
52606+	RK_MUXROUTE_PMU(2, RK_PD4, 5, 0x0114, WRITE_MASK_VAL(12, 12, 1)), /* PWM6_M1 */
52607+
52608+	RK_MUXROUTE_PMU(0, RK_PB1, 3, 0x0114, WRITE_MASK_VAL(14, 14, 0)), /* PWM7_IR_M0 */
52609+	RK_MUXROUTE_PMU(3, RK_PA0, 5, 0x0114, WRITE_MASK_VAL(14, 14, 1)), /* PWM7_IR_M1 */
52610+
52611+	RK_MUXROUTE_PMU(0, RK_PB0, 1, 0x0118, WRITE_MASK_VAL(1, 0, 0)), /* SPI0_CLK_M0 */
52612+	RK_MUXROUTE_PMU(2, RK_PA1, 1, 0x0118, WRITE_MASK_VAL(1, 0, 1)), /* SPI0_CLK_M1 */
52613+	RK_MUXROUTE_PMU(2, RK_PB2, 6, 0x0118, WRITE_MASK_VAL(1, 0, 2)), /* SPI0_CLK_M2 */
52614+
52615+	RK_MUXROUTE_PMU(0, RK_PB6, 2, 0x0118, WRITE_MASK_VAL(2, 2, 0)), /* UART1_TX_M0 */
52616+	RK_MUXROUTE_PMU(1, RK_PD0, 5, 0x0118, WRITE_MASK_VAL(2, 2, 1)), /* UART1_TX_M1 */
52617+};
52618+
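Editor's note: the rv1126 route table above programs "hiword mask" GRF/PMU registers, where every value bit written in the low half-word must be paired with a write-enable bit sixteen positions higher. WRITE_MASK_VAL() itself is defined earlier in this file and is not quoted in this hunk; the standalone sketch below is an assumed expansion that matches how the table entries are used, not the driver's macro.

#include <stdint.h>

/* Hypothetical stand-in for WRITE_MASK_VAL(h, l, v): value in bits [h:l],
 * write-enable bits in [h+16:l+16]. */
static uint32_t write_mask_val(unsigned int h, unsigned int l, uint32_t v)
{
	uint32_t field = ((1u << (h - l + 1)) - 1) << l;	/* GENMASK(h, l) */

	return (field << 16) | ((v << l) & field);
}

/* e.g. the SPI1_CLK_M2 entry, WRITE_MASK_VAL(11, 10, 2), expands to 0x0c000800 */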
52619 static void rockchip_get_recalced_mux(struct rockchip_pin_bank *bank, int pin,
52620 				      int *reg, u8 *bit, int *mask)
52621 {
52622@@ -843,6 +811,20 @@ static void rockchip_get_recalced_mux(struct rockchip_pin_bank *bank, int pin,
52623 	*bit = data->bit;
52624 }
52625 
52626+static struct rockchip_mux_route_data rk1808_mux_route_data[] = {
52627+	RK_MUXROUTE_SAME(3, RK_PB4, 2, 0x190, BIT(16 + 3)), /* i2c2m0_sda */
52628+	RK_MUXROUTE_SAME(1, RK_PB5, 2, 0x190, BIT(16 + 3) | BIT(3)), /* i2c2m1_sda */
52629+	RK_MUXROUTE_SAME(1, RK_PA6, 2, 0x190, BIT(16 + 4)), /* spi2m0_miso */
52630+	RK_MUXROUTE_SAME(2, RK_PA4, 3, 0x190, BIT(16 + 4) | BIT(4)), /* spi2m1_miso */
52631+	RK_MUXROUTE_SAME(4, RK_PB7, 2, 0x190, BIT(16 + 5)), /* spi1m0_miso */
52632+	RK_MUXROUTE_SAME(3, RK_PD2, 3, 0x190, BIT(16 + 5) | BIT(5)), /* spi1m1_miso */
52633+	RK_MUXROUTE_SAME(4, RK_PB0, 2, 0x190, BIT(16 + 13)), /* uart1_rxm0 */
52634+	RK_MUXROUTE_SAME(1, RK_PB4, 3, 0x190, BIT(16 + 13) | BIT(13)), /* uart1_rxm1 */
52635+	RK_MUXROUTE_SAME(4, RK_PA3, 2, 0x190, BIT(16 + 14) | BIT(16 + 15)), /* uart2_rxm0 */
52636+	RK_MUXROUTE_SAME(2, RK_PD1, 2, 0x190, BIT(16 + 14) | BIT(16 + 15) | BIT(14)), /* uart2_rxm1 */
52637+	RK_MUXROUTE_SAME(3, RK_PA4, 2, 0x190, BIT(16 + 14) | BIT(16 + 15) | BIT(15)), /* uart2_rxm2 */
52638+};
52639+
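Editor's note: the rk1808 table spells out the same write-enable convention with BIT(): BIT(16 + n) alone clears route bit n (the M0 option), BIT(16 + n) | BIT(n) sets it, and two-bit selectors such as uart2 raise both enable bits and then write the chosen pattern. A small self-check of the constants above, illustrative only:

#include <assert.h>
#include <stdint.h>

#define BIT(n) (1u << (n))

int main(void)
{
	/* uart1_rxm0: enable bit 13, leave the value bit clear */
	assert(BIT(16 + 13) == 0x20000000u);
	/* uart2_rxm2: enable bits 15:14 and write 0b10 into them */
	assert((BIT(16 + 14) | BIT(16 + 15) | BIT(15)) == 0xc0008000u);
	return 0;
}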
52640 static struct rockchip_mux_route_data px30_mux_route_data[] = {
52641 	RK_MUXROUTE_SAME(2, RK_PA0, 1, 0x184, BIT(16 + 7)), /* cif-d2m0 */
52642 	RK_MUXROUTE_SAME(3, RK_PA3, 3, 0x184, BIT(16 + 7) | BIT(7)), /* cif-d2m1 */
52643@@ -954,19 +936,20 @@ static struct rockchip_mux_route_data rk3568_mux_route_data[] = {
52644 	RK_MUXROUTE_PMU(0, RK_PB5, 4, 0x0110, WRITE_MASK_VAL(3, 2, 1)), /* PWM1 IO mux M1 */
52645 	RK_MUXROUTE_PMU(0, RK_PC1, 1, 0x0110, WRITE_MASK_VAL(5, 4, 0)), /* PWM2 IO mux M0 */
52646 	RK_MUXROUTE_PMU(0, RK_PB6, 4, 0x0110, WRITE_MASK_VAL(5, 4, 1)), /* PWM2 IO mux M1 */
52647-	RK_MUXROUTE_PMU(0, RK_PB3, 2, 0x0300, WRITE_MASK_VAL(0, 0, 0)), /* CAN0 IO mux M0 */
52648+	RK_MUXROUTE_GRF(0, RK_PB3, 2, 0x0300, WRITE_MASK_VAL(0, 0, 0)), /* CAN0 IO mux M0 */
52649 	RK_MUXROUTE_GRF(2, RK_PA1, 4, 0x0300, WRITE_MASK_VAL(0, 0, 1)), /* CAN0 IO mux M1 */
52650 	RK_MUXROUTE_GRF(1, RK_PA1, 3, 0x0300, WRITE_MASK_VAL(2, 2, 0)), /* CAN1 IO mux M0 */
52651 	RK_MUXROUTE_GRF(4, RK_PC3, 3, 0x0300, WRITE_MASK_VAL(2, 2, 1)), /* CAN1 IO mux M1 */
52652 	RK_MUXROUTE_GRF(4, RK_PB5, 3, 0x0300, WRITE_MASK_VAL(4, 4, 0)), /* CAN2 IO mux M0 */
52653 	RK_MUXROUTE_GRF(2, RK_PB2, 4, 0x0300, WRITE_MASK_VAL(4, 4, 1)), /* CAN2 IO mux M1 */
52654 	RK_MUXROUTE_GRF(4, RK_PC4, 1, 0x0300, WRITE_MASK_VAL(6, 6, 0)), /* HPDIN IO mux M0 */
52655-	RK_MUXROUTE_PMU(0, RK_PC2, 2, 0x0300, WRITE_MASK_VAL(6, 6, 1)), /* HPDIN IO mux M1 */
52656+	RK_MUXROUTE_GRF(0, RK_PC2, 2, 0x0300, WRITE_MASK_VAL(6, 6, 1)), /* HPDIN IO mux M1 */
52657 	RK_MUXROUTE_GRF(3, RK_PB1, 3, 0x0300, WRITE_MASK_VAL(8, 8, 0)), /* GMAC1 IO mux M0 */
52658 	RK_MUXROUTE_GRF(4, RK_PA7, 3, 0x0300, WRITE_MASK_VAL(8, 8, 1)), /* GMAC1 IO mux M1 */
52659+	RK_MUXROUTE_GRF(4, RK_PB7, 3, 0x0300, WRITE_MASK_VAL(8, 8, 1)), /* GMAC1 IO mux M1 */
52660 	RK_MUXROUTE_GRF(4, RK_PD1, 1, 0x0300, WRITE_MASK_VAL(10, 10, 0)), /* HDMITX IO mux M0 */
52661-	RK_MUXROUTE_PMU(0, RK_PC7, 1, 0x0300, WRITE_MASK_VAL(10, 10, 1)), /* HDMITX IO mux M1 */
52662-	RK_MUXROUTE_PMU(0, RK_PB6, 1, 0x0300, WRITE_MASK_VAL(14, 14, 0)), /* I2C2 IO mux M0 */
52663+	RK_MUXROUTE_GRF(0, RK_PC7, 1, 0x0300, WRITE_MASK_VAL(10, 10, 1)), /* HDMITX IO mux M1 */
52664+	RK_MUXROUTE_GRF(0, RK_PB6, 1, 0x0300, WRITE_MASK_VAL(14, 14, 0)), /* I2C2 IO mux M0 */
52665 	RK_MUXROUTE_GRF(4, RK_PB4, 1, 0x0300, WRITE_MASK_VAL(14, 14, 1)), /* I2C2 IO mux M1 */
52666 	RK_MUXROUTE_GRF(1, RK_PA0, 1, 0x0304, WRITE_MASK_VAL(0, 0, 0)), /* I2C3 IO mux M0 */
52667 	RK_MUXROUTE_GRF(3, RK_PB6, 4, 0x0304, WRITE_MASK_VAL(0, 0, 1)), /* I2C3 IO mux M1 */
52668@@ -992,7 +975,7 @@ static struct rockchip_mux_route_data rk3568_mux_route_data[] = {
52669 	RK_MUXROUTE_GRF(4, RK_PC3, 1, 0x0308, WRITE_MASK_VAL(12, 12, 1)), /* PWM15 IO mux M1 */
52670 	RK_MUXROUTE_GRF(3, RK_PD2, 3, 0x0308, WRITE_MASK_VAL(14, 14, 0)), /* SDMMC2 IO mux M0 */
52671 	RK_MUXROUTE_GRF(3, RK_PA5, 5, 0x0308, WRITE_MASK_VAL(14, 14, 1)), /* SDMMC2 IO mux M1 */
52672-	RK_MUXROUTE_PMU(0, RK_PB5, 2, 0x030c, WRITE_MASK_VAL(0, 0, 0)), /* SPI0 IO mux M0 */
52673+	RK_MUXROUTE_GRF(0, RK_PB5, 2, 0x030c, WRITE_MASK_VAL(0, 0, 0)), /* SPI0 IO mux M0 */
52674 	RK_MUXROUTE_GRF(2, RK_PD3, 3, 0x030c, WRITE_MASK_VAL(0, 0, 1)), /* SPI0 IO mux M1 */
52675 	RK_MUXROUTE_GRF(2, RK_PB5, 3, 0x030c, WRITE_MASK_VAL(2, 2, 0)), /* SPI1 IO mux M0 */
52676 	RK_MUXROUTE_GRF(3, RK_PC3, 3, 0x030c, WRITE_MASK_VAL(2, 2, 1)), /* SPI1 IO mux M1 */
52677@@ -1001,8 +984,8 @@ static struct rockchip_mux_route_data rk3568_mux_route_data[] = {
52678 	RK_MUXROUTE_GRF(4, RK_PB3, 4, 0x030c, WRITE_MASK_VAL(6, 6, 0)), /* SPI3 IO mux M0 */
52679 	RK_MUXROUTE_GRF(4, RK_PC2, 2, 0x030c, WRITE_MASK_VAL(6, 6, 1)), /* SPI3 IO mux M1 */
52680 	RK_MUXROUTE_GRF(2, RK_PB4, 2, 0x030c, WRITE_MASK_VAL(8, 8, 0)), /* UART1 IO mux M0 */
52681-	RK_MUXROUTE_PMU(0, RK_PD1, 1, 0x030c, WRITE_MASK_VAL(8, 8, 1)), /* UART1 IO mux M1 */
52682-	RK_MUXROUTE_PMU(0, RK_PD1, 1, 0x030c, WRITE_MASK_VAL(10, 10, 0)), /* UART2 IO mux M0 */
52683+	RK_MUXROUTE_GRF(3, RK_PD6, 4, 0x030c, WRITE_MASK_VAL(8, 8, 1)), /* UART1 IO mux M1 */
52684+	RK_MUXROUTE_GRF(0, RK_PD1, 1, 0x030c, WRITE_MASK_VAL(10, 10, 0)), /* UART2 IO mux M0 */
52685 	RK_MUXROUTE_GRF(1, RK_PD5, 2, 0x030c, WRITE_MASK_VAL(10, 10, 1)), /* UART2 IO mux M1 */
52686 	RK_MUXROUTE_GRF(1, RK_PA1, 2, 0x030c, WRITE_MASK_VAL(12, 12, 0)), /* UART3 IO mux M0 */
52687 	RK_MUXROUTE_GRF(3, RK_PB7, 4, 0x030c, WRITE_MASK_VAL(12, 12, 1)), /* UART3 IO mux M1 */
52688@@ -1032,13 +1015,13 @@ static struct rockchip_mux_route_data rk3568_mux_route_data[] = {
52689 	RK_MUXROUTE_GRF(3, RK_PD6, 5, 0x0314, WRITE_MASK_VAL(1, 0, 1)), /* PDM IO mux M1 */
52690 	RK_MUXROUTE_GRF(4, RK_PA0, 4, 0x0314, WRITE_MASK_VAL(1, 0, 1)), /* PDM IO mux M1 */
52691 	RK_MUXROUTE_GRF(3, RK_PC4, 5, 0x0314, WRITE_MASK_VAL(1, 0, 2)), /* PDM IO mux M2 */
52692-	RK_MUXROUTE_PMU(0, RK_PA5, 3, 0x0314, WRITE_MASK_VAL(3, 2, 0)), /* PCIE20 IO mux M0 */
52693+	RK_MUXROUTE_GRF(0, RK_PA5, 3, 0x0314, WRITE_MASK_VAL(3, 2, 0)), /* PCIE20 IO mux M0 */
52694 	RK_MUXROUTE_GRF(2, RK_PD0, 4, 0x0314, WRITE_MASK_VAL(3, 2, 1)), /* PCIE20 IO mux M1 */
52695 	RK_MUXROUTE_GRF(1, RK_PB0, 4, 0x0314, WRITE_MASK_VAL(3, 2, 2)), /* PCIE20 IO mux M2 */
52696-	RK_MUXROUTE_PMU(0, RK_PA4, 3, 0x0314, WRITE_MASK_VAL(5, 4, 0)), /* PCIE30X1 IO mux M0 */
52697+	RK_MUXROUTE_GRF(0, RK_PA4, 3, 0x0314, WRITE_MASK_VAL(5, 4, 0)), /* PCIE30X1 IO mux M0 */
52698 	RK_MUXROUTE_GRF(2, RK_PD2, 4, 0x0314, WRITE_MASK_VAL(5, 4, 1)), /* PCIE30X1 IO mux M1 */
52699 	RK_MUXROUTE_GRF(1, RK_PA5, 4, 0x0314, WRITE_MASK_VAL(5, 4, 2)), /* PCIE30X1 IO mux M2 */
52700-	RK_MUXROUTE_PMU(0, RK_PA6, 2, 0x0314, WRITE_MASK_VAL(7, 6, 0)), /* PCIE30X2 IO mux M0 */
52701+	RK_MUXROUTE_GRF(0, RK_PA6, 2, 0x0314, WRITE_MASK_VAL(7, 6, 0)), /* PCIE30X2 IO mux M0 */
52702 	RK_MUXROUTE_GRF(2, RK_PD4, 4, 0x0314, WRITE_MASK_VAL(7, 6, 1)), /* PCIE30X2 IO mux M1 */
52703 	RK_MUXROUTE_GRF(4, RK_PC2, 4, 0x0314, WRITE_MASK_VAL(7, 6, 2)), /* PCIE30X2 IO mux M2 */
52704 };
52705@@ -1071,6 +1054,7 @@ static bool rockchip_get_mux_route(struct rockchip_pin_bank *bank, int pin,
52706 static int rockchip_get_mux(struct rockchip_pin_bank *bank, int pin)
52707 {
52708 	struct rockchip_pinctrl *info = bank->drvdata;
52709+	struct rockchip_pin_ctrl *ctrl = info->ctrl;
52710 	int iomux_num = (pin / 8);
52711 	struct regmap *regmap;
52712 	unsigned int val;
52713@@ -1088,8 +1072,12 @@ static int rockchip_get_mux(struct rockchip_pin_bank *bank, int pin)
52714 	if (bank->iomux[iomux_num].type & IOMUX_GPIO_ONLY)
52715 		return RK_FUNC_GPIO;
52716 
52717-	regmap = (bank->iomux[iomux_num].type & IOMUX_SOURCE_PMU)
52718-				? info->regmap_pmu : info->regmap_base;
52719+	if (bank->iomux[iomux_num].type & IOMUX_SOURCE_PMU)
52720+		regmap = info->regmap_pmu;
52721+	else if (bank->iomux[iomux_num].type & IOMUX_L_SOURCE_PMU)
52722+		regmap = (pin % 8 < 4) ? info->regmap_pmu : info->regmap_base;
52723+	else
52724+		regmap = info->regmap_base;
52725 
52726 	/* get basic quadrupel of mux registers and the correct reg inside */
52727 	mux_type = bank->iomux[iomux_num].type;
52728@@ -1112,6 +1100,27 @@ static int rockchip_get_mux(struct rockchip_pin_bank *bank, int pin)
52729 	if (bank->recalced_mask & BIT(pin))
52730 		rockchip_get_recalced_mux(bank, pin, &reg, &bit, &mask);
52731 
52732+	if (ctrl->type == RK3588) {
52733+		if (bank->bank_num == 0) {
52734+			if ((pin >= RK_PB4) && (pin <= RK_PD7)) {
52735+				u32 reg0 = 0;
52736+
52737+				reg0 = reg + 0x4000 - 0xC; /* PMU2_IOC_BASE */
52738+				ret = regmap_read(regmap, reg0, &val);
52739+				if (ret)
52740+					return ret;
52741+
52742+				if (!(val & BIT(8)))
52743+					return ((val >> bit) & mask);
52744+
52745+				reg = reg + 0x8000; /* BUS_IOC_BASE */
52746+				regmap = info->regmap_base;
52747+			}
52748+		} else if (bank->bank_num > 0) {
52749+			reg += 0x8000; /* BUS_IOC_BASE */
52750+		}
52751+	}
52752+
52753 	ret = regmap_read(regmap, reg, &val);
52754 	if (ret)
52755 		return ret;
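Editor's note: for RK3588 the branch added above decides which IOC block holds the mux field. Bank 0 pins RK_PB4..RK_PD7 first consult a shadow register in PMU2_IOC (reg + 0x4000 - 0xC); only when bit 8 there is set does the read fall through to BUS_IOC (reg + 0x8000, via the GRF regmap), while every bank above 0 is read from BUS_IOC directly. The sketch below compresses just the offset selection and assumes RK_PB4/RK_PD7 are pins 12 and 31 within the bank; the standalone form and names are illustrative, not the driver's API.

#include <stdbool.h>
#include <stdint.h>

static uint32_t rk3588_mux_reg(int bank_num, int pin, uint32_t reg,
			       bool routed_to_bus_ioc)
{
	if (bank_num > 0)
		return reg + 0x8000;			/* BUS_IOC */
	if (pin >= 12 && pin <= 31)			/* RK_PB4 .. RK_PD7 */
		return routed_to_bus_ioc ? reg + 0x8000	/* BUS_IOC  */
					 : reg + 0x4000 - 0xC; /* PMU2_IOC */
	return reg;					/* PMU1_IOC */
}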
52756@@ -1160,6 +1169,7 @@ static int rockchip_verify_mux(struct rockchip_pin_bank *bank,
52757 static int rockchip_set_mux(struct rockchip_pin_bank *bank, int pin, int mux)
52758 {
52759 	struct rockchip_pinctrl *info = bank->drvdata;
52760+	struct rockchip_pin_ctrl *ctrl = info->ctrl;
52761 	int iomux_num = (pin / 8);
52762 	struct regmap *regmap;
52763 	int reg, ret, mask, mux_type;
52764@@ -1176,8 +1186,12 @@ static int rockchip_set_mux(struct rockchip_pin_bank *bank, int pin, int mux)
52765 	dev_dbg(info->dev, "setting mux of GPIO%d-%d to %d\n",
52766 						bank->bank_num, pin, mux);
52767 
52768-	regmap = (bank->iomux[iomux_num].type & IOMUX_SOURCE_PMU)
52769-				? info->regmap_pmu : info->regmap_base;
52770+	if (bank->iomux[iomux_num].type & IOMUX_SOURCE_PMU)
52771+		regmap = info->regmap_pmu;
52772+	else if (bank->iomux[iomux_num].type & IOMUX_L_SOURCE_PMU)
52773+		regmap = (pin % 8 < 4) ? info->regmap_pmu : info->regmap_base;
52774+	else
52775+		regmap = info->regmap_base;
52776 
52777 	/* get basic quadrupel of mux registers and the correct reg inside */
52778 	mux_type = bank->iomux[iomux_num].type;
52779@@ -1200,6 +1214,46 @@ static int rockchip_set_mux(struct rockchip_pin_bank *bank, int pin, int mux)
52780 	if (bank->recalced_mask & BIT(pin))
52781 		rockchip_get_recalced_mux(bank, pin, &reg, &bit, &mask);
52782 
52783+	if (ctrl->type == RK3588) {
52784+		if (bank->bank_num == 0) {
52785+			if ((pin >= RK_PB4) && (pin <= RK_PD7)) {
52786+				if (mux < 8) {
52787+					reg += 0x4000 - 0xC; /* PMU2_IOC_BASE */
52788+					data = (mask << (bit + 16));
52789+					rmask = data | (data >> 16);
52790+					data |= (mux & mask) << bit;
52791+					ret = regmap_update_bits(regmap, reg, rmask, data);
52792+				} else {
52793+					u32 reg0 = 0;
52794+
52795+					reg0 = reg + 0x4000 - 0xC; /* PMU2_IOC_BASE */
52796+					data = (mask << (bit + 16));
52797+					rmask = data | (data >> 16);
52798+					data |= 8 << bit;
52799+					ret = regmap_update_bits(regmap, reg0, rmask, data);
52800+
52801+					reg0 = reg + 0x8000; /* BUS_IOC_BASE */
52802+					data = (mask << (bit + 16));
52803+					rmask = data | (data >> 16);
52804+					data |= mux << bit;
52805+					regmap = info->regmap_base;
52806+					ret |= regmap_update_bits(regmap, reg0, rmask, data);
52807+				}
52808+			} else {
52809+				data = (mask << (bit + 16));
52810+				rmask = data | (data >> 16);
52811+				data |= (mux & mask) << bit;
52812+				ret = regmap_update_bits(regmap, reg, rmask, data);
52813+			}
52814+			return ret;
52815+		} else if (bank->bank_num > 0) {
52816+			reg += 0x8000; /* BUS_IOC_BASE */
52817+		}
52818+	}
52819+
52820+	if (mux > mask)
52821+		return -EINVAL;
52822+
52823 	if (bank->route_mask & BIT(pin)) {
52824 		if (rockchip_get_mux_route(bank, pin, mux, &route_location,
52825 					   &route_reg, &route_val)) {
52826@@ -1221,10 +1275,20 @@ static int rockchip_set_mux(struct rockchip_pin_bank *bank, int pin, int mux)
52827 		}
52828 	}
52829 
52830-	data = (mask << (bit + 16));
52831-	rmask = data | (data >> 16);
52832-	data |= (mux & mask) << bit;
52833-	ret = regmap_update_bits(regmap, reg, rmask, data);
52834+	if (mux_type & IOMUX_WRITABLE_32BIT) {
52835+		ret = regmap_read(regmap, reg, &data);
52836+		if (ret)
52837+			return ret;
52838+
52839+		data &= ~(mask << bit);
52840+		data |= (mux & mask) << bit;
52841+		ret = regmap_write(regmap, reg, data);
52842+	} else {
52843+		data = (mask << (bit + 16));
52844+		rmask = data | (data >> 16);
52845+		data |= (mux & mask) << bit;
52846+		ret = regmap_update_bits(regmap, reg, rmask, data);
52847+	}
52848 
52849 	return ret;
52850 }
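Editor's note: the tail of rockchip_set_mux() now distinguishes IOMUX_WRITABLE_32BIT registers, which carry no write-enable bits and therefore need a read-modify-write, from the usual hiword-mask GRF registers that take a single masked write. A standalone sketch of the two update styles follows; reg_read()/reg_write() are placeholders for the regmap calls, not driver functions.

#include <stdint.h>

static uint32_t fake_reg;				/* pretend hardware register */
static uint32_t reg_read(void)   { return fake_reg; }
static void reg_write(uint32_t v) { fake_reg = v; }

/* plain 32-bit field: read-modify-write, no self-clearing enable bits */
static void set_mux_32bit(unsigned int bit, uint32_t mask, uint32_t mux)
{
	uint32_t v = reg_read();

	v &= ~(mask << bit);
	v |= (mux & mask) << bit;
	reg_write(v);
}

/* hiword-mask register: value in [15:0], write enables in [31:16], so a
 * single write updates only the enabled field */
static void set_mux_hiword(unsigned int bit, uint32_t mask, uint32_t mux)
{
	reg_write((mask << (bit + 16)) | ((mux & mask) << bit));
}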
52851@@ -1409,6 +1473,115 @@ static int rv1108_calc_schmitt_reg_and_bit(struct rockchip_pin_bank *bank,
52852 	return 0;
52853 }
52854 
52855+#define RV1126_PULL_PMU_OFFSET		0x40
52856+#define RV1126_PULL_GRF_GPIO1A0_OFFSET		0x10108
52857+#define RV1126_PULL_PINS_PER_REG	8
52858+#define RV1126_PULL_BITS_PER_PIN	2
52859+#define RV1126_PULL_BANK_STRIDE		16
52860+#define RV1126_GPIO_C4_D7(p)	(p >= 20 && p <= 31) /* GPIO0_C4 ~ GPIO0_D7 */
52861+
52862+static void rv1126_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank,
52863+					 int pin_num, struct regmap **regmap,
52864+					 int *reg, u8 *bit)
52865+{
52866+	struct rockchip_pinctrl *info = bank->drvdata;
52867+
52868+	/* The first 20 pins (GPIO0_A0 ~ GPIO0_C3) of the first bank are located in PMU */
52869+	if (bank->bank_num == 0) {
52870+		if (RV1126_GPIO_C4_D7(pin_num)) {
52871+			*regmap = info->regmap_base;
52872+			*reg = RV1126_PULL_GRF_GPIO1A0_OFFSET;
52873+			*reg -= (((31 - pin_num) / RV1126_PULL_PINS_PER_REG + 1) * 4);
52874+			*bit = pin_num % RV1126_PULL_PINS_PER_REG;
52875+			*bit *= RV1126_PULL_BITS_PER_PIN;
52876+			return;
52877+		}
52878+		*regmap = info->regmap_pmu;
52879+		*reg = RV1126_PULL_PMU_OFFSET;
52880+	} else {
52881+		*reg = RV1126_PULL_GRF_GPIO1A0_OFFSET;
52882+		*regmap = info->regmap_base;
52883+		*reg += (bank->bank_num - 1) * RV1126_PULL_BANK_STRIDE;
52884+	}
52885+
52886+	*reg += ((pin_num / RV1126_PULL_PINS_PER_REG) * 4);
52887+	*bit = (pin_num % RV1126_PULL_PINS_PER_REG);
52888+	*bit *= RV1126_PULL_BITS_PER_PIN;
52889+}
52890+
52891+#define RV1126_DRV_PMU_OFFSET		0x20
52892+#define RV1126_DRV_GRF_GPIO1A0_OFFSET		0x10090
52893+#define RV1126_DRV_BITS_PER_PIN		4
52894+#define RV1126_DRV_PINS_PER_REG		4
52895+#define RV1126_DRV_BANK_STRIDE		32
52896+
52897+static void rv1126_calc_drv_reg_and_bit(struct rockchip_pin_bank *bank,
52898+					int pin_num, struct regmap **regmap,
52899+					int *reg, u8 *bit)
52900+{
52901+	struct rockchip_pinctrl *info = bank->drvdata;
52902+
52903+	/* The first 20 pins (GPIO0_A0 ~ GPIO0_C3) of the first bank are located in PMU */
52904+	if (bank->bank_num == 0) {
52905+		if (RV1126_GPIO_C4_D7(pin_num)) {
52906+			*regmap = info->regmap_base;
52907+			*reg = RV1126_DRV_GRF_GPIO1A0_OFFSET;
52908+			*reg -= (((31 - pin_num) / RV1126_DRV_PINS_PER_REG + 1) * 4);
52909+			*reg -= 0x4;
52910+			*bit = pin_num % RV1126_DRV_PINS_PER_REG;
52911+			*bit *= RV1126_DRV_BITS_PER_PIN;
52912+			return;
52913+		}
52914+		*regmap = info->regmap_pmu;
52915+		*reg = RV1126_DRV_PMU_OFFSET;
52916+	} else {
52917+		*regmap = info->regmap_base;
52918+		*reg = RV1126_DRV_GRF_GPIO1A0_OFFSET;
52919+		*reg += (bank->bank_num - 1) * RV1126_DRV_BANK_STRIDE;
52920+	}
52921+
52922+	*reg += ((pin_num / RV1126_DRV_PINS_PER_REG) * 4);
52923+	*bit = pin_num % RV1126_DRV_PINS_PER_REG;
52924+	*bit *= RV1126_DRV_BITS_PER_PIN;
52925+}
52926+
52927+#define RV1126_SCHMITT_PMU_OFFSET		0x60
52928+#define RV1126_SCHMITT_GRF_GPIO1A0_OFFSET		0x10188
52929+#define RV1126_SCHMITT_BANK_STRIDE		16
52930+#define RV1126_SCHMITT_PINS_PER_GRF_REG		8
52931+#define RV1126_SCHMITT_PINS_PER_PMU_REG		8
52932+
52933+static int rv1126_calc_schmitt_reg_and_bit(struct rockchip_pin_bank *bank,
52934+					   int pin_num,
52935+					   struct regmap **regmap,
52936+					   int *reg, u8 *bit)
52937+{
52938+	struct rockchip_pinctrl *info = bank->drvdata;
52939+	int pins_per_reg;
52940+
52941+	if (bank->bank_num == 0) {
52942+		if (RV1126_GPIO_C4_D7(pin_num)) {
52943+			*regmap = info->regmap_base;
52944+			*reg = RV1126_SCHMITT_GRF_GPIO1A0_OFFSET;
52945+			*reg -= (((31 - pin_num) / RV1126_SCHMITT_PINS_PER_GRF_REG + 1) * 4);
52946+			*bit = pin_num % RV1126_SCHMITT_PINS_PER_GRF_REG;
52947+			return 0;
52948+		}
52949+		*regmap = info->regmap_pmu;
52950+		*reg = RV1126_SCHMITT_PMU_OFFSET;
52951+		pins_per_reg = RV1126_SCHMITT_PINS_PER_PMU_REG;
52952+	} else {
52953+		*regmap = info->regmap_base;
52954+		*reg = RV1126_SCHMITT_GRF_GPIO1A0_OFFSET;
52955+		pins_per_reg = RV1126_SCHMITT_PINS_PER_GRF_REG;
52956+		*reg += (bank->bank_num - 1) * RV1126_SCHMITT_BANK_STRIDE;
52957+	}
52958+	*reg += ((pin_num / pins_per_reg) * 4);
52959+	*bit = pin_num % pins_per_reg;
52960+
52961+	return 0;
52962+}
52963+
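Editor's note: a few worked examples of the rv1126 pull arithmetic above may help when cross-checking against the TRM. The asserts below only reproduce the math of the helpers and are not driver code.

#include <assert.h>

int main(void)
{
	/* GPIO1_B3 (bank 1, pin 11): GRF 0x10108 + (1-1)*16 + (11/8)*4, bit (11%8)*2 */
	assert(0x10108 + (1 - 1) * 16 + (11 / 8) * 4 == 0x1010c);
	assert((11 % 8) * 2 == 6);

	/* GPIO0_A5 (bank 0, pin 5): PMU 0x40 + (5/8)*4, bit (5%8)*2 */
	assert(0x40 + (5 / 8) * 4 == 0x40);
	assert((5 % 8) * 2 == 10);

	/* GPIO0_C6 (bank 0, pin 22): GRF special case, 0x10108 - ((31-22)/8 + 1)*4 */
	assert(0x10108 - ((31 - 22) / 8 + 1) * 4 == 0x10100);
	assert((22 % 8) * 2 == 12);
	return 0;
}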
52964 #define RK3308_SCHMITT_PINS_PER_REG		8
52965 #define RK3308_SCHMITT_BANK_STRIDE		16
52966 #define RK3308_SCHMITT_GRF_OFFSET		0x1a0
52967@@ -1429,24 +1602,129 @@ static int rk3308_calc_schmitt_reg_and_bit(struct rockchip_pin_bank *bank,
52968 	return 0;
52969 }
52970 
52971-#define RK2928_PULL_OFFSET		0x118
52972-#define RK2928_PULL_PINS_PER_REG	16
52973-#define RK2928_PULL_BANK_STRIDE		8
52974+#define RK1808_PULL_PMU_OFFSET		0x10
52975+#define RK1808_PULL_GRF_OFFSET		0x80
52976+#define RK1808_PULL_PINS_PER_REG	8
52977+#define RK1808_PULL_BITS_PER_PIN	2
52978+#define RK1808_PULL_BANK_STRIDE		16
52979 
52980-static void rk2928_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank,
52981-				    int pin_num, struct regmap **regmap,
52982-				    int *reg, u8 *bit)
52983+static void rk1808_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank,
52984+					 int pin_num, struct regmap **regmap,
52985+					 int *reg, u8 *bit)
52986 {
52987 	struct rockchip_pinctrl *info = bank->drvdata;
52988 
52989-	*regmap = info->regmap_base;
52990-	*reg = RK2928_PULL_OFFSET;
52991-	*reg += bank->bank_num * RK2928_PULL_BANK_STRIDE;
52992-	*reg += (pin_num / RK2928_PULL_PINS_PER_REG) * 4;
52993-
52994-	*bit = pin_num % RK2928_PULL_PINS_PER_REG;
52995-};
52996-
52997+	if (bank->bank_num == 0) {
52998+		*regmap = info->regmap_pmu;
52999+		*reg = RK1808_PULL_PMU_OFFSET;
53000+	} else {
53001+		*reg = RK1808_PULL_GRF_OFFSET;
53002+		*regmap = info->regmap_base;
53003+		*reg += (bank->bank_num - 1) * RK1808_PULL_BANK_STRIDE;
53004+	}
53005+
53006+	*reg += ((pin_num / RK1808_PULL_PINS_PER_REG) * 4);
53007+	*bit = (pin_num % RK1808_PULL_PINS_PER_REG);
53008+	*bit *= RK1808_PULL_BITS_PER_PIN;
53009+}
53010+
53011+#define RK1808_DRV_PMU_OFFSET		0x20
53012+#define RK1808_DRV_GRF_OFFSET		0x140
53013+#define RK1808_DRV_BITS_PER_PIN		2
53014+#define RK1808_DRV_PINS_PER_REG		8
53015+#define RK1808_DRV_BANK_STRIDE		16
53016+
53017+static void rk1808_calc_drv_reg_and_bit(struct rockchip_pin_bank *bank,
53018+					int pin_num,
53019+					struct regmap **regmap,
53020+					int *reg, u8 *bit)
53021+{
53022+	struct rockchip_pinctrl *info = bank->drvdata;
53023+
53024+	if (bank->bank_num == 0) {
53025+		*regmap = info->regmap_pmu;
53026+		*reg = RK1808_DRV_PMU_OFFSET;
53027+	} else {
53028+		*regmap = info->regmap_base;
53029+		*reg = RK1808_DRV_GRF_OFFSET;
53030+		*reg += (bank->bank_num - 1) * RK1808_DRV_BANK_STRIDE;
53031+	}
53032+
53033+	*reg += ((pin_num / RK1808_DRV_PINS_PER_REG) * 4);
53034+	*bit = pin_num % RK1808_DRV_PINS_PER_REG;
53035+	*bit *= RK1808_DRV_BITS_PER_PIN;
53036+}
53037+
53038+#define RK1808_SR_PMU_OFFSET		0x0030
53039+#define RK1808_SR_GRF_OFFSET		0x00c0
53040+#define RK1808_SR_BANK_STRIDE		16
53041+#define RK1808_SR_PINS_PER_REG		8
53042+
53043+static int rk1808_calc_slew_rate_reg_and_bit(struct rockchip_pin_bank *bank,
53044+					   int pin_num,
53045+					   struct regmap **regmap,
53046+					   int *reg, u8 *bit)
53047+{
53048+	struct rockchip_pinctrl *info = bank->drvdata;
53049+
53050+	if (bank->bank_num == 0) {
53051+		*regmap = info->regmap_pmu;
53052+		*reg = RK1808_SR_PMU_OFFSET;
53053+	} else {
53054+		*regmap = info->regmap_base;
53055+		*reg = RK1808_SR_GRF_OFFSET;
53056+		*reg += (bank->bank_num - 1) * RK1808_SR_BANK_STRIDE;
53057+	}
53058+	*reg += ((pin_num / RK1808_SR_PINS_PER_REG) * 4);
53059+	*bit = pin_num % RK1808_SR_PINS_PER_REG;
53060+
53061+	return 0;
53062+}
53063+
53064+#define RK1808_SCHMITT_PMU_OFFSET		0x0040
53065+#define RK1808_SCHMITT_GRF_OFFSET		0x0100
53066+#define RK1808_SCHMITT_BANK_STRIDE		16
53067+#define RK1808_SCHMITT_PINS_PER_REG		8
53068+
53069+static int rk1808_calc_schmitt_reg_and_bit(struct rockchip_pin_bank *bank,
53070+					   int pin_num,
53071+					   struct regmap **regmap,
53072+					   int *reg, u8 *bit)
53073+{
53074+	struct rockchip_pinctrl *info = bank->drvdata;
53075+
53076+	if (bank->bank_num == 0) {
53077+		*regmap = info->regmap_pmu;
53078+		*reg = RK1808_SCHMITT_PMU_OFFSET;
53079+	} else {
53080+		*regmap = info->regmap_base;
53081+		*reg = RK1808_SCHMITT_GRF_OFFSET;
53082+		*reg += (bank->bank_num - 1) * RK1808_SCHMITT_BANK_STRIDE;
53083+	}
53084+	*reg += ((pin_num / RK1808_SCHMITT_PINS_PER_REG) * 4);
53085+	*bit = pin_num % RK1808_SCHMITT_PINS_PER_REG;
53086+
53087+	return 0;
53088+}
53089+
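Editor's note: all four rk1808 helpers above (pull, drive, slew rate, schmitt) follow one shape: bank 0 lives in the PMU block at a small offset, every other bank in the GRF block at base + (bank - 1) * stride, and the register/bit then come from pins-per-register and bits-per-pin (1 bit per pin for the slew-rate and schmitt cases). A generic sketch of that shape, with illustrative parameter names:

#include <stdbool.h>
#include <stdint.h>

struct calc_params {
	uint32_t pmu_off, grf_off, stride;
	unsigned int pins_per_reg, bits_per_pin;
};

static void calc_reg_and_bit(const struct calc_params *p, int bank, int pin,
			     bool *use_pmu, uint32_t *reg, uint8_t *bit)
{
	*use_pmu = (bank == 0);
	*reg = *use_pmu ? p->pmu_off : p->grf_off + (bank - 1) * p->stride;
	*reg += (pin / p->pins_per_reg) * 4;
	*bit = (pin % p->pins_per_reg) * p->bits_per_pin;
}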
53090+#define RK2928_PULL_OFFSET		0x118
53091+#define RK2928_PULL_PINS_PER_REG	16
53092+#define RK2928_PULL_BANK_STRIDE		8
53093+
53094+static void rk2928_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank,
53095+				    int pin_num, struct regmap **regmap,
53096+				    int *reg, u8 *bit)
53097+{
53098+	struct rockchip_pinctrl *info = bank->drvdata;
53099+
53100+	*regmap = info->regmap_base;
53101+	*reg = RK2928_PULL_OFFSET;
53102+	*reg += bank->bank_num * RK2928_PULL_BANK_STRIDE;
53103+	*reg += (pin_num / RK2928_PULL_PINS_PER_REG) * 4;
53104+
53105+	*bit = pin_num % RK2928_PULL_PINS_PER_REG;
53106+};
53107+
53108 #define RK3128_PULL_OFFSET	0x118
53109 
53110 static void rk3128_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank,
53111@@ -1751,6 +2029,32 @@ static void rk3399_calc_drv_reg_and_bit(struct rockchip_pin_bank *bank,
53112 		*bit = (pin_num % 8) * 2;
53113 }
53114 
53115+#define RK3568_SR_PMU_OFFSET		0x60
53116+#define RK3568_SR_GRF_OFFSET		0x0180
53117+#define RK3568_SR_BANK_STRIDE		0x10
53118+#define RK3568_SR_PINS_PER_REG		16
53119+
53120+static int rk3568_calc_slew_rate_reg_and_bit(struct rockchip_pin_bank *bank,
53121+					     int pin_num,
53122+					     struct regmap **regmap,
53123+					     int *reg, u8 *bit)
53124+{
53125+	struct rockchip_pinctrl *info = bank->drvdata;
53126+
53127+	if (bank->bank_num == 0) {
53128+		*regmap = info->regmap_pmu;
53129+		*reg = RK3568_SR_PMU_OFFSET;
53130+	} else {
53131+		*regmap = info->regmap_base;
53132+		*reg = RK3568_SR_GRF_OFFSET;
53133+		*reg += (bank->bank_num - 1) * RK3568_SR_BANK_STRIDE;
53134+	}
53135+	*reg += ((pin_num / RK3568_SR_PINS_PER_REG) * 4);
53136+	*bit = pin_num % RK3568_SR_PINS_PER_REG;
53137+
53138+	return 0;
53139+}
53140+
53141 #define RK3568_PULL_PMU_OFFSET		0x20
53142 #define RK3568_PULL_GRF_OFFSET		0x80
53143 #define RK3568_PULL_BITS_PER_PIN	2
53144@@ -1811,6 +2115,190 @@ static void rk3568_calc_drv_reg_and_bit(struct rockchip_pin_bank *bank,
53145 		*bit = (pin_num % RK3568_DRV_PINS_PER_REG);
53146 		*bit *= RK3568_DRV_BITS_PER_PIN;
53147 	}
53148+
53149+	if (rockchip_get_cpu_version() == 0)
53150+		if ((bank->bank_num == 1 && (pin_num == 15 || pin_num == 23 || pin_num == 31)) ||
53151+		    ((bank->bank_num == 2 || bank->bank_num == 3 || bank->bank_num == 4) &&
53152+		     (pin_num == 7 || pin_num == 15 || pin_num == 23 || pin_num == 31)))
53153+			*bit -= RK3568_DRV_BITS_PER_PIN;
53154+}
53155+
53156+#define PMU1_IOC_REG		(0x0000)
53157+#define PMU2_IOC_REG		(0x4000)
53158+#define BUS_IOC_REG		(0x8000)
53159+#define VCCIO1_4_IOC_REG	(0x9000)
53160+#define VCCIO3_5_IOC_REG	(0xA000)
53161+#define VCCIO2_IOC_REG		(0xB000)
53162+#define VCCIO6_IOC_REG		(0xC000)
53163+#define EMMC_IOC_REG		(0xD000)
53164+
53165+static const u32 rk3588_ds_regs[][2] = {
53166+	{RK_GPIO0_A0, PMU1_IOC_REG + 0x0010},
53167+	{RK_GPIO0_A4, PMU1_IOC_REG + 0x0014},
53168+	{RK_GPIO0_B0, PMU1_IOC_REG + 0x0018},
53169+	{RK_GPIO0_B4, PMU2_IOC_REG + 0x0014},
53170+/*	{RK_GPIO0_C0, PMU2_IOC_REG + 0x0018},
53171+	{RK_GPIO0_C4, PMU2_IOC_REG + 0x001C},
53172+	{RK_GPIO0_D0, PMU2_IOC_REG + 0x0020},
53173+	{RK_GPIO0_D4, PMU2_IOC_REG + 0x0024},*/
53174+	{RK_GPIO1_A0, VCCIO1_4_IOC_REG + 0x0020},
53175+/*	{RK_GPIO1_A4, VCCIO1_4_IOC_REG + 0x0024},
53176+	{RK_GPIO1_B0, VCCIO1_4_IOC_REG + 0x0028},
53177+	{RK_GPIO1_B4, VCCIO1_4_IOC_REG + 0x002C},
53178+	{RK_GPIO1_C0, VCCIO1_4_IOC_REG + 0x0030},
53179+	{RK_GPIO1_C4, VCCIO1_4_IOC_REG + 0x0034},
53180+	{RK_GPIO1_D0, VCCIO1_4_IOC_REG + 0x0038},
53181+	{RK_GPIO1_D4, VCCIO1_4_IOC_REG + 0x003C},*/
53182+	{RK_GPIO2_A0, EMMC_IOC_REG + 0x0040},
53183+/*	{RK_GPIO2_A4, EMMC_IOC_REG + 0x0044},
53184+	{RK_GPIO2_B0, EMMC_IOC_REG + 0x0048},
53185+	{RK_GPIO2_B4, EMMC_IOC_REG + 0x004C},
53186+	{RK_GPIO2_C0, EMMC_IOC_REG + 0x0050},
53187+	{RK_GPIO2_C4, EMMC_IOC_REG + 0x0054},
53188+	{RK_GPIO2_D0, EMMC_IOC_REG + 0x0058},
53189+	{RK_GPIO2_D4, EMMC_IOC_REG + 0x005C},*/
53190+	{RK_GPIO3_A0, VCCIO3_5_IOC_REG + 0x0060},
53191+/*	{RK_GPIO3_A4, VCCIO3_5_IOC_REG + 0x0064},
53192+	{RK_GPIO3_B0, VCCIO3_5_IOC_REG + 0x0068},
53193+	{RK_GPIO3_B4, VCCIO3_5_IOC_REG + 0x006C},
53194+	{RK_GPIO3_C0, VCCIO3_5_IOC_REG + 0x0070},
53195+	{RK_GPIO3_C4, VCCIO3_5_IOC_REG + 0x0074},
53196+	{RK_GPIO3_D0, VCCIO3_5_IOC_REG + 0x0078},
53197+	{RK_GPIO3_D4, VCCIO3_5_IOC_REG + 0x007C},*/
53198+	{RK_GPIO4_A0, VCCIO6_IOC_REG + 0x0080},
53199+/*	{RK_GPIO4_A4, VCCIO6_IOC_REG + 0x0084},
53200+	{RK_GPIO4_B0, VCCIO6_IOC_REG + 0x0088},
53201+	{RK_GPIO4_B4, VCCIO6_IOC_REG + 0x008C},
53202+	{RK_GPIO4_C0, VCCIO6_IOC_REG + 0x0090},*/
53203+	{RK_GPIO4_C2, VCCIO3_5_IOC_REG + 0x0090},
53204+	{RK_GPIO4_D0, VCCIO2_IOC_REG + 0x0098},
53205+};
53206+
53207+static const u32 rk3588_p_regs[][2] = {
53208+	{RK_GPIO0_A0, PMU1_IOC_REG + 0x0020},
53209+	{RK_GPIO0_B0, PMU1_IOC_REG + 0x0024},
53210+	{RK_GPIO0_B5, PMU2_IOC_REG + 0x0028},
53211+	{RK_GPIO0_C0, PMU2_IOC_REG + 0x002C},
53212+	{RK_GPIO0_D0, PMU2_IOC_REG + 0x0030},
53213+	{RK_GPIO1_A0, VCCIO1_4_IOC_REG + 0x0110},
53214+/*	{RK_GPIO1_B0, VCCIO1_4_IOC_REG + 0x0114},
53215+	{RK_GPIO1_C0, VCCIO1_4_IOC_REG + 0x0118},
53216+	{RK_GPIO1_D0, VCCIO1_4_IOC_REG + 0x011C},*/
53217+	{RK_GPIO2_A0, EMMC_IOC_REG + 0x0120},
53218+/*	{RK_GPIO2_D0, EMMC_IOC_REG + 0x012C},*/
53219+	{RK_GPIO3_A0, VCCIO3_5_IOC_REG + 0x0130},
53220+/*	{RK_GPIO3_B0, VCCIO3_5_IOC_REG + 0x0134},
53221+	{RK_GPIO3_C0, VCCIO3_5_IOC_REG + 0x0138},
53222+	{RK_GPIO3_D0, VCCIO3_5_IOC_REG + 0x013C},*/
53223+	{RK_GPIO4_A0, VCCIO6_IOC_REG + 0x0140},
53224+/*	{RK_GPIO4_B0, VCCIO6_IOC_REG + 0x0144},
53225+	{RK_GPIO4_C0, VCCIO6_IOC_REG + 0x0148},*/
53226+	{RK_GPIO4_C2, VCCIO3_5_IOC_REG + 0x0148},
53227+	{RK_GPIO4_D0, VCCIO2_IOC_REG + 0x014C},
53228+};
53229+
53230+static const u32 rk3588_smt_regs[][2] = {
53231+	{RK_GPIO0_A0, PMU1_IOC_REG + 0x0030},
53232+	{RK_GPIO0_B0, PMU1_IOC_REG + 0x0034},
53233+	{RK_GPIO0_B5, PMU2_IOC_REG + 0x0040},
53234+	{RK_GPIO0_C0, PMU2_IOC_REG + 0x0044},
53235+	{RK_GPIO0_D0, PMU2_IOC_REG + 0x0048},
53236+	{RK_GPIO1_A0, VCCIO1_4_IOC_REG + 0x0210},
53237+/*	{RK_GPIO1_B0, VCCIO1_4_IOC_REG + 0x0214},
53238+	{RK_GPIO1_C0, VCCIO1_4_IOC_REG + 0x0218},
53239+	{RK_GPIO1_D0, VCCIO1_4_IOC_REG + 0x021C},*/
53240+	{RK_GPIO2_A0, EMMC_IOC_REG + 0x0220},
53241+	{RK_GPIO2_D0, EMMC_IOC_REG + 0x022C},
53242+	{RK_GPIO3_A0, VCCIO3_5_IOC_REG + 0x0230},
53243+/*	{RK_GPIO3_B0, VCCIO3_5_IOC_REG + 0x0234},
53244+	{RK_GPIO3_C0, VCCIO3_5_IOC_REG + 0x0238},
53245+	{RK_GPIO3_D0, VCCIO3_5_IOC_REG + 0x023C},*/
53246+	{RK_GPIO4_A0, VCCIO6_IOC_REG + 0x0240},
53247+/*	{RK_GPIO4_B0, VCCIO6_IOC_REG + 0x0244},
53248+	{RK_GPIO4_C0, VCCIO6_IOC_REG + 0x0248},*/
53249+	{RK_GPIO4_C2, VCCIO3_5_IOC_REG + 0x0248},
53250+	{RK_GPIO4_D0, VCCIO2_IOC_REG + 0x024C},
53251+};
53252+
53253+#define RK3588_PULL_BITS_PER_PIN		2
53254+#define RK3588_PULL_PINS_PER_REG		8
53255+
53256+static void rk3588_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank,
53257+					 int pin_num, struct regmap **regmap,
53258+					 int *reg, u8 *bit)
53259+{
53260+	struct rockchip_pinctrl *info = bank->drvdata;
53261+	u8 bank_num = bank->bank_num;
53262+	u32 pin = bank_num * 32 + pin_num;
53263+	int i;
53264+
53265+	for (i = ARRAY_SIZE(rk3588_p_regs) - 1; i >= 0; i--) {
53266+		if (pin >= rk3588_p_regs[i][0]) {
53267+			*reg = rk3588_p_regs[i][1];
53268+			break;
53269+		}
53270+		BUG_ON(i == 0);
53271+	}
53272+
53273+	*regmap = info->regmap_base;
53274+	*reg += ((pin - rk3588_p_regs[i][0]) / RK3588_PULL_PINS_PER_REG) * 4;
53275+	*bit = pin_num % RK3588_PULL_PINS_PER_REG;
53276+	*bit *= RK3588_PULL_BITS_PER_PIN;
53277+}
53278+
53279+#define RK3588_DRV_BITS_PER_PIN		4
53280+#define RK3588_DRV_PINS_PER_REG		4
53281+
53282+static void rk3588_calc_drv_reg_and_bit(struct rockchip_pin_bank *bank,
53283+					int pin_num, struct regmap **regmap,
53284+					int *reg, u8 *bit)
53285+{
53286+	struct rockchip_pinctrl *info = bank->drvdata;
53287+	u8 bank_num = bank->bank_num;
53288+	u32 pin = bank_num * 32 + pin_num;
53289+	int i;
53290+
53291+	for (i = ARRAY_SIZE(rk3588_ds_regs) - 1; i >= 0; i--) {
53292+		if (pin >= rk3588_ds_regs[i][0]) {
53293+			*reg = rk3588_ds_regs[i][1];
53294+			break;
53295+		}
53296+		BUG_ON(i == 0);
53297+	}
53298+
53299+	*regmap = info->regmap_base;
53300+	*reg += ((pin - rk3588_ds_regs[i][0]) / RK3588_DRV_PINS_PER_REG) * 4;
53301+	*bit = pin_num % RK3588_DRV_PINS_PER_REG;
53302+	*bit *= RK3588_DRV_BITS_PER_PIN;
53303+}
53304+
53305+#define RK3588_SMT_BITS_PER_PIN		1
53306+#define RK3588_SMT_PINS_PER_REG		8
53307+
53308+static int rk3588_calc_schmitt_reg_and_bit(struct rockchip_pin_bank *bank,
53309+					   int pin_num,
53310+					   struct regmap **regmap,
53311+					   int *reg, u8 *bit)
53312+{
53313+	struct rockchip_pinctrl *info = bank->drvdata;
53314+	u8 bank_num = bank->bank_num;
53315+	u32 pin = bank_num * 32 + pin_num;
53316+	int i;
53317+
53318+	for (i = ARRAY_SIZE(rk3588_smt_regs) - 1; i >= 0; i--) {
53319+		if (pin >= rk3588_smt_regs[i][0]) {
53320+			*reg = rk3588_smt_regs[i][1];
53321+			break;
53322+		}
53323+		BUG_ON(i == 0);
53324+	}
53325+
53326+	*regmap = info->regmap_base;
53327+	*reg += ((pin - rk3588_smt_regs[i][0]) / RK3588_SMT_PINS_PER_REG) * 4;
53328+	*bit = pin_num % RK3588_SMT_PINS_PER_REG;
53329+	*bit *= RK3588_SMT_BITS_PER_PIN;
53330+
53331+	return 0;
53332 }
53333 
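Editor's note: the three rk3588 helpers above share a per-range lookup: the global pin number (bank * 32 + pin_num) is matched against the last table row whose first pin is not greater than it, and the register then advances in steps of 4 within that range. A sketch of the lookup with one worked value, assuming the RK_GPIOx_yz constants encode bank * 32 + pin as the bank_num * 32 + pin_num computation above implies:

#include <stddef.h>
#include <stdint.h>

static uint32_t rk3588_lookup(const uint32_t (*tbl)[2], size_t n,
			      uint32_t pin, unsigned int pins_per_reg)
{
	size_t i;

	for (i = n - 1; ; i--) {
		if (pin >= tbl[i][0])
			break;
		if (i == 0)
			return 0;	/* row 0 starts at pin 0, so this is not reached */
	}

	return tbl[i][1] + ((pin - tbl[i][0]) / pins_per_reg) * 4;
}

/*
 * e.g. drive strength for GPIO1_C5 (pin 1*32 + 21 = 53): the matching ds row
 * is {RK_GPIO1_A0 = 32, VCCIO1_4_IOC_REG + 0x20 = 0x9020}, so
 * reg = 0x9020 + ((53 - 32) / 4) * 4 = 0x9034 and bit = (21 % 4) * 4 = 4.
 */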
53334 static int rockchip_perpin_drv_list[DRV_TYPE_MAX][8] = {
53335@@ -1913,7 +2401,11 @@ static int rockchip_set_drive_perpin(struct rockchip_pin_bank *bank,
53336 		bank->bank_num, pin_num, strength);
53337 
53338 	ctrl->drv_calc_reg(bank, pin_num, &regmap, &reg, &bit);
53339-	if (ctrl->type == RK3568) {
53340+	if (ctrl->type == RV1126 || ctrl->type == RK3588) {
53341+		rmask_bits = RV1126_DRV_BITS_PER_PIN;
53342+		ret = strength;
53343+		goto config;
53344+	} else if (ctrl->type == RK3568) {
53345 		rmask_bits = RK3568_DRV_BITS_PER_PIN;
53346 		ret = (1 << (strength + 1)) - 1;
53347 		goto config;
53348@@ -1994,8 +2486,35 @@ static int rockchip_set_drive_perpin(struct rockchip_pin_bank *bank,
53349 	data |= (ret << bit);
53350 
53351 	ret = regmap_update_bits(regmap, reg, rmask, data);
53352+	if (ret)
53353+		return ret;
53354 
53355-	return ret;
53356+	if (ctrl->type == RK3568 && rockchip_get_cpu_version() == 0) {
53357+		if (bank->bank_num == 1 && pin_num == 21)
53358+			reg = 0x0840;
53359+		else if (bank->bank_num == 2 && pin_num == 2)
53360+			reg = 0x0844;
53361+		else if (bank->bank_num == 2 && pin_num == 8)
53362+			reg = 0x0848;
53363+		else if (bank->bank_num == 3 && pin_num == 0)
53364+			reg = 0x084c;
53365+		else if (bank->bank_num == 3 && pin_num == 6)
53366+			reg = 0x0850;
53367+		else if (bank->bank_num == 4 && pin_num == 0)
53368+			reg = 0x0854;
53369+		else
53370+			return 0;
53371+
53372+		data = ((1 << rmask_bits) - 1) << 16;
53373+		rmask = data | (data >> 16);
53374+		data |= (1 << (strength + 1)) - 1;
53375+
53376+		ret = regmap_update_bits(regmap, reg, rmask, data);
53377+		if (ret)
53378+			return ret;
53379+	}
53380+
53381+	return 0;
53382 }
53383 
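Editor's note: in the hunk above, RV1126 and RK3588 write the requested strength verbatim, while RK3568 converts level n into a field with the n+1 low bits set before the write, and on silicon version 0 additionally mirrors that value into a handful of fixed GRF registers (0x0840..0x0854) for the listed pins. A quick check of the encoding, illustrative only; the valid range of the level itself is enforced elsewhere in the driver.

#include <assert.h>

int main(void)
{
	/* RK3568: strength level n is programmed as (1 << (n + 1)) - 1 */
	assert(((1 << (0 + 1)) - 1) == 0x01);
	assert(((1 << (2 + 1)) - 1) == 0x07);
	assert(((1 << (5 + 1)) - 1) == 0x3f);
	return 0;
}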
53384 static int rockchip_pull_list[PULL_TYPE_MAX][4] = {
53385@@ -2040,11 +2559,15 @@ static int rockchip_get_pull(struct rockchip_pin_bank *bank, int pin_num)
53386 				: PIN_CONFIG_BIAS_DISABLE;
53387 	case PX30:
53388 	case RV1108:
53389+	case RV1126:
53390+	case RK1808:
53391 	case RK3188:
53392 	case RK3288:
53393 	case RK3308:
53394 	case RK3368:
53395 	case RK3399:
53396+	case RK3568:
53397+	case RK3588:
53398 		pull_type = bank->pull_type[pin_num / 8];
53399 		data >>= bit;
53400 		data &= (1 << RK3188_PULL_BITS_PER_PIN) - 1;
53401@@ -2085,12 +2608,15 @@ static int rockchip_set_pull(struct rockchip_pin_bank *bank,
53402 		break;
53403 	case PX30:
53404 	case RV1108:
53405+	case RV1126:
53406+	case RK1808:
53407 	case RK3188:
53408 	case RK3288:
53409 	case RK3308:
53410 	case RK3368:
53411 	case RK3399:
53412 	case RK3568:
53413+	case RK3588:
53414 		pull_type = bank->pull_type[pin_num / 8];
53415 		ret = -EINVAL;
53416 		for (i = 0; i < ARRAY_SIZE(rockchip_pull_list[pull_type]);
53417@@ -2242,6 +2768,81 @@ static int rockchip_set_schmitt(struct rockchip_pin_bank *bank,
53418 	return regmap_update_bits(regmap, reg, rmask, data);
53419 }
53420 
53421+#define PX30_SLEW_RATE_PMU_OFFSET		0x30
53422+#define PX30_SLEW_RATE_GRF_OFFSET		0x90
53423+#define PX30_SLEW_RATE_PINS_PER_PMU_REG		16
53424+#define PX30_SLEW_RATE_BANK_STRIDE		16
53425+#define PX30_SLEW_RATE_PINS_PER_GRF_REG		8
53426+
53427+static int px30_calc_slew_rate_reg_and_bit(struct rockchip_pin_bank *bank,
53428+					   int pin_num,
53429+					   struct regmap **regmap,
53430+					   int *reg, u8 *bit)
53431+{
53432+	struct rockchip_pinctrl *info = bank->drvdata;
53433+	int pins_per_reg;
53434+
53435+	if (bank->bank_num == 0) {
53436+		*regmap = info->regmap_pmu;
53437+		*reg = PX30_SLEW_RATE_PMU_OFFSET;
53438+		pins_per_reg = PX30_SLEW_RATE_PINS_PER_PMU_REG;
53439+	} else {
53440+		*regmap = info->regmap_base;
53441+		*reg = PX30_SLEW_RATE_GRF_OFFSET;
53442+		pins_per_reg = PX30_SLEW_RATE_PINS_PER_GRF_REG;
53443+		*reg += (bank->bank_num - 1) * PX30_SLEW_RATE_BANK_STRIDE;
53444+	}
53445+	*reg += ((pin_num / pins_per_reg) * 4);
53446+	*bit = pin_num % pins_per_reg;
53447+
53448+	return 0;
53449+}
53450+
53451+static int rockchip_get_slew_rate(struct rockchip_pin_bank *bank, int pin_num)
53452+{
53453+	struct rockchip_pinctrl *info = bank->drvdata;
53454+	struct rockchip_pin_ctrl *ctrl = info->ctrl;
53455+	struct regmap *regmap;
53456+	int reg, ret;
53457+	u8 bit;
53458+	u32 data;
53459+
53460+	ret = ctrl->slew_rate_calc_reg(bank, pin_num, &regmap, &reg, &bit);
53461+	if (ret)
53462+		return ret;
53463+
53464+	ret = regmap_read(regmap, reg, &data);
53465+	if (ret)
53466+		return ret;
53467+
53468+	data >>= bit;
53469+	return data & 0x1;
53470+}
53471+
53472+static int rockchip_set_slew_rate(struct rockchip_pin_bank *bank,
53473+				  int pin_num, int speed)
53474+{
53475+	struct rockchip_pinctrl *info = bank->drvdata;
53476+	struct rockchip_pin_ctrl *ctrl = info->ctrl;
53477+	struct regmap *regmap;
53478+	int reg, ret;
53479+	u8 bit;
53480+	u32 data, rmask;
53481+
53482+	dev_dbg(info->dev, "setting slew rate of GPIO%d-%d to %d\n",
53483+		bank->bank_num, pin_num, speed);
53484+
53485+	ret = ctrl->slew_rate_calc_reg(bank, pin_num, &regmap, &reg, &bit);
53486+	if (ret)
53487+		return ret;
53488+
53489+	/* enable the write to the equivalent lower bits */
53490+	data = BIT(bit + 16) | (speed << bit);
53491+	rmask = BIT(bit + 16) | BIT(bit);
53492+
53493+	return regmap_update_bits(regmap, reg, rmask, data);
53494+}
53495+
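Editor's note: rockchip_set_slew_rate() above uses the same hiword convention as the mux path, but for a single bit: only BIT(bit + 16) is raised as the write enable and the value lands at BIT(bit). A one-line check of the resulting register values, illustrative only:

#include <assert.h>

#define BIT(n) (1u << (n))

int main(void)
{
	unsigned int bit = 5;

	assert((BIT(bit + 16) | (1u << bit)) == 0x00200020u);	/* speed = 1 */
	assert((BIT(bit + 16) | (0u << bit)) == 0x00200000u);	/* speed = 0 */
	return 0;
}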
53496 /*
53497  * Pinmux_ops handling
53498  */
53499@@ -2297,9 +2898,9 @@ static int rockchip_pmx_set(struct pinctrl_dev *pctldev, unsigned selector,
53500 			break;
53501 	}
53502 
53503-	if (ret) {
53504+	if (ret && cnt) {
53505 		/* revert the already done pin settings */
53506-		for (cnt--; cnt >= 0; cnt--)
53507+		for (cnt--; cnt >= 0 && !data[cnt].func; cnt--)
53508 			rockchip_set_mux(bank, pins[cnt] - bank->pin_base, 0);
53509 
53510 		return ret;
53511@@ -2308,86 +2909,11 @@ static int rockchip_pmx_set(struct pinctrl_dev *pctldev, unsigned selector,
53512 	return 0;
53513 }
53514 
53515-static int rockchip_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
53516-{
53517-	struct rockchip_pin_bank *bank = gpiochip_get_data(chip);
53518-	u32 data;
53519-	int ret;
53520-
53521-	ret = clk_enable(bank->clk);
53522-	if (ret < 0) {
53523-		dev_err(bank->drvdata->dev,
53524-			"failed to enable clock for bank %s\n", bank->name);
53525-		return ret;
53526-	}
53527-	data = readl_relaxed(bank->reg_base + GPIO_SWPORT_DDR);
53528-	clk_disable(bank->clk);
53529-
53530-	if (data & BIT(offset))
53531-		return GPIO_LINE_DIRECTION_OUT;
53532-
53533-	return GPIO_LINE_DIRECTION_IN;
53534-}
53535-
53536-/*
53537- * The calls to gpio_direction_output() and gpio_direction_input()
53538- * leads to this function call (via the pinctrl_gpio_direction_{input|output}()
53539- * function called from the gpiolib interface).
53540- */
53541-static int _rockchip_pmx_gpio_set_direction(struct gpio_chip *chip,
53542-					    int pin, bool input)
53543-{
53544-	struct rockchip_pin_bank *bank;
53545-	int ret;
53546-	unsigned long flags;
53547-	u32 data;
53548-
53549-	bank = gpiochip_get_data(chip);
53550-
53551-	ret = rockchip_set_mux(bank, pin, RK_FUNC_GPIO);
53552-	if (ret < 0)
53553-		return ret;
53554-
53555-	clk_enable(bank->clk);
53556-	raw_spin_lock_irqsave(&bank->slock, flags);
53557-
53558-	data = readl_relaxed(bank->reg_base + GPIO_SWPORT_DDR);
53559-	/* set bit to 1 for output, 0 for input */
53560-	if (!input)
53561-		data |= BIT(pin);
53562-	else
53563-		data &= ~BIT(pin);
53564-	writel_relaxed(data, bank->reg_base + GPIO_SWPORT_DDR);
53565-
53566-	raw_spin_unlock_irqrestore(&bank->slock, flags);
53567-	clk_disable(bank->clk);
53568-
53569-	return 0;
53570-}
53571-
53572-static int rockchip_pmx_gpio_set_direction(struct pinctrl_dev *pctldev,
53573-					      struct pinctrl_gpio_range *range,
53574-					      unsigned offset, bool input)
53575-{
53576-	struct rockchip_pinctrl *info = pinctrl_dev_get_drvdata(pctldev);
53577-	struct gpio_chip *chip;
53578-	int pin;
53579-
53580-	chip = range->gc;
53581-	pin = offset - chip->base;
53582-	dev_dbg(info->dev, "gpio_direction for pin %u as %s-%d to %s\n",
53583-		 offset, range->name, pin, input ? "input" : "output");
53584-
53585-	return _rockchip_pmx_gpio_set_direction(chip, offset - chip->base,
53586-						input);
53587-}
53588-
53589 static const struct pinmux_ops rockchip_pmx_ops = {
53590 	.get_functions_count	= rockchip_pmx_get_funcs_count,
53591 	.get_function_name	= rockchip_pmx_get_func_name,
53592 	.get_function_groups	= rockchip_pmx_get_groups,
53593 	.set_mux		= rockchip_pmx_set,
53594-	.gpio_set_direction	= rockchip_pmx_gpio_set_direction,
53595 };
53596 
53597 /*
53598@@ -2406,20 +2932,37 @@ static bool rockchip_pinconf_pull_valid(struct rockchip_pin_ctrl *ctrl,
53599 		return pull ? false : true;
53600 	case PX30:
53601 	case RV1108:
53602+	case RV1126:
53603+	case RK1808:
53604 	case RK3188:
53605 	case RK3288:
53606 	case RK3308:
53607 	case RK3368:
53608 	case RK3399:
53609 	case RK3568:
53610+	case RK3588:
53611 		return (pull != PIN_CONFIG_BIAS_PULL_PIN_DEFAULT);
53612 	}
53613 
53614 	return false;
53615 }
53616 
53617-static void rockchip_gpio_set(struct gpio_chip *gc, unsigned offset, int value);
53618-static int rockchip_gpio_get(struct gpio_chip *gc, unsigned offset);
53619+static int rockchip_pinconf_defer_output(struct rockchip_pin_bank *bank,
53620+					 unsigned int pin, u32 arg)
53621+{
53622+	struct rockchip_pin_output_deferred *cfg;
53623+
53624+	cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
53625+	if (!cfg)
53626+		return -ENOMEM;
53627+
53628+	cfg->pin = pin;
53629+	cfg->arg = arg;
53630+
53631+	list_add_tail(&cfg->head, &bank->deferred_output);
53632+
53633+	return 0;
53634+}
53635 
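Editor's note: rockchip_pinconf_defer_output() above only queues the request; the side that drains bank->deferred_output lives in the separate gpio-rockchip driver and is not part of this hunk. The sketch below is a hedged guess at what that consumer plausibly looks like; only the list/lock fields and the cfg layout come from this patch, the function name and exact action are assumptions.

/* Sketch only: relies on this driver's struct rockchip_pin_bank and
 * struct rockchip_pin_output_deferred plus the standard kernel list/mutex APIs. */
static void rockchip_apply_deferred_output(struct rockchip_pin_bank *bank)
{
	struct rockchip_pin_output_deferred *cfg, *tmp;

	mutex_lock(&bank->deferred_lock);
	list_for_each_entry_safe(cfg, tmp, &bank->deferred_output, head) {
		/* assumed consumer action: drive the pin at the saved level */
		bank->gpio_chip.direction_output(&bank->gpio_chip,
						 cfg->pin, cfg->arg);
		list_del(&cfg->head);
		kfree(cfg);
	}
	mutex_unlock(&bank->deferred_lock);
}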
53636 /* set the pin config settings for a specified pin */
53637 static int rockchip_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
53638@@ -2427,6 +2970,7 @@ static int rockchip_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
53639 {
53640 	struct rockchip_pinctrl *info = pinctrl_dev_get_drvdata(pctldev);
53641 	struct rockchip_pin_bank *bank = pin_to_bank(info, pin);
53642+	struct gpio_chip *gpio = &bank->gpio_chip;
53643 	enum pin_config_param param;
53644 	u32 arg;
53645 	int i;
53646@@ -2459,16 +3003,35 @@ static int rockchip_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
53647 				return rc;
53648 			break;
53649 		case PIN_CONFIG_OUTPUT:
53650-			rockchip_gpio_set(&bank->gpio_chip,
53651-					  pin - bank->pin_base, arg);
53652-			rc = _rockchip_pmx_gpio_set_direction(&bank->gpio_chip,
53653-					  pin - bank->pin_base, false);
53654-			if (rc)
53655-				return rc;
53656-			break;
53657-		case PIN_CONFIG_DRIVE_STRENGTH:
53658-			/* rk3288 is the first with per-pin drive-strength */
53659-			if (!info->ctrl->drv_calc_reg)
53660+			rc = rockchip_set_mux(bank, pin - bank->pin_base,
53661+					      RK_FUNC_GPIO);
53662+			if (rc != RK_FUNC_GPIO)
53663+				return -EINVAL;
53664+
53665+			/*
53666+			 * Check for gpio driver not being probed yet.
53667+			 * The lock makes sure that either gpio-probe has completed
53668+			 * or the gpio driver hasn't probed yet.
53669+			 */
53670+			mutex_lock(&bank->deferred_lock);
53671+			if (!gpio || !gpio->direction_output) {
53672+				rc = rockchip_pinconf_defer_output(bank, pin - bank->pin_base, arg);
53673+				mutex_unlock(&bank->deferred_lock);
53674+				if (rc)
53675+					return rc;
53676+
53677+				break;
53678+			}
53679+			mutex_unlock(&bank->deferred_lock);
53680+
53681+			rc = gpio->direction_output(gpio, pin - bank->pin_base,
53682+						    arg);
53683+			if (rc)
53684+				return rc;
53685+			break;
53686+		case PIN_CONFIG_DRIVE_STRENGTH:
53687+			/* rk3288 is the first with per-pin drive-strength */
53688+			if (!info->ctrl->drv_calc_reg)
53689 				return -ENOTSUPP;
53690 
53691 			rc = rockchip_set_drive_perpin(bank,
53692@@ -2485,9 +3048,17 @@ static int rockchip_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
53693 			if (rc < 0)
53694 				return rc;
53695 			break;
53696+		case PIN_CONFIG_SLEW_RATE:
53697+			if (!info->ctrl->slew_rate_calc_reg)
53698+				return -ENOTSUPP;
53699+
53700+			rc = rockchip_set_slew_rate(bank,
53701+						    pin - bank->pin_base, arg);
53702+			if (rc < 0)
53703+				return rc;
53704+			break;
53705 		default:
53706 			return -ENOTSUPP;
53707-			break;
53708 		}
53709 	} /* for each config */
53710 
53711@@ -2500,6 +3071,7 @@ static int rockchip_pinconf_get(struct pinctrl_dev *pctldev, unsigned int pin,
53712 {
53713 	struct rockchip_pinctrl *info = pinctrl_dev_get_drvdata(pctldev);
53714 	struct rockchip_pin_bank *bank = pin_to_bank(info, pin);
53715+	struct gpio_chip *gpio = &bank->gpio_chip;
53716 	enum pin_config_param param = pinconf_to_config_param(*config);
53717 	u16 arg;
53718 	int rc;
53719@@ -2528,7 +3100,12 @@ static int rockchip_pinconf_get(struct pinctrl_dev *pctldev, unsigned int pin,
53720 		if (rc != RK_FUNC_GPIO)
53721 			return -EINVAL;
53722 
53723-		rc = rockchip_gpio_get(&bank->gpio_chip, pin - bank->pin_base);
53724+		if (!gpio || !gpio->get) {
53725+			arg = 0;
53726+			break;
53727+		}
53728+
53729+		rc = gpio->get(gpio, pin - bank->pin_base);
53730 		if (rc < 0)
53731 			return rc;
53732 
53733@@ -2553,11 +3130,20 @@ static int rockchip_pinconf_get(struct pinctrl_dev *pctldev, unsigned int pin,
53734 		if (rc < 0)
53735 			return rc;
53736 
53737+		arg = rc;
53738+		break;
53739+	case PIN_CONFIG_SLEW_RATE:
53740+		if (!info->ctrl->slew_rate_calc_reg)
53741+			return -ENOTSUPP;
53742+
53743+		rc = rockchip_get_slew_rate(bank, pin - bank->pin_base);
53744+		if (rc < 0)
53745+			return rc;
53746+
53747 		arg = rc;
53748 		break;
53749 	default:
53750 		return -ENOTSUPP;
53751-		break;
53752 	}
53753 
53754 	*config = pinconf_to_config_packed(param, arg);
53755@@ -2650,7 +3236,6 @@ static int rockchip_pinctrl_parse_groups(struct device_node *np,
53756 		np_config = of_find_node_by_phandle(be32_to_cpup(phandle));
53757 		ret = pinconf_generic_parse_dt_config(np_config, NULL,
53758 				&grp->data[j].configs, &grp->data[j].nconfigs);
53759-		of_node_put(np_config);
53760 		if (ret)
53761 			return ret;
53762 	}
53763@@ -2767,7 +3352,7 @@ static int rockchip_pinctrl_register(struct platform_device *pdev,
53764 	ctrldesc->npins = info->ctrl->nr_pins;
53765 
53766 	pdesc = pindesc;
53767-	for (bank = 0 , k = 0; bank < info->ctrl->nr_banks; bank++) {
53768+	for (bank = 0, k = 0; bank < info->ctrl->nr_banks; bank++) {
53769 		pin_bank = &info->ctrl->pin_banks[bank];
53770 		for (pin = 0; pin < pin_bank->nr_pins; pin++, k++) {
53771 			pdesc->number = k;
53772@@ -2775,6 +3360,9 @@ static int rockchip_pinctrl_register(struct platform_device *pdev,
53773 						pin_bank->name, pin);
53774 			pdesc++;
53775 		}
53776+
53777+		INIT_LIST_HEAD(&pin_bank->deferred_output);
53778+		mutex_init(&pin_bank->deferred_lock);
53779 	}
53780 
53781 	ret = rockchip_pinctrl_parse_dt(pdev, info);
53782@@ -2787,552 +3375,9 @@ static int rockchip_pinctrl_register(struct platform_device *pdev,
53783 		return PTR_ERR(info->pctl_dev);
53784 	}
53785 
53786-	for (bank = 0; bank < info->ctrl->nr_banks; ++bank) {
53787-		pin_bank = &info->ctrl->pin_banks[bank];
53788-		pin_bank->grange.name = pin_bank->name;
53789-		pin_bank->grange.id = bank;
53790-		pin_bank->grange.pin_base = pin_bank->pin_base;
53791-		pin_bank->grange.base = pin_bank->gpio_chip.base;
53792-		pin_bank->grange.npins = pin_bank->gpio_chip.ngpio;
53793-		pin_bank->grange.gc = &pin_bank->gpio_chip;
53794-		pinctrl_add_gpio_range(info->pctl_dev, &pin_bank->grange);
53795-	}
53796-
53797-	return 0;
53798-}
53799-
53800-/*
53801- * GPIO handling
53802- */
53803-
53804-static void rockchip_gpio_set(struct gpio_chip *gc, unsigned offset, int value)
53805-{
53806-	struct rockchip_pin_bank *bank = gpiochip_get_data(gc);
53807-	void __iomem *reg = bank->reg_base + GPIO_SWPORT_DR;
53808-	unsigned long flags;
53809-	u32 data;
53810-
53811-	clk_enable(bank->clk);
53812-	raw_spin_lock_irqsave(&bank->slock, flags);
53813-
53814-	data = readl(reg);
53815-	data &= ~BIT(offset);
53816-	if (value)
53817-		data |= BIT(offset);
53818-	writel(data, reg);
53819-
53820-	raw_spin_unlock_irqrestore(&bank->slock, flags);
53821-	clk_disable(bank->clk);
53822-}
53823-
53824-/*
53825- * Returns the level of the pin for input direction and setting of the DR
53826- * register for output gpios.
53827- */
53828-static int rockchip_gpio_get(struct gpio_chip *gc, unsigned offset)
53829-{
53830-	struct rockchip_pin_bank *bank = gpiochip_get_data(gc);
53831-	u32 data;
53832-
53833-	clk_enable(bank->clk);
53834-	data = readl(bank->reg_base + GPIO_EXT_PORT);
53835-	clk_disable(bank->clk);
53836-	data >>= offset;
53837-	data &= 1;
53838-	return data;
53839-}
53840-
53841-/*
53842- * gpiolib gpio_direction_input callback function. The setting of the pin
53843- * mux function as 'gpio input' will be handled by the pinctrl subsystem
53844- * interface.
53845- */
53846-static int rockchip_gpio_direction_input(struct gpio_chip *gc, unsigned offset)
53847-{
53848-	return pinctrl_gpio_direction_input(gc->base + offset);
53849-}
53850-
53851-/*
53852- * gpiolib gpio_direction_output callback function. The setting of the pin
53853- * mux function as 'gpio output' will be handled by the pinctrl subsystem
53854- * interface.
53855- */
53856-static int rockchip_gpio_direction_output(struct gpio_chip *gc,
53857-					  unsigned offset, int value)
53858-{
53859-	rockchip_gpio_set(gc, offset, value);
53860-	return pinctrl_gpio_direction_output(gc->base + offset);
53861-}
53862-
53863-static void rockchip_gpio_set_debounce(struct gpio_chip *gc,
53864-				       unsigned int offset, bool enable)
53865-{
53866-	struct rockchip_pin_bank *bank = gpiochip_get_data(gc);
53867-	void __iomem *reg = bank->reg_base + GPIO_DEBOUNCE;
53868-	unsigned long flags;
53869-	u32 data;
53870-
53871-	clk_enable(bank->clk);
53872-	raw_spin_lock_irqsave(&bank->slock, flags);
53873-
53874-	data = readl(reg);
53875-	if (enable)
53876-		data |= BIT(offset);
53877-	else
53878-		data &= ~BIT(offset);
53879-	writel(data, reg);
53880-
53881-	raw_spin_unlock_irqrestore(&bank->slock, flags);
53882-	clk_disable(bank->clk);
53883-}
53884-
53885-/*
53886- * gpiolib set_config callback function. The setting of the pin
53887- * mux function as 'gpio output' will be handled by the pinctrl subsystem
53888- * interface.
53889- */
53890-static int rockchip_gpio_set_config(struct gpio_chip *gc, unsigned int offset,
53891-				  unsigned long config)
53892-{
53893-	enum pin_config_param param = pinconf_to_config_param(config);
53894-
53895-	switch (param) {
53896-	case PIN_CONFIG_INPUT_DEBOUNCE:
53897-		rockchip_gpio_set_debounce(gc, offset, true);
53898-		/*
53899-		 * Rockchip's gpio could only support up to one period
53900-		 * of the debounce clock(pclk), which is far away from
53901-		 * satisftying the requirement, as pclk is usually near
53902-		 * 100MHz shared by all peripherals. So the fact is it
53903-		 * has crippled debounce capability could only be useful
53904-		 * to prevent any spurious glitches from waking up the system
53905-		 * if the gpio is conguired as wakeup interrupt source. Let's
53906-		 * still return -ENOTSUPP as before, to make sure the caller
53907-		 * of gpiod_set_debounce won't change its behaviour.
53908-		 */
53909-		return -ENOTSUPP;
53910-	default:
53911-		return -ENOTSUPP;
53912-	}
53913-}
53914-
53915-/*
53916- * gpiolib gpio_to_irq callback function. Creates a mapping between a GPIO pin
53917- * and a virtual IRQ, if not already present.
53918- */
53919-static int rockchip_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
53920-{
53921-	struct rockchip_pin_bank *bank = gpiochip_get_data(gc);
53922-	unsigned int virq;
53923-
53924-	if (!bank->domain)
53925-		return -ENXIO;
53926-
53927-	clk_enable(bank->clk);
53928-	virq = irq_create_mapping(bank->domain, offset);
53929-	clk_disable(bank->clk);
53930-
53931-	return (virq) ? : -ENXIO;
53932-}
53933-
53934-static const struct gpio_chip rockchip_gpiolib_chip = {
53935-	.request = gpiochip_generic_request,
53936-	.free = gpiochip_generic_free,
53937-	.set = rockchip_gpio_set,
53938-	.get = rockchip_gpio_get,
53939-	.get_direction	= rockchip_gpio_get_direction,
53940-	.direction_input = rockchip_gpio_direction_input,
53941-	.direction_output = rockchip_gpio_direction_output,
53942-	.set_config = rockchip_gpio_set_config,
53943-	.to_irq = rockchip_gpio_to_irq,
53944-	.owner = THIS_MODULE,
53945-};
53946-
53947-/*
53948- * Interrupt handling
53949- */
53950-
53951-static void rockchip_irq_demux(struct irq_desc *desc)
53952-{
53953-	struct irq_chip *chip = irq_desc_get_chip(desc);
53954-	struct rockchip_pin_bank *bank = irq_desc_get_handler_data(desc);
53955-	u32 pend;
53956-
53957-	dev_dbg(bank->drvdata->dev, "got irq for bank %s\n", bank->name);
53958-
53959-	chained_irq_enter(chip, desc);
53960-
53961-	pend = readl_relaxed(bank->reg_base + GPIO_INT_STATUS);
53962-
53963-	while (pend) {
53964-		unsigned int irq, virq;
53965-
53966-		irq = __ffs(pend);
53967-		pend &= ~BIT(irq);
53968-		virq = irq_find_mapping(bank->domain, irq);
53969-
53970-		if (!virq) {
53971-			dev_err(bank->drvdata->dev, "unmapped irq %d\n", irq);
53972-			continue;
53973-		}
53974-
53975-		dev_dbg(bank->drvdata->dev, "handling irq %d\n", irq);
53976-
53977-		/*
53978-		 * Triggering IRQ on both rising and falling edge
53979-		 * needs manual intervention.
53980-		 */
53981-		if (bank->toggle_edge_mode & BIT(irq)) {
53982-			u32 data, data_old, polarity;
53983-			unsigned long flags;
53984-
53985-			data = readl_relaxed(bank->reg_base + GPIO_EXT_PORT);
53986-			do {
53987-				raw_spin_lock_irqsave(&bank->slock, flags);
53988-
53989-				polarity = readl_relaxed(bank->reg_base +
53990-							 GPIO_INT_POLARITY);
53991-				if (data & BIT(irq))
53992-					polarity &= ~BIT(irq);
53993-				else
53994-					polarity |= BIT(irq);
53995-				writel(polarity,
53996-				       bank->reg_base + GPIO_INT_POLARITY);
53997-
53998-				raw_spin_unlock_irqrestore(&bank->slock, flags);
53999-
54000-				data_old = data;
54001-				data = readl_relaxed(bank->reg_base +
54002-						     GPIO_EXT_PORT);
54003-			} while ((data & BIT(irq)) != (data_old & BIT(irq)));
54004-		}
54005-
54006-		generic_handle_irq(virq);
54007-	}
54008-
54009-	chained_irq_exit(chip, desc);
54010-}
54011-
54012-static int rockchip_irq_set_type(struct irq_data *d, unsigned int type)
54013-{
54014-	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
54015-	struct rockchip_pin_bank *bank = gc->private;
54016-	u32 mask = BIT(d->hwirq);
54017-	u32 polarity;
54018-	u32 level;
54019-	u32 data;
54020-	unsigned long flags;
54021-	int ret;
54022-
54023-	/* make sure the pin is configured as gpio input */
54024-	ret = rockchip_set_mux(bank, d->hwirq, RK_FUNC_GPIO);
54025-	if (ret < 0)
54026-		return ret;
54027-
54028-	clk_enable(bank->clk);
54029-	raw_spin_lock_irqsave(&bank->slock, flags);
54030-
54031-	data = readl_relaxed(bank->reg_base + GPIO_SWPORT_DDR);
54032-	data &= ~mask;
54033-	writel_relaxed(data, bank->reg_base + GPIO_SWPORT_DDR);
54034-
54035-	raw_spin_unlock_irqrestore(&bank->slock, flags);
54036-
54037-	if (type & IRQ_TYPE_EDGE_BOTH)
54038-		irq_set_handler_locked(d, handle_edge_irq);
54039-	else
54040-		irq_set_handler_locked(d, handle_level_irq);
54041-
54042-	raw_spin_lock_irqsave(&bank->slock, flags);
54043-	irq_gc_lock(gc);
54044-
54045-	level = readl_relaxed(gc->reg_base + GPIO_INTTYPE_LEVEL);
54046-	polarity = readl_relaxed(gc->reg_base + GPIO_INT_POLARITY);
54047-
54048-	switch (type) {
54049-	case IRQ_TYPE_EDGE_BOTH:
54050-		bank->toggle_edge_mode |= mask;
54051-		level |= mask;
54052-
54053-		/*
54054-		 * Determine gpio state. If 1 next interrupt should be falling
54055-		 * otherwise rising.
54056-		 */
54057-		data = readl(bank->reg_base + GPIO_EXT_PORT);
54058-		if (data & mask)
54059-			polarity &= ~mask;
54060-		else
54061-			polarity |= mask;
54062-		break;
54063-	case IRQ_TYPE_EDGE_RISING:
54064-		bank->toggle_edge_mode &= ~mask;
54065-		level |= mask;
54066-		polarity |= mask;
54067-		break;
54068-	case IRQ_TYPE_EDGE_FALLING:
54069-		bank->toggle_edge_mode &= ~mask;
54070-		level |= mask;
54071-		polarity &= ~mask;
54072-		break;
54073-	case IRQ_TYPE_LEVEL_HIGH:
54074-		bank->toggle_edge_mode &= ~mask;
54075-		level &= ~mask;
54076-		polarity |= mask;
54077-		break;
54078-	case IRQ_TYPE_LEVEL_LOW:
54079-		bank->toggle_edge_mode &= ~mask;
54080-		level &= ~mask;
54081-		polarity &= ~mask;
54082-		break;
54083-	default:
54084-		irq_gc_unlock(gc);
54085-		raw_spin_unlock_irqrestore(&bank->slock, flags);
54086-		clk_disable(bank->clk);
54087-		return -EINVAL;
54088-	}
54089-
54090-	writel_relaxed(level, gc->reg_base + GPIO_INTTYPE_LEVEL);
54091-	writel_relaxed(polarity, gc->reg_base + GPIO_INT_POLARITY);
54092-
54093-	irq_gc_unlock(gc);
54094-	raw_spin_unlock_irqrestore(&bank->slock, flags);
54095-	clk_disable(bank->clk);
54096-
54097 	return 0;
54098 }
54099 
54100-static void rockchip_irq_suspend(struct irq_data *d)
54101-{
54102-	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
54103-	struct rockchip_pin_bank *bank = gc->private;
54104-
54105-	clk_enable(bank->clk);
54106-	bank->saved_masks = irq_reg_readl(gc, GPIO_INTMASK);
54107-	irq_reg_writel(gc, ~gc->wake_active, GPIO_INTMASK);
54108-	clk_disable(bank->clk);
54109-}
54110-
54111-static void rockchip_irq_resume(struct irq_data *d)
54112-{
54113-	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
54114-	struct rockchip_pin_bank *bank = gc->private;
54115-
54116-	clk_enable(bank->clk);
54117-	irq_reg_writel(gc, bank->saved_masks, GPIO_INTMASK);
54118-	clk_disable(bank->clk);
54119-}
54120-
54121-static void rockchip_irq_enable(struct irq_data *d)
54122-{
54123-	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
54124-	struct rockchip_pin_bank *bank = gc->private;
54125-
54126-	clk_enable(bank->clk);
54127-	irq_gc_mask_clr_bit(d);
54128-}
54129-
54130-static void rockchip_irq_disable(struct irq_data *d)
54131-{
54132-	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
54133-	struct rockchip_pin_bank *bank = gc->private;
54134-
54135-	irq_gc_mask_set_bit(d);
54136-	clk_disable(bank->clk);
54137-}
54138-
54139-static int rockchip_interrupts_register(struct platform_device *pdev,
54140-						struct rockchip_pinctrl *info)
54141-{
54142-	struct rockchip_pin_ctrl *ctrl = info->ctrl;
54143-	struct rockchip_pin_bank *bank = ctrl->pin_banks;
54144-	unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
54145-	struct irq_chip_generic *gc;
54146-	int ret;
54147-	int i;
54148-
54149-	for (i = 0; i < ctrl->nr_banks; ++i, ++bank) {
54150-		if (!bank->valid) {
54151-			dev_warn(&pdev->dev, "bank %s is not valid\n",
54152-				 bank->name);
54153-			continue;
54154-		}
54155-
54156-		ret = clk_enable(bank->clk);
54157-		if (ret) {
54158-			dev_err(&pdev->dev, "failed to enable clock for bank %s\n",
54159-				bank->name);
54160-			continue;
54161-		}
54162-
54163-		bank->domain = irq_domain_add_linear(bank->of_node, 32,
54164-						&irq_generic_chip_ops, NULL);
54165-		if (!bank->domain) {
54166-			dev_warn(&pdev->dev, "could not initialize irq domain for bank %s\n",
54167-				 bank->name);
54168-			clk_disable(bank->clk);
54169-			continue;
54170-		}
54171-
54172-		ret = irq_alloc_domain_generic_chips(bank->domain, 32, 1,
54173-					 "rockchip_gpio_irq", handle_level_irq,
54174-					 clr, 0, 0);
54175-		if (ret) {
54176-			dev_err(&pdev->dev, "could not alloc generic chips for bank %s\n",
54177-				bank->name);
54178-			irq_domain_remove(bank->domain);
54179-			clk_disable(bank->clk);
54180-			continue;
54181-		}
54182-
54183-		gc = irq_get_domain_generic_chip(bank->domain, 0);
54184-		gc->reg_base = bank->reg_base;
54185-		gc->private = bank;
54186-		gc->chip_types[0].regs.mask = GPIO_INTMASK;
54187-		gc->chip_types[0].regs.ack = GPIO_PORTS_EOI;
54188-		gc->chip_types[0].chip.irq_ack = irq_gc_ack_set_bit;
54189-		gc->chip_types[0].chip.irq_mask = irq_gc_mask_set_bit;
54190-		gc->chip_types[0].chip.irq_unmask = irq_gc_mask_clr_bit;
54191-		gc->chip_types[0].chip.irq_enable = rockchip_irq_enable;
54192-		gc->chip_types[0].chip.irq_disable = rockchip_irq_disable;
54193-		gc->chip_types[0].chip.irq_set_wake = irq_gc_set_wake;
54194-		gc->chip_types[0].chip.irq_suspend = rockchip_irq_suspend;
54195-		gc->chip_types[0].chip.irq_resume = rockchip_irq_resume;
54196-		gc->chip_types[0].chip.irq_set_type = rockchip_irq_set_type;
54197-		gc->wake_enabled = IRQ_MSK(bank->nr_pins);
54198-
54199-		/*
54200-		 * Linux assumes that all interrupts start out disabled/masked.
54201-		 * Our driver only uses the concept of masked and always keeps
54202-		 * things enabled, so for us that's all masked and all enabled.
54203-		 */
54204-		writel_relaxed(0xffffffff, bank->reg_base + GPIO_INTMASK);
54205-		writel_relaxed(0xffffffff, bank->reg_base + GPIO_INTEN);
54206-		gc->mask_cache = 0xffffffff;
54207-
54208-		irq_set_chained_handler_and_data(bank->irq,
54209-						 rockchip_irq_demux, bank);
54210-		clk_disable(bank->clk);
54211-	}
54212-
54213-	return 0;
54214-}
54215-
54216-static int rockchip_gpiolib_register(struct platform_device *pdev,
54217-						struct rockchip_pinctrl *info)
54218-{
54219-	struct rockchip_pin_ctrl *ctrl = info->ctrl;
54220-	struct rockchip_pin_bank *bank = ctrl->pin_banks;
54221-	struct gpio_chip *gc;
54222-	int ret;
54223-	int i;
54224-
54225-	for (i = 0; i < ctrl->nr_banks; ++i, ++bank) {
54226-		if (!bank->valid) {
54227-			dev_warn(&pdev->dev, "bank %s is not valid\n",
54228-				 bank->name);
54229-			continue;
54230-		}
54231-
54232-		bank->gpio_chip = rockchip_gpiolib_chip;
54233-
54234-		gc = &bank->gpio_chip;
54235-		gc->base = bank->pin_base;
54236-		gc->ngpio = bank->nr_pins;
54237-		gc->parent = &pdev->dev;
54238-		gc->of_node = bank->of_node;
54239-		gc->label = bank->name;
54240-
54241-		ret = gpiochip_add_data(gc, bank);
54242-		if (ret) {
54243-			dev_err(&pdev->dev, "failed to register gpio_chip %s, error code: %d\n",
54244-							gc->label, ret);
54245-			goto fail;
54246-		}
54247-	}
54248-
54249-	rockchip_interrupts_register(pdev, info);
54250-
54251-	return 0;
54252-
54253-fail:
54254-	for (--i, --bank; i >= 0; --i, --bank) {
54255-		if (!bank->valid)
54256-			continue;
54257-		gpiochip_remove(&bank->gpio_chip);
54258-	}
54259-	return ret;
54260-}
54261-
54262-static int rockchip_gpiolib_unregister(struct platform_device *pdev,
54263-						struct rockchip_pinctrl *info)
54264-{
54265-	struct rockchip_pin_ctrl *ctrl = info->ctrl;
54266-	struct rockchip_pin_bank *bank = ctrl->pin_banks;
54267-	int i;
54268-
54269-	for (i = 0; i < ctrl->nr_banks; ++i, ++bank) {
54270-		if (!bank->valid)
54271-			continue;
54272-		gpiochip_remove(&bank->gpio_chip);
54273-	}
54274-
54275-	return 0;
54276-}
54277-
54278-static int rockchip_get_bank_data(struct rockchip_pin_bank *bank,
54279-				  struct rockchip_pinctrl *info)
54280-{
54281-	struct resource res;
54282-	void __iomem *base;
54283-
54284-	if (of_address_to_resource(bank->of_node, 0, &res)) {
54285-		dev_err(info->dev, "cannot find IO resource for bank\n");
54286-		return -ENOENT;
54287-	}
54288-
54289-	bank->reg_base = devm_ioremap_resource(info->dev, &res);
54290-	if (IS_ERR(bank->reg_base))
54291-		return PTR_ERR(bank->reg_base);
54292-
54293-	/*
54294-	 * special case, where parts of the pull setting-registers are
54295-	 * part of the PMU register space
54296-	 */
54297-	if (of_device_is_compatible(bank->of_node,
54298-				    "rockchip,rk3188-gpio-bank0")) {
54299-		struct device_node *node;
54300-
54301-		node = of_parse_phandle(bank->of_node->parent,
54302-					"rockchip,pmu", 0);
54303-		if (!node) {
54304-			if (of_address_to_resource(bank->of_node, 1, &res)) {
54305-				dev_err(info->dev, "cannot find IO resource for bank\n");
54306-				return -ENOENT;
54307-			}
54308-
54309-			base = devm_ioremap_resource(info->dev, &res);
54310-			if (IS_ERR(base))
54311-				return PTR_ERR(base);
54312-			rockchip_regmap_config.max_register =
54313-						    resource_size(&res) - 4;
54314-			rockchip_regmap_config.name =
54315-					    "rockchip,rk3188-gpio-bank0-pull";
54316-			bank->regmap_pull = devm_regmap_init_mmio(info->dev,
54317-						    base,
54318-						    &rockchip_regmap_config);
54319-		}
54320-		of_node_put(node);
54321-	}
54322-
54323-	bank->irq = irq_of_parse_and_map(bank->of_node, 0);
54324-
54325-	bank->clk = of_clk_get(bank->of_node, 0);
54326-	if (IS_ERR(bank->clk))
54327-		return PTR_ERR(bank->clk);
54328-
54329-	return clk_prepare(bank->clk);
54330-}
54331-
54332 static const struct of_device_id rockchip_pinctrl_dt_match[];
54333 
54334 /* retrieve the soc specific data */
54335@@ -3342,7 +3387,6 @@ static struct rockchip_pin_ctrl *rockchip_pinctrl_get_soc_data(
54336 {
54337 	const struct of_device_id *match;
54338 	struct device_node *node = pdev->dev.of_node;
54339-	struct device_node *np;
54340 	struct rockchip_pin_ctrl *ctrl;
54341 	struct rockchip_pin_bank *bank;
54342 	int grf_offs, pmu_offs, drv_grf_offs, drv_pmu_offs, i, j;
54343@@ -3350,23 +3394,6 @@ static struct rockchip_pin_ctrl *rockchip_pinctrl_get_soc_data(
54344 	match = of_match_node(rockchip_pinctrl_dt_match, node);
54345 	ctrl = (struct rockchip_pin_ctrl *)match->data;
54346 
54347-	for_each_child_of_node(node, np) {
54348-		if (!of_find_property(np, "gpio-controller", NULL))
54349-			continue;
54350-
54351-		bank = ctrl->pin_banks;
54352-		for (i = 0; i < ctrl->nr_banks; ++i, ++bank) {
54353-			if (!strcmp(bank->name, np->name)) {
54354-				bank->of_node = np;
54355-
54356-				if (!rockchip_get_bank_data(bank, d))
54357-					bank->valid = true;
54358-
54359-				break;
54360-			}
54361-		}
54362-	}
54363-
54364 	grf_offs = ctrl->grf_mux_offset;
54365 	pmu_offs = ctrl->pmu_mux_offset;
54366 	drv_pmu_offs = ctrl->pmu_drv_offset;
54367@@ -3391,12 +3418,13 @@ static struct rockchip_pin_ctrl *rockchip_pinctrl_get_soc_data(
54368 
54369 			/* preset iomux offset value, set new start value */
54370 			if (iom->offset >= 0) {
54371-				if (iom->type & IOMUX_SOURCE_PMU)
54372+				if ((iom->type & IOMUX_SOURCE_PMU) || (iom->type & IOMUX_L_SOURCE_PMU))
54373 					pmu_offs = iom->offset;
54374 				else
54375 					grf_offs = iom->offset;
54376 			} else { /* set current iomux offset */
54377-				iom->offset = (iom->type & IOMUX_SOURCE_PMU) ?
54378+				iom->offset = ((iom->type & IOMUX_SOURCE_PMU) ||
54379+					       (iom->type & IOMUX_L_SOURCE_PMU)) ?
54380 							pmu_offs : grf_offs;
54381 			}
54382 
54383@@ -3421,7 +3449,7 @@ static struct rockchip_pin_ctrl *rockchip_pinctrl_get_soc_data(
54384 			inc = (iom->type & (IOMUX_WIDTH_4BIT |
54385 					    IOMUX_WIDTH_3BIT |
54386 					    IOMUX_WIDTH_2BIT)) ? 8 : 4;
54387-			if (iom->type & IOMUX_SOURCE_PMU)
54388+			if ((iom->type & IOMUX_SOURCE_PMU) || (iom->type & IOMUX_L_SOURCE_PMU))
54389 				pmu_offs += inc;
54390 			else
54391 				grf_offs += inc;
54392@@ -3516,6 +3544,46 @@ static int __maybe_unused rockchip_pinctrl_resume(struct device *dev)
54393 static SIMPLE_DEV_PM_OPS(rockchip_pinctrl_dev_pm_ops, rockchip_pinctrl_suspend,
54394 			 rockchip_pinctrl_resume);
54395 
54396+/* SoC-specific data handling */
54397+
54398+/* rk3308 SoC data initialization */
54399+#define RK3308_GRF_SOC_CON13			0x608
54400+#define RK3308_GRF_SOC_CON15			0x610
54401+
54402+/* RK3308_GRF_SOC_CON13 */
54403+#define RK3308_GRF_I2C3_IOFUNC_SRC_CTRL	(BIT(16 + 10) | BIT(10))
54404+#define RK3308_GRF_GPIO2A3_SEL_SRC_CTRL	(BIT(16 + 7)  | BIT(7))
54405+#define RK3308_GRF_GPIO2A2_SEL_SRC_CTRL	(BIT(16 + 3)  | BIT(3))
54406+
54407+/* RK3308_GRF_SOC_CON15 */
54408+#define RK3308_GRF_GPIO2C0_SEL_SRC_CTRL	(BIT(16 + 11) | BIT(11))
54409+#define RK3308_GRF_GPIO3B3_SEL_SRC_CTRL	(BIT(16 + 7)  | BIT(7))
54410+#define RK3308_GRF_GPIO3B2_SEL_SRC_CTRL	(BIT(16 + 3)  | BIT(3))
54411+
54412+static int rk3308_soc_data_init(struct rockchip_pinctrl *info)
54413+{
54414+	int ret;
54415+
54416+	/*
54417+	 * Enable the special control of the selected sources.
54418+	 */
54419+
54420+	ret = regmap_write(info->regmap_base, RK3308_GRF_SOC_CON13,
54421+			   RK3308_GRF_I2C3_IOFUNC_SRC_CTRL |
54422+			   RK3308_GRF_GPIO2A3_SEL_SRC_CTRL |
54423+			   RK3308_GRF_GPIO2A2_SEL_SRC_CTRL);
54424+	if (ret)
54425+		return ret;
54426+
54427+	ret = regmap_write(info->regmap_base, RK3308_GRF_SOC_CON15,
54428+			   RK3308_GRF_GPIO2C0_SEL_SRC_CTRL |
54429+			   RK3308_GRF_GPIO3B3_SEL_SRC_CTRL |
54430+			   RK3308_GRF_GPIO3B2_SEL_SRC_CTRL);
54431+
54432+	return ret;
54433+
54434+}
54435+
54436 static int rockchip_pinctrl_probe(struct platform_device *pdev)
54437 {
54438 	struct rockchip_pinctrl *info;
54439@@ -3589,17 +3657,50 @@ static int rockchip_pinctrl_probe(struct platform_device *pdev)
54440 			return PTR_ERR(info->regmap_pmu);
54441 	}
54442 
54443-	ret = rockchip_gpiolib_register(pdev, info);
54444+	/* Special handling for some SoCs */
54445+	if (ctrl->soc_data_init) {
54446+		ret = ctrl->soc_data_init(info);
54447+		if (ret)
54448+			return ret;
54449+	}
54450+
54451+	ret = rockchip_pinctrl_register(pdev, info);
54452 	if (ret)
54453 		return ret;
54454 
54455-	ret = rockchip_pinctrl_register(pdev, info);
54456+	platform_set_drvdata(pdev, info);
54457+
54458+	ret = of_platform_populate(np, rockchip_bank_match, NULL, NULL);
54459 	if (ret) {
54460-		rockchip_gpiolib_unregister(pdev, info);
54461+		dev_err(&pdev->dev, "failed to register gpio device\n");
54462 		return ret;
54463 	}
54464+	dev_info(dev, "probed %s\n", dev_name(dev));
54465 
54466-	platform_set_drvdata(pdev, info);
54467+	return 0;
54468+}
54469+
54470+static int rockchip_pinctrl_remove(struct platform_device *pdev)
54471+{
54472+	struct rockchip_pinctrl *info = platform_get_drvdata(pdev);
54473+	struct rockchip_pin_bank *bank;
54474+	struct rockchip_pin_output_deferred *cfg;
54475+	int i;
54476+
54477+	of_platform_depopulate(&pdev->dev);
54478+
54479+	for (i = 0; i < info->ctrl->nr_banks; i++) {
54480+		bank = &info->ctrl->pin_banks[i];
54481+
54482+		mutex_lock(&bank->deferred_lock);
54483+		while (!list_empty(&bank->deferred_output)) {
54484+			cfg = list_first_entry(&bank->deferred_output,
54485+					       struct rockchip_pin_output_deferred, head);
54486+			list_del(&cfg->head);
54487+			kfree(cfg);
54488+		}
54489+		mutex_unlock(&bank->deferred_lock);
54490+	}
54491 
54492 	return 0;
54493 }
54494@@ -3639,6 +3740,7 @@ static struct rockchip_pin_ctrl px30_pin_ctrl = {
54495 		.pull_calc_reg		= px30_calc_pull_reg_and_bit,
54496 		.drv_calc_reg		= px30_calc_drv_reg_and_bit,
54497 		.schmitt_calc_reg	= px30_calc_schmitt_reg_and_bit,
54498+		.slew_rate_calc_reg	= px30_calc_slew_rate_reg_and_bit,
54499 };
54500 
54501 static struct rockchip_pin_bank rv1108_pin_banks[] = {
54502@@ -3665,6 +3767,86 @@ static struct rockchip_pin_ctrl rv1108_pin_ctrl = {
54503 	.schmitt_calc_reg	= rv1108_calc_schmitt_reg_and_bit,
54504 };
54505 
54506+static struct rockchip_pin_bank rv1126_pin_banks[] = {
54507+	PIN_BANK_IOMUX_FLAGS(0, 32, "gpio0",
54508+			     IOMUX_WIDTH_4BIT | IOMUX_SOURCE_PMU,
54509+			     IOMUX_WIDTH_4BIT | IOMUX_SOURCE_PMU,
54510+			     IOMUX_WIDTH_4BIT | IOMUX_L_SOURCE_PMU,
54511+			     IOMUX_WIDTH_4BIT),
54512+	PIN_BANK_IOMUX_FLAGS_OFFSET(1, 32, "gpio1",
54513+			     IOMUX_WIDTH_4BIT,
54514+			     IOMUX_WIDTH_4BIT,
54515+			     IOMUX_WIDTH_4BIT,
54516+			     IOMUX_WIDTH_4BIT,
54517+			     0x10010, 0x10018, 0x10020, 0x10028),
54518+	PIN_BANK_IOMUX_FLAGS(2, 32, "gpio2",
54519+			     IOMUX_WIDTH_4BIT,
54520+			     IOMUX_WIDTH_4BIT,
54521+			     IOMUX_WIDTH_4BIT,
54522+			     IOMUX_WIDTH_4BIT),
54523+	PIN_BANK_IOMUX_FLAGS(3, 32, "gpio3",
54524+			     IOMUX_WIDTH_4BIT,
54525+			     IOMUX_WIDTH_4BIT,
54526+			     IOMUX_WIDTH_4BIT,
54527+			     IOMUX_WIDTH_4BIT),
54528+	PIN_BANK_IOMUX_FLAGS(4, 2, "gpio4",
54529+			     IOMUX_WIDTH_4BIT, 0, 0, 0),
54530+};
54531+
54532+static struct rockchip_pin_ctrl rv1126_pin_ctrl = {
54533+	.pin_banks		= rv1126_pin_banks,
54534+	.nr_banks		= ARRAY_SIZE(rv1126_pin_banks),
54535+	.label			= "RV1126-GPIO",
54536+	.type			= RV1126,
54537+	.grf_mux_offset		= 0x10004, /* mux offset from GPIO0_D0 */
54538+	.pmu_mux_offset		= 0x0,
54539+	.iomux_routes		= rv1126_mux_route_data,
54540+	.niomux_routes		= ARRAY_SIZE(rv1126_mux_route_data),
54541+	.iomux_recalced		= rv1126_mux_recalced_data,
54542+	.niomux_recalced	= ARRAY_SIZE(rv1126_mux_recalced_data),
54543+	.pull_calc_reg		= rv1126_calc_pull_reg_and_bit,
54544+	.drv_calc_reg		= rv1126_calc_drv_reg_and_bit,
54545+	.schmitt_calc_reg	= rv1126_calc_schmitt_reg_and_bit,
54546+};
54547+
54548+static struct rockchip_pin_bank rk1808_pin_banks[] = {
54549+	PIN_BANK_IOMUX_FLAGS(0, 32, "gpio0", IOMUX_SOURCE_PMU,
54550+					     IOMUX_SOURCE_PMU,
54551+					     IOMUX_SOURCE_PMU,
54552+					     IOMUX_SOURCE_PMU),
54553+	PIN_BANK_IOMUX_FLAGS(1, 32, "gpio1", IOMUX_WIDTH_4BIT,
54554+					     IOMUX_WIDTH_4BIT,
54555+					     IOMUX_WIDTH_4BIT,
54556+					     IOMUX_WIDTH_4BIT),
54557+	PIN_BANK_IOMUX_FLAGS(2, 32, "gpio2", IOMUX_WIDTH_4BIT,
54558+					     IOMUX_WIDTH_4BIT,
54559+					     IOMUX_WIDTH_4BIT,
54560+					     IOMUX_WIDTH_4BIT),
54561+	PIN_BANK_IOMUX_FLAGS(3, 32, "gpio3", IOMUX_WIDTH_4BIT,
54562+					     IOMUX_WIDTH_4BIT,
54563+					     IOMUX_WIDTH_4BIT,
54564+					     IOMUX_WIDTH_4BIT),
54565+	PIN_BANK_IOMUX_FLAGS(4, 32, "gpio4", IOMUX_WIDTH_4BIT,
54566+					     IOMUX_WIDTH_4BIT,
54567+					     IOMUX_WIDTH_4BIT,
54568+					     IOMUX_WIDTH_4BIT),
54569+};
54570+
54571+static struct rockchip_pin_ctrl rk1808_pin_ctrl = {
54572+	.pin_banks		= rk1808_pin_banks,
54573+	.nr_banks		= ARRAY_SIZE(rk1808_pin_banks),
54574+	.label			= "RK1808-GPIO",
54575+	.type			= RK1808,
54576+	.iomux_routes		= rk1808_mux_route_data,
54577+	.niomux_routes		= ARRAY_SIZE(rk1808_mux_route_data),
54578+	.grf_mux_offset		= 0x0,
54579+	.pmu_mux_offset		= 0x0,
54580+	.pull_calc_reg		= rk1808_calc_pull_reg_and_bit,
54581+	.drv_calc_reg		= rk1808_calc_drv_reg_and_bit,
54582+	.schmitt_calc_reg	= rk1808_calc_schmitt_reg_and_bit,
54583+	.slew_rate_calc_reg	= rk1808_calc_slew_rate_reg_and_bit,
54584+};
54585+
54586 static struct rockchip_pin_bank rk2928_pin_banks[] = {
54587 	PIN_BANK(0, 32, "gpio0"),
54588 	PIN_BANK(1, 32, "gpio1"),
54589@@ -3787,9 +3969,9 @@ static struct rockchip_pin_ctrl rk3228_pin_ctrl = {
54590 };
54591 
54592 static struct rockchip_pin_bank rk3288_pin_banks[] = {
54593-	PIN_BANK_IOMUX_FLAGS(0, 24, "gpio0", IOMUX_SOURCE_PMU,
54594-					     IOMUX_SOURCE_PMU,
54595-					     IOMUX_SOURCE_PMU,
54596+	PIN_BANK_IOMUX_FLAGS(0, 24, "gpio0", IOMUX_SOURCE_PMU | IOMUX_WRITABLE_32BIT,
54597+					     IOMUX_SOURCE_PMU | IOMUX_WRITABLE_32BIT,
54598+					     IOMUX_SOURCE_PMU | IOMUX_WRITABLE_32BIT,
54599 					     IOMUX_UNROUTED
54600 			    ),
54601 	PIN_BANK_IOMUX_FLAGS(1, 32, "gpio1", IOMUX_UNROUTED,
54602@@ -3864,6 +4046,7 @@ static struct rockchip_pin_ctrl rk3308_pin_ctrl = {
54603 		.niomux_recalced	= ARRAY_SIZE(rk3308_mux_recalced_data),
54604 		.iomux_routes		= rk3308_mux_route_data,
54605 		.niomux_routes		= ARRAY_SIZE(rk3308_mux_route_data),
54606+		.soc_data_init		= rk3308_soc_data_init,
54607 		.pull_calc_reg		= rk3308_calc_pull_reg_and_bit,
54608 		.drv_calc_reg		= rk3308_calc_drv_reg_and_bit,
54609 		.schmitt_calc_reg	= rk3308_calc_schmitt_reg_and_bit,
54610@@ -4024,14 +4207,42 @@ static struct rockchip_pin_ctrl rk3568_pin_ctrl = {
54611 	.niomux_routes		= ARRAY_SIZE(rk3568_mux_route_data),
54612 	.pull_calc_reg		= rk3568_calc_pull_reg_and_bit,
54613 	.drv_calc_reg		= rk3568_calc_drv_reg_and_bit,
54614+	.slew_rate_calc_reg	= rk3568_calc_slew_rate_reg_and_bit,
54615 	.schmitt_calc_reg	= rk3568_calc_schmitt_reg_and_bit,
54616 };
54617 
54618+static struct rockchip_pin_bank rk3588_pin_banks[] = {
54619+	RK3588_PIN_BANK_FLAGS(0, 32, "gpio0",
54620+			      IOMUX_WIDTH_4BIT, PULL_TYPE_IO_1V8_ONLY),
54621+	RK3588_PIN_BANK_FLAGS(1, 32, "gpio1",
54622+			      IOMUX_WIDTH_4BIT, PULL_TYPE_IO_1V8_ONLY),
54623+	RK3588_PIN_BANK_FLAGS(2, 32, "gpio2",
54624+			      IOMUX_WIDTH_4BIT, PULL_TYPE_IO_1V8_ONLY),
54625+	RK3588_PIN_BANK_FLAGS(3, 32, "gpio3",
54626+			      IOMUX_WIDTH_4BIT, PULL_TYPE_IO_1V8_ONLY),
54627+	RK3588_PIN_BANK_FLAGS(4, 32, "gpio4",
54628+			      IOMUX_WIDTH_4BIT, PULL_TYPE_IO_1V8_ONLY),
54629+};
54630+
54631+static struct rockchip_pin_ctrl rk3588_pin_ctrl = {
54632+	.pin_banks		= rk3588_pin_banks,
54633+	.nr_banks		= ARRAY_SIZE(rk3588_pin_banks),
54634+	.label			= "RK3588-GPIO",
54635+	.type			= RK3588,
54636+	.pull_calc_reg		= rk3588_calc_pull_reg_and_bit,
54637+	.drv_calc_reg		= rk3588_calc_drv_reg_and_bit,
54638+	.schmitt_calc_reg	= rk3588_calc_schmitt_reg_and_bit,
54639+};
54640+
54641 static const struct of_device_id rockchip_pinctrl_dt_match[] = {
54642 	{ .compatible = "rockchip,px30-pinctrl",
54643 		.data = &px30_pin_ctrl },
54644 	{ .compatible = "rockchip,rv1108-pinctrl",
54645 		.data = &rv1108_pin_ctrl },
54646+	{ .compatible = "rockchip,rv1126-pinctrl",
54647+		.data = &rv1126_pin_ctrl },
54648+	{ .compatible = "rockchip,rk1808-pinctrl",
54649+		.data = &rk1808_pin_ctrl },
54650 	{ .compatible = "rockchip,rk2928-pinctrl",
54651 		.data = &rk2928_pin_ctrl },
54652 	{ .compatible = "rockchip,rk3036-pinctrl",
54653@@ -4058,11 +4269,14 @@ static const struct of_device_id rockchip_pinctrl_dt_match[] = {
54654 		.data = &rk3399_pin_ctrl },
54655 	{ .compatible = "rockchip,rk3568-pinctrl",
54656 		.data = &rk3568_pin_ctrl },
54657+	{ .compatible = "rockchip,rk3588-pinctrl",
54658+		.data = &rk3588_pin_ctrl },
54659 	{},
54660 };
54661 
54662 static struct platform_driver rockchip_pinctrl_driver = {
54663 	.probe		= rockchip_pinctrl_probe,
54664+	.remove		= rockchip_pinctrl_remove,
54665 	.driver = {
54666 		.name	= "rockchip-pinctrl",
54667 		.pm = &rockchip_pinctrl_dev_pm_ops,
54668@@ -4075,3 +4289,14 @@ static int __init rockchip_pinctrl_drv_register(void)
54669 	return platform_driver_register(&rockchip_pinctrl_driver);
54670 }
54671 postcore_initcall(rockchip_pinctrl_drv_register);
54672+
54673+static void __exit rockchip_pinctrl_drv_unregister(void)
54674+{
54675+	platform_driver_unregister(&rockchip_pinctrl_driver);
54676+}
54677+module_exit(rockchip_pinctrl_drv_unregister);
54678+
54679+MODULE_DESCRIPTION("ROCKCHIP Pin Controller Driver");
54680+MODULE_LICENSE("GPL");
54681+MODULE_ALIAS("platform:pinctrl-rockchip");
54682+MODULE_DEVICE_TABLE(of, rockchip_pinctrl_dt_match);
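
Note on the RK3308_GRF_*_SRC_CTRL values above: they follow the usual Rockchip GRF write-mask convention, pairing each control bit n with a write-enable bit at n + 16, so rk3308_soc_data_init() can flip individual bits with a plain regmap_write() and no read-modify-write cycle. A minimal sketch of that pattern, with a hypothetical helper name and register argument:

#include <linux/bits.h>
#include <linux/regmap.h>
#include <linux/types.h>

/*
 * Hypothetical helper: set or clear one bit of a write-masked GRF register.
 * The upper 16 bits of the written value select which low bits may change.
 */
static int grf_update_bit(struct regmap *grf, unsigned int reg,
			  unsigned int bit, bool set)
{
	u32 val = BIT(16 + bit) | (set ? BIT(bit) : 0);

	return regmap_write(grf, reg, val);
}

Under this convention, writing RK3308_GRF_I2C3_IOFUNC_SRC_CTRL (BIT(26) | BIT(10)) sets only bit 10 of GRF_SOC_CON13 and leaves every other bit of the register untouched.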
54683diff --git a/drivers/power/reset/reboot-mode.c b/drivers/power/reset/reboot-mode.c
54684index b4076b10b..feaaa80ca 100644
54685--- a/drivers/power/reset/reboot-mode.c
54686+++ b/drivers/power/reset/reboot-mode.c
54687@@ -6,10 +6,12 @@
54688 #include <linux/device.h>
54689 #include <linux/init.h>
54690 #include <linux/kernel.h>
54691+#include <linux/kobject.h>
54692 #include <linux/module.h>
54693 #include <linux/of.h>
54694 #include <linux/reboot.h>
54695 #include <linux/reboot-mode.h>
54696+#include <linux/sysfs.h>
54697 
54698 #define PREFIX "mode-"
54699 
54700@@ -19,14 +21,24 @@ struct mode_info {
54701 	struct list_head list;
54702 };
54703 
54704-static unsigned int get_reboot_mode_magic(struct reboot_mode_driver *reboot,
54705-					  const char *cmd)
54706+static const char *boot_mode = "coldboot";
54707+
54708+static ssize_t boot_mode_show(struct kobject *kobj, struct kobj_attribute *attr,
54709+			      char *buf)
54710+{
54711+	return scnprintf(buf, PAGE_SIZE, "%s\n", boot_mode);
54712+}
54713+
54714+static struct kobj_attribute kobj_boot_mode = __ATTR_RO(boot_mode);
54715+
54716+static int get_reboot_mode_magic(struct reboot_mode_driver *reboot,
54717+				 const char *cmd)
54718 {
54719 	const char *normal = "normal";
54720 	int magic = 0;
54721 	struct mode_info *info;
54722 
54723-	if (!cmd)
54724+	if (!cmd || !cmd[0])
54725 		cmd = normal;
54726 
54727 	list_for_each_entry(info, &reboot->head, list) {
54728@@ -39,20 +51,56 @@ static unsigned int get_reboot_mode_magic(struct reboot_mode_driver *reboot,
54729 	return magic;
54730 }
54731 
54732+static void reboot_mode_write(struct reboot_mode_driver *reboot,
54733+			      const void *cmd)
54734+{
54735+	int magic;
54736+
54737+	magic = get_reboot_mode_magic(reboot, cmd);
54738+	if (!magic)
54739+		magic = get_reboot_mode_magic(reboot, NULL);
54740+	if (magic)
54741+		reboot->write(reboot, magic);
54742+}
54743+
54744 static int reboot_mode_notify(struct notifier_block *this,
54745 			      unsigned long mode, void *cmd)
54746 {
54747 	struct reboot_mode_driver *reboot;
54748-	unsigned int magic;
54749 
54750 	reboot = container_of(this, struct reboot_mode_driver, reboot_notifier);
54751-	magic = get_reboot_mode_magic(reboot, cmd);
54752-	if (magic)
54753-		reboot->write(reboot, magic);
54754+	reboot_mode_write(reboot, cmd);
54755 
54756 	return NOTIFY_DONE;
54757 }
54758 
54759+static int reboot_mode_panic_notify(struct notifier_block *this,
54760+				      unsigned long ev, void *ptr)
54761+{
54762+	struct reboot_mode_driver *reboot;
54763+	const char *cmd = "panic";
54764+
54765+	reboot = container_of(this, struct reboot_mode_driver, panic_notifier);
54766+	reboot_mode_write(reboot, cmd);
54767+
54768+	return NOTIFY_DONE;
54769+}
54770+
54771+static int boot_mode_parse(struct reboot_mode_driver *reboot)
54772+{
54773+	struct mode_info *info;
54774+	unsigned int magic = reboot->read(reboot);
54775+
54776+	list_for_each_entry(info, &reboot->head, list) {
54777+		if (info->magic == magic) {
54778+			boot_mode = info->mode;
54779+			break;
54780+		}
54781+	}
54782+
54783+	return 0;
54784+}
54785+
54786 /**
54787  * reboot_mode_register - register a reboot mode driver
54788  * @reboot: reboot mode driver
54789@@ -101,10 +149,16 @@ int reboot_mode_register(struct reboot_mode_driver *reboot)
54790 		list_add_tail(&info->list, &reboot->head);
54791 	}
54792 
54793+	boot_mode_parse(reboot);
54794 	reboot->reboot_notifier.notifier_call = reboot_mode_notify;
54795+	reboot->panic_notifier.notifier_call = reboot_mode_panic_notify;
54796 	register_reboot_notifier(&reboot->reboot_notifier);
54797+	register_pre_restart_handler(&reboot->reboot_notifier);
54798+	atomic_notifier_chain_register(&panic_notifier_list,
54799+				       &reboot->panic_notifier);
54800+	ret = sysfs_create_file(kernel_kobj, &kobj_boot_mode.attr);
54801 
54802-	return 0;
54803+	return ret;
54804 
54805 error:
54806 	list_for_each_entry(info, &reboot->head, list)
54807diff --git a/drivers/power/reset/syscon-reboot-mode.c b/drivers/power/reset/syscon-reboot-mode.c
54808index e0772c9f7..2fe0089c4 100644
54809--- a/drivers/power/reset/syscon-reboot-mode.c
54810+++ b/drivers/power/reset/syscon-reboot-mode.c
54811@@ -36,6 +36,17 @@ static int syscon_reboot_mode_write(struct reboot_mode_driver *reboot,
54812 	return ret;
54813 }
54814 
54815+static int syscon_reboot_mode_read(struct reboot_mode_driver *reboot)
54816+{
54817+	struct syscon_reboot_mode *syscon_rbm;
54818+	u32 val = 0;
54819+
54820+	syscon_rbm = container_of(reboot, struct syscon_reboot_mode, reboot);
54821+	regmap_read(syscon_rbm->map, syscon_rbm->offset, &val);
54822+
54823+	return val;
54824+}
54825+
54826 static int syscon_reboot_mode_probe(struct platform_device *pdev)
54827 {
54828 	int ret;
54829@@ -47,6 +58,7 @@ static int syscon_reboot_mode_probe(struct platform_device *pdev)
54830 
54831 	syscon_rbm->reboot.dev = &pdev->dev;
54832 	syscon_rbm->reboot.write = syscon_reboot_mode_write;
54833+	syscon_rbm->reboot.read = syscon_reboot_mode_read;
54834 	syscon_rbm->mask = 0xffffffff;
54835 
54836 	syscon_rbm->map = syscon_node_to_regmap(pdev->dev.parent->of_node);
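
With the reboot-mode core change above, a driver that supplies the new read() callback gets its retained magic translated back into a mode string at registration time and exported at /sys/kernel/boot_mode. A rough sketch of how a platform driver might wire both callbacks, assuming the matching field addition to struct reboot_mode_driver elsewhere in this series; the demo_* names and the single scratch register are illustrative, not part of the patch:

#include <linux/device.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/reboot-mode.h>

/* Hypothetical driver state: one ioremapped scratch register holds the magic. */
struct demo_reboot_mode {
	struct reboot_mode_driver reboot;
	void __iomem *reg;
};

static int demo_reboot_mode_write(struct reboot_mode_driver *reboot,
				  unsigned int magic)
{
	struct demo_reboot_mode *demo =
		container_of(reboot, struct demo_reboot_mode, reboot);

	writel(magic, demo->reg);
	return 0;
}

static int demo_reboot_mode_read(struct reboot_mode_driver *reboot)
{
	struct demo_reboot_mode *demo =
		container_of(reboot, struct demo_reboot_mode, reboot);

	return readl(demo->reg);
}

/* Called from probe once demo->reg has been mapped. */
static int demo_reboot_mode_setup(struct demo_reboot_mode *demo,
				  struct device *dev)
{
	demo->reboot.dev = dev;
	demo->reboot.write = demo_reboot_mode_write;
	demo->reboot.read = demo_reboot_mode_read;	/* new callback */

	/* Parses mode-* properties, resolves boot_mode, adds the sysfs file. */
	return reboot_mode_register(&demo->reboot);
}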
54837diff --git a/drivers/power/supply/power_supply_core.c b/drivers/power/supply/power_supply_core.c
54838index 2b644590f..8cdd19de0 100644
54839--- a/drivers/power/supply/power_supply_core.c
54840+++ b/drivers/power/supply/power_supply_core.c
54841@@ -32,6 +32,13 @@ EXPORT_SYMBOL_GPL(power_supply_notifier);
54842 
54843 static struct device_type power_supply_dev_type;
54844 
54845+struct match_device_node_array_param {
54846+	struct device_node *parent_of_node;
54847+	struct power_supply **psy;
54848+	ssize_t psy_size;
54849+	ssize_t psy_count;
54850+};
54851+
54852 #define POWER_SUPPLY_DEFERRED_REGISTER_TIME	msecs_to_jiffies(10)
54853 
54854 static bool __power_supply_is_supplied_by(struct power_supply *supplier,
54855@@ -522,6 +529,77 @@ struct power_supply *power_supply_get_by_phandle(struct device_node *np,
54856 }
54857 EXPORT_SYMBOL_GPL(power_supply_get_by_phandle);
54858 
54859+static int power_supply_match_device_node_array(struct device *dev,
54860+						void *data)
54861+{
54862+	struct match_device_node_array_param *param =
54863+		(struct match_device_node_array_param *)data;
54864+	struct power_supply **psy = param->psy;
54865+	ssize_t size = param->psy_size;
54866+	ssize_t *count = &param->psy_count;
54867+
54868+	if (!dev->parent || dev->parent->of_node != param->parent_of_node)
54869+		return 0;
54870+
54871+	if (*count >= size)
54872+		return -EOVERFLOW;
54873+
54874+	psy[*count] = dev_get_drvdata(dev);
54875+	atomic_inc(&psy[*count]->use_cnt);
54876+	(*count)++;
54877+
54878+	return 0;
54879+}
54880+
54881+/**
54882+ * power_supply_get_by_phandle_array() - Similar to
54883+ * power_supply_get_by_phandle but returns an array of power supply
54884+ * objects which are associated with the phandle.
54885+ * @np: Pointer to device node holding phandle property.
54886+ * @property: Name of property holding a power supply name.
54887+ * @psy: Array of power_supply pointers provided by the client which is
54888+ * filled by power_supply_get_by_phandle_array.
54889+ * @size: size of power_supply pointer array.
54890+ *
54891+ * If power supply was found, it increases reference count for the
54892+ * internal power supply's device. The user should call power_supply_put()
54893+ * after usage.
54894+ *
54895+ * Return: On success returns the number of power supply objects filled
54896+ * in the @psy array.
54897+ * -EOVERFLOW when the size of the @psy array does not suffice.
54898+ * -EINVAL when @psy is NULL or @size is 0.
54899+ * -ENODEV when matching device_node is not found.
54900+ */
54901+int power_supply_get_by_phandle_array(struct device_node *np,
54902+				      const char *property,
54903+				      struct power_supply **psy,
54904+				      ssize_t size)
54905+{
54906+	struct device_node *power_supply_np;
54907+	int ret;
54908+	struct match_device_node_array_param param;
54909+
54910+	if (!psy || !size)
54911+		return -EINVAL;
54912+
54913+	power_supply_np = of_parse_phandle(np, property, 0);
54914+	if (!power_supply_np)
54915+		return -ENODEV;
54916+
54917+	param.parent_of_node = power_supply_np;
54918+	param.psy = psy;
54919+	param.psy_size = size;
54920+	param.psy_count = 0;
54921+	ret = class_for_each_device(power_supply_class, NULL, &param,
54922+				    power_supply_match_device_node_array);
54923+
54924+	of_node_put(power_supply_np);
54925+
54926+	return param.psy_count;
54927+}
54928+EXPORT_SYMBOL_GPL(power_supply_get_by_phandle_array);
54929+
54930 static void devm_power_supply_put(struct device *dev, void *res)
54931 {
54932 	struct power_supply **psy = res;
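
A minimal sketch of how a client driver might consume the new power_supply_get_by_phandle_array() helper; the "monitored-battery" property name and the array size are assumptions for illustration only:

#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/power_supply.h>

static int demo_list_batteries(struct device_node *np)
{
	struct power_supply *psy[4];
	int count, i;

	count = power_supply_get_by_phandle_array(np, "monitored-battery",
						  psy, ARRAY_SIZE(psy));
	if (count < 0)
		return count;	/* -EINVAL, -ENODEV or -EOVERFLOW per the kerneldoc */

	for (i = 0; i < count; i++) {
		pr_info("found supply %s\n", psy[i]->desc->name);
		power_supply_put(psy[i]);	/* drop the reference taken above */
	}

	return 0;
}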
54933diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c
54934index a616b9d8f..a5e656360 100644
54935--- a/drivers/power/supply/power_supply_sysfs.c
54936+++ b/drivers/power/supply/power_supply_sysfs.c
54937@@ -89,6 +89,7 @@ static const char * const POWER_SUPPLY_CHARGE_TYPE_TEXT[] = {
54938 	[POWER_SUPPLY_CHARGE_TYPE_ADAPTIVE]	= "Adaptive",
54939 	[POWER_SUPPLY_CHARGE_TYPE_CUSTOM]	= "Custom",
54940 	[POWER_SUPPLY_CHARGE_TYPE_LONGLIFE]	= "Long Life",
54941+	[POWER_SUPPLY_CHARGE_TYPE_TAPER]	= "Taper",
54942 };
54943 
54944 static const char * const POWER_SUPPLY_HEALTH_TEXT[] = {
54945diff --git a/drivers/pwm/pwm-rockchip.c b/drivers/pwm/pwm-rockchip.c
54946index 1f3079562..c6b3ce057 100644
54947--- a/drivers/pwm/pwm-rockchip.c
54948+++ b/drivers/pwm/pwm-rockchip.c
54949@@ -11,6 +11,7 @@
54950 #include <linux/module.h>
54951 #include <linux/of.h>
54952 #include <linux/of_device.h>
54953+#include <linux/pinctrl/consumer.h>
54954 #include <linux/platform_device.h>
54955 #include <linux/pwm.h>
54956 #include <linux/time.h>
54957@@ -26,15 +27,25 @@
54958 #define PWM_INACTIVE_POSITIVE	(1 << 4)
54959 #define PWM_POLARITY_MASK	(PWM_DUTY_POSITIVE | PWM_INACTIVE_POSITIVE)
54960 #define PWM_OUTPUT_LEFT		(0 << 5)
54961+#define PWM_OUTPUT_CENTER	(1 << 5)
54962 #define PWM_LOCK_EN		(1 << 6)
54963 #define PWM_LP_DISABLE		(0 << 8)
54964 
54965+#define PWM_ONESHOT_COUNT_SHIFT	24
54966+#define PWM_ONESHOT_COUNT_MAX	256
54967+
54968 struct rockchip_pwm_chip {
54969 	struct pwm_chip chip;
54970 	struct clk *clk;
54971 	struct clk *pclk;
54972+	struct pinctrl *pinctrl;
54973+	struct pinctrl_state *active_state;
54974 	const struct rockchip_pwm_data *data;
54975 	void __iomem *base;
54976+	unsigned long clk_rate;
54977+	bool vop_pwm_en; /* mirrors the VOP PWM enable register state */
54978+	bool center_aligned;
54979+	bool oneshot;
54980 };
54981 
54982 struct rockchip_pwm_regs {
54983@@ -49,7 +60,9 @@ struct rockchip_pwm_data {
54984 	unsigned int prescaler;
54985 	bool supports_polarity;
54986 	bool supports_lock;
54987+	bool vop_pwm;
54988 	u32 enable_conf;
54989+	u32 enable_conf_mask;
54990 };
54991 
54992 static inline struct rockchip_pwm_chip *to_rockchip_pwm_chip(struct pwm_chip *c)
54993@@ -63,7 +76,6 @@ static void rockchip_pwm_get_state(struct pwm_chip *chip,
54994 {
54995 	struct rockchip_pwm_chip *pc = to_rockchip_pwm_chip(chip);
54996 	u32 enable_conf = pc->data->enable_conf;
54997-	unsigned long clk_rate;
54998 	u64 tmp;
54999 	u32 val;
55000 	int ret;
55001@@ -72,15 +84,13 @@ static void rockchip_pwm_get_state(struct pwm_chip *chip,
55002 	if (ret)
55003 		return;
55004 
55005-	clk_rate = clk_get_rate(pc->clk);
55006-
55007 	tmp = readl_relaxed(pc->base + pc->data->regs.period);
55008 	tmp *= pc->data->prescaler * NSEC_PER_SEC;
55009-	state->period = DIV_ROUND_CLOSEST_ULL(tmp, clk_rate);
55010+	state->period = DIV_ROUND_CLOSEST_ULL(tmp, pc->clk_rate);
55011 
55012 	tmp = readl_relaxed(pc->base + pc->data->regs.duty);
55013 	tmp *= pc->data->prescaler * NSEC_PER_SEC;
55014-	state->duty_cycle =  DIV_ROUND_CLOSEST_ULL(tmp, clk_rate);
55015+	state->duty_cycle =  DIV_ROUND_CLOSEST_ULL(tmp, pc->clk_rate);
55016 
55017 	val = readl_relaxed(pc->base + pc->data->regs.ctrl);
55018 	state->enabled = (val & enable_conf) == enable_conf;
55019@@ -98,28 +108,48 @@ static void rockchip_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
55020 {
55021 	struct rockchip_pwm_chip *pc = to_rockchip_pwm_chip(chip);
55022 	unsigned long period, duty;
55023-	u64 clk_rate, div;
55024+	unsigned long flags;
55025+	u64 div;
55026 	u32 ctrl;
55027 
55028-	clk_rate = clk_get_rate(pc->clk);
55029-
55030 	/*
55031 	 * Since period and duty cycle registers have a width of 32
55032 	 * bits, every possible input period can be obtained using the
55033 	 * default prescaler value for all practical clock rate values.
55034 	 */
55035-	div = clk_rate * state->period;
55036+	div = (u64)pc->clk_rate * state->period;
55037 	period = DIV_ROUND_CLOSEST_ULL(div,
55038 				       pc->data->prescaler * NSEC_PER_SEC);
55039 
55040-	div = clk_rate * state->duty_cycle;
55041+	div = (u64)pc->clk_rate * state->duty_cycle;
55042 	duty = DIV_ROUND_CLOSEST_ULL(div, pc->data->prescaler * NSEC_PER_SEC);
55043 
55044+	local_irq_save(flags);
55045 	/*
55046 	 * Lock the period and duty of previous configuration, then
55047 	 * change the duty and period, that would not be effective.
55048 	 */
55049 	ctrl = readl_relaxed(pc->base + pc->data->regs.ctrl);
55050+	if (pc->data->vop_pwm) {
55051+		if (pc->vop_pwm_en)
55052+			ctrl |= PWM_ENABLE;
55053+		else
55054+			ctrl &= ~PWM_ENABLE;
55055+	}
55056+
55057+#ifdef CONFIG_PWM_ROCKCHIP_ONESHOT
55058+	if (state->oneshot_count > PWM_ONESHOT_COUNT_MAX) {
55059+		pc->oneshot = false;
55060+		dev_err(chip->dev, "Oneshot_count value overflow.\n");
55061+	} else if (state->oneshot_count > 0) {
55062+		pc->oneshot = true;
55063+		ctrl |= (state->oneshot_count - 1) << PWM_ONESHOT_COUNT_SHIFT;
55064+	} else {
55065+		pc->oneshot = false;
55066+		ctrl |= PWM_CONTINUOUS;
55067+	}
55068+#endif
55069+
55070 	if (pc->data->supports_lock) {
55071 		ctrl |= PWM_LOCK_EN;
55072 		writel_relaxed(ctrl, pc->base + pc->data->regs.ctrl);
55073@@ -145,6 +175,7 @@ static void rockchip_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
55074 		ctrl &= ~PWM_LOCK_EN;
55075 
55076 	writel(ctrl, pc->base + pc->data->regs.ctrl);
55077+	local_irq_restore(flags);
55078 }
55079 
55080 static int rockchip_pwm_enable(struct pwm_chip *chip,
55081@@ -163,13 +194,24 @@ static int rockchip_pwm_enable(struct pwm_chip *chip,
55082 	}
55083 
55084 	val = readl_relaxed(pc->base + pc->data->regs.ctrl);
55085+	val &= ~pc->data->enable_conf_mask;
55086+
55087+	if (PWM_OUTPUT_CENTER & pc->data->enable_conf_mask) {
55088+		if (pc->center_aligned)
55089+			val |= PWM_OUTPUT_CENTER;
55090+	}
55091 
55092-	if (enable)
55093+	if (enable) {
55094 		val |= enable_conf;
55095-	else
55096+		if (pc->oneshot)
55097+			val &= ~PWM_CONTINUOUS;
55098+	} else {
55099 		val &= ~enable_conf;
55100+	}
55101 
55102 	writel_relaxed(val, pc->base + pc->data->regs.ctrl);
55103+	if (pc->data->vop_pwm)
55104+		pc->vop_pwm_en = enable;
55105 
55106 	if (!enable)
55107 		clk_disable(pc->clk);
55108@@ -207,6 +249,8 @@ static int rockchip_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
55109 			goto out;
55110 	}
55111 
55112+	if (state->enabled)
55113+		ret = pinctrl_select_state(pc->pinctrl, pc->active_state);
55114 out:
55115 	clk_disable(pc->pclk);
55116 
55117@@ -229,7 +273,9 @@ static const struct rockchip_pwm_data pwm_data_v1 = {
55118 	.prescaler = 2,
55119 	.supports_polarity = false,
55120 	.supports_lock = false,
55121+	.vop_pwm = false,
55122 	.enable_conf = PWM_CTRL_OUTPUT_EN | PWM_CTRL_TIMER_EN,
55123+	.enable_conf_mask = BIT(1) | BIT(3),
55124 };
55125 
55126 static const struct rockchip_pwm_data pwm_data_v2 = {
55127@@ -242,8 +288,10 @@ static const struct rockchip_pwm_data pwm_data_v2 = {
55128 	.prescaler = 1,
55129 	.supports_polarity = true,
55130 	.supports_lock = false,
55131+	.vop_pwm = false,
55132 	.enable_conf = PWM_OUTPUT_LEFT | PWM_LP_DISABLE | PWM_ENABLE |
55133 		       PWM_CONTINUOUS,
55134+	.enable_conf_mask = GENMASK(2, 0) | BIT(5) | BIT(8),
55135 };
55136 
55137 static const struct rockchip_pwm_data pwm_data_vop = {
55138@@ -256,8 +304,10 @@ static const struct rockchip_pwm_data pwm_data_vop = {
55139 	.prescaler = 1,
55140 	.supports_polarity = true,
55141 	.supports_lock = false,
55142+	.vop_pwm = true,
55143 	.enable_conf = PWM_OUTPUT_LEFT | PWM_LP_DISABLE | PWM_ENABLE |
55144 		       PWM_CONTINUOUS,
55145+	.enable_conf_mask = GENMASK(2, 0) | BIT(5) | BIT(8),
55146 };
55147 
55148 static const struct rockchip_pwm_data pwm_data_v3 = {
55149@@ -270,8 +320,10 @@ static const struct rockchip_pwm_data pwm_data_v3 = {
55150 	.prescaler = 1,
55151 	.supports_polarity = true,
55152 	.supports_lock = true,
55153+	.vop_pwm = false,
55154 	.enable_conf = PWM_OUTPUT_LEFT | PWM_LP_DISABLE | PWM_ENABLE |
55155 		       PWM_CONTINUOUS,
55156+	.enable_conf_mask = GENMASK(2, 0) | BIT(5) | BIT(8),
55157 };
55158 
55159 static const struct of_device_id rockchip_pwm_dt_ids[] = {
55160@@ -301,7 +353,8 @@ static int rockchip_pwm_probe(struct platform_device *pdev)
55161 		return -ENOMEM;
55162 
55163 	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
55164-	pc->base = devm_ioremap_resource(&pdev->dev, r);
55165+	pc->base = devm_ioremap(&pdev->dev, r->start,
55166+				resource_size(r));
55167 	if (IS_ERR(pc->base))
55168 		return PTR_ERR(pc->base);
55169 
55170@@ -339,6 +392,18 @@ static int rockchip_pwm_probe(struct platform_device *pdev)
55171 		goto err_clk;
55172 	}
55173 
55174+	pc->pinctrl = devm_pinctrl_get(&pdev->dev);
55175+	if (IS_ERR(pc->pinctrl)) {
55176+		dev_err(&pdev->dev, "Get pinctrl failed!\n");
55177+		return PTR_ERR(pc->pinctrl);
55178+	}
55179+
55180+	pc->active_state = pinctrl_lookup_state(pc->pinctrl, "active");
55181+	if (IS_ERR(pc->active_state)) {
55182+		dev_err(&pdev->dev, "No active pinctrl state\n");
55183+		return PTR_ERR(pc->active_state);
55184+	}
55185+
55186 	platform_set_drvdata(pdev, pc);
55187 
55188 	pc->data = id->data;
55189@@ -346,6 +411,7 @@ static int rockchip_pwm_probe(struct platform_device *pdev)
55190 	pc->chip.ops = &rockchip_pwm_ops;
55191 	pc->chip.base = -1;
55192 	pc->chip.npwm = 1;
55193+	pc->clk_rate = clk_get_rate(pc->clk);
55194 
55195 	if (pc->data->supports_polarity) {
55196 		pc->chip.of_xlate = of_pwm_xlate_with_flags;
55197@@ -356,6 +422,9 @@ static int rockchip_pwm_probe(struct platform_device *pdev)
55198 	ctrl = readl_relaxed(pc->base + pc->data->regs.ctrl);
55199 	enabled = (ctrl & enable_conf) == enable_conf;
55200 
55201+	pc->center_aligned =
55202+		device_property_read_bool(&pdev->dev, "center-aligned");
55203+
55204 	ret = pwmchip_add(&pc->chip);
55205 	if (ret < 0) {
55206 		dev_err(&pdev->dev, "pwmchip_add() failed: %d\n", ret);
55207@@ -382,6 +451,20 @@ static int rockchip_pwm_remove(struct platform_device *pdev)
55208 {
55209 	struct rockchip_pwm_chip *pc = platform_get_drvdata(pdev);
55210 
55211+	/*
55212+	 * Disable the PWM clk before unpreparing it if the PWM device is still
55213+	 * running. This should only happen when the last PWM user left it
55214+	 * enabled, or when nobody requested a PWM that was previously enabled
55215+	 * by the bootloader.
55216+	 *
55217+	 * FIXME: Maybe the core should disable all PWM devices in
55218+	 * pwmchip_remove(). In this case we'd only have to call
55219+	 * clk_unprepare() after pwmchip_remove().
55220+	 *
55221+	 */
55222+	if (pwm_is_enabled(pc->chip.pwms))
55223+		clk_disable(pc->clk);
55224+
55225 	clk_unprepare(pc->pclk);
55226 	clk_unprepare(pc->clk);
55227 
55228@@ -396,7 +479,21 @@ static struct platform_driver rockchip_pwm_driver = {
55229 	.probe = rockchip_pwm_probe,
55230 	.remove = rockchip_pwm_remove,
55231 };
55232+#ifdef CONFIG_ROCKCHIP_THUNDER_BOOT
55233+static int __init rockchip_pwm_driver_init(void)
55234+{
55235+	return platform_driver_register(&rockchip_pwm_driver);
55236+}
55237+subsys_initcall(rockchip_pwm_driver_init);
55238+
55239+static void __exit rockchip_pwm_driver_exit(void)
55240+{
55241+	platform_driver_unregister(&rockchip_pwm_driver);
55242+}
55243+module_exit(rockchip_pwm_driver_exit);
55244+#else
55245 module_platform_driver(rockchip_pwm_driver);
55246+#endif
55247 
55248 MODULE_AUTHOR("Beniamino Galvani <b.galvani@gmail.com>");
55249 MODULE_DESCRIPTION("Rockchip SoC PWM driver");
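
The one-shot support above relies on an oneshot_count field in struct pwm_state, which is a vendor extension guarded by CONFIG_PWM_ROCKCHIP_ONESHOT rather than an upstream field. A minimal sketch of a consumer arming a fixed number of periods under that assumption; the period and duty values are arbitrary:

#include <linux/pwm.h>

/* Arm a burst of 'periods' PWM periods; the caller owns the pwm reference. */
static int demo_pwm_oneshot(struct pwm_device *pwm, unsigned int periods)
{
	struct pwm_state state;

	pwm_init_state(pwm, &state);
	state.period = 1000000;		/* 1 ms, arbitrary */
	state.duty_cycle = 500000;	/* 50% */
	state.enabled = true;
#ifdef CONFIG_PWM_ROCKCHIP_ONESHOT
	state.oneshot_count = periods;	/* must not exceed PWM_ONESHOT_COUNT_MAX (256) */
#endif

	return pwm_apply_state(pwm, &state);
}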
55250diff --git a/drivers/pwm/sysfs.c b/drivers/pwm/sysfs.c
55251index 9903c3a7e..ca47570cb 100644
55252--- a/drivers/pwm/sysfs.c
55253+++ b/drivers/pwm/sysfs.c
55254@@ -103,6 +103,43 @@ static ssize_t duty_cycle_store(struct device *child,
55255 	return ret ? : size;
55256 }
55257 
55258+#ifdef CONFIG_PWM_ROCKCHIP_ONESHOT
55259+static ssize_t oneshot_count_show(struct device *child,
55260+			       struct device_attribute *attr,
55261+			       char *buf)
55262+{
55263+	const struct pwm_device *pwm = child_to_pwm_device(child);
55264+	struct pwm_state state;
55265+
55266+	pwm_get_state(pwm, &state);
55267+
55268+	return sprintf(buf, "%llu\n", state.oneshot_count);
55269+}
55270+
55271+static ssize_t oneshot_count_store(struct device *child,
55272+				struct device_attribute *attr,
55273+				const char *buf, size_t size)
55274+{
55275+	struct pwm_export *export = child_to_pwm_export(child);
55276+	struct pwm_device *pwm = export->pwm;
55277+	struct pwm_state state;
55278+	unsigned int val;
55279+	int ret;
55280+
55281+	ret = kstrtouint(buf, 0, &val);
55282+	if (ret)
55283+		return ret;
55284+
55285+	mutex_lock(&export->lock);
55286+	pwm_get_state(pwm, &state);
55287+	state.oneshot_count = val;
55288+	ret = pwm_apply_state(pwm, &state);
55289+	mutex_unlock(&export->lock);
55290+
55291+	return ret ? : size;
55292+}
55293+#endif
55294+
55295 static ssize_t enable_show(struct device *child,
55296 			   struct device_attribute *attr,
55297 			   char *buf)
55298@@ -215,18 +252,49 @@ static ssize_t capture_show(struct device *child,
55299 	return sprintf(buf, "%u %u\n", result.period, result.duty_cycle);
55300 }
55301 
55302+static ssize_t output_type_show(struct device *child,
55303+			     struct device_attribute *attr,
55304+			     char *buf)
55305+{
55306+	const struct pwm_device *pwm = child_to_pwm_device(child);
55307+	const char *output_type = "unknown";
55308+	struct pwm_state state;
55309+
55310+	pwm_get_state(pwm, &state);
55311+	switch (state.output_type) {
55312+	case PWM_OUTPUT_FIXED:
55313+		output_type = "fixed";
55314+		break;
55315+	case PWM_OUTPUT_MODULATED:
55316+		output_type = "modulated";
55317+		break;
55318+	default:
55319+		break;
55320+	}
55321+
55322+	return snprintf(buf, PAGE_SIZE, "%s\n", output_type);
55323+}
55324+
55325 static DEVICE_ATTR_RW(period);
55326 static DEVICE_ATTR_RW(duty_cycle);
55327+#ifdef CONFIG_PWM_ROCKCHIP_ONESHOT
55328+static DEVICE_ATTR_RW(oneshot_count);
55329+#endif
55330 static DEVICE_ATTR_RW(enable);
55331 static DEVICE_ATTR_RW(polarity);
55332 static DEVICE_ATTR_RO(capture);
55333+static DEVICE_ATTR_RO(output_type);
55334 
55335 static struct attribute *pwm_attrs[] = {
55336 	&dev_attr_period.attr,
55337 	&dev_attr_duty_cycle.attr,
55338+#ifdef CONFIG_PWM_ROCKCHIP_ONESHOT
55339+	&dev_attr_oneshot_count.attr,
55340+#endif
55341 	&dev_attr_enable.attr,
55342 	&dev_attr_polarity.attr,
55343 	&dev_attr_capture.attr,
55344+	&dev_attr_output_type.attr,
55345 	NULL
55346 };
55347 ATTRIBUTE_GROUPS(pwm);
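
From userspace the new attributes appear next to the standard period, duty_cycle and enable files of an exported PWM channel. A rough usage sketch using the conventional /sys/class/pwm layout; the chip and channel numbers are placeholders:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Write a string to a sysfs attribute; returns 0 on success. */
static int sysfs_write(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);

	if (fd < 0)
		return -1;
	if (write(fd, val, strlen(val)) < 0) {
		close(fd);
		return -1;
	}
	return close(fd);
}

int main(void)
{
	const char *base = "/sys/class/pwm/pwmchip0/pwm0";	/* placeholder channel */
	char path[128];

	snprintf(path, sizeof(path), "%s/period", base);
	sysfs_write(path, "1000000");
	snprintf(path, sizeof(path), "%s/duty_cycle", base);
	sysfs_write(path, "500000");
	snprintf(path, sizeof(path), "%s/oneshot_count", base);
	sysfs_write(path, "16");	/* only with CONFIG_PWM_ROCKCHIP_ONESHOT */
	snprintf(path, sizeof(path), "%s/enable", base);
	sysfs_write(path, "1");
	return 0;
}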
55348diff --git a/drivers/soc/rockchip/grf.c b/drivers/soc/rockchip/grf.c
55349index 343ff61cc..14f85382d 100644
55350--- a/drivers/soc/rockchip/grf.c
55351+++ b/drivers/soc/rockchip/grf.c
55352@@ -7,6 +7,7 @@
55353 
55354 #include <linux/err.h>
55355 #include <linux/mfd/syscon.h>
55356+#include <linux/module.h>
55357 #include <linux/of_device.h>
55358 #include <linux/platform_device.h>
55359 #include <linux/regmap.h>
55360@@ -25,6 +26,21 @@ struct rockchip_grf_info {
55361 	int num_values;
55362 };
55363 
55364+#define PX30_GRF_SOC_CON5		0x414
55365+
55366+static const struct rockchip_grf_value px30_defaults[] __initconst = {
55367+	/*
55368+	 * Postpone automatic jtag/sdmmc switching by 5 seconds.
55369+	 * The counter value is calculated based on 24MHz clock.
55370+	 */
55371+	{ "jtag switching delay", PX30_GRF_SOC_CON5, 0x7270E00 },
55372+};
55373+
55374+static const struct rockchip_grf_info px30_grf __initconst = {
55375+	.values = px30_defaults,
55376+	.num_values = ARRAY_SIZE(px30_defaults),
55377+};
55378+
55379 #define RK3036_GRF_SOC_CON0		0x140
55380 
55381 static const struct rockchip_grf_value rk3036_defaults[] __initconst = {
55382@@ -86,6 +102,17 @@ static const struct rockchip_grf_info rk3328_grf __initconst = {
55383 	.num_values = ARRAY_SIZE(rk3328_defaults),
55384 };
55385 
55386+#define RK3308_GRF_SOC_CON3		0x30c
55387+
55388+static const struct rockchip_grf_value rk3308_defaults[] __initconst = {
55389+	{ "uart dma mask", RK3308_GRF_SOC_CON3, HIWORD_UPDATE(0, 0x1f, 10) },
55390+};
55391+
55392+static const struct rockchip_grf_info rk3308_grf __initconst = {
55393+	.values = rk3308_defaults,
55394+	.num_values = ARRAY_SIZE(rk3308_defaults),
55395+};
55396+
55397 #define RK3368_GRF_SOC_CON15		0x43c
55398 
55399 static const struct rockchip_grf_value rk3368_defaults[] __initconst = {
55400@@ -108,8 +135,37 @@ static const struct rockchip_grf_info rk3399_grf __initconst = {
55401 	.num_values = ARRAY_SIZE(rk3399_defaults),
55402 };
55403 
55404+#define DELAY_ONE_SECOND		0x16E3600
55405+
55406+#define RV1126_GRF1_SDDETFLT_CON	0x10254
55407+#define RV1126_GRF1_UART2RX_LOW_CON	0x10258
55408+#define RV1126_GRF1_IOFUNC_CON1		0x10264
55409+#define RV1126_GRF1_IOFUNC_CON3		0x1026C
55410+#define RV1126_JTAG_GROUP0		0x0      /* mux to sdmmc */
55411+#define RV1126_JTAG_GROUP1		0x1      /* mux to uart2 */
55412+#define FORCE_JTAG_ENABLE		0x1
55413+#define FORCE_JTAG_DISABLE		0x0
55414+
55415+static const struct rockchip_grf_value rv1126_defaults[] __initconst = {
55416+	{ "jtag group0 force", RV1126_GRF1_IOFUNC_CON3,
55417+		HIWORD_UPDATE(FORCE_JTAG_DISABLE, 1, 4) },
55418+	{ "jtag group1 force", RV1126_GRF1_IOFUNC_CON3,
55419+		HIWORD_UPDATE(FORCE_JTAG_DISABLE, 1, 5) },
55420+	{ "jtag group1 tms low delay", RV1126_GRF1_UART2RX_LOW_CON, DELAY_ONE_SECOND },
55421+	{ "switch to jtag groupx", RV1126_GRF1_IOFUNC_CON1, HIWORD_UPDATE(RV1126_JTAG_GROUP0, 1, 15) },
55422+	{ "jtag group0 switching delay", RV1126_GRF1_SDDETFLT_CON, DELAY_ONE_SECOND * 5 },
55423+};
55424+
55425+static const struct rockchip_grf_info rv1126_grf __initconst = {
55426+	.values = rv1126_defaults,
55427+	.num_values = ARRAY_SIZE(rv1126_defaults),
55428+};
55429+
55430 static const struct of_device_id rockchip_grf_dt_match[] __initconst = {
55431 	{
55432+		.compatible = "rockchip,px30-grf",
55433+		.data = (void *)&px30_grf,
55434+	}, {
55435 		.compatible = "rockchip,rk3036-grf",
55436 		.data = (void *)&rk3036_grf,
55437 	}, {
55438@@ -121,6 +177,9 @@ static const struct of_device_id rockchip_grf_dt_match[] __initconst = {
55439 	}, {
55440 		.compatible = "rockchip,rk3288-grf",
55441 		.data = (void *)&rk3288_grf,
55442+	}, {
55443+		.compatible = "rockchip,rk3308-grf",
55444+		.data = (void *)&rk3308_grf,
55445 	}, {
55446 		.compatible = "rockchip,rk3328-grf",
55447 		.data = (void *)&rk3328_grf,
55448@@ -130,6 +189,9 @@ static const struct of_device_id rockchip_grf_dt_match[] __initconst = {
55449 	}, {
55450 		.compatible = "rockchip,rk3399-grf",
55451 		.data = (void *)&rk3399_grf,
55452+	}, {
55453+		.compatible = "rockchip,rv1126-grf",
55454+		.data = (void *)&rv1126_grf,
55455 	},
55456 	{ /* sentinel */ },
55457 };
55458@@ -175,3 +237,6 @@ static int __init rockchip_grf_init(void)
55459 	return 0;
55460 }
55461 postcore_initcall(rockchip_grf_init);
55462+
55463+MODULE_DESCRIPTION("Rockchip GRF");
55464+MODULE_LICENSE("GPL");
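
Two details above are easy to misread: the bare delay constants are 24 MHz cycle counts, and HIWORD_UPDATE() bundles a value with its write mask in the upper half-word. A short check of both, assuming the definition of HIWORD_UPDATE used elsewhere in this file:

/* Assumed definition, as commonly used in Rockchip GRF/pinctrl code. */
#define HIWORD_UPDATE(val, mask, shift) \
		((val) << (shift) | (mask) << ((shift) + 16))

/*
 * 0x16E3600 = 24,000,000  -> one second of the 24 MHz reference clock.
 * 0x7270E00 = 120,000,000 -> 5 * 24,000,000, the 5 s jtag/sdmmc switch delay.
 *
 * HIWORD_UPDATE(0, 0x1f, 10) = 0x1f << 26: writes 0 to bits [14:10] of
 * RK3308_GRF_SOC_CON3 while the write mask leaves all other bits untouched.
 */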
55465diff --git a/drivers/soc/rockchip/io-domain.c b/drivers/soc/rockchip/io-domain.c
55466index b29e829e8..7bedfdab0 100644
55467--- a/drivers/soc/rockchip/io-domain.c
55468+++ b/drivers/soc/rockchip/io-domain.c
55469@@ -51,6 +51,10 @@
55470 #define RK3399_PMUGRF_CON0_VSEL		BIT(8)
55471 #define RK3399_PMUGRF_VSEL_SUPPLY_NUM	9
55472 
55473+#define RK3568_PMU_GRF_IO_VSEL0		(0x0140)
55474+#define RK3568_PMU_GRF_IO_VSEL1		(0x0144)
55475+#define RK3568_PMU_GRF_IO_VSEL2		(0x0148)
55476+
55477 struct rockchip_iodomain;
55478 
55479 /**
55480@@ -74,8 +78,51 @@ struct rockchip_iodomain {
55481 	struct regmap *grf;
55482 	const struct rockchip_iodomain_soc_data *soc_data;
55483 	struct rockchip_iodomain_supply supplies[MAX_SUPPLIES];
55484+	int (*write)(struct rockchip_iodomain_supply *supply, int uV);
55485 };
55486 
55487+static int rk3568_pmu_iodomain_write(struct rockchip_iodomain_supply *supply,
55488+				     int uV)
55489+{
55490+	struct rockchip_iodomain *iod = supply->iod;
55491+	u32 is_3v3 = uV > MAX_VOLTAGE_1_8;
55492+	u32 val0, val1;
55493+	int b;
55494+
55495+	switch (supply->idx) {
55496+	case 0: /* pmuio1 */
55497+		break;
55498+	case 1: /* pmuio2 */
55499+		b = supply->idx;
55500+		val0 = BIT(16 + b) | (is_3v3 ? 0 : BIT(b));
55501+		b = supply->idx + 4;
55502+		val1 = BIT(16 + b) | (is_3v3 ? BIT(b) : 0);
55503+
55504+		regmap_write(iod->grf, RK3568_PMU_GRF_IO_VSEL2, val0);
55505+		regmap_write(iod->grf, RK3568_PMU_GRF_IO_VSEL2, val1);
55506+		break;
55507+	case 3: /* vccio2 */
55508+		break;
55509+	case 2: /* vccio1 */
55510+	case 4: /* vccio3 */
55511+	case 5: /* vccio4 */
55512+	case 6: /* vccio5 */
55513+	case 7: /* vccio6 */
55514+	case 8: /* vccio7 */
55515+		b = supply->idx - 1;
55516+		val0 = BIT(16 + b) | (is_3v3 ? 0 : BIT(b));
55517+		val1 = BIT(16 + b) | (is_3v3 ? BIT(b) : 0);
55518+
55519+		regmap_write(iod->grf, RK3568_PMU_GRF_IO_VSEL0, val0);
55520+		regmap_write(iod->grf, RK3568_PMU_GRF_IO_VSEL1, val1);
55521+		break;
55522+	default:
55523+		return -EINVAL;
55524+	}
55525+
55526+	return 0;
55527+}
55528+
55529 static int rockchip_iodomain_write(struct rockchip_iodomain_supply *supply,
55530 				   int uV)
55531 {
55532@@ -139,7 +186,7 @@ static int rockchip_iodomain_notify(struct notifier_block *nb,
55533 			return NOTIFY_BAD;
55534 	}
55535 
55536-	ret = rockchip_iodomain_write(supply, uV);
55537+	ret = supply->iod->write(supply, uV);
55538 	if (ret && event == REGULATOR_EVENT_PRE_VOLTAGE_CHANGE)
55539 		return NOTIFY_BAD;
55540 
55541@@ -401,6 +448,21 @@ static const struct rockchip_iodomain_soc_data soc_data_rk3399_pmu = {
55542 	.init = rk3399_pmu_iodomain_init,
55543 };
55544 
55545+static const struct rockchip_iodomain_soc_data soc_data_rk3568_pmu = {
55546+	.grf_offset = 0x140,
55547+	.supply_names = {
55548+		"pmuio1",
55549+		"pmuio2",
55550+		"vccio1",
55551+		"vccio2",
55552+		"vccio3",
55553+		"vccio4",
55554+		"vccio5",
55555+		"vccio6",
55556+		"vccio7",
55557+	},
55558+};
55559+
55560 static const struct rockchip_iodomain_soc_data soc_data_rv1108 = {
55561 	.grf_offset = 0x404,
55562 	.supply_names = {
55563@@ -431,6 +493,22 @@ static const struct rockchip_iodomain_soc_data soc_data_rv1108_pmu = {
55564 	},
55565 };
55566 
55567+static const struct rockchip_iodomain_soc_data soc_data_rv1126_pmu = {
55568+	.grf_offset = 0x140,
55569+	.supply_names = {
55570+		NULL,
55571+		"vccio1",
55572+		"vccio2",
55573+		"vccio3",
55574+		"vccio4",
55575+		"vccio5",
55576+		"vccio6",
55577+		"vccio7",
55578+		"pmuio0",
55579+		"pmuio1",
55580+	},
55581+};
55582+
55583 static const struct of_device_id rockchip_iodomain_match[] = {
55584 	{
55585 		.compatible = "rockchip,px30-io-voltage-domain",
55586@@ -472,6 +550,10 @@ static const struct of_device_id rockchip_iodomain_match[] = {
55587 		.compatible = "rockchip,rk3399-pmu-io-voltage-domain",
55588 		.data = &soc_data_rk3399_pmu
55589 	},
55590+	{
55591+		.compatible = "rockchip,rk3568-pmu-io-voltage-domain",
55592+		.data = &soc_data_rk3568_pmu
55593+	},
55594 	{
55595 		.compatible = "rockchip,rv1108-io-voltage-domain",
55596 		.data = &soc_data_rv1108
55597@@ -480,6 +562,10 @@ static const struct of_device_id rockchip_iodomain_match[] = {
55598 		.compatible = "rockchip,rv1108-pmu-io-voltage-domain",
55599 		.data = &soc_data_rv1108_pmu
55600 	},
55601+	{
55602+		.compatible = "rockchip,rv1126-pmu-io-voltage-domain",
55603+		.data = &soc_data_rv1126_pmu
55604+	},
55605 	{ /* sentinel */ },
55606 };
55607 MODULE_DEVICE_TABLE(of, rockchip_iodomain_match);
55608@@ -505,6 +591,11 @@ static int rockchip_iodomain_probe(struct platform_device *pdev)
55609 	match = of_match_node(rockchip_iodomain_match, np);
55610 	iod->soc_data = match->data;
55611 
55612+	if (match->data == &soc_data_rk3568_pmu)
55613+		iod->write = rk3568_pmu_iodomain_write;
55614+	else
55615+		iod->write = rockchip_iodomain_write;
55616+
55617 	parent = pdev->dev.parent;
55618 	if (parent && parent->of_node) {
55619 		iod->grf = syscon_node_to_regmap(parent->of_node);
55620@@ -565,7 +656,7 @@ static int rockchip_iodomain_probe(struct platform_device *pdev)
55621 		supply->reg = reg;
55622 		supply->nb.notifier_call = rockchip_iodomain_notify;
55623 
55624-		ret = rockchip_iodomain_write(supply, uV);
55625+		ret = iod->write(supply, uV);
55626 		if (ret) {
55627 			supply->reg = NULL;
55628 			goto unreg_notify;
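
For rk3568, the per-supply write above spreads the 1.8 V / 3.3 V selection over two registers: VSEL0 appears to carry the 1.8 V select bits and VSEL1 the 3.3 V select bits, each guarded by a write-enable bit 16 positions higher. A small sketch reproducing the bit math for the vccio supplies, with a worked example in the comment:

#include <linux/bits.h>
#include <linux/types.h>

/* Bit math for vccio1..vccio7 (supply idx 2..8 maps to bit 1..7). */
static void rk3568_vccio_vsel(unsigned int idx, bool is_3v3,
			      u32 *vsel0, u32 *vsel1)
{
	unsigned int b = idx - 1;

	*vsel0 = BIT(16 + b) | (is_3v3 ? 0 : BIT(b));	/* 1.8 V select */
	*vsel1 = BIT(16 + b) | (is_3v3 ? BIT(b) : 0);	/* 3.3 V select */
}

/*
 * Example: "vccio4" is supply idx 5, so b = 4.
 *   1.8 V request: VSEL0 = 0x00100010, VSEL1 = 0x00100000
 *   3.3 V request: VSEL0 = 0x00100000, VSEL1 = 0x00100010
 */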
55629diff --git a/drivers/soc/rockchip/pm_domains.c b/drivers/soc/rockchip/pm_domains.c
55630index 54eb6cfc5..f0484cf2e 100644
55631--- a/drivers/soc/rockchip/pm_domains.c
55632+++ b/drivers/soc/rockchip/pm_domains.c
55633@@ -5,6 +5,7 @@
55634  * Copyright (c) 2015 ROCKCHIP, Co. Ltd.
55635  */
55636 
55637+#include <linux/module.h>
55638 #include <linux/io.h>
55639 #include <linux/iopoll.h>
55640 #include <linux/err.h>
55641@@ -15,8 +16,15 @@
55642 #include <linux/of_platform.h>
55643 #include <linux/clk.h>
55644 #include <linux/regmap.h>
55645+#include <linux/slab.h>
55646 #include <linux/mfd/syscon.h>
55647+#include <linux/pm_runtime.h>
55648+#include <linux/regulator/consumer.h>
55649+#include <soc/rockchip/pm_domains.h>
55650+#include <soc/rockchip/rockchip_dmc.h>
55651 #include <dt-bindings/power/px30-power.h>
55652+#include <dt-bindings/power/rv1126-power.h>
55653+#include <dt-bindings/power/rk1808-power.h>
55654 #include <dt-bindings/power/rk3036-power.h>
55655 #include <dt-bindings/power/rk3066-power.h>
55656 #include <dt-bindings/power/rk3128-power.h>
55657@@ -27,8 +35,11 @@
55658 #include <dt-bindings/power/rk3366-power.h>
55659 #include <dt-bindings/power/rk3368-power.h>
55660 #include <dt-bindings/power/rk3399-power.h>
55661+#include <dt-bindings/power/rk3568-power.h>
55662+#include <dt-bindings/power/rk3588-power.h>
55663 
55664 struct rockchip_domain_info {
55665+	const char *name;
55666 	int pwr_mask;
55667 	int status_mask;
55668 	int req_mask;
55669@@ -37,6 +48,10 @@ struct rockchip_domain_info {
55670 	bool active_wakeup;
55671 	int pwr_w_mask;
55672 	int req_w_mask;
55673+	int repair_status_mask;
55674+	bool keepon_startup;
55675+	u32 pwr_offset;
55676+	u32 req_offset;
55677 };
55678 
55679 struct rockchip_pmu_info {
55680@@ -45,6 +60,7 @@ struct rockchip_pmu_info {
55681 	u32 req_offset;
55682 	u32 idle_offset;
55683 	u32 ack_offset;
55684+	u32 repair_status_offset;
55685 
55686 	u32 core_pwrcnt_offset;
55687 	u32 gpu_pwrcnt_offset;
55688@@ -72,6 +88,9 @@ struct rockchip_pm_domain {
55689 	u32 *qos_save_regs[MAX_QOS_REGS_NUM];
55690 	int num_clks;
55691 	struct clk_bulk_data *clks;
55692+	bool is_ignore_pwr;
55693+	bool is_qos_saved;
55694+	struct regulator *supply;
55695 };
55696 
55697 struct rockchip_pmu {
55698@@ -83,32 +102,89 @@ struct rockchip_pmu {
55699 	struct generic_pm_domain *domains[];
55700 };
55701 
55702+static struct rockchip_pmu *g_pmu;
55703+static bool pm_domain_always_on;
55704+
55705+module_param_named(always_on, pm_domain_always_on, bool, 0644);
55706+MODULE_PARM_DESC(always_on,
55707+		 "Always keep pm domains powered on except during system suspend.");
55708+
55709+static void rockchip_pmu_lock(struct rockchip_pm_domain *pd)
55710+{
55711+	mutex_lock(&pd->pmu->mutex);
55712+	rockchip_dmcfreq_lock_nested();
55713+}
55714+
55715+static void rockchip_pmu_unlock(struct rockchip_pm_domain *pd)
55716+{
55717+	rockchip_dmcfreq_unlock();
55718+	mutex_unlock(&pd->pmu->mutex);
55719+}
55720+
55721 #define to_rockchip_pd(gpd) container_of(gpd, struct rockchip_pm_domain, genpd)
55722 
55723-#define DOMAIN(pwr, status, req, idle, ack, wakeup)	\
55724+#define DOMAIN(_name, pwr, status, req, idle, ack, wakeup, keepon)	\
55725 {							\
55726+	.name = _name,					\
55727 	.pwr_mask = (pwr),				\
55728 	.status_mask = (status),			\
55729 	.req_mask = (req),				\
55730 	.idle_mask = (idle),				\
55731 	.ack_mask = (ack),				\
55732 	.active_wakeup = (wakeup),			\
55733+	.keepon_startup = (keepon),			\
55734+}
55735+
55736+#define DOMAIN_M(_name, pwr, status, req, idle, ack, wakeup, keepon)	\
55737+{							\
55738+	.name = _name,					\
55739+	.pwr_w_mask = (pwr) << 16,			\
55740+	.pwr_mask = (pwr),				\
55741+	.status_mask = (status),			\
55742+	.req_w_mask = (req) << 16,			\
55743+	.req_mask = (req),				\
55744+	.idle_mask = (idle),				\
55745+	.ack_mask = (ack),				\
55746+	.active_wakeup = wakeup,			\
55747+	.keepon_startup = keepon,			\
55748+}
55749+
55750+#define DOMAIN_M_O(_name, pwr, status, p_offset, req, idle, ack, r_offset, wakeup, keepon)	\
55751+{							\
55752+	.name = _name,					\
55753+	.pwr_w_mask = (pwr) << 16,			\
55754+	.pwr_mask = (pwr),				\
55755+	.status_mask = (status),			\
55756+	.req_w_mask = (req) << 16,			\
55757+	.req_mask = (req),				\
55758+	.idle_mask = (idle),				\
55759+	.ack_mask = (ack),				\
55760+	.active_wakeup = wakeup,			\
55761+	.keepon_startup = keepon,			\
55762+	.pwr_offset = p_offset,				\
55763+	.req_offset = r_offset,				\
55764 }
55765 
55766-#define DOMAIN_M(pwr, status, req, idle, ack, wakeup)	\
55767+#define DOMAIN_M_O_R(_name, p_offset, pwr, status, r_status, r_offset, req, idle, ack, wakeup, keepon)	\
55768 {							\
55769+	.name = _name,					\
55770+	.pwr_offset = p_offset,				\
55771 	.pwr_w_mask = (pwr) << 16,			\
55772 	.pwr_mask = (pwr),				\
55773 	.status_mask = (status),			\
55774+	.repair_status_mask = (r_status),		\
55775+	.req_offset = r_offset,				\
55776 	.req_w_mask = (req) << 16,			\
55777 	.req_mask = (req),				\
55778 	.idle_mask = (idle),				\
55779 	.ack_mask = (ack),				\
55780 	.active_wakeup = wakeup,			\
55781+	.keepon_startup = keepon,			\
55782 }
55783 
55784-#define DOMAIN_RK3036(req, ack, idle, wakeup)		\
55785+#define DOMAIN_RK3036(_name, req, ack, idle, wakeup)	\
55786 {							\
55787+	.name = _name,					\
55788 	.req_mask = (req),				\
55789 	.req_w_mask = (req) << 16,			\
55790 	.ack_mask = (ack),				\
55791@@ -116,20 +192,53 @@ struct rockchip_pmu {
55792 	.active_wakeup = wakeup,			\
55793 }
55794 
55795-#define DOMAIN_PX30(pwr, status, req, wakeup)		\
55796-	DOMAIN_M(pwr, status, req, (req) << 16, req, wakeup)
55797+#define DOMAIN_PX30(name, pwr, status, req, wakeup)		\
55798+	DOMAIN_M(name, pwr, status, req, (req) << 16, req, wakeup, false)
55799+
55800+#define DOMAIN_PX30_PROTECT(name, pwr, status, req, wakeup)	\
55801+	DOMAIN_M(name, pwr, status, req, (req) << 16, req, wakeup, true)
55802+
55803+#define DOMAIN_RV1126(name, pwr, req, idle, wakeup)		\
55804+	DOMAIN_M(name, pwr, pwr, req, idle, idle, wakeup, false)
55805+
55806+#define DOMAIN_RV1126_PROTECT(name, pwr, req, idle, wakeup)	\
55807+	DOMAIN_M(name, pwr, pwr, req, idle, idle, wakeup, true)
55808+
55809+#define DOMAIN_RV1126_O(name, pwr, req, idle, r_offset, wakeup)	\
55810+	DOMAIN_M_O(name, pwr, pwr, 0, req, idle, idle, r_offset, wakeup, false)
55811+
55812+#define DOMAIN_RK3288(name, pwr, status, req, wakeup)		\
55813+	DOMAIN(name, pwr, status, req, req, (req) << 16, wakeup, false)
55814+
55815+#define DOMAIN_RK3288_PROTECT(name, pwr, status, req, wakeup)	\
55816+	DOMAIN(name, pwr, status, req, req, (req) << 16, wakeup, true)
55817 
55818-#define DOMAIN_RK3288(pwr, status, req, wakeup)		\
55819-	DOMAIN(pwr, status, req, req, (req) << 16, wakeup)
55820+#define DOMAIN_RK3328(name, pwr, status, req, wakeup)		\
55821+	DOMAIN_M(name, pwr, pwr, req, (req) << 10, req, wakeup, false)
55822 
55823-#define DOMAIN_RK3328(pwr, status, req, wakeup)		\
55824-	DOMAIN_M(pwr, pwr, req, (req) << 10, req, wakeup)
55825+#define DOMAIN_RK3368(name, pwr, status, req, wakeup)		\
55826+	DOMAIN(name, pwr, status, req, (req) << 16, req, wakeup, false)
55827 
55828-#define DOMAIN_RK3368(pwr, status, req, wakeup)		\
55829-	DOMAIN(pwr, status, req, (req) << 16, req, wakeup)
55830+#define DOMAIN_RK3368_PROTECT(name, pwr, status, req, wakeup)	\
55831+	DOMAIN(name, pwr, status, req, (req) << 16, req, wakeup, true)
55832 
55833-#define DOMAIN_RK3399(pwr, status, req, wakeup)		\
55834-	DOMAIN(pwr, status, req, req, req, wakeup)
55835+#define DOMAIN_RK3399(name, pwr, status, req, wakeup)		\
55836+	DOMAIN(name, pwr, status, req, req, req, wakeup, false)
55837+
55838+#define DOMAIN_RK3399_PROTECT(name, pwr, status, req, wakeup)	\
55839+	DOMAIN(name, pwr, status, req, req, req, wakeup, true)
55840+
55841+#define DOMAIN_RK3568(name, pwr, req, wakeup)			\
55842+	DOMAIN_M(name, pwr, pwr, req, req, req, wakeup, false)
55843+
55844+#define DOMAIN_RK3568_PROTECT(name, pwr, req, wakeup)		\
55845+	DOMAIN_M(name, pwr, pwr, req, req, req, wakeup, true)
55846+
55847+#define DOMAIN_RK3588(name, p_offset, pwr, status, r_status, r_offset, req, idle, wakeup)	\
55848+	DOMAIN_M_O_R(name, p_offset, pwr, status, r_status, r_offset, req, idle, idle, wakeup, false)
55849+
55850+#define DOMAIN_RK3588_P(name, p_offset, pwr, status, r_status, r_offset, req, idle, wakeup)	\
55851+	DOMAIN_M_O_R(name, p_offset, pwr, status, r_status, r_offset, req, idle, idle, wakeup, true)
55852 
55853 static bool rockchip_pmu_domain_is_idle(struct rockchip_pm_domain *pd)
55854 {
55855@@ -155,20 +264,25 @@ static int rockchip_pmu_set_idle_request(struct rockchip_pm_domain *pd,
55856 	const struct rockchip_domain_info *pd_info = pd->info;
55857 	struct generic_pm_domain *genpd = &pd->genpd;
55858 	struct rockchip_pmu *pmu = pd->pmu;
55859+	u32 pd_req_offset = 0;
55860 	unsigned int target_ack;
55861 	unsigned int val;
55862 	bool is_idle;
55863-	int ret;
55864+	int ret = 0;
55865+
55866+	if (pd_info->req_offset)
55867+		pd_req_offset = pd_info->req_offset;
55868 
55869 	if (pd_info->req_mask == 0)
55870 		return 0;
55871 	else if (pd_info->req_w_mask)
55872-		regmap_write(pmu->regmap, pmu->info->req_offset,
55873+		regmap_write(pmu->regmap, pmu->info->req_offset + pd_req_offset,
55874 			     idle ? (pd_info->req_mask | pd_info->req_w_mask) :
55875 			     pd_info->req_w_mask);
55876 	else
55877-		regmap_update_bits(pmu->regmap, pmu->info->req_offset,
55878-				   pd_info->req_mask, idle ? -1U : 0);
55879+		regmap_update_bits(pmu->regmap, pmu->info->req_offset +
55880+				   pd_req_offset, pd_info->req_mask,
55881+				   idle ? -1U : 0);
55882 
55883 	dsb(sy);
55884 
55885@@ -179,23 +293,49 @@ static int rockchip_pmu_set_idle_request(struct rockchip_pm_domain *pd,
55886 					0, 10000);
55887 	if (ret) {
55888 		dev_err(pmu->dev,
55889-			"failed to get ack on domain '%s', val=0x%x\n",
55890-			genpd->name, val);
55891-		return ret;
55892+			"failed to get ack on domain '%s', target_idle = %d, target_ack = %d, val=0x%x\n",
55893+			genpd->name, idle, target_ack, val);
55894+		goto error;
55895 	}
55896 
55897 	ret = readx_poll_timeout_atomic(rockchip_pmu_domain_is_idle, pd,
55898 					is_idle, is_idle == idle, 0, 10000);
55899 	if (ret) {
55900 		dev_err(pmu->dev,
55901-			"failed to set idle on domain '%s', val=%d\n",
55902-			genpd->name, is_idle);
55903-		return ret;
55904+			"failed to set idle on domain '%s', target_idle = %d, val=%d\n",
55905+			genpd->name, idle, is_idle);
55906+		goto error;
55907 	}
55908 
55909-	return 0;
55910+	return ret;
55911+error:
55912+	panic("panic_on_set_idle set ...\n");
55913+	return ret;
55914 }
55915 
55916+int rockchip_pmu_idle_request(struct device *dev, bool idle)
55917+{
55918+	struct generic_pm_domain *genpd;
55919+	struct rockchip_pm_domain *pd;
55920+	int ret;
55921+
55922+	if (IS_ERR_OR_NULL(dev))
55923+		return -EINVAL;
55924+
55925+	if (IS_ERR_OR_NULL(dev->pm_domain))
55926+		return -EINVAL;
55927+
55928+	genpd = pd_to_genpd(dev->pm_domain);
55929+	pd = to_rockchip_pd(genpd);
55930+
55931+	rockchip_pmu_lock(pd);
55932+	ret = rockchip_pmu_set_idle_request(pd, idle);
55933+	rockchip_pmu_unlock(pd);
55934+
55935+	return ret;
55936+}
55937+EXPORT_SYMBOL(rockchip_pmu_idle_request);
55938+
55939 static int rockchip_pmu_save_qos(struct rockchip_pm_domain *pd)
55940 {
55941 	int i;
55942@@ -245,11 +385,63 @@ static int rockchip_pmu_restore_qos(struct rockchip_pm_domain *pd)
55943 	return 0;
55944 }
55945 
55946+int rockchip_save_qos(struct device *dev)
55947+{
55948+	struct generic_pm_domain *genpd;
55949+	struct rockchip_pm_domain *pd;
55950+	int ret;
55951+
55952+	if (IS_ERR_OR_NULL(dev))
55953+		return -EINVAL;
55954+
55955+	if (IS_ERR_OR_NULL(dev->pm_domain))
55956+		return -EINVAL;
55957+
55958+	genpd = pd_to_genpd(dev->pm_domain);
55959+	pd = to_rockchip_pd(genpd);
55960+
55961+	rockchip_pmu_lock(pd);
55962+	ret = rockchip_pmu_save_qos(pd);
55963+	rockchip_pmu_unlock(pd);
55964+
55965+	return ret;
55966+}
55967+EXPORT_SYMBOL(rockchip_save_qos);
55968+
55969+int rockchip_restore_qos(struct device *dev)
55970+{
55971+	struct generic_pm_domain *genpd;
55972+	struct rockchip_pm_domain *pd;
55973+	int ret;
55974+
55975+	if (IS_ERR_OR_NULL(dev))
55976+		return -EINVAL;
55977+
55978+	if (IS_ERR_OR_NULL(dev->pm_domain))
55979+		return -EINVAL;
55980+
55981+	genpd = pd_to_genpd(dev->pm_domain);
55982+	pd = to_rockchip_pd(genpd);
55983+
55984+	rockchip_pmu_lock(pd);
55985+	ret = rockchip_pmu_restore_qos(pd);
55986+	rockchip_pmu_unlock(pd);
55987+
55988+	return ret;
55989+}
55990+EXPORT_SYMBOL(rockchip_restore_qos);
55991+
55992 static bool rockchip_pmu_domain_is_on(struct rockchip_pm_domain *pd)
55993 {
55994 	struct rockchip_pmu *pmu = pd->pmu;
55995 	unsigned int val;
55996 
55997+	if (pd->info->repair_status_mask) {
55998+		regmap_read(pmu->regmap, pmu->info->repair_status_offset, &val);
55999+		/* 1'b1: power on, 1'b0: power off */
56000+		return val & pd->info->repair_status_mask;
56001+	}
56002+
56003 	/* check idle status for idle-only domains */
56004 	if (pd->info->status_mask == 0)
56005 		return !rockchip_pmu_domain_is_idle(pd);
56006@@ -260,76 +452,131 @@ static bool rockchip_pmu_domain_is_on(struct rockchip_pm_domain *pd)
56007 	return !(val & pd->info->status_mask);
56008 }
56009 
56010-static void rockchip_do_pmu_set_power_domain(struct rockchip_pm_domain *pd,
56011-					     bool on)
56012+static int rockchip_do_pmu_set_power_domain(struct rockchip_pm_domain *pd,
56013+					    bool on)
56014 {
56015 	struct rockchip_pmu *pmu = pd->pmu;
56016 	struct generic_pm_domain *genpd = &pd->genpd;
56017+	u32 pd_pwr_offset = 0;
56018 	bool is_on;
56019+	int ret = 0;
56020+
56021+	if (pd->info->pwr_offset)
56022+		pd_pwr_offset = pd->info->pwr_offset;
56023 
56024 	if (pd->info->pwr_mask == 0)
56025-		return;
56026+		return 0;
56027 	else if (pd->info->pwr_w_mask)
56028-		regmap_write(pmu->regmap, pmu->info->pwr_offset,
56029+		regmap_write(pmu->regmap, pmu->info->pwr_offset + pd_pwr_offset,
56030 			     on ? pd->info->pwr_w_mask :
56031 			     (pd->info->pwr_mask | pd->info->pwr_w_mask));
56032 	else
56033-		regmap_update_bits(pmu->regmap, pmu->info->pwr_offset,
56034-				   pd->info->pwr_mask, on ? 0 : -1U);
56035+		regmap_update_bits(pmu->regmap, pmu->info->pwr_offset +
56036+				   pd_pwr_offset, pd->info->pwr_mask,
56037+				   on ? 0 : -1U);
56038 
56039 	dsb(sy);
56040 
56041-	if (readx_poll_timeout_atomic(rockchip_pmu_domain_is_on, pd, is_on,
56042-				      is_on == on, 0, 10000)) {
56043+	ret = readx_poll_timeout_atomic(rockchip_pmu_domain_is_on, pd, is_on,
56044+					is_on == on, 0, 10000);
56045+	if (ret) {
56046 		dev_err(pmu->dev,
56047-			"failed to set domain '%s', val=%d\n",
56048-			genpd->name, is_on);
56049-		return;
56050+			"failed to set domain '%s', target_on = %d, val=%d\n",
56051+			genpd->name, on, is_on);
56052+		goto error;
56053 	}
56054+	return ret;
56055+
56056+error:
56057+	panic("panic_on_set_domain set ...\n");
56058+	return ret;
56059 }
56060 
56061 static int rockchip_pd_power(struct rockchip_pm_domain *pd, bool power_on)
56062 {
56063 	struct rockchip_pmu *pmu = pd->pmu;
56064-	int ret;
56065+	int ret = 0;
56066+	struct generic_pm_domain *genpd = &pd->genpd;
56067+
56068+	if (pm_domain_always_on && !power_on)
56069+		return 0;
56070 
56071-	mutex_lock(&pmu->mutex);
56072+	rockchip_pmu_lock(pd);
56073 
56074 	if (rockchip_pmu_domain_is_on(pd) != power_on) {
56075+		if (IS_ERR_OR_NULL(pd->supply) &&
56076+		    PTR_ERR(pd->supply) != -ENODEV)
56077+			pd->supply = devm_regulator_get_optional(pd->pmu->dev,
56078+								 genpd->name);
56079+
56080+		if (power_on && !IS_ERR(pd->supply)) {
56081+			ret = regulator_enable(pd->supply);
56082+			if (ret < 0) {
56083+				dev_err(pd->pmu->dev, "failed to enable vdd supply for '%s'\n",
56084+					genpd->name);
56085+				rockchip_pmu_unlock(pd);
56086+				return ret;
56087+			}
56088+		}
56089+
56090 		ret = clk_bulk_enable(pd->num_clks, pd->clks);
56091 		if (ret < 0) {
56092 			dev_err(pmu->dev, "failed to enable clocks\n");
56093-			mutex_unlock(&pmu->mutex);
56094+			rockchip_pmu_unlock(pd);
56095 			return ret;
56096 		}
56097 
56098 		if (!power_on) {
56099 			rockchip_pmu_save_qos(pd);
56100+			pd->is_qos_saved = true;
56101 
56102 			/* if powering down, idle request to NIU first */
56103-			rockchip_pmu_set_idle_request(pd, true);
56104+			ret = rockchip_pmu_set_idle_request(pd, true);
56105+			if (ret) {
56106+				dev_err(pd->pmu->dev, "failed to set idle request for '%s'\n",
56107+					genpd->name);
56108+				goto out;
56109+			}
56110 		}
56111 
56112-		rockchip_do_pmu_set_power_domain(pd, power_on);
56113+		ret = rockchip_do_pmu_set_power_domain(pd, power_on);
56114+		if (ret) {
56115+			dev_err(pd->pmu->dev, "failed to set power '%s' = %d\n",
56116+				genpd->name, power_on);
56117+			goto out;
56118+		}
56119 
56120 		if (power_on) {
56121 			/* if powering up, leave idle mode */
56122-			rockchip_pmu_set_idle_request(pd, false);
56123+			ret = rockchip_pmu_set_idle_request(pd, false);
56124+			if (ret) {
56125+				dev_err(pd->pmu->dev, "failed to clear idle request for '%s'\n",
56126+					genpd->name);
56127+				goto out;
56128+			}
56129 
56130-			rockchip_pmu_restore_qos(pd);
56131+			if (pd->is_qos_saved)
56132+				rockchip_pmu_restore_qos(pd);
56133 		}
56134 
56135+out:
56136 		clk_bulk_disable(pd->num_clks, pd->clks);
56137+
56138+		if (!power_on && !IS_ERR(pd->supply))
56139+			ret = regulator_disable(pd->supply);
56140 	}
56141 
56142-	mutex_unlock(&pmu->mutex);
56143-	return 0;
56144+	rockchip_pmu_unlock(pd);
56145+	return ret;
56146 }
56147 
56148 static int rockchip_pd_power_on(struct generic_pm_domain *domain)
56149 {
56150 	struct rockchip_pm_domain *pd = to_rockchip_pd(domain);
56151 
56152+	if (pd->is_ignore_pwr)
56153+		return 0;
56154+
56155 	return rockchip_pd_power(pd, true);
56156 }
56157 
56158@@ -337,8 +584,70 @@ static int rockchip_pd_power_off(struct generic_pm_domain *domain)
56159 {
56160 	struct rockchip_pm_domain *pd = to_rockchip_pd(domain);
56161 
56162+	if (pd->is_ignore_pwr)
56163+		return 0;
56164+
56165+	return rockchip_pd_power(pd, false);
56166+}
56167+
56168+int rockchip_pmu_pd_on(struct device *dev)
56169+{
56170+	struct generic_pm_domain *genpd;
56171+	struct rockchip_pm_domain *pd;
56172+
56173+	if (IS_ERR_OR_NULL(dev))
56174+		return -EINVAL;
56175+
56176+	if (IS_ERR_OR_NULL(dev->pm_domain))
56177+		return -EINVAL;
56178+
56179+	genpd = pd_to_genpd(dev->pm_domain);
56180+	pd = to_rockchip_pd(genpd);
56181+
56182+	return rockchip_pd_power(pd, true);
56183+}
56184+EXPORT_SYMBOL(rockchip_pmu_pd_on);
56185+
56186+int rockchip_pmu_pd_off(struct device *dev)
56187+{
56188+	struct generic_pm_domain *genpd;
56189+	struct rockchip_pm_domain *pd;
56190+
56191+	if (IS_ERR_OR_NULL(dev))
56192+		return -EINVAL;
56193+
56194+	if (IS_ERR_OR_NULL(dev->pm_domain))
56195+		return -EINVAL;
56196+
56197+	genpd = pd_to_genpd(dev->pm_domain);
56198+	pd = to_rockchip_pd(genpd);
56199+
56200 	return rockchip_pd_power(pd, false);
56201 }
56202+EXPORT_SYMBOL(rockchip_pmu_pd_off);
56203+
56204+bool rockchip_pmu_pd_is_on(struct device *dev)
56205+{
56206+	struct generic_pm_domain *genpd;
56207+	struct rockchip_pm_domain *pd;
56208+	bool is_on;
56209+
56210+	if (IS_ERR_OR_NULL(dev))
56211+		return false;
56212+
56213+	if (IS_ERR_OR_NULL(dev->pm_domain))
56214+		return false;
56215+
56216+	genpd = pd_to_genpd(dev->pm_domain);
56217+	pd = to_rockchip_pd(genpd);
56218+
56219+	rockchip_pmu_lock(pd);
56220+	is_on = rockchip_pmu_domain_is_on(pd);
56221+	rockchip_pmu_unlock(pd);
56222+
56223+	return is_on;
56224+}
56225+EXPORT_SYMBOL(rockchip_pmu_pd_is_on);
56226 
56227 static int rockchip_pd_attach_dev(struct generic_pm_domain *genpd,
56228 				  struct device *dev)
56229@@ -378,15 +687,58 @@ static void rockchip_pd_detach_dev(struct generic_pm_domain *genpd,
56230 	pm_clk_destroy(dev);
56231 }
56232 
56233+static void rockchip_pd_qos_init(struct rockchip_pm_domain *pd,
56234+				 bool **qos_is_need_init)
56235+{
56236+	int i, is_pd_on;
56237+
56238+	is_pd_on = rockchip_pmu_domain_is_on(pd);
56239+	if (!is_pd_on)
56240+		rockchip_pd_power(pd, true);
56241+
56242+	for (i = 0; i < pd->num_qos; i++) {
56243+		if (qos_is_need_init[0][i])
56244+			regmap_write(pd->qos_regmap[i],
56245+				     QOS_PRIORITY,
56246+				     pd->qos_save_regs[0][i]);
56247+
56248+		if (qos_is_need_init[1][i])
56249+			regmap_write(pd->qos_regmap[i],
56250+				     QOS_MODE,
56251+				     pd->qos_save_regs[1][i]);
56252+
56253+		if (qos_is_need_init[2][i])
56254+			regmap_write(pd->qos_regmap[i],
56255+				     QOS_BANDWIDTH,
56256+				     pd->qos_save_regs[2][i]);
56257+
56258+		if (qos_is_need_init[3][i])
56259+			regmap_write(pd->qos_regmap[i],
56260+				     QOS_SATURATION,
56261+				     pd->qos_save_regs[3][i]);
56262+
56263+		if (qos_is_need_init[4][i])
56264+			regmap_write(pd->qos_regmap[i],
56265+				     QOS_EXTCONTROL,
56266+				     pd->qos_save_regs[4][i]);
56267+	}
56268+
56269+	if (!is_pd_on)
56270+		rockchip_pd_power(pd, false);
56271+}
56272+
56273 static int rockchip_pm_add_one_domain(struct rockchip_pmu *pmu,
56274 				      struct device_node *node)
56275 {
56276 	const struct rockchip_domain_info *pd_info;
56277 	struct rockchip_pm_domain *pd;
56278 	struct device_node *qos_node;
56279+	int num_qos = 0, num_qos_reg = 0;
56280 	int i, j;
56281-	u32 id;
56282+	u32 id, val;
56283 	int error;
56284+	bool *qos_is_need_init[MAX_QOS_REGS_NUM] = { NULL };
56285+	bool is_qos_need_init = false;
56286 
56287 	error = of_property_read_u32(node, "reg", &id);
56288 	if (error) {
56289@@ -401,6 +753,8 @@ static int rockchip_pm_add_one_domain(struct rockchip_pmu *pmu,
56290 			node, id);
56291 		return -EINVAL;
56292 	}
56293+	if (pmu->genpd_data.domains[id])
56294+		return 0;
56295 
56296 	pd_info = &pmu->info->domain_info[id];
56297 	if (!pd_info) {
56298@@ -415,6 +769,8 @@ static int rockchip_pm_add_one_domain(struct rockchip_pmu *pmu,
56299 
56300 	pd->info = pd_info;
56301 	pd->pmu = pmu;
56302+	if (!pd_info->pwr_mask)
56303+		pd->is_ignore_pwr = true;
56304 
56305 	pd->num_clks = of_clk_get_parent_count(node);
56306 	if (pd->num_clks > 0) {
56307@@ -443,8 +799,14 @@ static int rockchip_pm_add_one_domain(struct rockchip_pmu *pmu,
56308 	if (error)
56309 		goto err_put_clocks;
56310 
56311-	pd->num_qos = of_count_phandle_with_args(node, "pm_qos",
56312-						 NULL);
56313+	num_qos = of_count_phandle_with_args(node, "pm_qos", NULL);
56314+
56315+	for (j = 0; j < num_qos; j++) {
56316+		qos_node = of_parse_phandle(node, "pm_qos", j);
56317+		if (qos_node && of_device_is_available(qos_node))
56318+			pd->num_qos++;
56319+		of_node_put(qos_node);
56320+	}
56321 
56322 	if (pd->num_qos > 0) {
56323 		pd->qos_regmap = devm_kcalloc(pmu->dev, pd->num_qos,
56324@@ -455,55 +817,127 @@ static int rockchip_pm_add_one_domain(struct rockchip_pmu *pmu,
56325 			goto err_unprepare_clocks;
56326 		}
56327 
56328-		for (j = 0; j < MAX_QOS_REGS_NUM; j++) {
56329-			pd->qos_save_regs[j] = devm_kcalloc(pmu->dev,
56330-							    pd->num_qos,
56331-							    sizeof(u32),
56332-							    GFP_KERNEL);
56333-			if (!pd->qos_save_regs[j]) {
56334-				error = -ENOMEM;
56335-				goto err_unprepare_clocks;
56336-			}
56337+		pd->qos_save_regs[0] = (u32 *)devm_kmalloc(pmu->dev,
56338+							   sizeof(u32) *
56339+							   MAX_QOS_REGS_NUM *
56340+							   pd->num_qos,
56341+							   GFP_KERNEL);
56342+		if (!pd->qos_save_regs[0]) {
56343+			error = -ENOMEM;
56344+			goto err_unprepare_clocks;
56345+		}
56346+		qos_is_need_init[0] = kzalloc(sizeof(bool) *
56347+					      MAX_QOS_REGS_NUM *
56348+					      pd->num_qos,
56349+					      GFP_KERNEL);
56350+		if (!qos_is_need_init[0]) {
56351+			error = -ENOMEM;
56352+			goto err_unprepare_clocks;
56353+		}
56354+		for (i = 1; i < MAX_QOS_REGS_NUM; i++) {
56355+			pd->qos_save_regs[i] = pd->qos_save_regs[i - 1] +
56356+					       num_qos;
56357+			qos_is_need_init[i] = qos_is_need_init[i - 1] + num_qos;
56358 		}
56359 
56360-		for (j = 0; j < pd->num_qos; j++) {
56361+		for (j = 0; j < num_qos; j++) {
56362 			qos_node = of_parse_phandle(node, "pm_qos", j);
56363 			if (!qos_node) {
56364 				error = -ENODEV;
56365 				goto err_unprepare_clocks;
56366 			}
56367-			pd->qos_regmap[j] = syscon_node_to_regmap(qos_node);
56368-			if (IS_ERR(pd->qos_regmap[j])) {
56369-				error = -ENODEV;
56370-				of_node_put(qos_node);
56371-				goto err_unprepare_clocks;
56372+			if (of_device_is_available(qos_node)) {
56373+				pd->qos_regmap[num_qos_reg] =
56374+					syscon_node_to_regmap(qos_node);
56375+				if (IS_ERR(pd->qos_regmap[num_qos_reg])) {
56376+					error = -ENODEV;
56377+					of_node_put(qos_node);
56378+					goto err_unprepare_clocks;
56379+				}
56380+				if (!of_property_read_u32(qos_node,
56381+							  "priority-init",
56382+							  &val)) {
56383+					pd->qos_save_regs[0][j] = val;
56384+					qos_is_need_init[0][j] = true;
56385+					is_qos_need_init = true;
56386+				}
56387+
56388+				if (!of_property_read_u32(qos_node,
56389+							  "mode-init",
56390+							  &val)) {
56391+					pd->qos_save_regs[1][j] = val;
56392+					qos_is_need_init[1][j] = true;
56393+					is_qos_need_init = true;
56394+				}
56395+
56396+				if (!of_property_read_u32(qos_node,
56397+							  "bandwidth-init",
56398+							  &val)) {
56399+					pd->qos_save_regs[2][j] = val;
56400+					qos_is_need_init[2][j] = true;
56401+					is_qos_need_init = true;
56402+				}
56403+
56404+				if (!of_property_read_u32(qos_node,
56405+							  "saturation-init",
56406+							  &val)) {
56407+					pd->qos_save_regs[3][j] = val;
56408+					qos_is_need_init[3][j] = true;
56409+					is_qos_need_init = true;
56410+				}
56411+
56412+				if (!of_property_read_u32(qos_node,
56413+							  "extcontrol-init",
56414+							  &val)) {
56415+					pd->qos_save_regs[4][j] = val;
56416+					qos_is_need_init[4][j] = true;
56417+					is_qos_need_init = true;
56418+				}
56419+
56420+				num_qos_reg++;
56421 			}
56422 			of_node_put(qos_node);
56423+			if (num_qos_reg > pd->num_qos)
56424+				goto err_unprepare_clocks;
56425 		}
56426 	}
56427 
56428-	error = rockchip_pd_power(pd, true);
56429-	if (error) {
56430-		dev_err(pmu->dev,
56431-			"failed to power on domain '%pOFn': %d\n",
56432-			node, error);
56433-		goto err_unprepare_clocks;
56434-	}
56435-
56436-	pd->genpd.name = node->name;
56437+	if (pd->info->name)
56438+		pd->genpd.name = pd->info->name;
56439+	else
56440+		pd->genpd.name = kbasename(node->full_name);
56441 	pd->genpd.power_off = rockchip_pd_power_off;
56442 	pd->genpd.power_on = rockchip_pd_power_on;
56443 	pd->genpd.attach_dev = rockchip_pd_attach_dev;
56444 	pd->genpd.detach_dev = rockchip_pd_detach_dev;
56445-	pd->genpd.flags = GENPD_FLAG_PM_CLK;
56446 	if (pd_info->active_wakeup)
56447 		pd->genpd.flags |= GENPD_FLAG_ACTIVE_WAKEUP;
56448-	pm_genpd_init(&pd->genpd, NULL, false);
56449+#ifndef MODULE
56450+	if (pd_info->keepon_startup) {
56451+		pd->genpd.flags |= GENPD_FLAG_ALWAYS_ON;
56452+		if (!rockchip_pmu_domain_is_on(pd)) {
56453+			error = rockchip_pd_power(pd, true);
56454+			if (error) {
56455+				dev_err(pmu->dev,
56456+					"failed to power on domain '%s': %d\n",
56457+					node->name, error);
56458+				goto err_unprepare_clocks;
56459+			}
56460+		}
56461+	}
56462+#endif
56463+	if (is_qos_need_init)
56464+		rockchip_pd_qos_init(pd, &qos_is_need_init[0]);
56465+
56466+	kfree(qos_is_need_init[0]);
56467+
56468+	pm_genpd_init(&pd->genpd, NULL, !rockchip_pmu_domain_is_on(pd));
56469 
56470 	pmu->genpd_data.domains[id] = &pd->genpd;
56471 	return 0;
56472 
56473 err_unprepare_clocks:
56474+	kfree(qos_is_need_init[0]);
56475 	clk_bulk_unprepare(pd->num_clks, pd->clks);
56476 err_put_clocks:
56477 	clk_bulk_put(pd->num_clks, pd->clks);
56478@@ -527,9 +961,9 @@ static void rockchip_pm_remove_one_domain(struct rockchip_pm_domain *pd)
56479 	clk_bulk_put(pd->num_clks, pd->clks);
56480 
56481 	/* protect the zeroing of pm->num_clks */
56482-	mutex_lock(&pd->pmu->mutex);
56483+	rockchip_pmu_lock(pd);
56484 	pd->num_clks = 0;
56485-	mutex_unlock(&pd->pmu->mutex);
56486+	rockchip_pmu_unlock(pd);
56487 
56488 	/* devm will free our memory */
56489 }
56490@@ -566,6 +1000,7 @@ static int rockchip_pm_add_subdomain(struct rockchip_pmu *pmu,
56491 {
56492 	struct device_node *np;
56493 	struct generic_pm_domain *child_domain, *parent_domain;
56494+	struct rockchip_pm_domain *child_pd, *parent_pd;
56495 	int error;
56496 
56497 	for_each_child_of_node(parent, np) {
56498@@ -606,6 +1041,17 @@ static int rockchip_pm_add_subdomain(struct rockchip_pmu *pmu,
56499 				parent_domain->name, child_domain->name);
56500 		}
56501 
56502+		/*
56503+		 * If child_pd skips idle requests or power on/off,
56504+		 * parent_pd may fail to power on/off. So if parent_pd
56505+		 * needs to power on/off, child_pd must not ignore idle
56506+		 * requests or power on/off either.
56507+		 */
56508+		child_pd = to_rockchip_pd(child_domain);
56509+		parent_pd = to_rockchip_pd(parent_domain);
56510+		if (!parent_pd->is_ignore_pwr)
56511+			child_pd->is_ignore_pwr = false;
56512+
56513 		rockchip_pm_add_subdomain(pmu, np);
56514 	}
56515 
56516@@ -616,6 +1062,75 @@ static int rockchip_pm_add_subdomain(struct rockchip_pmu *pmu,
56517 	return error;
56518 }
56519 
56520+#ifndef MODULE
56521+static void rockchip_pd_keepon_do_release(struct generic_pm_domain *genpd,
56522+					  struct rockchip_pm_domain *pd)
56523+{
56524+	struct pm_domain_data *pm_data;
56525+	int enable_count;
56526+
56527+	pd->genpd.flags &= (~GENPD_FLAG_ALWAYS_ON);
56528+	list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
56529+		if (!atomic_read(&pm_data->dev->power.usage_count)) {
56530+			enable_count = 0;
56531+			if (!pm_runtime_enabled(pm_data->dev)) {
56532+				pm_runtime_enable(pm_data->dev);
56533+				enable_count = 1;
56534+			}
56535+			pm_runtime_get_sync(pm_data->dev);
56536+			pm_runtime_put_sync(pm_data->dev);
56537+			if (enable_count)
56538+				pm_runtime_disable(pm_data->dev);
56539+		}
56540+	}
56541+}
56542+
56543+static int __init rockchip_pd_keepon_release(void)
56544+{
56545+	struct generic_pm_domain *genpd;
56546+	struct rockchip_pm_domain *pd;
56547+	int i;
56548+
56549+	if (!g_pmu)
56550+		return 0;
56551+
56552+	for (i = 0; i < g_pmu->genpd_data.num_domains; i++) {
56553+		genpd = g_pmu->genpd_data.domains[i];
56554+		if (genpd) {
56555+			pd = to_rockchip_pd(genpd);
56556+			if (pd->info->keepon_startup)
56557+				rockchip_pd_keepon_do_release(genpd, pd);
56558+		}
56559+	}
56560+	return 0;
56561+}
56562+late_initcall_sync(rockchip_pd_keepon_release);
56563+#endif
56564+
56565+static void __iomem *pd_base;
56566+
56567+void rockchip_dump_pmu(void)
56568+{
56569+	if (pd_base) {
56570+		pr_warn("PMU:\n");
56571+		print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET,
56572+			       32, 4, pd_base,
56573+			       0x100, false);
56574+	}
56575+}
56576+EXPORT_SYMBOL_GPL(rockchip_dump_pmu);
56577+
56578+static int rockchip_pmu_panic(struct notifier_block *this,
56579+			     unsigned long ev, void *ptr)
56580+{
56581+	rockchip_dump_pmu();
56582+	return NOTIFY_DONE;
56583+}
56584+
56585+static struct notifier_block pmu_panic_block = {
56586+	.notifier_call = rockchip_pmu_panic,
56587+};
56588+
56589 static int rockchip_pm_domain_probe(struct platform_device *pdev)
56590 {
56591 	struct device *dev = &pdev->dev;
56592@@ -626,6 +1141,7 @@ static int rockchip_pm_domain_probe(struct platform_device *pdev)
56593 	const struct of_device_id *match;
56594 	const struct rockchip_pmu_info *pmu_info;
56595 	int error;
56596+	void __iomem *reg_base;
56597 
56598 	if (!np) {
56599 		dev_err(dev, "device tree node not found\n");
56600@@ -666,6 +1182,14 @@ static int rockchip_pm_domain_probe(struct platform_device *pdev)
56601 		return PTR_ERR(pmu->regmap);
56602 	}
56603 
56604+	reg_base = of_iomap(parent->of_node, 0);
56605+	if (!reg_base) {
56606+		dev_err(dev, "%s: could not map pmu region\n", __func__);
56607+		return -ENOMEM;
56608+	}
56609+
56610+	pd_base = reg_base;
56611+
56612 	/*
56613 	 * Configure power up and down transition delays for CORE
56614 	 * and GPU domains.
56615@@ -708,6 +1232,10 @@ static int rockchip_pm_domain_probe(struct platform_device *pdev)
56616 		goto err_out;
56617 	}
56618 
56619+	atomic_notifier_chain_register(&panic_notifier_list,
56620+				       &pmu_panic_block);
56621+
56622+	g_pmu = pmu;
56623 	return 0;
56624 
56625 err_out:
56626@@ -716,129 +1244,194 @@ static int rockchip_pm_domain_probe(struct platform_device *pdev)
56627 }
56628 
56629 static const struct rockchip_domain_info px30_pm_domains[] = {
56630-	[PX30_PD_USB]		= DOMAIN_PX30(BIT(5),  BIT(5),  BIT(10), false),
56631-	[PX30_PD_SDCARD]	= DOMAIN_PX30(BIT(8),  BIT(8),  BIT(9),  false),
56632-	[PX30_PD_GMAC]		= DOMAIN_PX30(BIT(10), BIT(10), BIT(6),  false),
56633-	[PX30_PD_MMC_NAND]	= DOMAIN_PX30(BIT(11), BIT(11), BIT(5),  false),
56634-	[PX30_PD_VPU]		= DOMAIN_PX30(BIT(12), BIT(12), BIT(14), false),
56635-	[PX30_PD_VO]		= DOMAIN_PX30(BIT(13), BIT(13), BIT(7),  false),
56636-	[PX30_PD_VI]		= DOMAIN_PX30(BIT(14), BIT(14), BIT(8),  false),
56637-	[PX30_PD_GPU]		= DOMAIN_PX30(BIT(15), BIT(15), BIT(2),  false),
56638+	[PX30_PD_USB]		= DOMAIN_PX30("usb",        BIT(5),  BIT(5),  BIT(10), true),
56639+	[PX30_PD_SDCARD]	= DOMAIN_PX30("sdcard",     BIT(8),  BIT(8),  BIT(9),  false),
56640+	[PX30_PD_GMAC]		= DOMAIN_PX30("gmac",       BIT(10), BIT(10), BIT(6),  false),
56641+	[PX30_PD_MMC_NAND]	= DOMAIN_PX30("mmc_nand",   BIT(11), BIT(11), BIT(5),  false),
56642+	[PX30_PD_VPU]		= DOMAIN_PX30("vpu",        BIT(12), BIT(12), BIT(14), false),
56643+	[PX30_PD_VO]		= DOMAIN_PX30_PROTECT("vo", BIT(13), BIT(13), BIT(7),  false),
56644+	[PX30_PD_VI]		= DOMAIN_PX30_PROTECT("vi", BIT(14), BIT(14), BIT(8),  false),
56645+	[PX30_PD_GPU]		= DOMAIN_PX30("gpu",        BIT(15), BIT(15), BIT(2),  false),
56646+};
56647+
56648+static const struct rockchip_domain_info rv1126_pm_domains[] = {
56649+	[RV1126_PD_CRYPTO]	= DOMAIN_RV1126_O("crypto",   BIT(10), BIT(4),  BIT(20), 0x4, false),
56650+	[RV1126_PD_VEPU]	= DOMAIN_RV1126("vepu",       BIT(2),  BIT(9),  BIT(9),  false),
56651+	[RV1126_PD_VI]		= DOMAIN_RV1126("vi",         BIT(4),  BIT(6),  BIT(6),  false),
56652+	[RV1126_PD_VO]		= DOMAIN_RV1126_PROTECT("vo", BIT(5),  BIT(7),  BIT(7),  false),
56653+	[RV1126_PD_ISPP]	= DOMAIN_RV1126("ispp",       BIT(1),  BIT(8),  BIT(8),  false),
56654+	[RV1126_PD_VDPU]	= DOMAIN_RV1126("vdpu",       BIT(3),  BIT(10), BIT(10), false),
56655+	[RV1126_PD_NVM]		= DOMAIN_RV1126("nvm",        BIT(7),  BIT(11), BIT(11), false),
56656+	[RV1126_PD_SDIO]	= DOMAIN_RV1126("sdio",       BIT(8),  BIT(13), BIT(13), false),
56657+	[RV1126_PD_USB]		= DOMAIN_RV1126("usb",        BIT(9),  BIT(15), BIT(15), true),
56658+	[RV1126_PD_NPU]		= DOMAIN_RV1126_O("npu",      BIT(0),  BIT(2),  BIT(18), 0x4, false),
56659+};
56660+
56661+static const struct rockchip_domain_info rk1808_pm_domains[] = {
56662+	[RK1808_VD_NPU]		= DOMAIN_PX30("npu",         BIT(15), BIT(15), BIT(2), false),
56663+	[RK1808_PD_PCIE]	= DOMAIN_PX30("pcie",        BIT(9),  BIT(9),  BIT(4), true),
56664+	[RK1808_PD_VPU]		= DOMAIN_PX30("vpu",         BIT(13), BIT(13), BIT(7), false),
56665+	[RK1808_PD_VIO]		= DOMAIN_PX30_PROTECT("vio", BIT(14), BIT(14), BIT(8), false),
56666 };
56667 
56668 static const struct rockchip_domain_info rk3036_pm_domains[] = {
56669-	[RK3036_PD_MSCH]	= DOMAIN_RK3036(BIT(14), BIT(23), BIT(30), true),
56670-	[RK3036_PD_CORE]	= DOMAIN_RK3036(BIT(13), BIT(17), BIT(24), false),
56671-	[RK3036_PD_PERI]	= DOMAIN_RK3036(BIT(12), BIT(18), BIT(25), false),
56672-	[RK3036_PD_VIO]		= DOMAIN_RK3036(BIT(11), BIT(19), BIT(26), false),
56673-	[RK3036_PD_VPU]		= DOMAIN_RK3036(BIT(10), BIT(20), BIT(27), false),
56674-	[RK3036_PD_GPU]		= DOMAIN_RK3036(BIT(9),  BIT(21), BIT(28), false),
56675-	[RK3036_PD_SYS]		= DOMAIN_RK3036(BIT(8),  BIT(22), BIT(29), false),
56676+	[RK3036_PD_MSCH]	= DOMAIN_RK3036("msch", BIT(14), BIT(23), BIT(30), true),
56677+	[RK3036_PD_CORE]	= DOMAIN_RK3036("core", BIT(13), BIT(17), BIT(24), false),
56678+	[RK3036_PD_PERI]	= DOMAIN_RK3036("peri", BIT(12), BIT(18), BIT(25), false),
56679+	[RK3036_PD_VIO]		= DOMAIN_RK3036("vio",  BIT(11), BIT(19), BIT(26), false),
56680+	[RK3036_PD_VPU]		= DOMAIN_RK3036("vpu",  BIT(10), BIT(20), BIT(27), false),
56681+	[RK3036_PD_GPU]		= DOMAIN_RK3036("gpu",  BIT(9),  BIT(21), BIT(28), false),
56682+	[RK3036_PD_SYS]		= DOMAIN_RK3036("sys",  BIT(8),  BIT(22), BIT(29), false),
56683 };
56684 
56685 static const struct rockchip_domain_info rk3066_pm_domains[] = {
56686-	[RK3066_PD_GPU]		= DOMAIN(BIT(9), BIT(9), BIT(3), BIT(24), BIT(29), false),
56687-	[RK3066_PD_VIDEO]	= DOMAIN(BIT(8), BIT(8), BIT(4), BIT(23), BIT(28), false),
56688-	[RK3066_PD_VIO]		= DOMAIN(BIT(7), BIT(7), BIT(5), BIT(22), BIT(27), false),
56689-	[RK3066_PD_PERI]	= DOMAIN(BIT(6), BIT(6), BIT(2), BIT(25), BIT(30), false),
56690-	[RK3066_PD_CPU]		= DOMAIN(0,      BIT(5), BIT(1), BIT(26), BIT(31), false),
56691+	[RK3066_PD_GPU]		= DOMAIN("gpu",   BIT(9), BIT(9), BIT(3), BIT(24), BIT(29), false, false),
56692+	[RK3066_PD_VIDEO]	= DOMAIN("video", BIT(8), BIT(8), BIT(4), BIT(23), BIT(28), false, false),
56693+	[RK3066_PD_VIO]		= DOMAIN("vio",   BIT(7), BIT(7), BIT(5), BIT(22), BIT(27), false, true),
56694+	[RK3066_PD_PERI]	= DOMAIN("peri",  BIT(6), BIT(6), BIT(2), BIT(25), BIT(30), false, false),
56695+	[RK3066_PD_CPU]		= DOMAIN("cpu",   0,      BIT(5), BIT(1), BIT(26), BIT(31), false, false),
56696 };
56697 
56698 static const struct rockchip_domain_info rk3128_pm_domains[] = {
56699-	[RK3128_PD_CORE]	= DOMAIN_RK3288(BIT(0), BIT(0), BIT(4), false),
56700-	[RK3128_PD_MSCH]	= DOMAIN_RK3288(0,      0,      BIT(6), true),
56701-	[RK3128_PD_VIO]		= DOMAIN_RK3288(BIT(3), BIT(3), BIT(2), false),
56702-	[RK3128_PD_VIDEO]	= DOMAIN_RK3288(BIT(2), BIT(2), BIT(1), false),
56703-	[RK3128_PD_GPU]		= DOMAIN_RK3288(BIT(1), BIT(1), BIT(3), false),
56704+	[RK3128_PD_CORE]        = DOMAIN_RK3288("core",        BIT(0), BIT(0), BIT(4), false),
56705+	[RK3128_PD_MSCH]        = DOMAIN_RK3288("msch",        0,      0,      BIT(6), true),
56706+	[RK3128_PD_VIO]         = DOMAIN_RK3288_PROTECT("vio", BIT(3), BIT(3), BIT(2), false),
56707+	[RK3128_PD_VIDEO]       = DOMAIN_RK3288("video",       BIT(2), BIT(2), BIT(1), false),
56708+	[RK3128_PD_GPU]         = DOMAIN_RK3288("gpu",         BIT(1), BIT(1), BIT(3), false),
56709 };
56710 
56711 static const struct rockchip_domain_info rk3188_pm_domains[] = {
56712-	[RK3188_PD_GPU]		= DOMAIN(BIT(9), BIT(9), BIT(3), BIT(24), BIT(29), false),
56713-	[RK3188_PD_VIDEO]	= DOMAIN(BIT(8), BIT(8), BIT(4), BIT(23), BIT(28), false),
56714-	[RK3188_PD_VIO]		= DOMAIN(BIT(7), BIT(7), BIT(5), BIT(22), BIT(27), false),
56715-	[RK3188_PD_PERI]	= DOMAIN(BIT(6), BIT(6), BIT(2), BIT(25), BIT(30), false),
56716-	[RK3188_PD_CPU]		= DOMAIN(BIT(5), BIT(5), BIT(1), BIT(26), BIT(31), false),
56717+	[RK3188_PD_GPU]         = DOMAIN("gpu",   BIT(9), BIT(9), BIT(3), BIT(24), BIT(29), false, false),
56718+	[RK3188_PD_VIDEO]	= DOMAIN("video", BIT(8), BIT(8), BIT(4), BIT(23), BIT(28), false, false),
56719+	[RK3188_PD_VIO]		= DOMAIN("vio",   BIT(7), BIT(7), BIT(5), BIT(22), BIT(27), false, true),
56720+	[RK3188_PD_PERI]	= DOMAIN("peri",  BIT(6), BIT(6), BIT(2), BIT(25), BIT(30), false, false),
56721+	[RK3188_PD_CPU]		= DOMAIN("cpu",   BIT(5), BIT(5), BIT(1), BIT(26), BIT(31), false, false),
56722 };
56723 
56724 static const struct rockchip_domain_info rk3228_pm_domains[] = {
56725-	[RK3228_PD_CORE]	= DOMAIN_RK3036(BIT(0),  BIT(0),  BIT(16), true),
56726-	[RK3228_PD_MSCH]	= DOMAIN_RK3036(BIT(1),  BIT(1),  BIT(17), true),
56727-	[RK3228_PD_BUS]		= DOMAIN_RK3036(BIT(2),  BIT(2),  BIT(18), true),
56728-	[RK3228_PD_SYS]		= DOMAIN_RK3036(BIT(3),  BIT(3),  BIT(19), true),
56729-	[RK3228_PD_VIO]		= DOMAIN_RK3036(BIT(4),  BIT(4),  BIT(20), false),
56730-	[RK3228_PD_VOP]		= DOMAIN_RK3036(BIT(5),  BIT(5),  BIT(21), false),
56731-	[RK3228_PD_VPU]		= DOMAIN_RK3036(BIT(6),  BIT(6),  BIT(22), false),
56732-	[RK3228_PD_RKVDEC]	= DOMAIN_RK3036(BIT(7),  BIT(7),  BIT(23), false),
56733-	[RK3228_PD_GPU]		= DOMAIN_RK3036(BIT(8),  BIT(8),  BIT(24), false),
56734-	[RK3228_PD_PERI]	= DOMAIN_RK3036(BIT(9),  BIT(9),  BIT(25), true),
56735-	[RK3228_PD_GMAC]	= DOMAIN_RK3036(BIT(10), BIT(10), BIT(26), false),
56736+	[RK3228_PD_CORE]	= DOMAIN_RK3036("core", BIT(0),  BIT(0),  BIT(16), true),
56737+	[RK3228_PD_MSCH]	= DOMAIN_RK3036("msch", BIT(1),  BIT(1),  BIT(17), true),
56738+	[RK3228_PD_BUS]		= DOMAIN_RK3036("bus",  BIT(2),  BIT(2),  BIT(18), true),
56739+	[RK3228_PD_SYS]		= DOMAIN_RK3036("sys",  BIT(3),  BIT(3),  BIT(19), true),
56740+	[RK3228_PD_VIO]		= DOMAIN_RK3036("vio",  BIT(4),  BIT(4),  BIT(20), false),
56741+	[RK3228_PD_VOP]		= DOMAIN_RK3036("vop",  BIT(5),  BIT(5),  BIT(21), false),
56742+	[RK3228_PD_VPU]		= DOMAIN_RK3036("vpu",  BIT(6),  BIT(6),  BIT(22), false),
56743+	[RK3228_PD_RKVDEC]	= DOMAIN_RK3036("vdec", BIT(7),  BIT(7),  BIT(23), false),
56744+	[RK3228_PD_GPU]		= DOMAIN_RK3036("gpu",  BIT(8),  BIT(8),  BIT(24), false),
56745+	[RK3228_PD_PERI]	= DOMAIN_RK3036("peri", BIT(9),  BIT(9),  BIT(25), true),
56746+	[RK3228_PD_GMAC]	= DOMAIN_RK3036("gmac", BIT(10), BIT(10), BIT(26), false),
56747 };
56748 
56749 static const struct rockchip_domain_info rk3288_pm_domains[] = {
56750-	[RK3288_PD_VIO]		= DOMAIN_RK3288(BIT(7),  BIT(7),  BIT(4), false),
56751-	[RK3288_PD_HEVC]	= DOMAIN_RK3288(BIT(14), BIT(10), BIT(9), false),
56752-	[RK3288_PD_VIDEO]	= DOMAIN_RK3288(BIT(8),  BIT(8),  BIT(3), false),
56753-	[RK3288_PD_GPU]		= DOMAIN_RK3288(BIT(9),  BIT(9),  BIT(2), false),
56754+	[RK3288_PD_VIO]		= DOMAIN_RK3288_PROTECT("vio", BIT(7),  BIT(7),  BIT(4), false),
56755+	[RK3288_PD_HEVC]	= DOMAIN_RK3288("hevc",        BIT(14), BIT(10), BIT(9), false),
56756+	[RK3288_PD_VIDEO]	= DOMAIN_RK3288("video",       BIT(8),  BIT(8),  BIT(3), false),
56757+	[RK3288_PD_GPU]		= DOMAIN_RK3288("gpu",         BIT(9),  BIT(9),  BIT(2), false),
56758 };
56759 
56760 static const struct rockchip_domain_info rk3328_pm_domains[] = {
56761-	[RK3328_PD_CORE]	= DOMAIN_RK3328(0, BIT(0), BIT(0), false),
56762-	[RK3328_PD_GPU]		= DOMAIN_RK3328(0, BIT(1), BIT(1), false),
56763-	[RK3328_PD_BUS]		= DOMAIN_RK3328(0, BIT(2), BIT(2), true),
56764-	[RK3328_PD_MSCH]	= DOMAIN_RK3328(0, BIT(3), BIT(3), true),
56765-	[RK3328_PD_PERI]	= DOMAIN_RK3328(0, BIT(4), BIT(4), true),
56766-	[RK3328_PD_VIDEO]	= DOMAIN_RK3328(0, BIT(5), BIT(5), false),
56767-	[RK3328_PD_HEVC]	= DOMAIN_RK3328(0, BIT(6), BIT(6), false),
56768-	[RK3328_PD_VIO]		= DOMAIN_RK3328(0, BIT(8), BIT(8), false),
56769-	[RK3328_PD_VPU]		= DOMAIN_RK3328(0, BIT(9), BIT(9), false),
56770+	[RK3328_PD_CORE]	= DOMAIN_RK3328("core",  0, BIT(0), BIT(0), false),
56771+	[RK3328_PD_GPU]		= DOMAIN_RK3328("gpu",   0, BIT(1), BIT(1), false),
56772+	[RK3328_PD_BUS]		= DOMAIN_RK3328("bus",   0, BIT(2), BIT(2), true),
56773+	[RK3328_PD_MSCH]	= DOMAIN_RK3328("msch",  0, BIT(3), BIT(3), true),
56774+	[RK3328_PD_PERI]	= DOMAIN_RK3328("peri",  0, BIT(4), BIT(4), true),
56775+	[RK3328_PD_VIDEO]	= DOMAIN_RK3328("video", 0, BIT(5), BIT(5), false),
56776+	[RK3328_PD_HEVC]	= DOMAIN_RK3328("hevc",  0, BIT(6), BIT(6), false),
56777+	[RK3328_PD_VIO]		= DOMAIN_RK3328("vio",   0, BIT(8), BIT(8), false),
56778+	[RK3328_PD_VPU]		= DOMAIN_RK3328("vpu",   0, BIT(9), BIT(9), false),
56779 };
56780 
56781 static const struct rockchip_domain_info rk3366_pm_domains[] = {
56782-	[RK3366_PD_PERI]	= DOMAIN_RK3368(BIT(10), BIT(10), BIT(6), true),
56783-	[RK3366_PD_VIO]		= DOMAIN_RK3368(BIT(14), BIT(14), BIT(8), false),
56784-	[RK3366_PD_VIDEO]	= DOMAIN_RK3368(BIT(13), BIT(13), BIT(7), false),
56785-	[RK3366_PD_RKVDEC]	= DOMAIN_RK3368(BIT(11), BIT(11), BIT(7), false),
56786-	[RK3366_PD_WIFIBT]	= DOMAIN_RK3368(BIT(8),  BIT(8),  BIT(9), false),
56787-	[RK3366_PD_VPU]		= DOMAIN_RK3368(BIT(12), BIT(12), BIT(7), false),
56788-	[RK3366_PD_GPU]		= DOMAIN_RK3368(BIT(15), BIT(15), BIT(2), false),
56789+	[RK3366_PD_PERI]	= DOMAIN_RK3368("peri",        BIT(10), BIT(10), BIT(6), true),
56790+	[RK3366_PD_VIO]		= DOMAIN_RK3368_PROTECT("vio", BIT(14), BIT(14), BIT(8), false),
56791+	[RK3366_PD_VIDEO]	= DOMAIN_RK3368("video",       BIT(13), BIT(13), BIT(7), false),
56792+	[RK3366_PD_RKVDEC]	= DOMAIN_RK3368("rkvdec",      BIT(11), BIT(11), BIT(7), false),
56793+	[RK3366_PD_WIFIBT]	= DOMAIN_RK3368("wifibt",      BIT(8),  BIT(8),  BIT(9), false),
56794+	[RK3366_PD_VPU]		= DOMAIN_RK3368("vpu",         BIT(12), BIT(12), BIT(7), false),
56795+	[RK3366_PD_GPU]		= DOMAIN_RK3368("gpu",         BIT(15), BIT(15), BIT(2), false),
56796 };
56797 
56798 static const struct rockchip_domain_info rk3368_pm_domains[] = {
56799-	[RK3368_PD_PERI]	= DOMAIN_RK3368(BIT(13), BIT(12), BIT(6), true),
56800-	[RK3368_PD_VIO]		= DOMAIN_RK3368(BIT(15), BIT(14), BIT(8), false),
56801-	[RK3368_PD_VIDEO]	= DOMAIN_RK3368(BIT(14), BIT(13), BIT(7), false),
56802-	[RK3368_PD_GPU_0]	= DOMAIN_RK3368(BIT(16), BIT(15), BIT(2), false),
56803-	[RK3368_PD_GPU_1]	= DOMAIN_RK3368(BIT(17), BIT(16), BIT(2), false),
56804+	[RK3368_PD_PERI]	= DOMAIN_RK3368("peri",        BIT(13), BIT(12), BIT(6), true),
56805+	[RK3368_PD_VIO]		= DOMAIN_RK3368_PROTECT("vio", BIT(15), BIT(14), BIT(8), false),
56806+	[RK3368_PD_VIDEO]	= DOMAIN_RK3368("video",       BIT(14), BIT(13), BIT(7), false),
56807+	[RK3368_PD_GPU_0]	= DOMAIN_RK3368("gpu_0",       BIT(16), BIT(15), BIT(2), false),
56808+	[RK3368_PD_GPU_1]	= DOMAIN_RK3368("gpu_1",       BIT(17), BIT(16), BIT(2), false),
56809 };
56810 
56811 static const struct rockchip_domain_info rk3399_pm_domains[] = {
56812-	[RK3399_PD_TCPD0]	= DOMAIN_RK3399(BIT(8),  BIT(8),  0,	   false),
56813-	[RK3399_PD_TCPD1]	= DOMAIN_RK3399(BIT(9),  BIT(9),  0,	   false),
56814-	[RK3399_PD_CCI]		= DOMAIN_RK3399(BIT(10), BIT(10), 0,	   true),
56815-	[RK3399_PD_CCI0]	= DOMAIN_RK3399(0,	 0,	  BIT(15), true),
56816-	[RK3399_PD_CCI1]	= DOMAIN_RK3399(0,	 0,	  BIT(16), true),
56817-	[RK3399_PD_PERILP]	= DOMAIN_RK3399(BIT(11), BIT(11), BIT(1),  true),
56818-	[RK3399_PD_PERIHP]	= DOMAIN_RK3399(BIT(12), BIT(12), BIT(2),  true),
56819-	[RK3399_PD_CENTER]	= DOMAIN_RK3399(BIT(13), BIT(13), BIT(14), true),
56820-	[RK3399_PD_VIO]		= DOMAIN_RK3399(BIT(14), BIT(14), BIT(17), false),
56821-	[RK3399_PD_GPU]		= DOMAIN_RK3399(BIT(15), BIT(15), BIT(0),  false),
56822-	[RK3399_PD_VCODEC]	= DOMAIN_RK3399(BIT(16), BIT(16), BIT(3),  false),
56823-	[RK3399_PD_VDU]		= DOMAIN_RK3399(BIT(17), BIT(17), BIT(4),  false),
56824-	[RK3399_PD_RGA]		= DOMAIN_RK3399(BIT(18), BIT(18), BIT(5),  false),
56825-	[RK3399_PD_IEP]		= DOMAIN_RK3399(BIT(19), BIT(19), BIT(6),  false),
56826-	[RK3399_PD_VO]		= DOMAIN_RK3399(BIT(20), BIT(20), 0,	   false),
56827-	[RK3399_PD_VOPB]	= DOMAIN_RK3399(0,	 0,	  BIT(7),  false),
56828-	[RK3399_PD_VOPL]	= DOMAIN_RK3399(0, 	 0,	  BIT(8),  false),
56829-	[RK3399_PD_ISP0]	= DOMAIN_RK3399(BIT(22), BIT(22), BIT(9),  false),
56830-	[RK3399_PD_ISP1]	= DOMAIN_RK3399(BIT(23), BIT(23), BIT(10), false),
56831-	[RK3399_PD_HDCP]	= DOMAIN_RK3399(BIT(24), BIT(24), BIT(11), false),
56832-	[RK3399_PD_GMAC]	= DOMAIN_RK3399(BIT(25), BIT(25), BIT(23), true),
56833-	[RK3399_PD_EMMC]	= DOMAIN_RK3399(BIT(26), BIT(26), BIT(24), true),
56834-	[RK3399_PD_USB3]	= DOMAIN_RK3399(BIT(27), BIT(27), BIT(12), true),
56835-	[RK3399_PD_EDP]		= DOMAIN_RK3399(BIT(28), BIT(28), BIT(22), false),
56836-	[RK3399_PD_GIC]		= DOMAIN_RK3399(BIT(29), BIT(29), BIT(27), true),
56837-	[RK3399_PD_SD]		= DOMAIN_RK3399(BIT(30), BIT(30), BIT(28), true),
56838-	[RK3399_PD_SDIOAUDIO]	= DOMAIN_RK3399(BIT(31), BIT(31), BIT(29), true),
56839+	[RK3399_PD_TCPD0]	= DOMAIN_RK3399("tcpd0",        BIT(8),  BIT(8),  0,       false),
56840+	[RK3399_PD_TCPD1]	= DOMAIN_RK3399("tcpd1",        BIT(9),  BIT(9),  0,       false),
56841+	[RK3399_PD_CCI]		= DOMAIN_RK3399("cci",          BIT(10), BIT(10), 0,       true),
56842+	[RK3399_PD_CCI0]	= DOMAIN_RK3399("cci0",         0,       0,       BIT(15), true),
56843+	[RK3399_PD_CCI1]	= DOMAIN_RK3399("cci1",         0,       0,       BIT(16), true),
56844+	[RK3399_PD_PERILP]	= DOMAIN_RK3399("perilp",       BIT(11), BIT(11), BIT(1),  true),
56845+	[RK3399_PD_PERIHP]	= DOMAIN_RK3399("perihp",       BIT(12), BIT(12), BIT(2),  true),
56846+	[RK3399_PD_CENTER]	= DOMAIN_RK3399("center",       BIT(13), BIT(13), BIT(14), true),
56847+	[RK3399_PD_VIO]		= DOMAIN_RK3399_PROTECT("vio",  BIT(14), BIT(14), BIT(17), false),
56848+	[RK3399_PD_GPU]		= DOMAIN_RK3399("gpu",          BIT(15), BIT(15), BIT(0),  false),
56849+	[RK3399_PD_VCODEC]	= DOMAIN_RK3399("vcodec",       BIT(16), BIT(16), BIT(3),  false),
56850+	[RK3399_PD_VDU]		= DOMAIN_RK3399("vdu",          BIT(17), BIT(17), BIT(4),  false),
56851+	[RK3399_PD_RGA]		= DOMAIN_RK3399("rga",          BIT(18), BIT(18), BIT(5),  false),
56852+	[RK3399_PD_IEP]		= DOMAIN_RK3399("iep",          BIT(19), BIT(19), BIT(6),  false),
56853+	[RK3399_PD_VO]		= DOMAIN_RK3399_PROTECT("vo",   BIT(20), BIT(20), 0,       false),
56854+	[RK3399_PD_VOPB]	= DOMAIN_RK3399_PROTECT("vopb", 0,       0,       BIT(7),  false),
56855+	[RK3399_PD_VOPL]	= DOMAIN_RK3399_PROTECT("vopl", 0,       0,       BIT(8),  false),
56856+	[RK3399_PD_ISP0]	= DOMAIN_RK3399("isp0",         BIT(22), BIT(22), BIT(9),  false),
56857+	[RK3399_PD_ISP1]	= DOMAIN_RK3399("isp1",         BIT(23), BIT(23), BIT(10), false),
56858+	[RK3399_PD_HDCP]	= DOMAIN_RK3399_PROTECT("hdcp", BIT(24), BIT(24), BIT(11), false),
56859+	[RK3399_PD_GMAC]	= DOMAIN_RK3399("gmac",         BIT(25), BIT(25), BIT(23), true),
56860+	[RK3399_PD_EMMC]	= DOMAIN_RK3399("emmc",         BIT(26), BIT(26), BIT(24), true),
56861+	[RK3399_PD_USB3]	= DOMAIN_RK3399("usb3",         BIT(27), BIT(27), BIT(12), true),
56862+	[RK3399_PD_EDP]		= DOMAIN_RK3399_PROTECT("edp",  BIT(28), BIT(28), BIT(22), false),
56863+	[RK3399_PD_GIC]		= DOMAIN_RK3399("gic",          BIT(29), BIT(29), BIT(27), true),
56864+	[RK3399_PD_SD]		= DOMAIN_RK3399("sd",           BIT(30), BIT(30), BIT(28), true),
56865+	[RK3399_PD_SDIOAUDIO]	= DOMAIN_RK3399("sdioaudio",    BIT(31), BIT(31), BIT(29), true),
56866+};
56867+
56868+static const struct rockchip_domain_info rk3568_pm_domains[] = {
56869+	[RK3568_PD_NPU]		= DOMAIN_RK3568("npu",        BIT(1), BIT(2),  false),
56870+	[RK3568_PD_GPU]		= DOMAIN_RK3568("gpu",        BIT(0), BIT(1),  false),
56871+	[RK3568_PD_VI]		= DOMAIN_RK3568("vi",         BIT(6), BIT(3),  false),
56872+	[RK3568_PD_VO]		= DOMAIN_RK3568_PROTECT("vo", BIT(7), BIT(4),  false),
56873+	[RK3568_PD_RGA]		= DOMAIN_RK3568("rga",        BIT(5), BIT(5),  false),
56874+	[RK3568_PD_VPU]		= DOMAIN_RK3568("vpu",        BIT(2), BIT(6),  false),
56875+	[RK3568_PD_RKVDEC]	= DOMAIN_RK3568("rkvdec",     BIT(4), BIT(8),  false),
56876+	[RK3568_PD_RKVENC]	= DOMAIN_RK3568("rkvenc",     BIT(3), BIT(7),  false),
56877+	[RK3568_PD_PIPE]	= DOMAIN_RK3568("pipe",       BIT(8), BIT(11), false),
56878+};
56879+
56880+static const struct rockchip_domain_info rk3588_pm_domains[] = {
56881+					     /* name       p_offset pwr  status   r_status r_offset req  idle     wakeup */
56882+	[RK3588_PD_GPU]		= DOMAIN_RK3588("gpu",     0x0, BIT(0),  0,       BIT(1),  0x0, BIT(0),  BIT(0),  false),
56883+	[RK3588_PD_NPU]		= DOMAIN_RK3588("npu",     0x0, BIT(1),  BIT(1),  0,       0x0, 0,       0,       false),
56884+	[RK3588_PD_VCODEC]	= DOMAIN_RK3588("vcodec",  0x0, BIT(2),  BIT(2),  0,       0x0, 0,       0,       false),
56885+	[RK3588_PD_NPUTOP]	= DOMAIN_RK3588("nputop",  0x0, BIT(3),  0,       BIT(2),  0x0, BIT(1),  BIT(1),  false),
56886+	[RK3588_PD_NPU1]	= DOMAIN_RK3588("npu1",    0x0, BIT(4),  0,       BIT(3),  0x0, BIT(2),  BIT(2),  false),
56887+	[RK3588_PD_NPU2]	= DOMAIN_RK3588("npu2",    0x0, BIT(5),  0,       BIT(4),  0x0, BIT(3),  BIT(3),  false),
56888+	[RK3588_PD_VENC0]	= DOMAIN_RK3588("venc0",   0x0, BIT(6),  0,       BIT(5),  0x0, BIT(4),  BIT(4),  false),
56889+	[RK3588_PD_VENC1]	= DOMAIN_RK3588("venc1",   0x0, BIT(7),  0,       BIT(6),  0x0, BIT(5),  BIT(5),  false),
56890+	[RK3588_PD_RKVDEC0]	= DOMAIN_RK3588("rkvdec0", 0x0, BIT(8),  0,       BIT(7),  0x0, BIT(6),  BIT(6),  false),
56891+	[RK3588_PD_RKVDEC1]	= DOMAIN_RK3588("rkvdec1", 0x0, BIT(9),  0,       BIT(8),  0x0, BIT(7),  BIT(7),  false),
56892+	[RK3588_PD_VDPU]	= DOMAIN_RK3588("vdpu",    0x0, BIT(10), 0,       BIT(9),  0x0, BIT(8),  BIT(8),  false),
56893+	[RK3588_PD_RGA30]	= DOMAIN_RK3588("rga30",   0x0, BIT(11), 0,       BIT(10), 0x0, 0,       0,       false),
56894+	[RK3588_PD_AV1]		= DOMAIN_RK3588("av1",     0x0, BIT(12), 0,       BIT(11), 0x0, BIT(9),  BIT(9),  false),
56895+	[RK3588_PD_VI]		= DOMAIN_RK3588("vi",      0x0, BIT(13), 0,       BIT(12), 0x0, BIT(10), BIT(10), false),
56896+	[RK3588_PD_FEC]		= DOMAIN_RK3588("fec",     0x0, BIT(14), 0,       BIT(13), 0x0, 0,       0,       false),
56897+	[RK3588_PD_ISP1]	= DOMAIN_RK3588("isp1",    0x0, BIT(15), 0,       BIT(14), 0x0, BIT(11), BIT(11), false),
56898+	[RK3588_PD_RGA31]	= DOMAIN_RK3588("rga31",   0x4, BIT(0),  0,       BIT(15), 0x0, BIT(12), BIT(12), false),
56899+	[RK3588_PD_VOP]		= DOMAIN_RK3588_P("vop",   0x4, BIT(1),  0,       BIT(16), 0x0, BIT(13) | BIT(14), BIT(13) | BIT(14), false),
56900+	[RK3588_PD_VO0]		= DOMAIN_RK3588_P("vo0",   0x4, BIT(2),  0,       BIT(17), 0x0, BIT(15), BIT(15), false),
56901+	[RK3588_PD_VO1]		= DOMAIN_RK3588_P("vo1",   0x4, BIT(3),  0,       BIT(18), 0x4, BIT(0),  BIT(16), false),
56902+	[RK3588_PD_AUDIO]	= DOMAIN_RK3588("audio",   0x4, BIT(4),  0,       BIT(19), 0x4, BIT(1),  BIT(17), false),
56903+	[RK3588_PD_PHP]		= DOMAIN_RK3588("php",     0x4, BIT(5),  0,       BIT(20), 0x4, BIT(5),  BIT(21), false),
56904+	[RK3588_PD_GMAC]	= DOMAIN_RK3588("gmac",    0x4, BIT(6),  0,       BIT(21), 0x0, 0,       0,       false),
56905+	[RK3588_PD_PCIE]	= DOMAIN_RK3588("pcie",    0x4, BIT(7),  0,       BIT(22), 0x0, 0,       0,       true),
56906+	[RK3588_PD_NVM]		= DOMAIN_RK3588("nvm",     0x4, BIT(8),  BIT(24), 0,       0x4, BIT(2),  BIT(18), false),
56907+	[RK3588_PD_NVM0]	= DOMAIN_RK3588("nvm0",    0x4, BIT(9),  0,       BIT(23), 0x0, 0,       0,       false),
56908+	[RK3588_PD_SDIO]	= DOMAIN_RK3588("sdio",    0x4, BIT(10), 0,       BIT(24), 0x4, BIT(3),  BIT(19), false),
56909+	[RK3588_PD_USB]		= DOMAIN_RK3588("usb",     0x4, BIT(11), 0,       BIT(25), 0x4, BIT(4),  BIT(20), true),
56910+	[RK3588_PD_SDMMC]	= DOMAIN_RK3588("sdmmc",   0x4, BIT(13), 0,       BIT(26), 0x0, 0,       0,       false),
56911 };
56912 
56913 static const struct rockchip_pmu_info px30_pmu = {
56914@@ -852,6 +1445,28 @@ static const struct rockchip_pmu_info px30_pmu = {
56915 	.domain_info = px30_pm_domains,
56916 };
56917 
56918+static const struct rockchip_pmu_info rv1126_pmu = {
56919+	.pwr_offset = 0x110,
56920+	.status_offset = 0x108,
56921+	.req_offset = 0xc0,
56922+	.idle_offset = 0xd8,
56923+	.ack_offset = 0xd0,
56924+
56925+	.num_domains = ARRAY_SIZE(rv1126_pm_domains),
56926+	.domain_info = rv1126_pm_domains,
56927+};
56928+
56929+static const struct rockchip_pmu_info rk1808_pmu = {
56930+	.pwr_offset = 0x18,
56931+	.status_offset = 0x20,
56932+	.req_offset = 0x64,
56933+	.idle_offset = 0x6c,
56934+	.ack_offset = 0x6c,
56935+
56936+	.num_domains = ARRAY_SIZE(rk1808_pm_domains),
56937+	.domain_info = rk1808_pm_domains,
56938+};
56939+
56940 static const struct rockchip_pmu_info rk3036_pmu = {
56941 	.req_offset = 0x148,
56942 	.idle_offset = 0x14c,
56943@@ -976,11 +1591,42 @@ static const struct rockchip_pmu_info rk3399_pmu = {
56944 	.domain_info = rk3399_pm_domains,
56945 };
56946 
56947+static const struct rockchip_pmu_info rk3568_pmu = {
56948+	.pwr_offset = 0xa0,
56949+	.status_offset = 0x98,
56950+	.req_offset = 0x50,
56951+	.idle_offset = 0x68,
56952+	.ack_offset = 0x60,
56953+
56954+	.num_domains = ARRAY_SIZE(rk3568_pm_domains),
56955+	.domain_info = rk3568_pm_domains,
56956+};
56957+
56958+static const struct rockchip_pmu_info rk3588_pmu = {
56959+	.pwr_offset = 0x14c,
56960+	.status_offset = 0x180,
56961+	.req_offset = 0x10c,
56962+	.idle_offset = 0x120,
56963+	.ack_offset = 0x118,
56964+	.repair_status_offset = 0x290,
56965+
56966+	.num_domains = ARRAY_SIZE(rk3588_pm_domains),
56967+	.domain_info = rk3588_pm_domains,
56968+};
56969+
56970 static const struct of_device_id rockchip_pm_domain_dt_match[] = {
56971 	{
56972 		.compatible = "rockchip,px30-power-controller",
56973 		.data = (void *)&px30_pmu,
56974 	},
56975+	{
56976+		.compatible = "rockchip,rv1126-power-controller",
56977+		.data = (void *)&rv1126_pmu,
56978+	},
56979+	{
56980+		.compatible = "rockchip,rk1808-power-controller",
56981+		.data = (void *)&rk1808_pmu,
56982+	},
56983 	{
56984 		.compatible = "rockchip,rk3036-power-controller",
56985 		.data = (void *)&rk3036_pmu,
56986@@ -1021,8 +1667,17 @@ static const struct of_device_id rockchip_pm_domain_dt_match[] = {
56987 		.compatible = "rockchip,rk3399-power-controller",
56988 		.data = (void *)&rk3399_pmu,
56989 	},
56990+	{
56991+		.compatible = "rockchip,rk3568-power-controller",
56992+		.data = (void *)&rk3568_pmu,
56993+	},
56994+	{
56995+		.compatible = "rockchip,rk3588-power-controller",
56996+		.data = (void *)&rk3588_pmu,
56997+	},
56998 	{ /* sentinel */ },
56999 };
57000+MODULE_DEVICE_TABLE(of, rockchip_pm_domain_dt_match);
57001 
57002 static struct platform_driver rockchip_pm_domain_driver = {
57003 	.probe = rockchip_pm_domain_probe,
57004@@ -1043,3 +1698,12 @@ static int __init rockchip_pm_domain_drv_register(void)
57005 	return platform_driver_register(&rockchip_pm_domain_driver);
57006 }
57007 postcore_initcall(rockchip_pm_domain_drv_register);
57008+
57009+static void __exit rockchip_pm_domain_drv_unregister(void)
57010+{
57011+	platform_driver_unregister(&rockchip_pm_domain_driver);
57012+}
57013+module_exit(rockchip_pm_domain_drv_unregister);
57014+
57015+MODULE_DESCRIPTION("ROCKCHIP PM Domain Driver");
57016+MODULE_LICENSE("GPL");
57017diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c
57018index a9f97023d..f744c64f1 100644
57019--- a/drivers/spi/spi-rockchip.c
57020+++ b/drivers/spi/spi-rockchip.c
57021@@ -10,6 +10,7 @@
57022 #include <linux/module.h>
57023 #include <linux/of.h>
57024 #include <linux/pinctrl/consumer.h>
57025+#include <linux/pinctrl/devinfo.h>
57026 #include <linux/platform_device.h>
57027 #include <linux/spi/spi.h>
57028 #include <linux/pm_runtime.h>
57029@@ -107,6 +108,8 @@
57030 #define CR0_OPM_MASTER				0x0
57031 #define CR0_OPM_SLAVE				0x1
57032 
57033+#define CR0_SOI_OFFSET				23
57034+
57035 #define CR0_MTM_OFFSET				0x21
57036 
57037 /* Bit fields in SER, 2bit */
57038@@ -116,13 +119,14 @@
57039 #define BAUDR_SCKDV_MIN				2
57040 #define BAUDR_SCKDV_MAX				65534
57041 
57042-/* Bit fields in SR, 5bit */
57043-#define SR_MASK						0x1f
57044+/* Bit fields in SR, 6bit */
57045+#define SR_MASK						0x3f
57046 #define SR_BUSY						(1 << 0)
57047 #define SR_TF_FULL					(1 << 1)
57048 #define SR_TF_EMPTY					(1 << 2)
57049 #define SR_RF_EMPTY					(1 << 3)
57050 #define SR_RF_FULL					(1 << 4)
57051+#define SR_SLAVE_TX_BUSY				(1 << 5)
57052 
57053 /* Bit fields in ISR, IMR, ISR, RISR, 5bit */
57054 #define INT_MASK					0x1f
57055@@ -130,7 +134,8 @@
57056 #define INT_TF_OVERFLOW				(1 << 1)
57057 #define INT_RF_UNDERFLOW			(1 << 2)
57058 #define INT_RF_OVERFLOW				(1 << 3)
57059-#define INT_RF_FULL					(1 << 4)
57060+#define INT_RF_FULL				(1 << 4)
57061+#define INT_CS_INACTIVE				(1 << 6)
57062 
57063 /* Bit fields in ICR, 4bit */
57064 #define ICR_MASK					0x0f
57065@@ -149,6 +154,8 @@
57066 
57067 /* sclk_out: spi master internal logic in rk3x can support 50Mhz */
57068 #define MAX_SCLK_OUT				50000000U
57069+/* max sclk when the IO drive strength is 4mA */
57070+#define IO_DRIVER_4MA_MAX_SCLK_OUT	24000000U
57071 
57072 /*
57073  * SPI_CTRLR1 is 16-bits, so we should support lengths of 0xffff + 1. However,
57074@@ -156,7 +163,8 @@
57075  */
57076 #define ROCKCHIP_SPI_MAX_TRANLEN		0xffff
57077 
57078-#define ROCKCHIP_SPI_MAX_CS_NUM			2
57079+/* 2 for native cs, 2 for cs-gpio */
57080+#define ROCKCHIP_SPI_MAX_CS_NUM			4
57081 #define ROCKCHIP_SPI_VER2_TYPE1			0x05EC0002
57082 #define ROCKCHIP_SPI_VER2_TYPE2			0x00110002
57083 
57084@@ -187,7 +195,10 @@ struct rockchip_spi {
57085 
57086 	bool cs_asserted[ROCKCHIP_SPI_MAX_CS_NUM];
57087 
57088+	struct pinctrl_state *high_speed_state;
57089 	bool slave_abort;
57090+	bool cs_inactive; /* spi slave transmission stops when cs goes inactive */
57091+	struct spi_transfer *xfer; /* Store xfer temporarily */
57092 };
57093 
57094 static inline void spi_enable_chip(struct rockchip_spi *rs, bool enable)
57095@@ -195,13 +206,19 @@ static inline void spi_enable_chip(struct rockchip_spi *rs, bool enable)
57096 	writel_relaxed((enable ? 1U : 0U), rs->regs + ROCKCHIP_SPI_SSIENR);
57097 }
57098 
57099-static inline void wait_for_idle(struct rockchip_spi *rs)
57100+static inline void wait_for_tx_idle(struct rockchip_spi *rs, bool slave_mode)
57101 {
57102 	unsigned long timeout = jiffies + msecs_to_jiffies(5);
57103 
57104 	do {
57105-		if (!(readl_relaxed(rs->regs + ROCKCHIP_SPI_SR) & SR_BUSY))
57106-			return;
57107+		if (slave_mode) {
57108+			if (!(readl_relaxed(rs->regs + ROCKCHIP_SPI_SR) & SR_SLAVE_TX_BUSY) &&
57109+			    !((readl_relaxed(rs->regs + ROCKCHIP_SPI_SR) & SR_BUSY)))
57110+				return;
57111+		} else {
57112+			if (!(readl_relaxed(rs->regs + ROCKCHIP_SPI_SR) & SR_BUSY))
57113+				return;
57114+		}
57115 	} while (!time_after(jiffies, timeout));
57116 
57117 	dev_warn(rs->dev, "spi controller is in busy state!\n");
57118@@ -226,7 +243,7 @@ static void rockchip_spi_set_cs(struct spi_device *spi, bool enable)
57119 {
57120 	struct spi_controller *ctlr = spi->controller;
57121 	struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
57122-	bool cs_asserted = !enable;
57123+	bool cs_asserted = spi->mode & SPI_CS_HIGH ? enable : !enable;
57124 
57125 	/* Return immediately for no-op */
57126 	if (cs_asserted == rs->cs_asserted[spi->chip_select])
57127@@ -236,11 +253,15 @@ static void rockchip_spi_set_cs(struct spi_device *spi, bool enable)
57128 		/* Keep things powered as long as CS is asserted */
57129 		pm_runtime_get_sync(rs->dev);
57130 
57131-		ROCKCHIP_SPI_SET_BITS(rs->regs + ROCKCHIP_SPI_SER,
57132-				      BIT(spi->chip_select));
57133+		if (spi->cs_gpiod)
57134+			ROCKCHIP_SPI_SET_BITS(rs->regs + ROCKCHIP_SPI_SER, 1);
57135+		else
57136+			ROCKCHIP_SPI_SET_BITS(rs->regs + ROCKCHIP_SPI_SER, BIT(spi->chip_select));
57137 	} else {
57138-		ROCKCHIP_SPI_CLR_BITS(rs->regs + ROCKCHIP_SPI_SER,
57139-				      BIT(spi->chip_select));
57140+		if (spi->cs_gpiod)
57141+			ROCKCHIP_SPI_CLR_BITS(rs->regs + ROCKCHIP_SPI_SER, 1);
57142+		else
57143+			ROCKCHIP_SPI_CLR_BITS(rs->regs + ROCKCHIP_SPI_SER, BIT(spi->chip_select));
57144 
57145 		/* Drop reference from when we first asserted CS */
57146 		pm_runtime_put(rs->dev);
57147@@ -259,8 +280,9 @@ static void rockchip_spi_handle_err(struct spi_controller *ctlr,
57148 	 */
57149 	spi_enable_chip(rs, false);
57150 
57151-	/* make sure all interrupts are masked */
57152+	/* make sure all interrupts are masked and status cleared */
57153 	writel_relaxed(0, rs->regs + ROCKCHIP_SPI_IMR);
57154+	writel_relaxed(0xffffffff, rs->regs + ROCKCHIP_SPI_ICR);
57155 
57156 	if (atomic_read(&rs->state) & TXDMA)
57157 		dmaengine_terminate_async(ctlr->dma_tx);
57158@@ -327,6 +349,15 @@ static irqreturn_t rockchip_spi_isr(int irq, void *dev_id)
57159 	struct spi_controller *ctlr = dev_id;
57160 	struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
57161 
57162+	/* When the cs-inactive interrupt fires, abort the spi slave transfer */
57163+	if (rs->cs_inactive && readl_relaxed(rs->regs + ROCKCHIP_SPI_IMR) & INT_CS_INACTIVE) {
57164+		ctlr->slave_abort(ctlr);
57165+		writel_relaxed(0, rs->regs + ROCKCHIP_SPI_IMR);
57166+		writel_relaxed(0xffffffff, rs->regs + ROCKCHIP_SPI_ICR);
57167+
57168+		return IRQ_HANDLED;
57169+	}
57170+
57171 	if (rs->tx_left)
57172 		rockchip_spi_pio_writer(rs);
57173 
57174@@ -334,6 +365,7 @@ static irqreturn_t rockchip_spi_isr(int irq, void *dev_id)
57175 	if (!rs->rx_left) {
57176 		spi_enable_chip(rs, false);
57177 		writel_relaxed(0, rs->regs + ROCKCHIP_SPI_IMR);
57178+		writel_relaxed(0xffffffff, rs->regs + ROCKCHIP_SPI_ICR);
57179 		spi_finalize_current_transfer(ctlr);
57180 	}
57181 
57182@@ -341,14 +373,18 @@ static irqreturn_t rockchip_spi_isr(int irq, void *dev_id)
57183 }
57184 
57185 static int rockchip_spi_prepare_irq(struct rockchip_spi *rs,
57186-		struct spi_transfer *xfer)
57187+				    struct spi_controller *ctlr,
57188+				    struct spi_transfer *xfer)
57189 {
57190 	rs->tx = xfer->tx_buf;
57191 	rs->rx = xfer->rx_buf;
57192 	rs->tx_left = rs->tx ? xfer->len / rs->n_bytes : 0;
57193 	rs->rx_left = xfer->len / rs->n_bytes;
57194 
57195-	writel_relaxed(INT_RF_FULL, rs->regs + ROCKCHIP_SPI_IMR);
57196+	if (rs->cs_inactive)
57197+		writel_relaxed(INT_RF_FULL | INT_CS_INACTIVE, rs->regs + ROCKCHIP_SPI_IMR);
57198+	else
57199+		writel_relaxed(INT_RF_FULL, rs->regs + ROCKCHIP_SPI_IMR);
57200 	spi_enable_chip(rs, true);
57201 
57202 	if (rs->tx_left)
57203@@ -367,6 +403,9 @@ static void rockchip_spi_dma_rxcb(void *data)
57204 	if (state & TXDMA && !rs->slave_abort)
57205 		return;
57206 
57207+	if (rs->cs_inactive)
57208+		writel_relaxed(0, rs->regs + ROCKCHIP_SPI_IMR);
57209+
57210 	spi_enable_chip(rs, false);
57211 	spi_finalize_current_transfer(ctlr);
57212 }
57213@@ -381,7 +420,7 @@ static void rockchip_spi_dma_txcb(void *data)
57214 		return;
57215 
57216 	/* Wait until the FIFO data completely. */
57217-	wait_for_idle(rs);
57218+	wait_for_tx_idle(rs, ctlr->slave);
57219 
57220 	spi_enable_chip(rs, false);
57221 	spi_finalize_current_transfer(ctlr);
57222@@ -407,14 +446,16 @@ static int rockchip_spi_prepare_dma(struct rockchip_spi *rs,
57223 
57224 	atomic_set(&rs->state, 0);
57225 
57226+	rs->tx = xfer->tx_buf;
57227+	rs->rx = xfer->rx_buf;
57228+
57229 	rxdesc = NULL;
57230 	if (xfer->rx_buf) {
57231 		struct dma_slave_config rxconf = {
57232 			.direction = DMA_DEV_TO_MEM,
57233 			.src_addr = rs->dma_addr_rx,
57234 			.src_addr_width = rs->n_bytes,
57235-			.src_maxburst = rockchip_spi_calc_burst_size(xfer->len /
57236-								     rs->n_bytes),
57237+			.src_maxburst = rockchip_spi_calc_burst_size(xfer->len / rs->n_bytes),
57238 		};
57239 
57240 		dmaengine_slave_config(ctlr->dma_rx, &rxconf);
57241@@ -458,10 +499,13 @@ static int rockchip_spi_prepare_dma(struct rockchip_spi *rs,
57242 	/* rx must be started before tx due to spi instinct */
57243 	if (rxdesc) {
57244 		atomic_or(RXDMA, &rs->state);
57245-		dmaengine_submit(rxdesc);
57246+		ctlr->dma_rx->cookie = dmaengine_submit(rxdesc);
57247 		dma_async_issue_pending(ctlr->dma_rx);
57248 	}
57249 
57250+	if (rs->cs_inactive)
57251+		writel_relaxed(INT_CS_INACTIVE, rs->regs + ROCKCHIP_SPI_IMR);
57252+
57253 	spi_enable_chip(rs, true);
57254 
57255 	if (txdesc) {
57256@@ -493,6 +537,8 @@ static int rockchip_spi_config(struct rockchip_spi *rs,
57257 	cr0 |= (spi->mode & 0x3U) << CR0_SCPH_OFFSET;
57258 	if (spi->mode & SPI_LSB_FIRST)
57259 		cr0 |= CR0_FBM_LSB << CR0_FBM_OFFSET;
57260+	if (spi->mode & SPI_CS_HIGH)
57261+		cr0 |= BIT(spi->chip_select) << CR0_SOI_OFFSET;
57262 
57263 	if (xfer->rx_buf && xfer->tx_buf)
57264 		cr0 |= CR0_XFM_TR << CR0_XFM_OFFSET;
57265@@ -531,6 +577,19 @@ static int rockchip_spi_config(struct rockchip_spi *rs,
57266 			dmacr |= RF_DMA_EN;
57267 	}
57268 
57269+	/*
57270+	 * If the speed is higher than IO_DRIVER_4MA_MAX_SCLK_OUT,
57271+	 * select the higher drive strength pinctrl state.
57272+	 */
57273+	if (rs->high_speed_state) {
57274+		if (rs->freq > IO_DRIVER_4MA_MAX_SCLK_OUT)
57275+			pinctrl_select_state(rs->dev->pins->p,
57276+					     rs->high_speed_state);
57277+		else
57278+			pinctrl_select_state(rs->dev->pins->p,
57279+					     rs->dev->pins->default_state);
57280+	}
57281+
57282 	writel_relaxed(cr0, rs->regs + ROCKCHIP_SPI_CTRLR0);
57283 	writel_relaxed(cr1, rs->regs + ROCKCHIP_SPI_CTRLR1);
57284 
57285@@ -538,12 +597,12 @@ static int rockchip_spi_config(struct rockchip_spi *rs,
57286 	 * interrupt exactly when the fifo is full doesn't seem to work,
57287 	 * so we need the strict inequality here
57288 	 */
57289-	if (xfer->len < rs->fifo_len)
57290-		writel_relaxed(xfer->len - 1, rs->regs + ROCKCHIP_SPI_RXFTLR);
57291+	if ((xfer->len / rs->n_bytes) < rs->fifo_len)
57292+		writel_relaxed(xfer->len / rs->n_bytes - 1, rs->regs + ROCKCHIP_SPI_RXFTLR);
57293 	else
57294 		writel_relaxed(rs->fifo_len / 2 - 1, rs->regs + ROCKCHIP_SPI_RXFTLR);
57295 
57296-	writel_relaxed(rs->fifo_len / 2, rs->regs + ROCKCHIP_SPI_DMATDLR);
57297+	writel_relaxed(rs->fifo_len / 2 - 1, rs->regs + ROCKCHIP_SPI_DMATDLR);
57298 	writel_relaxed(rockchip_spi_calc_burst_size(xfer->len / rs->n_bytes) - 1,
57299 		       rs->regs + ROCKCHIP_SPI_DMARDLR);
57300 	writel_relaxed(dmacr, rs->regs + ROCKCHIP_SPI_DMACR);
57301@@ -566,7 +625,43 @@ static size_t rockchip_spi_max_transfer_size(struct spi_device *spi)
57302 static int rockchip_spi_slave_abort(struct spi_controller *ctlr)
57303 {
57304 	struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
57305+	u32 rx_fifo_left;
57306+	struct dma_tx_state state;
57307+	enum dma_status status;
57308+
57309+	/* Get the current dma rx position */
57310+	if (atomic_read(&rs->state) & RXDMA) {
57311+		dmaengine_pause(ctlr->dma_rx);
57312+		status = dmaengine_tx_status(ctlr->dma_rx, ctlr->dma_rx->cookie, &state);
57313+		if (status == DMA_ERROR) {
57314+			rs->rx = rs->xfer->rx_buf;
57315+			rs->xfer->len = 0;
57316+			rx_fifo_left = readl_relaxed(rs->regs + ROCKCHIP_SPI_RXFLR);
57317+			for (; rx_fifo_left; rx_fifo_left--)
57318+				readl_relaxed(rs->regs + ROCKCHIP_SPI_RXDR);
57319+			goto out;
57320+		} else {
57321+			rs->rx += rs->xfer->len - rs->n_bytes * state.residue;
57322+		}
57323+	}
57324 
57325+	/* Drain the valid data left in the rx fifo and set rs->xfer->len to the real rx size */
57326+	if (rs->rx) {
57327+		rx_fifo_left = readl_relaxed(rs->regs + ROCKCHIP_SPI_RXFLR);
57328+		for (; rx_fifo_left; rx_fifo_left--) {
57329+			u32 rxw = readl_relaxed(rs->regs + ROCKCHIP_SPI_RXDR);
57330+
57331+			if (rs->n_bytes == 1)
57332+				*(u8 *)rs->rx = (u8)rxw;
57333+			else
57334+				*(u16 *)rs->rx = (u16)rxw;
57335+			rs->rx += rs->n_bytes;
57336+		}
57337+
57338+		rs->xfer->len = (unsigned int)(rs->rx - rs->xfer->rx_buf);
57339+	}
57340+
57341+out:
57342 	if (atomic_read(&rs->state) & RXDMA)
57343 		dmaengine_terminate_sync(ctlr->dma_rx);
57344 	if (atomic_read(&rs->state) & TXDMA)
57345@@ -608,7 +703,7 @@ static int rockchip_spi_transfer_one(
57346 	}
57347 
57348 	rs->n_bytes = xfer->bits_per_word <= 8 ? 1 : 2;
57349-
57350+	rs->xfer = xfer;
57351 	use_dma = ctlr->can_dma ? ctlr->can_dma(ctlr, spi, xfer) : false;
57352 
57353 	ret = rockchip_spi_config(rs, spi, xfer, use_dma, ctlr->slave);
57354@@ -618,7 +713,7 @@ static int rockchip_spi_transfer_one(
57355 	if (use_dma)
57356 		return rockchip_spi_prepare_dma(rs, ctlr, xfer);
57357 
57358-	return rockchip_spi_prepare_irq(rs, xfer);
57359+	return rockchip_spi_prepare_irq(rs, ctlr, xfer);
57360 }
57361 
57362 static bool rockchip_spi_can_dma(struct spi_controller *ctlr,
57363@@ -635,6 +730,26 @@ static bool rockchip_spi_can_dma(struct spi_controller *ctlr,
57364 	return xfer->len / bytes_per_word >= rs->fifo_len;
57365 }
57366 
57367+static int rockchip_spi_setup(struct spi_device *spi)
57368+{
57369+	struct rockchip_spi *rs = spi_controller_get_devdata(spi->controller);
57370+	u32 cr0;
57371+
57372+	pm_runtime_get_sync(rs->dev);
57373+
57374+	cr0 = readl_relaxed(rs->regs + ROCKCHIP_SPI_CTRLR0);
57375+
57376+	cr0 |= ((spi->mode & 0x3) << CR0_SCPH_OFFSET);
57377+	if (spi->mode & SPI_CS_HIGH)
57378+		cr0 |= BIT(spi->chip_select) << CR0_SOI_OFFSET;
57379+
57380+	writel_relaxed(cr0, rs->regs + ROCKCHIP_SPI_CTRLR0);
57381+
57382+	pm_runtime_put(rs->dev);
57383+
57384+	return 0;
57385+}
57386+
57387 static int rockchip_spi_probe(struct platform_device *pdev)
57388 {
57389 	int ret;
57390@@ -644,6 +759,7 @@ static int rockchip_spi_probe(struct platform_device *pdev)
57391 	struct device_node *np = pdev->dev.of_node;
57392 	u32 rsd_nsecs, num_cs;
57393 	bool slave_mode;
57394+	struct pinctrl *pinctrl = NULL;
57395 
57396 	slave_mode = of_property_read_bool(np, "spi-slave");
57397 
57398@@ -760,6 +876,7 @@ static int rockchip_spi_probe(struct platform_device *pdev)
57399 	ctlr->min_speed_hz = rs->freq / BAUDR_SCKDV_MAX;
57400 	ctlr->max_speed_hz = min(rs->freq / BAUDR_SCKDV_MIN, MAX_SCLK_OUT);
57401 
57402+	ctlr->setup = rockchip_spi_setup;
57403 	ctlr->set_cs = rockchip_spi_set_cs;
57404 	ctlr->transfer_one = rockchip_spi_transfer_one;
57405 	ctlr->max_transfer_size = rockchip_spi_max_transfer_size;
57406@@ -792,6 +909,28 @@ static int rockchip_spi_probe(struct platform_device *pdev)
57407 		ctlr->can_dma = rockchip_spi_can_dma;
57408 	}
57409 
57410+	switch (readl_relaxed(rs->regs + ROCKCHIP_SPI_VERSION)) {
57411+	case ROCKCHIP_SPI_VER2_TYPE2:
57412+		ctlr->mode_bits |= SPI_CS_HIGH;
57413+		if (ctlr->can_dma && slave_mode)
57414+			rs->cs_inactive = true;
57415+		else
57416+			rs->cs_inactive = false;
57417+		break;
57418+	default:
57419+		rs->cs_inactive = false;
57420+		break;
57421+	}
57422+
57423+	pinctrl = devm_pinctrl_get(&pdev->dev);
57424+	if (!IS_ERR(pinctrl)) {
57425+		rs->high_speed_state = pinctrl_lookup_state(pinctrl, "high_speed");
57426+		if (IS_ERR_OR_NULL(rs->high_speed_state)) {
57427+			dev_warn(&pdev->dev, "no high_speed pinctrl state\n");
57428+			rs->high_speed_state = NULL;
57429+		}
57430+	}
57431+
57432 	ret = devm_spi_register_controller(&pdev->dev, ctlr);
57433 	if (ret < 0) {
57434 		dev_err(&pdev->dev, "Failed to register controller\n");
57435@@ -847,14 +986,14 @@ static int rockchip_spi_suspend(struct device *dev)
57436 {
57437 	int ret;
57438 	struct spi_controller *ctlr = dev_get_drvdata(dev);
57439+	struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
57440 
57441 	ret = spi_controller_suspend(ctlr);
57442 	if (ret < 0)
57443 		return ret;
57444 
57445-	ret = pm_runtime_force_suspend(dev);
57446-	if (ret < 0)
57447-		return ret;
57448+	clk_disable_unprepare(rs->spiclk);
57449+	clk_disable_unprepare(rs->apb_pclk);
57450 
57451 	pinctrl_pm_select_sleep_state(dev);
57452 
57453@@ -869,10 +1008,14 @@ static int rockchip_spi_resume(struct device *dev)
57454 
57455 	pinctrl_pm_select_default_state(dev);
57456 
57457-	ret = pm_runtime_force_resume(dev);
57458+	ret = clk_prepare_enable(rs->apb_pclk);
57459 	if (ret < 0)
57460 		return ret;
57461 
57462+	ret = clk_prepare_enable(rs->spiclk);
57463+	if (ret < 0)
57464+		clk_disable_unprepare(rs->apb_pclk);
57465+
57466 	ret = spi_controller_resume(ctlr);
57467 	if (ret < 0) {
57468 		clk_disable_unprepare(rs->spiclk);
57469@@ -914,7 +1057,7 @@ static int rockchip_spi_runtime_resume(struct device *dev)
57470 #endif /* CONFIG_PM */
57471 
57472 static const struct dev_pm_ops rockchip_spi_pm = {
57473-	SET_SYSTEM_SLEEP_PM_OPS(rockchip_spi_suspend, rockchip_spi_resume)
57474+	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(rockchip_spi_suspend, rockchip_spi_resume)
57475 	SET_RUNTIME_PM_OPS(rockchip_spi_runtime_suspend,
57476 			   rockchip_spi_runtime_resume, NULL)
57477 };
57478@@ -930,7 +1073,9 @@ static const struct of_device_id rockchip_spi_dt_match[] = {
57479 	{ .compatible = "rockchip,rk3328-spi", },
57480 	{ .compatible = "rockchip,rk3368-spi", },
57481 	{ .compatible = "rockchip,rk3399-spi", },
57482+	{ .compatible = "rockchip,rv1106-spi", },
57483 	{ .compatible = "rockchip,rv1108-spi", },
57484+	{ .compatible = "rockchip,rv1126-spi", },
57485 	{ },
57486 };
57487 MODULE_DEVICE_TABLE(of, rockchip_spi_dt_match);
57488diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
57489index aee960a7d..e08de32ea 100644
57490--- a/drivers/spi/spidev.c
57491+++ b/drivers/spi/spidev.c
57492@@ -691,6 +691,7 @@ static const struct of_device_id spidev_dt_ids[] = {
57493 	{ .compatible = "lwn,bk4" },
57494 	{ .compatible = "dh,dhcom-board" },
57495 	{ .compatible = "menlo,m53cpld" },
57496+	{ .compatible = "rockchip,spidev" },
57497 	{},
57498 };
57499 MODULE_DEVICE_TABLE(of, spidev_dt_ids);
57500diff --git a/drivers/staging/android/Makefile b/drivers/staging/android/Makefile
57501index 3b66cd0b0..ca5df9abd 100644
57502--- a/drivers/staging/android/Makefile
57503+++ b/drivers/staging/android/Makefile
57504@@ -4,3 +4,4 @@ ccflags-y += -I$(src)			# needed for trace events
57505 obj-y					+= ion/
57506 
57507 obj-$(CONFIG_ASHMEM)			+= ashmem.o
57508+obj-$(CONFIG_DEBUG_KINFO)	+= debug_kinfo.o
57509diff --git a/drivers/thermal/rockchip_thermal.c b/drivers/thermal/rockchip_thermal.c
57510index aa9e0e31e..a328dec78 100644
57511--- a/drivers/thermal/rockchip_thermal.c
57512+++ b/drivers/thermal/rockchip_thermal.c
57513@@ -18,6 +18,7 @@
57514 #include <linux/thermal.h>
57515 #include <linux/mfd/syscon.h>
57516 #include <linux/pinctrl/consumer.h>
57517+#include <linux/nvmem-consumer.h>
57518 
57519 /*
57520  * If the temperature over a period of time High,
57521@@ -26,7 +27,7 @@
57522  */
57523 enum tshut_mode {
57524 	TSHUT_MODE_CRU = 0,
57525-	TSHUT_MODE_GPIO,
57526+	TSHUT_MODE_OTP,
57527 };
57528 
57529 /*
57530@@ -61,22 +62,26 @@ enum adc_sort_mode {
57531 #include "thermal_hwmon.h"
57532 
57533 /**
57534- * The max sensors is two in rockchip SoCs.
57535- * Two sensors: CPU and GPU sensor.
57536+ * The maximum number of sensors is seven in rockchip SoCs.
57537  */
57538-#define SOC_MAX_SENSORS	2
57539+#define SOC_MAX_SENSORS	7
57540 
57541 /**
57542  * struct chip_tsadc_table - hold information about chip-specific differences
57543  * @id: conversion table
57544  * @length: size of conversion table
57545  * @data_mask: mask to apply on data inputs
57546+ * @kNum: linear parameter k
57547+ * @bNum: linear parameter b
57548  * @mode: sort mode of this adc variant (incrementing or decrementing)
57549  */
57550 struct chip_tsadc_table {
57551 	const struct tsadc_table *id;
57552 	unsigned int length;
57553 	u32 data_mask;
57554+	/* Tsadc is linear, using linear parameters */
57555+	int kNum;
57556+	int bNum;
57557 	enum adc_sort_mode mode;
57558 };
57559 
57560@@ -94,6 +99,8 @@ struct chip_tsadc_table {
57561  * @set_alarm_temp: set the high temperature interrupt
57562  * @set_tshut_temp: set the hardware-controlled shutdown temperature
57563  * @set_tshut_mode: set the hardware-controlled shutdown mode
57564+ * @get_trim_code: get the trim code by otp value
57565+ * @trim_temp: get trim temp by trim code
57566  * @table: the chip-specific conversion table
57567  */
57568 struct rockchip_tsadc_chip {
57569@@ -119,7 +126,11 @@ struct rockchip_tsadc_chip {
57570 			      int chn, void __iomem *reg, int temp);
57571 	int (*set_tshut_temp)(const struct chip_tsadc_table *table,
57572 			      int chn, void __iomem *reg, int temp);
57573-	void (*set_tshut_mode)(int chn, void __iomem *reg, enum tshut_mode m);
57574+	void (*set_tshut_mode)(struct regmap *grf, int chn,
57575+			       void __iomem *reg, enum tshut_mode m);
57576+	int (*get_trim_code)(struct platform_device *pdev,
57577+			     int code, int trim_base);
57578+	int (*trim_temp)(struct platform_device *pdev);
57579 
57580 	/* Per-table methods */
57581 	struct chip_tsadc_table table;
57582@@ -143,13 +154,18 @@ struct rockchip_thermal_sensor {
57583  * @pdev: platform device of thermal
57584  * @reset: the reset controller of tsadc
57585  * @sensors: array of thermal sensors
57586- * @clk: the controller clock is divided by the exteral 24MHz
57587- * @pclk: the advanced peripherals bus clock
57588+ * @clks: the bulk clocks of the tsadc, including the controller clock and the peripheral bus clock
57589+ * @num_clks: the number of tsadc clocks
57590  * @grf: the general register file will be used to do static set by software
57591  * @regs: the base address of tsadc controller
57592  * @tshut_temp: the hardware-controlled shutdown temperature value
57593+ * @trim: trimmed value
57594  * @tshut_mode: the hardware-controlled shutdown mode (0:CRU 1:GPIO)
57595  * @tshut_polarity: the hardware-controlled active polarity (0:LOW 1:HIGH)
57596+ * @pinctrl: the pinctrl of tsadc
57597+ * @gpio_state: the pinctrl state that selects the gpio function
57598+ * @otp_state: the pinctrl state that selects the otp output function
57599+ * @panic_nb: panic notifier block
57600  */
57601 struct rockchip_thermal_data {
57602 	const struct rockchip_tsadc_chip *chip;
57603@@ -158,15 +174,21 @@ struct rockchip_thermal_data {
57604 
57605 	struct rockchip_thermal_sensor sensors[SOC_MAX_SENSORS];
57606 
57607-	struct clk *clk;
57608-	struct clk *pclk;
57609+	struct clk_bulk_data *clks;
57610+	int num_clks;
57611 
57612 	struct regmap *grf;
57613 	void __iomem *regs;
57614 
57615 	int tshut_temp;
57616+	int trim;
57617 	enum tshut_mode tshut_mode;
57618 	enum tshut_polarity tshut_polarity;
57619+	struct pinctrl *pinctrl;
57620+	struct pinctrl_state *gpio_state;
57621+	struct pinctrl_state *otp_state;
57622+
57623+	struct notifier_block panic_nb;
57624 };
57625 
57626 /**
57627@@ -180,29 +202,49 @@ struct rockchip_thermal_data {
57628 #define TSADCV2_AUTO_CON			0x04
57629 #define TSADCV2_INT_EN				0x08
57630 #define TSADCV2_INT_PD				0x0c
57631+#define TSADCV3_AUTO_SRC_CON			0x0c
57632+#define TSADCV3_HT_INT_EN			0x14
57633+#define TSADCV3_HSHUT_GPIO_INT_EN		0x18
57634+#define TSADCV3_HSHUT_CRU_INT_EN		0x1c
57635+#define TSADCV3_INT_PD				0x24
57636+#define TSADCV3_HSHUT_PD			0x28
57637 #define TSADCV2_DATA(chn)			(0x20 + (chn) * 0x04)
57638 #define TSADCV2_COMP_INT(chn)		        (0x30 + (chn) * 0x04)
57639 #define TSADCV2_COMP_SHUT(chn)		        (0x40 + (chn) * 0x04)
57640+#define TSADCV3_DATA(chn)			(0x2c + (chn) * 0x04)
57641+#define TSADCV3_COMP_INT(chn)		        (0x6c + (chn) * 0x04)
57642+#define TSADCV3_COMP_SHUT(chn)		        (0x10c + (chn) * 0x04)
57643 #define TSADCV2_HIGHT_INT_DEBOUNCE		0x60
57644 #define TSADCV2_HIGHT_TSHUT_DEBOUNCE		0x64
57645+#define TSADCV3_HIGHT_INT_DEBOUNCE		0x14c
57646+#define TSADCV3_HIGHT_TSHUT_DEBOUNCE		0x150
57647 #define TSADCV2_AUTO_PERIOD			0x68
57648 #define TSADCV2_AUTO_PERIOD_HT			0x6c
57649+#define TSADCV3_AUTO_PERIOD			0x154
57650+#define TSADCV3_AUTO_PERIOD_HT			0x158
57651 
57652 #define TSADCV2_AUTO_EN				BIT(0)
57653+#define TSADCV2_AUTO_EN_MASK			BIT(16)
57654 #define TSADCV2_AUTO_SRC_EN(chn)		BIT(4 + (chn))
57655+#define TSADCV3_AUTO_SRC_EN(chn)		BIT(chn)
57656+#define TSADCV3_AUTO_SRC_EN_MASK(chn)		BIT(16 + chn)
57657 #define TSADCV2_AUTO_TSHUT_POLARITY_HIGH	BIT(8)
57658+#define TSADCV2_AUTO_TSHUT_POLARITY_MASK	BIT(24)
57659 
57660 #define TSADCV3_AUTO_Q_SEL_EN			BIT(1)
57661 
57662 #define TSADCV2_INT_SRC_EN(chn)			BIT(chn)
57663+#define TSADCV2_INT_SRC_EN_MASK(chn)		BIT(16 + (chn))
57664 #define TSADCV2_SHUT_2GPIO_SRC_EN(chn)		BIT(4 + (chn))
57665 #define TSADCV2_SHUT_2CRU_SRC_EN(chn)		BIT(8 + (chn))
57666 
57667 #define TSADCV2_INT_PD_CLEAR_MASK		~BIT(8)
57668 #define TSADCV3_INT_PD_CLEAR_MASK		~BIT(16)
57669+#define TSADCV4_INT_PD_CLEAR_MASK		0xffffffff
57670 
57671 #define TSADCV2_DATA_MASK			0xfff
57672 #define TSADCV3_DATA_MASK			0x3ff
57673+#define TSADCV4_DATA_MASK			0x1ff
57674 
57675 #define TSADCV2_HIGHT_INT_DEBOUNCE_COUNT	4
57676 #define TSADCV2_HIGHT_TSHUT_DEBOUNCE_COUNT	4
57677@@ -210,8 +252,13 @@ struct rockchip_thermal_data {
57678 #define TSADCV2_AUTO_PERIOD_HT_TIME		50  /* 50ms */
57679 #define TSADCV3_AUTO_PERIOD_TIME		1875 /* 2.5ms */
57680 #define TSADCV3_AUTO_PERIOD_HT_TIME		1875 /* 2.5ms */
57681+#define TSADCV5_AUTO_PERIOD_TIME		1622 /* 2.5ms */
57682+#define TSADCV5_AUTO_PERIOD_HT_TIME		1622 /* 2.5ms */
57683+#define TSADCV6_AUTO_PERIOD_TIME		5000 /* 2.5ms */
57684+#define TSADCV6_AUTO_PERIOD_HT_TIME		5000 /* 2.5ms */
57685 
57686 #define TSADCV2_USER_INTER_PD_SOC		0x340 /* 13 clocks */
57687+#define TSADCV5_USER_INTER_PD_SOC		0xfc0 /* 97us, at least 90us */
57688 
57689 #define GRF_SARADC_TESTBIT			0x0e644
57690 #define GRF_TSADC_TESTBIT_L			0x0e648
57691@@ -219,13 +266,33 @@ struct rockchip_thermal_data {
57692 
57693 #define PX30_GRF_SOC_CON2			0x0408
57694 
57695+#define RK1808_BUS_GRF_SOC_CON0			0x0400
57696+
57697+#define RK3568_GRF_TSADC_CON			0x0600
57698+#define RK3568_GRF_TSADC_ANA_REG0		(0x10001 << 0)
57699+#define RK3568_GRF_TSADC_ANA_REG1		(0x10001 << 1)
57700+#define RK3568_GRF_TSADC_ANA_REG2		(0x10001 << 2)
57701+#define RK3568_GRF_TSADC_TSEN			(0x10001 << 8)
57702+
57703+#define RV1126_GRF0_TSADC_CON			0x0100
57704+
57705+#define RV1126_GRF0_TSADC_TRM			(0xff0077 << 0)
57706+#define RV1126_GRF0_TSADC_SHUT_2CRU		(0x30003 << 10)
57707+#define RV1126_GRF0_TSADC_SHUT_2GPIO		(0x70007 << 12)
57708+
57709 #define GRF_SARADC_TESTBIT_ON			(0x10001 << 2)
57710 #define GRF_TSADC_TESTBIT_H_ON			(0x10001 << 2)
57711+#define GRF_TSADC_BANDGAP_CHOPPER_EN		(0x10001 << 2)
57712 #define GRF_TSADC_VCM_EN_L			(0x10001 << 7)
57713 #define GRF_TSADC_VCM_EN_H			(0x10001 << 7)
57714 
57715 #define GRF_CON_TSADC_CH_INV			(0x10001 << 1)
57716 
57717+#define MIN_TEMP				(-40000)
57718+#define LOWEST_TEMP				(-273000)
57719+#define MAX_TEMP				(125000)
57720+#define MAX_ENV_TEMP				(85000)
57721+
57722 /**
57723  * struct tsadc_table - code to temperature conversion table
57724  * @code: the value of adc channel
57725@@ -241,6 +308,7 @@ struct tsadc_table {
57726 	int temp;
57727 };
57728 
57729+
57730 static const struct tsadc_table rv1108_table[] = {
57731 	{0, -40000},
57732 	{374, -40000},
57733@@ -280,6 +348,45 @@ static const struct tsadc_table rv1108_table[] = {
57734 	{TSADCV2_DATA_MASK, 125000},
57735 };
57736 
57737+static const struct tsadc_table rk1808_code_table[] = {
57738+	{0, -40000},
57739+	{3455, -40000},
57740+	{3463, -35000},
57741+	{3471, -30000},
57742+	{3479, -25000},
57743+	{3487, -20000},
57744+	{3495, -15000},
57745+	{3503, -10000},
57746+	{3511, -5000},
57747+	{3519, 0},
57748+	{3527, 5000},
57749+	{3535, 10000},
57750+	{3543, 15000},
57751+	{3551, 20000},
57752+	{3559, 25000},
57753+	{3567, 30000},
57754+	{3576, 35000},
57755+	{3584, 40000},
57756+	{3592, 45000},
57757+	{3600, 50000},
57758+	{3609, 55000},
57759+	{3617, 60000},
57760+	{3625, 65000},
57761+	{3633, 70000},
57762+	{3642, 75000},
57763+	{3650, 80000},
57764+	{3659, 85000},
57765+	{3667, 90000},
57766+	{3675, 95000},
57767+	{3684, 100000},
57768+	{3692, 105000},
57769+	{3701, 110000},
57770+	{3709, 115000},
57771+	{3718, 120000},
57772+	{3726, 125000},
57773+	{TSADCV2_DATA_MASK, 125000},
57774+};
57775+
57776 static const struct tsadc_table rk3228_code_table[] = {
57777 	{0, -40000},
57778 	{588, -40000},
57779@@ -474,6 +581,54 @@ static const struct tsadc_table rk3399_code_table[] = {
57780 	{TSADCV3_DATA_MASK, 125000},
57781 };
57782 
57783+static const struct tsadc_table rk3568_code_table[] = {
57784+	{0, -40000},
57785+	{1584, -40000},
57786+	{1620, -35000},
57787+	{1652, -30000},
57788+	{1688, -25000},
57789+	{1720, -20000},
57790+	{1756, -15000},
57791+	{1788, -10000},
57792+	{1824, -5000},
57793+	{1856, 0},
57794+	{1892, 5000},
57795+	{1924, 10000},
57796+	{1956, 15000},
57797+	{1992, 20000},
57798+	{2024, 25000},
57799+	{2060, 30000},
57800+	{2092, 35000},
57801+	{2128, 40000},
57802+	{2160, 45000},
57803+	{2196, 50000},
57804+	{2228, 55000},
57805+	{2264, 60000},
57806+	{2300, 65000},
57807+	{2332, 70000},
57808+	{2368, 75000},
57809+	{2400, 80000},
57810+	{2436, 85000},
57811+	{2468, 90000},
57812+	{2500, 95000},
57813+	{2536, 100000},
57814+	{2572, 105000},
57815+	{2604, 110000},
57816+	{2636, 115000},
57817+	{2672, 120000},
57818+	{2704, 125000},
57819+	{TSADCV2_DATA_MASK, 125000},
57820+};
57821+
57822+static const struct tsadc_table rk3588_code_table[] = {
57823+	{0, -40000},
57824+	{220, -40000},
57825+	{285, 25000},
57826+	{345, 85000},
57827+	{385, 125000},
57828+	{TSADCV4_DATA_MASK, 125000},
57829+};
57830+
57831 static u32 rk_tsadcv2_temp_to_code(const struct chip_tsadc_table *table,
57832 				   int temp)
57833 {
57834@@ -482,6 +637,9 @@ static u32 rk_tsadcv2_temp_to_code(const struct chip_tsadc_table *table,
57835 	unsigned int denom;
57836 	u32 error = table->data_mask;
57837 
57838+	if (table->kNum)
57839+		return (((temp / 1000) * table->kNum) / 1000 + table->bNum);
57840+
57841 	low = 0;
57842 	high = (table->length - 1) - 1; /* ignore the last check for table */
57843 	mid = (high + low) / 2;
57844@@ -535,6 +693,13 @@ static int rk_tsadcv2_code_to_temp(const struct chip_tsadc_table *table,
57845 	unsigned int num;
57846 	unsigned long denom;
57847 
57848+	if (table->kNum) {
57849+		*temp = (((int)code - table->bNum) * 10000 / table->kNum) * 100;
57850+		if (*temp < MIN_TEMP || *temp > MAX_TEMP)
57851+			return -EAGAIN;
57852+		return 0;
57853+	}
57854+
57855 	WARN_ON(table->length < 2);
57856 
57857 	switch (table->mode) {
57858@@ -701,6 +866,89 @@ static void rk_tsadcv4_initialize(struct regmap *grf, void __iomem *regs,
57859 	regmap_write(grf, PX30_GRF_SOC_CON2, GRF_CON_TSADC_CH_INV);
57860 }
57861 
57862+static void rk_tsadcv5_initialize(struct regmap *grf, void __iomem *regs,
57863+				  enum tshut_polarity tshut_polarity)
57864+{
57865+	if (tshut_polarity == TSHUT_HIGH_ACTIVE)
57866+		writel_relaxed(0U | TSADCV2_AUTO_TSHUT_POLARITY_HIGH,
57867+			       regs + TSADCV2_AUTO_CON);
57868+	else
57869+		writel_relaxed(0U & ~TSADCV2_AUTO_TSHUT_POLARITY_HIGH,
57870+			       regs + TSADCV2_AUTO_CON);
57871+
57872+	writel_relaxed(TSADCV5_USER_INTER_PD_SOC, regs + TSADCV2_USER_CON);
57873+
57874+	writel_relaxed(TSADCV5_AUTO_PERIOD_TIME, regs + TSADCV2_AUTO_PERIOD);
57875+	writel_relaxed(TSADCV2_HIGHT_INT_DEBOUNCE_COUNT,
57876+		       regs + TSADCV2_HIGHT_INT_DEBOUNCE);
57877+	writel_relaxed(TSADCV5_AUTO_PERIOD_HT_TIME,
57878+		       regs + TSADCV2_AUTO_PERIOD_HT);
57879+	writel_relaxed(TSADCV2_HIGHT_TSHUT_DEBOUNCE_COUNT,
57880+		       regs + TSADCV2_HIGHT_TSHUT_DEBOUNCE);
57881+
57882+	if (!IS_ERR(grf))
57883+		regmap_write(grf, RK1808_BUS_GRF_SOC_CON0,
57884+			     GRF_TSADC_BANDGAP_CHOPPER_EN);
57885+}
57886+
57887+static void rk_tsadcv6_initialize(struct regmap *grf, void __iomem *regs,
57888+				  enum tshut_polarity tshut_polarity)
57889+{
57890+	rk_tsadcv2_initialize(grf, regs, tshut_polarity);
57891+
57892+	if (!IS_ERR(grf))
57893+		regmap_write(grf, RV1126_GRF0_TSADC_CON,
57894+			     RV1126_GRF0_TSADC_TRM);
57895+}
57896+
57897+static void rk_tsadcv7_initialize(struct regmap *grf, void __iomem *regs,
57898+				  enum tshut_polarity tshut_polarity)
57899+{
57900+	writel_relaxed(TSADCV5_USER_INTER_PD_SOC, regs + TSADCV2_USER_CON);
57901+	writel_relaxed(TSADCV5_AUTO_PERIOD_TIME, regs + TSADCV2_AUTO_PERIOD);
57902+	writel_relaxed(TSADCV2_HIGHT_INT_DEBOUNCE_COUNT,
57903+		       regs + TSADCV2_HIGHT_INT_DEBOUNCE);
57904+	writel_relaxed(TSADCV5_AUTO_PERIOD_HT_TIME,
57905+		       regs + TSADCV2_AUTO_PERIOD_HT);
57906+	writel_relaxed(TSADCV2_HIGHT_TSHUT_DEBOUNCE_COUNT,
57907+		       regs + TSADCV2_HIGHT_TSHUT_DEBOUNCE);
57908+
57909+	if (tshut_polarity == TSHUT_HIGH_ACTIVE)
57910+		writel_relaxed(0U | TSADCV2_AUTO_TSHUT_POLARITY_HIGH,
57911+			       regs + TSADCV2_AUTO_CON);
57912+	else
57913+		writel_relaxed(0U & ~TSADCV2_AUTO_TSHUT_POLARITY_HIGH,
57914+			       regs + TSADCV2_AUTO_CON);
57915+
57916+	if (!IS_ERR(grf)) {
57917+		regmap_write(grf, RK3568_GRF_TSADC_CON, RK3568_GRF_TSADC_TSEN);
57918+		udelay(15);
57919+		regmap_write(grf, RK3568_GRF_TSADC_CON, RK3568_GRF_TSADC_ANA_REG0);
57920+		regmap_write(grf, RK3568_GRF_TSADC_CON, RK3568_GRF_TSADC_ANA_REG1);
57921+		regmap_write(grf, RK3568_GRF_TSADC_CON, RK3568_GRF_TSADC_ANA_REG2);
57922+		usleep_range(100, 200);
57923+	}
57924+}
57925+
57926+static void rk_tsadcv8_initialize(struct regmap *grf, void __iomem *regs,
57927+				  enum tshut_polarity tshut_polarity)
57928+{
57929+	writel_relaxed(TSADCV6_AUTO_PERIOD_TIME, regs + TSADCV3_AUTO_PERIOD);
57930+	writel_relaxed(TSADCV6_AUTO_PERIOD_HT_TIME,
57931+		       regs + TSADCV3_AUTO_PERIOD_HT);
57932+	writel_relaxed(TSADCV2_HIGHT_INT_DEBOUNCE_COUNT,
57933+		       regs + TSADCV3_HIGHT_INT_DEBOUNCE);
57934+	writel_relaxed(TSADCV2_HIGHT_TSHUT_DEBOUNCE_COUNT,
57935+		       regs + TSADCV3_HIGHT_TSHUT_DEBOUNCE);
57936+	if (tshut_polarity == TSHUT_HIGH_ACTIVE)
57937+		writel_relaxed(TSADCV2_AUTO_TSHUT_POLARITY_HIGH |
57938+			       TSADCV2_AUTO_TSHUT_POLARITY_MASK,
57939+			       regs + TSADCV2_AUTO_CON);
57940+	else
57941+		writel_relaxed(TSADCV2_AUTO_TSHUT_POLARITY_MASK,
57942+			       regs + TSADCV2_AUTO_CON);
57943+}
57944+
57945 static void rk_tsadcv2_irq_ack(void __iomem *regs)
57946 {
57947 	u32 val;
57948@@ -717,6 +965,17 @@ static void rk_tsadcv3_irq_ack(void __iomem *regs)
57949 	writel_relaxed(val & TSADCV3_INT_PD_CLEAR_MASK, regs + TSADCV2_INT_PD);
57950 }
57951 
57952+static void rk_tsadcv4_irq_ack(void __iomem *regs)
57953+{
57954+	u32 val;
57955+
57956+	val = readl_relaxed(regs + TSADCV3_INT_PD);
57957+	writel_relaxed(val & TSADCV4_INT_PD_CLEAR_MASK, regs + TSADCV3_INT_PD);
57958+	val = readl_relaxed(regs + TSADCV3_HSHUT_PD);
57959+	writel_relaxed(val & TSADCV3_INT_PD_CLEAR_MASK,
57960+		       regs + TSADCV3_HSHUT_PD);
57961+}
57962+
57963 static void rk_tsadcv2_control(void __iomem *regs, bool enable)
57964 {
57965 	u32 val;
57966@@ -752,6 +1011,18 @@ static void rk_tsadcv3_control(void __iomem *regs, bool enable)
57967 	writel_relaxed(val, regs + TSADCV2_AUTO_CON);
57968 }
57969 
57970+static void rk_tsadcv4_control(void __iomem *regs, bool enable)
57971+{
57972+	u32 val;
57973+
57974+	if (enable)
57975+		val = TSADCV2_AUTO_EN | TSADCV2_AUTO_EN_MASK;
57976+	else
57977+		val = TSADCV2_AUTO_EN_MASK;
57978+
57979+	writel_relaxed(val, regs + TSADCV2_AUTO_CON);
57980+}
57981+
57982 static int rk_tsadcv2_get_temp(const struct chip_tsadc_table *table,
57983 			       int chn, void __iomem *regs, int *temp)
57984 {
57985@@ -762,6 +1033,16 @@ static int rk_tsadcv2_get_temp(const struct chip_tsadc_table *table,
57986 	return rk_tsadcv2_code_to_temp(table, val, temp);
57987 }
57988 
57989+static int rk_tsadcv4_get_temp(const struct chip_tsadc_table *table,
57990+			       int chn, void __iomem *regs, int *temp)
57991+{
57992+	u32 val;
57993+
57994+	val = readl_relaxed(regs + TSADCV3_DATA(chn));
57995+
57996+	return rk_tsadcv2_code_to_temp(table, val, temp);
57997+}
57998+
57999 static int rk_tsadcv2_alarm_temp(const struct chip_tsadc_table *table,
58000 				 int chn, void __iomem *regs, int temp)
58001 {
58002@@ -796,6 +1077,33 @@ static int rk_tsadcv2_alarm_temp(const struct chip_tsadc_table *table,
58003 	return 0;
58004 }
58005 
58006+static int rk_tsadcv3_alarm_temp(const struct chip_tsadc_table *table,
58007+				 int chn, void __iomem *regs, int temp)
58008+{
58009+	u32 alarm_value;
58010+
58011+	/*
58012+	 * In some cases a sensor does not need trip points; set_trips
58013+	 * then passes {-INT_MAX, INT_MAX}, which would trigger the tsadc
58014+	 * alarm in the end. Ignore this case and disable the high
58015+	 * temperature interrupt.
58016+	 */
58017+	if (temp == INT_MAX) {
58018+		writel_relaxed(TSADCV2_INT_SRC_EN_MASK(chn),
58019+			       regs + TSADCV3_HT_INT_EN);
58020+		return 0;
58021+	}
58022+	/* Make sure the value is valid */
58023+	alarm_value = rk_tsadcv2_temp_to_code(table, temp);
58024+	if (alarm_value == table->data_mask)
58025+		return -ERANGE;
58026+	writel_relaxed(alarm_value & table->data_mask,
58027+		       regs + TSADCV3_COMP_INT(chn));
58028+	writel_relaxed(TSADCV2_INT_SRC_EN(chn) | TSADCV2_INT_SRC_EN_MASK(chn),
58029+		       regs + TSADCV3_HT_INT_EN);
58030+	return 0;
58031+}
58032+
58033 static int rk_tsadcv2_tshut_temp(const struct chip_tsadc_table *table,
58034 				 int chn, void __iomem *regs, int temp)
58035 {
58036@@ -815,13 +1123,33 @@ static int rk_tsadcv2_tshut_temp(const struct chip_tsadc_table *table,
58037 	return 0;
58038 }
58039 
58040-static void rk_tsadcv2_tshut_mode(int chn, void __iomem *regs,
58041+static int rk_tsadcv3_tshut_temp(const struct chip_tsadc_table *table,
58042+				 int chn, void __iomem *regs, int temp)
58043+{
58044+	u32 tshut_value;
58045+
58046+	/* Make sure the value is valid */
58047+	tshut_value = rk_tsadcv2_temp_to_code(table, temp);
58048+	if (tshut_value == table->data_mask)
58049+		return -ERANGE;
58050+
58051+	writel_relaxed(tshut_value, regs + TSADCV3_COMP_SHUT(chn));
58052+
58053+	/* TSHUT will be valid */
58054+	writel_relaxed(TSADCV3_AUTO_SRC_EN(chn) | TSADCV3_AUTO_SRC_EN_MASK(chn),
58055+		       regs + TSADCV3_AUTO_SRC_CON);
58056+
58057+	return 0;
58058+}
58059+
58060+static void rk_tsadcv2_tshut_mode(struct regmap *grf, int chn,
58061+				  void __iomem *regs,
58062 				  enum tshut_mode mode)
58063 {
58064 	u32 val;
58065 
58066 	val = readl_relaxed(regs + TSADCV2_INT_EN);
58067-	if (mode == TSHUT_MODE_GPIO) {
58068+	if (mode == TSHUT_MODE_OTP) {
58069 		val &= ~TSADCV2_SHUT_2CRU_SRC_EN(chn);
58070 		val |= TSADCV2_SHUT_2GPIO_SRC_EN(chn);
58071 	} else {
58072@@ -832,6 +1160,68 @@ static void rk_tsadcv2_tshut_mode(int chn, void __iomem *regs,
58073 	writel_relaxed(val, regs + TSADCV2_INT_EN);
58074 }
58075 
58076+static void rk_tsadcv3_tshut_mode(struct regmap *grf, int chn,
58077+				  void __iomem *regs,
58078+				  enum tshut_mode mode)
58079+{
58080+	u32 val;
58081+
58082+	val = readl_relaxed(regs + TSADCV2_INT_EN);
58083+	if (mode == TSHUT_MODE_OTP) {
58084+		val &= ~TSADCV2_SHUT_2CRU_SRC_EN(chn);
58085+		val |= TSADCV2_SHUT_2GPIO_SRC_EN(chn);
58086+		if (!IS_ERR(grf))
58087+			regmap_write(grf, RV1126_GRF0_TSADC_CON,
58088+				     RV1126_GRF0_TSADC_SHUT_2GPIO);
58089+	} else {
58090+		val &= ~TSADCV2_SHUT_2GPIO_SRC_EN(chn);
58091+		val |= TSADCV2_SHUT_2CRU_SRC_EN(chn);
58092+		if (!IS_ERR(grf))
58093+			regmap_write(grf, RV1126_GRF0_TSADC_CON,
58094+				     RV1126_GRF0_TSADC_SHUT_2CRU);
58095+	}
58096+
58097+	writel_relaxed(val, regs + TSADCV2_INT_EN);
58098+}
58099+
58100+static void rk_tsadcv4_tshut_mode(struct regmap *grf, int chn,
58101+				  void __iomem *regs,
58102+				  enum tshut_mode mode)
58103+{
58104+	u32 val_gpio, val_cru;
58105+
58106+	if (mode == TSHUT_MODE_OTP) {
58107+		val_gpio = TSADCV2_INT_SRC_EN(chn) | TSADCV2_INT_SRC_EN_MASK(chn);
58108+		val_cru = TSADCV2_INT_SRC_EN_MASK(chn);
58109+	} else {
58110+		val_cru = TSADCV2_INT_SRC_EN(chn) | TSADCV2_INT_SRC_EN_MASK(chn);
58111+		val_gpio = TSADCV2_INT_SRC_EN_MASK(chn);
58112+	}
58113+	writel_relaxed(val_gpio, regs + TSADCV3_HSHUT_GPIO_INT_EN);
58114+	writel_relaxed(val_cru, regs + TSADCV3_HSHUT_CRU_INT_EN);
58115+}
58116+
58117+static int rk_tsadcv1_get_trim_code(struct platform_device *pdev,
58118+				    int code, int trim_base)
58119+{
58120+	struct rockchip_thermal_data *thermal = platform_get_drvdata(pdev);
58121+	const struct chip_tsadc_table *table = &thermal->chip->table;
58122+	u32 base_code;
58123+	int trim_code;
58124+
58125+	base_code = trim_base * table->kNum / 1000 + table->bNum;
58126+	trim_code = code - base_code - 10;
58127+
58128+	return trim_code;
58129+}
58130+
58131+static int rk_tsadcv1_trim_temp(struct platform_device *pdev)
58132+{
58133+	struct rockchip_thermal_data *thermal = platform_get_drvdata(pdev);
58134+
58135+	return thermal->trim * 500;
58136+}
58137+
58138 static const struct rockchip_tsadc_chip px30_tsadc_data = {
58139 	.chn_id[SENSOR_CPU] = 0, /* cpu sensor is channel 0 */
58140 	.chn_id[SENSOR_GPU] = 1, /* gpu sensor is channel 1 */
58141@@ -860,7 +1250,7 @@ static const struct rockchip_tsadc_chip rv1108_tsadc_data = {
58142 	.chn_id[SENSOR_CPU] = 0, /* cpu sensor is channel 0 */
58143 	.chn_num = 1, /* one channel for tsadc */
58144 
58145-	.tshut_mode = TSHUT_MODE_GPIO, /* default TSHUT via GPIO give PMIC */
58146+	.tshut_mode = TSHUT_MODE_OTP, /* default TSHUT via GPIO to PMIC */
58147 	.tshut_polarity = TSHUT_LOW_ACTIVE, /* default TSHUT LOW ACTIVE */
58148 	.tshut_temp = 95000,
58149 
58150@@ -880,11 +1270,61 @@ static const struct rockchip_tsadc_chip rv1108_tsadc_data = {
58151 	},
58152 };
58153 
58154+static const struct rockchip_tsadc_chip rv1126_tsadc_data = {
58155+	.chn_id[SENSOR_CPU] = 0, /* cpu sensor is channel 0 */
58156+	.chn_num = 1, /* one channel for tsadc */
58157+
58158+	.tshut_mode = TSHUT_MODE_CRU, /* default TSHUT via CRU */
58159+	.tshut_polarity = TSHUT_LOW_ACTIVE, /* default TSHUT LOW ACTIVE */
58160+	.tshut_temp = 95000,
58161+
58162+	.initialize = rk_tsadcv6_initialize,
58163+	.irq_ack = rk_tsadcv3_irq_ack,
58164+	.control = rk_tsadcv2_control,
58165+	.get_temp = rk_tsadcv2_get_temp,
58166+	.set_alarm_temp = rk_tsadcv2_alarm_temp,
58167+	.set_tshut_temp = rk_tsadcv2_tshut_temp,
58168+	.set_tshut_mode = rk_tsadcv3_tshut_mode,
58169+	.get_trim_code = rk_tsadcv1_get_trim_code,
58170+	.trim_temp = rk_tsadcv1_trim_temp,
58171+
58172+	.table = {
58173+		.kNum = 2263,
58174+		.bNum = 2704,
58175+		.data_mask = TSADCV2_DATA_MASK,
58176+		.mode = ADC_INCREMENT,
58177+	},
58178+};
58179+
58180+static const struct rockchip_tsadc_chip rk1808_tsadc_data = {
58181+	.chn_id[SENSOR_CPU] = 0, /* cpu sensor is channel 0 */
58182+	.chn_num = 1, /* one channel for tsadc */
58183+
58184+	.tshut_mode = TSHUT_MODE_OTP, /* default TSHUT via GPIO to PMIC */
58185+	.tshut_polarity = TSHUT_LOW_ACTIVE, /* default TSHUT LOW ACTIVE */
58186+	.tshut_temp = 95000,
58187+
58188+	.initialize = rk_tsadcv5_initialize,
58189+	.irq_ack = rk_tsadcv3_irq_ack,
58190+	.control = rk_tsadcv3_control,
58191+	.get_temp = rk_tsadcv2_get_temp,
58192+	.set_alarm_temp = rk_tsadcv2_alarm_temp,
58193+	.set_tshut_temp = rk_tsadcv2_tshut_temp,
58194+	.set_tshut_mode = rk_tsadcv2_tshut_mode,
58195+
58196+	.table = {
58197+		.id = rk1808_code_table,
58198+		.length = ARRAY_SIZE(rk1808_code_table),
58199+		.data_mask = TSADCV2_DATA_MASK,
58200+		.mode = ADC_INCREMENT,
58201+	},
58202+};
58203+
58204 static const struct rockchip_tsadc_chip rk3228_tsadc_data = {
58205 	.chn_id[SENSOR_CPU] = 0, /* cpu sensor is channel 0 */
58206 	.chn_num = 1, /* one channel for tsadc */
58207 
58208-	.tshut_mode = TSHUT_MODE_GPIO, /* default TSHUT via GPIO give PMIC */
58209+	.tshut_mode = TSHUT_MODE_OTP, /* default TSHUT via GPIO to PMIC */
58210 	.tshut_polarity = TSHUT_LOW_ACTIVE, /* default TSHUT LOW ACTIVE */
58211 	.tshut_temp = 95000,
58212 
58213@@ -909,7 +1349,7 @@ static const struct rockchip_tsadc_chip rk3288_tsadc_data = {
58214 	.chn_id[SENSOR_GPU] = 2, /* gpu sensor is channel 2 */
58215 	.chn_num = 2, /* two channels for tsadc */
58216 
58217-	.tshut_mode = TSHUT_MODE_GPIO, /* default TSHUT via GPIO give PMIC */
58218+	.tshut_mode = TSHUT_MODE_OTP, /* default TSHUT via GPIO to PMIC */
58219 	.tshut_polarity = TSHUT_LOW_ACTIVE, /* default TSHUT LOW ACTIVE */
58220 	.tshut_temp = 95000,
58221 
58222@@ -957,7 +1397,7 @@ static const struct rockchip_tsadc_chip rk3366_tsadc_data = {
58223 	.chn_id[SENSOR_GPU] = 1, /* gpu sensor is channel 1 */
58224 	.chn_num = 2, /* two channels for tsadc */
58225 
58226-	.tshut_mode = TSHUT_MODE_GPIO, /* default TSHUT via GPIO give PMIC */
58227+	.tshut_mode = TSHUT_MODE_OTP, /* default TSHUT via GPIO to PMIC */
58228 	.tshut_polarity = TSHUT_LOW_ACTIVE, /* default TSHUT LOW ACTIVE */
58229 	.tshut_temp = 95000,
58230 
58231@@ -982,7 +1422,7 @@ static const struct rockchip_tsadc_chip rk3368_tsadc_data = {
58232 	.chn_id[SENSOR_GPU] = 1, /* gpu sensor is channel 1 */
58233 	.chn_num = 2, /* two channels for tsadc */
58234 
58235-	.tshut_mode = TSHUT_MODE_GPIO, /* default TSHUT via GPIO give PMIC */
58236+	.tshut_mode = TSHUT_MODE_OTP, /* default TSHUT via GPIO to PMIC */
58237 	.tshut_polarity = TSHUT_LOW_ACTIVE, /* default TSHUT LOW ACTIVE */
58238 	.tshut_temp = 95000,
58239 
58240@@ -1007,7 +1447,7 @@ static const struct rockchip_tsadc_chip rk3399_tsadc_data = {
58241 	.chn_id[SENSOR_GPU] = 1, /* gpu sensor is channel 1 */
58242 	.chn_num = 2, /* two channels for tsadc */
58243 
58244-	.tshut_mode = TSHUT_MODE_GPIO, /* default TSHUT via GPIO give PMIC */
58245+	.tshut_mode = TSHUT_MODE_OTP, /* default TSHUT via GPIO to PMIC */
58246 	.tshut_polarity = TSHUT_LOW_ACTIVE, /* default TSHUT LOW ACTIVE */
58247 	.tshut_temp = 95000,
58248 
58249@@ -1027,6 +1467,53 @@ static const struct rockchip_tsadc_chip rk3399_tsadc_data = {
58250 	},
58251 };
58252 
58253+static const struct rockchip_tsadc_chip rk3568_tsadc_data = {
58254+	.chn_id[SENSOR_CPU] = 0, /* cpu sensor is channel 0 */
58255+	.chn_id[SENSOR_GPU] = 1, /* gpu sensor is channel 1 */
58256+	.chn_num = 2, /* two channels for tsadc */
58257+
58258+	.tshut_mode = TSHUT_MODE_OTP, /* default TSHUT via GPIO to PMIC */
58259+	.tshut_polarity = TSHUT_LOW_ACTIVE, /* default TSHUT LOW ACTIVE */
58260+	.tshut_temp = 95000,
58261+
58262+	.initialize = rk_tsadcv7_initialize,
58263+	.irq_ack = rk_tsadcv3_irq_ack,
58264+	.control = rk_tsadcv3_control,
58265+	.get_temp = rk_tsadcv2_get_temp,
58266+	.set_alarm_temp = rk_tsadcv2_alarm_temp,
58267+	.set_tshut_temp = rk_tsadcv2_tshut_temp,
58268+	.set_tshut_mode = rk_tsadcv2_tshut_mode,
58269+
58270+	.table = {
58271+		.id = rk3568_code_table,
58272+		.length = ARRAY_SIZE(rk3568_code_table),
58273+		.data_mask = TSADCV2_DATA_MASK,
58274+		.mode = ADC_INCREMENT,
58275+	},
58276+};
58277+
58278+static const struct rockchip_tsadc_chip rk3588_tsadc_data = {
58279+	/* top, big_core0, big_core1, little_core, center, gpu, npu */
58280+	.chn_id = {0, 1, 2, 3, 4, 5, 6},
58281+	.chn_num = 7, /* seven channels for tsadc */
58282+	.tshut_mode = TSHUT_MODE_OTP, /* default TSHUT via GPIO to PMIC */
58283+	.tshut_polarity = TSHUT_LOW_ACTIVE, /* default TSHUT LOW ACTIVE */
58284+	.tshut_temp = 95000,
58285+	.initialize = rk_tsadcv8_initialize,
58286+	.irq_ack = rk_tsadcv4_irq_ack,
58287+	.control = rk_tsadcv4_control,
58288+	.get_temp = rk_tsadcv4_get_temp,
58289+	.set_alarm_temp = rk_tsadcv3_alarm_temp,
58290+	.set_tshut_temp = rk_tsadcv3_tshut_temp,
58291+	.set_tshut_mode = rk_tsadcv4_tshut_mode,
58292+	.table = {
58293+		.id = rk3588_code_table,
58294+		.length = ARRAY_SIZE(rk3588_code_table),
58295+		.data_mask = TSADCV4_DATA_MASK,
58296+		.mode = ADC_INCREMENT,
58297+	},
58298+};
58299+
58300 static const struct of_device_id of_rockchip_thermal_match[] = {
58301 	{	.compatible = "rockchip,px30-tsadc",
58302 		.data = (void *)&px30_tsadc_data,
58303@@ -1035,6 +1522,14 @@ static const struct of_device_id of_rockchip_thermal_match[] = {
58304 		.compatible = "rockchip,rv1108-tsadc",
58305 		.data = (void *)&rv1108_tsadc_data,
58306 	},
58307+	{
58308+		.compatible = "rockchip,rv1126-tsadc",
58309+		.data = (void *)&rv1126_tsadc_data,
58310+	},
58311+	{
58312+		.compatible = "rockchip,rk1808-tsadc",
58313+		.data = (void *)&rk1808_tsadc_data,
58314+	},
58315 	{
58316 		.compatible = "rockchip,rk3228-tsadc",
58317 		.data = (void *)&rk3228_tsadc_data,
58318@@ -1059,6 +1554,14 @@ static const struct of_device_id of_rockchip_thermal_match[] = {
58319 		.compatible = "rockchip,rk3399-tsadc",
58320 		.data = (void *)&rk3399_tsadc_data,
58321 	},
58322+	{
58323+		.compatible = "rockchip,rk3568-tsadc",
58324+		.data = (void *)&rk3568_tsadc_data,
58325+	},
58326+	{
58327+		.compatible = "rockchip,rk3588-tsadc",
58328+		.data = (void *)&rk3588_tsadc_data,
58329+	},
58330 	{ /* end */ },
58331 };
58332 MODULE_DEVICE_TABLE(of, of_rockchip_thermal_match);
58333@@ -1099,6 +1602,9 @@ static int rockchip_thermal_set_trips(void *_sensor, int low, int high)
58334 	dev_dbg(&thermal->pdev->dev, "%s: sensor %d: low: %d, high %d\n",
58335 		__func__, sensor->id, low, high);
58336 
58337+	if (tsadc->trim_temp)
58338+		high += tsadc->trim_temp(thermal->pdev);
58339+
58340 	return tsadc->set_alarm_temp(&tsadc->table,
58341 				     sensor->id, thermal->regs, high);
58342 }
58343@@ -1112,6 +1618,8 @@ static int rockchip_thermal_get_temp(void *_sensor, int *out_temp)
58344 
58345 	retval = tsadc->get_temp(&tsadc->table,
58346 				 sensor->id, thermal->regs, out_temp);
58347+	if (tsadc->trim_temp)
58348+		*out_temp -= tsadc->trim_temp(thermal->pdev);
58349 	dev_dbg(&thermal->pdev->dev, "sensor %d - temp: %d, retval: %d\n",
58350 		sensor->id, *out_temp, retval);
58351 
58352@@ -1123,11 +1631,52 @@ static const struct thermal_zone_of_device_ops rockchip_of_thermal_ops = {
58353 	.set_trips = rockchip_thermal_set_trips,
58354 };
58355 
58356+static void thermal_pinctrl_select_otp(struct rockchip_thermal_data *thermal)
58357+{
58358+	if (!IS_ERR(thermal->pinctrl) && !IS_ERR_OR_NULL(thermal->otp_state))
58359+		pinctrl_select_state(thermal->pinctrl,
58360+				     thermal->otp_state);
58361+}
58362+
58363+static void thermal_pinctrl_select_gpio(struct rockchip_thermal_data *thermal)
58364+{
58365+	if (!IS_ERR(thermal->pinctrl) && !IS_ERR_OR_NULL(thermal->gpio_state))
58366+		pinctrl_select_state(thermal->pinctrl,
58367+				     thermal->gpio_state);
58368+}
58369+
58370+static int rockchip_get_efuse_value(struct device_node *np, char *porp_name,
58371+				    int *value)
58372+{
58373+	struct nvmem_cell *cell;
58374+	unsigned char *buf;
58375+	size_t len;
58376+
58377+	cell = of_nvmem_cell_get(np, porp_name);
58378+	if (IS_ERR(cell))
58379+		return PTR_ERR(cell);
58380+
58381+	buf = (unsigned char *)nvmem_cell_read(cell, &len);
58382+
58383+	nvmem_cell_put(cell);
58384+
58385+	if (IS_ERR(buf))
58386+		return PTR_ERR(buf);
58387+
58388+	*value = buf[0];
58389+
58390+	kfree(buf);
58391+
58392+	return 0;
58393+}
58394+
58395 static int rockchip_configure_from_dt(struct device *dev,
58396 				      struct device_node *np,
58397 				      struct rockchip_thermal_data *thermal)
58398 {
58399+	const struct rockchip_tsadc_chip *tsadc = thermal->chip;
58400 	u32 shut_temp, tshut_mode, tshut_polarity;
58401+	int trim_l = 0, trim_h = 0, trim_base = 0;
58402 
58403 	if (of_property_read_u32(np, "rockchip,hw-tshut-temp", &shut_temp)) {
58404 		dev_warn(dev,
58405@@ -1146,7 +1695,7 @@ static int rockchip_configure_from_dt(struct device *dev,
58406 	if (of_property_read_u32(np, "rockchip,hw-tshut-mode", &tshut_mode)) {
58407 		dev_warn(dev,
58408 			 "Missing tshut mode property, using default (%s)\n",
58409-			 thermal->chip->tshut_mode == TSHUT_MODE_GPIO ?
58410+			 thermal->chip->tshut_mode == TSHUT_MODE_OTP ?
58411 				"gpio" : "cru");
58412 		thermal->tshut_mode = thermal->chip->tshut_mode;
58413 	} else {
58414@@ -1183,6 +1732,29 @@ static int rockchip_configure_from_dt(struct device *dev,
58415 	if (IS_ERR(thermal->grf))
58416 		dev_warn(dev, "Missing rockchip,grf property\n");
58417 
58418+	if (tsadc->trim_temp && tsadc->get_trim_code) {
58419+		/* Don't treat a missing value as an error here,
58420+		 * since some SoCs don't need these properties.
58421+		 * rv1126 needs the tsadc trim.
58422+		 */
58423+		if (rockchip_get_efuse_value(np, "trim_l", &trim_l))
58424+			dev_warn(dev, "Missing trim_l property\n");
58425+		if (rockchip_get_efuse_value(np, "trim_h", &trim_h))
58426+			dev_warn(dev, "Missing trim_h property\n");
58427+		if (rockchip_get_efuse_value(np, "trim_base", &trim_base))
58428+			dev_warn(dev, "Missing trim_base property\n");
58429+
58430+		if (trim_l && trim_h && trim_base) {
58431+			thermal->trim = tsadc->get_trim_code(thermal->pdev,
58432+							     (trim_h << 8) |
58433+							     trim_l,
58434+							     trim_base);
58435+			dev_info(dev, "tsadc trimmed value = %d\n",
58436+				 thermal->trim);
58437+			thermal->tshut_temp += tsadc->trim_temp(thermal->pdev);
58438+		}
58439+	}
58440+
58441 	return 0;
58442 }
58443 
58444@@ -1195,7 +1767,8 @@ rockchip_thermal_register_sensor(struct platform_device *pdev,
58445 	const struct rockchip_tsadc_chip *tsadc = thermal->chip;
58446 	int error;
58447 
58448-	tsadc->set_tshut_mode(id, thermal->regs, thermal->tshut_mode);
58449+	tsadc->set_tshut_mode(thermal->grf, id, thermal->regs,
58450+			      thermal->tshut_mode);
58451 
58452 	error = tsadc->set_tshut_temp(&tsadc->table, id, thermal->regs,
58453 			      thermal->tshut_temp);
58454@@ -1228,6 +1801,43 @@ static void rockchip_thermal_reset_controller(struct reset_control *reset)
58455 	reset_control_deassert(reset);
58456 }
58457 
58458+static void rockchip_dump_temperature(struct rockchip_thermal_data *thermal)
58459+{
58460+	struct platform_device *pdev;
58461+	int i;
58462+
58463+	if (!thermal)
58464+		return;
58465+
58466+	pdev = thermal->pdev;
58467+
58468+	for (i = 0; i < thermal->chip->chn_num; i++) {
58469+		struct rockchip_thermal_sensor *sensor = &thermal->sensors[i];
58470+		struct thermal_zone_device *tz = sensor->tzd;
58471+
58472+		if (tz->temperature != THERMAL_TEMP_INVALID)
58473+			dev_warn(&pdev->dev, "channel %d: temperature (%d C)\n",
58474+				 i, tz->temperature / 1000);
58475+	}
58476+
58477+	if (thermal->regs) {
58478+		pr_warn("THERMAL REGS:\n");
58479+		print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET,
58480+			       32, 4, thermal->regs, 0x88, false);
58481+	}
58482+}
58483+
58484+static int rockchip_thermal_panic(struct notifier_block *this,
58485+				  unsigned long ev, void *ptr)
58486+{
58487+	struct rockchip_thermal_data *thermal;
58488+
58489+	thermal = container_of(this, struct rockchip_thermal_data, panic_nb);
58490+	rockchip_dump_temperature(thermal);
58491+
58492+	return NOTIFY_DONE;
58493+}
58494+
58495 static int rockchip_thermal_probe(struct platform_device *pdev)
58496 {
58497 	struct device_node *np = pdev->dev.of_node;
58498@@ -1262,40 +1872,26 @@ static int rockchip_thermal_probe(struct platform_device *pdev)
58499 	if (IS_ERR(thermal->regs))
58500 		return PTR_ERR(thermal->regs);
58501 
58502-	thermal->reset = devm_reset_control_get(&pdev->dev, "tsadc-apb");
58503+	thermal->reset = devm_reset_control_array_get(&pdev->dev, false, false);
58504 	if (IS_ERR(thermal->reset)) {
58505-		error = PTR_ERR(thermal->reset);
58506-		dev_err(&pdev->dev, "failed to get tsadc reset: %d\n", error);
58507-		return error;
58508-	}
58509-
58510-	thermal->clk = devm_clk_get(&pdev->dev, "tsadc");
58511-	if (IS_ERR(thermal->clk)) {
58512-		error = PTR_ERR(thermal->clk);
58513-		dev_err(&pdev->dev, "failed to get tsadc clock: %d\n", error);
58514-		return error;
58515+		if (PTR_ERR(thermal->reset) != -EPROBE_DEFER)
58516+			dev_err(&pdev->dev, "failed to get tsadc reset lines\n");
58517+		return PTR_ERR(thermal->reset);
58518 	}
58519 
58520-	thermal->pclk = devm_clk_get(&pdev->dev, "apb_pclk");
58521-	if (IS_ERR(thermal->pclk)) {
58522-		error = PTR_ERR(thermal->pclk);
58523-		dev_err(&pdev->dev, "failed to get apb_pclk clock: %d\n",
58524-			error);
58525-		return error;
58526-	}
58527+	thermal->num_clks = devm_clk_bulk_get_all(&pdev->dev, &thermal->clks);
58528+	if (thermal->num_clks < 1)
58529+		return -ENODEV;
58530 
58531-	error = clk_prepare_enable(thermal->clk);
58532+	error = clk_bulk_prepare_enable(thermal->num_clks, thermal->clks);
58533 	if (error) {
58534-		dev_err(&pdev->dev, "failed to enable converter clock: %d\n",
58535+		dev_err(&pdev->dev, "failed to prepare and enable tsadc bulk clks: %d\n",
58536 			error);
58537 		return error;
58538 	}
58539+	platform_set_drvdata(pdev, thermal);
58540 
58541-	error = clk_prepare_enable(thermal->pclk);
58542-	if (error) {
58543-		dev_err(&pdev->dev, "failed to enable pclk: %d\n", error);
58544-		goto err_disable_clk;
58545-	}
58546+	thermal->chip->control(thermal->regs, false);
58547 
58548 	rockchip_thermal_reset_controller(thermal->reset);
58549 
58550@@ -1303,12 +1899,30 @@ static int rockchip_thermal_probe(struct platform_device *pdev)
58551 	if (error) {
58552 		dev_err(&pdev->dev, "failed to parse device tree data: %d\n",
58553 			error);
58554-		goto err_disable_pclk;
58555+		goto err_disable_clocks;
58556 	}
58557 
58558 	thermal->chip->initialize(thermal->grf, thermal->regs,
58559 				  thermal->tshut_polarity);
58560 
58561+	if (thermal->tshut_mode == TSHUT_MODE_OTP) {
58562+		thermal->pinctrl = devm_pinctrl_get(&pdev->dev);
58563+		if (IS_ERR(thermal->pinctrl))
58564+			dev_err(&pdev->dev, "failed to find thermal pinctrl\n");
58565+
58566+		thermal->gpio_state = pinctrl_lookup_state(thermal->pinctrl,
58567+							   "gpio");
58568+		if (IS_ERR_OR_NULL(thermal->gpio_state))
58569+			dev_err(&pdev->dev, "failed to find thermal gpio state\n");
58570+
58571+		thermal->otp_state = pinctrl_lookup_state(thermal->pinctrl,
58572+							  "otpout");
58573+		if (IS_ERR_OR_NULL(thermal->otp_state))
58574+			dev_err(&pdev->dev, "failed to find thermal otpout state\n");
58575+
58576+		thermal_pinctrl_select_otp(thermal);
58577+	}
58578+
58579 	for (i = 0; i < thermal->chip->chn_num; i++) {
58580 		error = rockchip_thermal_register_sensor(pdev, thermal,
58581 						&thermal->sensors[i],
58582@@ -1317,7 +1931,7 @@ static int rockchip_thermal_probe(struct platform_device *pdev)
58583 			dev_err(&pdev->dev,
58584 				"failed to register sensor[%d] : error = %d\n",
58585 				i, error);
58586-			goto err_disable_pclk;
58587+			goto err_disable_clocks;
58588 		}
58589 	}
58590 
58591@@ -1328,7 +1942,7 @@ static int rockchip_thermal_probe(struct platform_device *pdev)
58592 	if (error) {
58593 		dev_err(&pdev->dev,
58594 			"failed to request tsadc irq: %d\n", error);
58595-		goto err_disable_pclk;
58596+		goto err_disable_clocks;
58597 	}
58598 
58599 	thermal->chip->control(thermal->regs, true);
58600@@ -1343,14 +1957,16 @@ static int rockchip_thermal_probe(struct platform_device *pdev)
58601 				 i, error);
58602 	}
58603 
58604-	platform_set_drvdata(pdev, thermal);
58605+	thermal->panic_nb.notifier_call = rockchip_thermal_panic;
58606+	atomic_notifier_chain_register(&panic_notifier_list,
58607+				       &thermal->panic_nb);
58608+
58609+	dev_info(&pdev->dev, "tsadc probed successfully\n");
58610 
58611 	return 0;
58612 
58613-err_disable_pclk:
58614-	clk_disable_unprepare(thermal->pclk);
58615-err_disable_clk:
58616-	clk_disable_unprepare(thermal->clk);
58617+err_disable_clocks:
58618+	clk_bulk_disable_unprepare(thermal->num_clks, thermal->clks);
58619 
58620 	return error;
58621 }
58622@@ -1369,12 +1985,28 @@ static int rockchip_thermal_remove(struct platform_device *pdev)
58623 
58624 	thermal->chip->control(thermal->regs, false);
58625 
58626-	clk_disable_unprepare(thermal->pclk);
58627-	clk_disable_unprepare(thermal->clk);
58628+	clk_bulk_disable_unprepare(thermal->num_clks, thermal->clks);
58629 
58630 	return 0;
58631 }
58632 
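+/*
+ * On shutdown, force every channel's thermal-shutdown mode to CRU and, when
+ * OTP mode was in use, switch the tshut pin back to its GPIO pinctrl state.
+ */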
58633+static void rockchip_thermal_shutdown(struct platform_device *pdev)
58634+{
58635+	struct rockchip_thermal_data *thermal = platform_get_drvdata(pdev);
58636+	int i;
58637+
58638+	for (i = 0; i < thermal->chip->chn_num; i++) {
58639+		int id = thermal->sensors[i].id;
58640+
58641+		if (thermal->tshut_mode != TSHUT_MODE_CRU)
58642+			thermal->chip->set_tshut_mode(thermal->grf, id,
58643+						      thermal->regs,
58644+						      TSHUT_MODE_CRU);
58645+	}
58646+	if (thermal->tshut_mode == TSHUT_MODE_OTP)
58647+		thermal_pinctrl_select_gpio(thermal);
58648+}
58649+
58650 static int __maybe_unused rockchip_thermal_suspend(struct device *dev)
58651 {
58652 	struct rockchip_thermal_data *thermal = dev_get_drvdata(dev);
58653@@ -1385,10 +2017,10 @@ static int __maybe_unused rockchip_thermal_suspend(struct device *dev)
58654 
58655 	thermal->chip->control(thermal->regs, false);
58656 
58657-	clk_disable(thermal->pclk);
58658-	clk_disable(thermal->clk);
58659+	clk_bulk_disable(thermal->num_clks, thermal->clks);
58660 
58661-	pinctrl_pm_select_sleep_state(dev);
58662+	if (thermal->tshut_mode == TSHUT_MODE_OTP)
58663+		thermal_pinctrl_select_gpio(thermal);
58664 
58665 	return 0;
58666 }
58667@@ -1399,13 +2031,10 @@ static int __maybe_unused rockchip_thermal_resume(struct device *dev)
58668 	int i;
58669 	int error;
58670 
58671-	error = clk_enable(thermal->clk);
58672-	if (error)
58673-		return error;
58674-
58675-	error = clk_enable(thermal->pclk);
58676+	error = clk_bulk_enable(thermal->num_clks, thermal->clks);
58677 	if (error) {
58678-		clk_disable(thermal->clk);
58679+		dev_err(dev, "failed to enable tsadc bulk clks: %d\n",
58680+			error);
58681 		return error;
58682 	}
58683 
58684@@ -1417,7 +2046,7 @@ static int __maybe_unused rockchip_thermal_resume(struct device *dev)
58685 	for (i = 0; i < thermal->chip->chn_num; i++) {
58686 		int id = thermal->sensors[i].id;
58687 
58688-		thermal->chip->set_tshut_mode(id, thermal->regs,
58689+		thermal->chip->set_tshut_mode(thermal->grf, id, thermal->regs,
58690 					      thermal->tshut_mode);
58691 
58692 		error = thermal->chip->set_tshut_temp(&thermal->chip->table,
58693@@ -1433,7 +2062,8 @@ static int __maybe_unused rockchip_thermal_resume(struct device *dev)
58694 	for (i = 0; i < thermal->chip->chn_num; i++)
58695 		rockchip_thermal_toggle_sensor(&thermal->sensors[i], true);
58696 
58697-	pinctrl_pm_select_default_state(dev);
58698+	if (thermal->tshut_mode == TSHUT_MODE_OTP)
58699+		thermal_pinctrl_select_otp(thermal);
58700 
58701 	return 0;
58702 }
58703@@ -1449,6 +2079,7 @@ static struct platform_driver rockchip_thermal_driver = {
58704 	},
58705 	.probe = rockchip_thermal_probe,
58706 	.remove = rockchip_thermal_remove,
58707+	.shutdown = rockchip_thermal_shutdown,
58708 };
58709 
58710 module_platform_driver(rockchip_thermal_driver);
58711diff --git a/drivers/tty/serial/8250/8250.h b/drivers/tty/serial/8250/8250.h
58712index b6dc9003b..b7b690972 100644
58713--- a/drivers/tty/serial/8250/8250.h
58714+++ b/drivers/tty/serial/8250/8250.h
58715@@ -48,6 +48,9 @@ struct uart_8250_dma {
58716 	unsigned char		tx_running;
58717 	unsigned char		tx_err;
58718 	unsigned char		rx_running;
58719+#if defined(CONFIG_ARCH_ROCKCHIP) && defined(CONFIG_NO_GKI)
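+	/* consumer position inside the cyclic RX DMA buffer */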
58720+	size_t			rx_index;
58721+#endif
58722 };
58723 
58724 struct old_serial_port {
58725@@ -158,6 +161,9 @@ static inline bool serial8250_set_THRI(struct uart_8250_port *up)
58726 	if (up->ier & UART_IER_THRI)
58727 		return false;
58728 	up->ier |= UART_IER_THRI;
58729+#if defined(CONFIG_ARCH_ROCKCHIP) && defined(CONFIG_NO_GKI)
58730+	up->ier |= UART_IER_PTIME;
58731+#endif
58732 	serial_out(up, UART_IER, up->ier);
58733 	return true;
58734 }
58735@@ -167,6 +173,9 @@ static inline bool serial8250_clear_THRI(struct uart_8250_port *up)
58736 	if (!(up->ier & UART_IER_THRI))
58737 		return false;
58738 	up->ier &= ~UART_IER_THRI;
58739+#if defined(CONFIG_ARCH_ROCKCHIP) && defined(CONFIG_NO_GKI)
58740+	up->ier &= ~UART_IER_PTIME;
58741+#endif
58742 	serial_out(up, UART_IER, up->ier);
58743 	return true;
58744 }
58745@@ -327,6 +336,9 @@ static inline int is_omap1510_8250(struct uart_8250_port *pt)
58746 #ifdef CONFIG_SERIAL_8250_DMA
58747 extern int serial8250_tx_dma(struct uart_8250_port *);
58748 extern int serial8250_rx_dma(struct uart_8250_port *);
58749+#if defined(CONFIG_ARCH_ROCKCHIP) && defined(CONFIG_NO_GKI)
58750+extern int serial8250_start_rx_dma(struct uart_8250_port *);
58751+#endif
58752 extern void serial8250_rx_dma_flush(struct uart_8250_port *);
58753 extern int serial8250_request_dma(struct uart_8250_port *);
58754 extern void serial8250_release_dma(struct uart_8250_port *);
58755@@ -335,10 +347,12 @@ static inline int serial8250_tx_dma(struct uart_8250_port *p)
58756 {
58757 	return -1;
58758 }
58759+#if defined(CONFIG_ARCH_ROCKCHIP) && defined(CONFIG_NO_GKI)
58760 static inline int serial8250_rx_dma(struct uart_8250_port *p)
58761 {
58762 	return -1;
58763 }
58764+#endif
58765 static inline void serial8250_rx_dma_flush(struct uart_8250_port *p) { }
58766 static inline int serial8250_request_dma(struct uart_8250_port *p)
58767 {
58768diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
58769index 0a7e9491b..760886a2c 100644
58770--- a/drivers/tty/serial/8250/8250_core.c
58771+++ b/drivers/tty/serial/8250/8250_core.c
58772@@ -565,6 +565,7 @@ static void __init serial8250_isa_init_ports(void)
58773 static void __init
58774 serial8250_register_ports(struct uart_driver *drv, struct device *dev)
58775 {
58776+#ifndef CONFIG_ARCH_ROCKCHIP
58777 	int i;
58778 
58779 	for (i = 0; i < nr_uarts; i++) {
58780@@ -584,6 +585,7 @@ serial8250_register_ports(struct uart_driver *drv, struct device *dev)
58781 		serial8250_apply_quirks(up);
58782 		uart_add_one_port(drv, &up->port);
58783 	}
58784+#endif
58785 }
58786 
58787 #ifdef CONFIG_SERIAL_8250_CONSOLE
58788@@ -1031,6 +1033,9 @@ int serial8250_register_8250_port(struct uart_8250_port *up)
58789 		uart->rs485_stop_tx	= up->rs485_stop_tx;
58790 		uart->dma		= up->dma;
58791 
58792+#ifdef CONFIG_ARCH_ROCKCHIP
58793+		uart->port.line		= up->port.line;
58794+#endif
58795 		/* Take tx_loadsz from fifosize if it wasn't set separately */
58796 		if (uart->port.fifosize && !uart->tx_loadsz)
58797 			uart->tx_loadsz = uart->port.fifosize;
58798@@ -1254,7 +1259,11 @@ static void __exit serial8250_exit(void)
58799 #endif
58800 }
58801 
58802+#ifdef CONFIG_ROCKCHIP_THUNDER_BOOT
58803+rootfs_initcall(serial8250_init);
58804+#else
58805 module_init(serial8250_init);
58806+#endif
58807 module_exit(serial8250_exit);
58808 
58809 MODULE_LICENSE("GPL");
58810diff --git a/drivers/tty/serial/8250/8250_dma.c b/drivers/tty/serial/8250/8250_dma.c
58811index 33ce4b218..e18bbdd6d 100644
58812--- a/drivers/tty/serial/8250/8250_dma.c
58813+++ b/drivers/tty/serial/8250/8250_dma.c
58814@@ -11,6 +11,12 @@
58815 
58816 #include "8250.h"
58817 
58818+#if defined(CONFIG_ARCH_ROCKCHIP) && defined(CONFIG_NO_GKI)
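+/*
+ * UART_RFL_16550A mirrors DW_UART_RFL in 8250_dw.c (the DesignWare receive
+ * FIFO level register); it is used here to drain leftover bytes by PIO.
+ */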
58819+#define MAX_TX_BYTES		64
58820+#define MAX_FIFO_SIZE		64
58821+#define UART_RFL_16550A		0x21
58822+#endif
58823+
58824 static void __dma_tx_complete(void *param)
58825 {
58826 	struct uart_8250_port	*p = param;
58827@@ -40,6 +46,39 @@ static void __dma_tx_complete(void *param)
58828 	spin_unlock_irqrestore(&p->port.lock, flags);
58829 }
58830 
58831+#if defined(CONFIG_ARCH_ROCKCHIP) && defined(CONFIG_NO_GKI)
58832+
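+/*
+ * Harvest data from the cyclic RX buffer: derive the DMA engine's current
+ * write position from the transfer residue, push only the bytes written since
+ * the previous pass to the tty layer, and handle ring-buffer wrap-around.
+ */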
58833+static void __dma_rx_complete(void *param)
58834+{
58835+	struct uart_8250_port	*p = param;
58836+	struct uart_8250_dma	*dma = p->dma;
58837+	struct tty_port		*tty_port = &p->port.state->port;
58838+	struct dma_tx_state	state;
58839+	unsigned int		count = 0, cur_index = 0;
58840+
58841+	dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state);
58842+	cur_index = dma->rx_size - state.residue;
58843+
58844+	if (cur_index == dma->rx_index)
58845+		return;
58846+	else if (cur_index > dma->rx_index)
58847+		count = cur_index - dma->rx_index;
58848+	else
58849+		count = dma->rx_size - dma->rx_index;
58850+
58851+	tty_insert_flip_string(tty_port, dma->rx_buf + dma->rx_index, count);
58852+
58853+	if (cur_index < dma->rx_index) {
58854+		tty_insert_flip_string(tty_port, dma->rx_buf, cur_index);
58855+		count += cur_index;
58856+	}
58857+
58858+	p->port.icount.rx += count;
58859+	dma->rx_index = cur_index;
58860+}
58861+
58862+#else
58863+
58864 static void __dma_rx_complete(void *param)
58865 {
58866 	struct uart_8250_port	*p = param;
58867@@ -79,6 +118,8 @@ static void dma_rx_complete(void *param)
58868 	spin_unlock_irqrestore(&p->port.lock, flags);
58869 }
58870 
58871+#endif
58872+
58873 int serial8250_tx_dma(struct uart_8250_port *p)
58874 {
58875 	struct uart_8250_dma		*dma = p->dma;
58876@@ -104,6 +145,12 @@ int serial8250_tx_dma(struct uart_8250_port *p)
58877 		return 0;
58878 	}
58879 
58880+#if defined(CONFIG_ARCH_ROCKCHIP) && defined(CONFIG_NO_GKI)
58881+	if (dma->tx_size < MAX_TX_BYTES) {
58882+		ret = -EBUSY;
58883+		goto err;
58884+	}
58885+#endif
58886 	dma->tx_size = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
58887 
58888 	desc = dmaengine_prep_slave_single(dma->txchan,
58889@@ -135,6 +182,64 @@ int serial8250_tx_dma(struct uart_8250_port *p)
58890 	return ret;
58891 }
58892 
58893+#if defined(CONFIG_ARCH_ROCKCHIP) && defined(CONFIG_NO_GKI)
58894+
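+/*
+ * Called on the RX-timeout interrupt: drain any bytes still sitting in the
+ * UART FIFO by PIO (FIFO level read from UART_RFL_16550A), flush what the
+ * cyclic DMA has captured via __dma_rx_complete(), and push everything to
+ * the tty layer.
+ */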
58895+int serial8250_rx_dma(struct uart_8250_port *p)
58896+{
58897+	unsigned int rfl, i = 0, fcr = 0, cur_index = 0;
58898+	unsigned char buf[MAX_FIFO_SIZE];
58899+	struct uart_port	*port = &p->port;
58900+	struct tty_port		*tty_port = &p->port.state->port;
58901+	struct dma_tx_state	state;
58902+	struct uart_8250_dma	*dma = p->dma;
58903+
58904+	fcr = UART_FCR_ENABLE_FIFO | UART_FCR_T_TRIG_10 | UART_FCR_R_TRIG_11;
58905+	serial_port_out(port, UART_FCR, fcr);
58906+
58907+	do {
58908+		dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state);
58909+		cur_index = dma->rx_size - state.residue;
58910+	} while (cur_index % dma->rxconf.src_maxburst);
58911+
58912+	rfl = serial_port_in(port, UART_RFL_16550A);
58913+	while (i < rfl)
58914+		buf[i++] = serial_port_in(port, UART_RX);
58915+
58916+	__dma_rx_complete(p);
58917+
58918+	tty_insert_flip_string(tty_port, buf, i);
58919+	p->port.icount.rx += i;
58920+	tty_flip_buffer_push(tty_port);
58921+
58922+	if (fcr)
58923+		serial_port_out(port, UART_FCR, p->fcr);
58924+	return 0;
58925+}
58926+
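+/*
+ * Arm a single cyclic descriptor covering the whole RX buffer with no
+ * completion callback; received data is collected from the interrupt path
+ * (serial8250_rx_dma) instead.
+ */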
58927+int serial8250_start_rx_dma(struct uart_8250_port *p)
58928+{
58929+	struct uart_8250_dma		*dma = p->dma;
58930+	struct dma_async_tx_descriptor	*desc;
58931+
58932+	desc = dmaengine_prep_dma_cyclic(dma->rxchan, dma->rx_addr,
58933+					 dma->rx_size, dma->rx_size,
58934+					 DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT |
58935+					 DMA_CTRL_ACK);
58936+	if (!desc)
58937+		return -EBUSY;
58938+
58939+	dma->rx_running = 1;
58940+	desc->callback = NULL;
58941+	desc->callback_param = NULL;
58942+
58943+	dma->rx_cookie = dmaengine_submit(desc);
58944+	dma_async_issue_pending(dma->rxchan);
58945+	dma->rx_index = 0;
58946+	return 0;
58947+}
58948+
58949+#else
58950+
58951 int serial8250_rx_dma(struct uart_8250_port *p)
58952 {
58953 	struct uart_8250_dma		*dma = p->dma;
58954@@ -160,6 +265,8 @@ int serial8250_rx_dma(struct uart_8250_port *p)
58955 	return 0;
58956 }
58957 
58958+#endif
58959+
58960 void serial8250_rx_dma_flush(struct uart_8250_port *p)
58961 {
58962 	struct uart_8250_dma *dma = p->dma;
58963@@ -187,11 +294,19 @@ int serial8250_request_dma(struct uart_8250_port *p)
58964 	dma->rxconf.direction		= DMA_DEV_TO_MEM;
58965 	dma->rxconf.src_addr_width	= DMA_SLAVE_BUSWIDTH_1_BYTE;
58966 	dma->rxconf.src_addr		= rx_dma_addr + UART_RX;
58967-
58968+#if defined(CONFIG_ARCH_ROCKCHIP) && defined(CONFIG_NO_GKI)
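+	/* Burst a quarter of the RX FIFO per DMA request, capped at 16 transfers */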
58969+	if ((p->port.fifosize / 4) < 16)
58970+		dma->rxconf.src_maxburst = p->port.fifosize / 4;
58971+	else
58972+		dma->rxconf.src_maxburst = 16;
58973+#endif
58974 	dma->txconf.direction		= DMA_MEM_TO_DEV;
58975 	dma->txconf.dst_addr_width	= DMA_SLAVE_BUSWIDTH_1_BYTE;
58976 	dma->txconf.dst_addr		= tx_dma_addr + UART_TX;
58977 
58978+#if defined(CONFIG_ARCH_ROCKCHIP) && defined(CONFIG_NO_GKI)
58979+	dma->txconf.dst_maxburst	= 16;
58980+#endif
58981 	dma_cap_zero(mask);
58982 	dma_cap_set(DMA_SLAVE, mask);
58983 
58984@@ -214,50 +329,51 @@ int serial8250_request_dma(struct uart_8250_port *p)
58985 
58986 	dmaengine_slave_config(dma->rxchan, &dma->rxconf);
58987 
58988-	/* Get a channel for TX */
58989-	dma->txchan = dma_request_slave_channel_compat(mask,
58990-						       dma->fn, dma->tx_param,
58991-						       p->port.dev, "tx");
58992-	if (!dma->txchan) {
58993-		ret = -ENODEV;
58994-		goto release_rx;
58995-	}
58996-
58997-	/* 8250 tx dma requires dmaengine driver to support terminate */
58998-	ret = dma_get_slave_caps(dma->txchan, &caps);
58999-	if (ret)
59000-		goto err;
59001-	if (!caps.cmd_terminate) {
59002-		ret = -EINVAL;
59003-		goto err;
59004-	}
59005-
59006-	dmaengine_slave_config(dma->txchan, &dma->txconf);
59007-
59008 	/* RX buffer */
59009+#if defined(CONFIG_ARCH_ROCKCHIP) && defined(CONFIG_NO_GKI)
59010+	if (!dma->rx_size)
59011+		dma->rx_size = PAGE_SIZE * 2;
59012+#else
59013 	if (!dma->rx_size)
59014 		dma->rx_size = PAGE_SIZE;
59015+#endif
59016 
59017 	dma->rx_buf = dma_alloc_coherent(dma->rxchan->device->dev, dma->rx_size,
59018 					&dma->rx_addr, GFP_KERNEL);
59019 	if (!dma->rx_buf) {
59020 		ret = -ENOMEM;
59021-		goto err;
59022+		goto release_rx;
59023 	}
59024 
59025-	/* TX buffer */
59026-	dma->tx_addr = dma_map_single(dma->txchan->device->dev,
59027-					p->port.state->xmit.buf,
59028-					UART_XMIT_SIZE,
59029-					DMA_TO_DEVICE);
59030-	if (dma_mapping_error(dma->txchan->device->dev, dma->tx_addr)) {
59031-		dma_free_coherent(dma->rxchan->device->dev, dma->rx_size,
59032-				  dma->rx_buf, dma->rx_addr);
59033-		ret = -ENOMEM;
59034-		goto err;
59035-	}
59036+	/* Get a channel for TX */
59037+	dma->txchan = dma_request_slave_channel_compat(mask,
59038+						       dma->fn, dma->tx_param,
59039+						       p->port.dev, "tx");
59040+	if (dma->txchan) {
59041+		dmaengine_slave_config(dma->txchan, &dma->txconf);
59042+
59043+		/* TX buffer */
59044+		dma->tx_addr = dma_map_single(dma->txchan->device->dev,
59045+						p->port.state->xmit.buf,
59046+						UART_XMIT_SIZE,
59047+						DMA_TO_DEVICE);
59048+		if (dma_mapping_error(dma->txchan->device->dev, dma->tx_addr)) {
59049+			dma_free_coherent(dma->rxchan->device->dev,
59050+					  dma->rx_size, dma->rx_buf,
59051+					  dma->rx_addr);
59052+			dma_release_channel(dma->txchan);
59053+			dma->txchan = NULL;
59054+		}
59055 
59056-	dev_dbg_ratelimited(p->port.dev, "got both dma channels\n");
59057+		dev_info_ratelimited(p->port.dev, "got rx and tx dma channels\n");
59058+	} else {
59059+		dev_info_ratelimited(p->port.dev, "got rx dma channel only\n");
59060+	}
59061+
59062+#if defined(CONFIG_ARCH_ROCKCHIP) && defined(CONFIG_NO_GKI)
59063+	/* start DMA for RX */
59064+	serial8250_start_rx_dma(p);
59065+#endif
59066 
59067 	return 0;
59068 err:
59069@@ -282,13 +398,17 @@ void serial8250_release_dma(struct uart_8250_port *p)
59070 	dma_release_channel(dma->rxchan);
59071 	dma->rxchan = NULL;
59072 
59073-	/* Release TX resources */
59074-	dmaengine_terminate_sync(dma->txchan);
59075-	dma_unmap_single(dma->txchan->device->dev, dma->tx_addr,
59076-			 UART_XMIT_SIZE, DMA_TO_DEVICE);
59077-	dma_release_channel(dma->txchan);
59078-	dma->txchan = NULL;
59079-	dma->tx_running = 0;
59080+#if defined(CONFIG_ARCH_ROCKCHIP) && defined(CONFIG_NO_GKI)
59081+	dma->rx_running = 0;
59082+#endif
59083+	if (dma->txchan) {
59084+		dmaengine_terminate_sync(dma->txchan);
59085+		dma_unmap_single(dma->txchan->device->dev, dma->tx_addr,
59086+				 UART_XMIT_SIZE, DMA_TO_DEVICE);
59087+		dma_release_channel(dma->txchan);
59088+		dma->txchan = NULL;
59089+		dma->tx_running = 0;
59090+	}
59091 
59092 	dev_dbg_ratelimited(p->port.dev, "dma channels released\n");
59093 }
59094diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
59095index ace221afe..5968b7714 100644
59096--- a/drivers/tty/serial/8250/8250_dw.c
59097+++ b/drivers/tty/serial/8250/8250_dw.c
59098@@ -29,10 +29,15 @@
59099 
59100 #include <asm/byteorder.h>
59101 
59102+#ifdef MODULE
59103+#include "8250_dwlib.c"
59104+#else
59105 #include "8250_dwlib.h"
59106+#endif
59107 
59108 /* Offsets for the DesignWare specific registers */
59109 #define DW_UART_USR	0x1f /* UART Status Register */
59110+#define DW_UART_RFL	0x21 /* UART Receive Fifo Level Register */
59111 
59112 /* DesignWare specific register fields */
59113 #define DW_UART_MCR_SIRE		BIT(6)
59114@@ -49,6 +54,11 @@ struct dw8250_data {
59115 	struct work_struct	clk_work;
59116 	struct reset_control	*rst;
59117 
59118+#ifdef CONFIG_ARCH_ROCKCHIP
59119+	int			irq;
59120+	int			irq_wake;
59121+	int			enable_wakeup;
59122+#endif
59123 	unsigned int		skip_autocfg:1;
59124 	unsigned int		uart_16550_compatible:1;
59125 };
59126@@ -238,10 +248,9 @@ static unsigned int dw8250_serial_in32be(struct uart_port *p, int offset)
59127 
59128 static int dw8250_handle_irq(struct uart_port *p)
59129 {
59130-	struct uart_8250_port *up = up_to_u8250p(p);
59131 	struct dw8250_data *d = to_dw8250_data(p->private_data);
59132 	unsigned int iir = p->serial_in(p, UART_IIR);
59133-	unsigned int status;
59134+	unsigned int status, usr, rfl;
59135 	unsigned long flags;
59136 
59137 	/*
59138@@ -250,15 +259,14 @@ static int dw8250_handle_irq(struct uart_port *p)
59139 	 * data available.  If we see such a case then we'll do a bogus
59140 	 * read.  If we don't do this then the "RX TIMEOUT" interrupt will
59141 	 * fire forever.
59142-	 *
59143-	 * This problem has only been observed so far when not in DMA mode
59144-	 * so we limit the workaround only to non-DMA mode.
59145 	 */
59146-	if (!up->dma && ((iir & 0x3f) == UART_IIR_RX_TIMEOUT)) {
59147+	if ((iir & 0x3f) == UART_IIR_RX_TIMEOUT) {
59148 		spin_lock_irqsave(&p->lock, flags);
59149+		usr = p->serial_in(p, d->usr_reg);
59150 		status = p->serial_in(p, UART_LSR);
59151 
59152-		if (!(status & (UART_LSR_DR | UART_LSR_BI)))
59153+		rfl = p->serial_in(p, DW_UART_RFL);
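+		/*
+		 * Only do the dummy read when LSR shows neither data ready nor
+		 * a break, the UART is idle (USR busy bit clear) and the RX
+		 * FIFO level is zero.
+		 */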
59154+		if (!(status & (UART_LSR_DR | UART_LSR_BI)) && !(usr & 0x1) && (rfl == 0))
59155 			(void) p->serial_in(p, UART_RX);
59156 
59157 		spin_unlock_irqrestore(&p->lock, flags);
59158@@ -332,12 +340,49 @@ dw8250_do_pm(struct uart_port *port, unsigned int state, unsigned int old)
59159 static void dw8250_set_termios(struct uart_port *p, struct ktermios *termios,
59160 			       struct ktermios *old)
59161 {
59162+#ifndef CONFIG_ARCH_ROCKCHIP
59163 	unsigned long newrate = tty_termios_baud_rate(termios) * 16;
59164+#endif
59165 	struct dw8250_data *d = to_dw8250_data(p->private_data);
59166 	long rate;
59167+#ifdef CONFIG_ARCH_ROCKCHIP
59168+	unsigned int baud = tty_termios_baud_rate(termios);
59169+	unsigned int rate_temp, diff;
59170+#endif
59171 	int ret;
59172 
59173 	clk_disable_unprepare(d->clk);
59174+#ifdef CONFIG_ARCH_ROCKCHIP
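+	/*
+	 * Derive the UART clock from the requested baud rate: keep the common
+	 * 24 MHz clock for rates up to 115200, otherwise ask for a 16x
+	 * (32x for 230400 and 1152000) oversampling clock.
+	 */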
59175+	if (baud <= 115200)
59176+		rate = 24000000;
59177+	else if (baud == 230400)
59178+		rate = baud * 16 * 2;
59179+	else if (baud == 1152000)
59180+		rate = baud * 16 * 2;
59181+	else
59182+		rate = baud * 16;
59183+
59184+	ret = clk_set_rate(d->clk, rate);
59185+	rate_temp = clk_get_rate(d->clk);
59186+	diff = rate * 20 / 1000;
59187+	/*
59188+	 * If rate_temp is not equal to rate, fractional frequency division
59189+	 * has failed; fall back to integer frequency division and keep the
59190+	 * baud rate error within +/-2%.
59191+	 */
59192+	if ((rate_temp < rate) && ((rate - rate_temp) > diff)) {
59193+		ret = clk_set_rate(d->clk, rate + diff);
59194+		rate_temp = clk_get_rate(d->clk);
59195+		if ((rate_temp < rate) && ((rate - rate_temp) > diff))
59196+			dev_info(p->dev, "set rate:%ld, but got rate:%u\n",
59197+				 rate, rate_temp);
59198+		else if ((rate < rate_temp) && ((rate_temp - rate) > diff))
59199+			dev_info(p->dev, "set rate:%ld, but got rate:%u\n",
59200+				 rate, rate_temp);
59201+	}
59202+	if (!ret)
59203+		p->uartclk = rate;
59204+#else
59205 	rate = clk_round_rate(d->clk, newrate);
59206 	if (rate > 0) {
59207 		/*
59208@@ -351,6 +396,7 @@ static void dw8250_set_termios(struct uart_port *p, struct ktermios *termios,
59209 		if (ret)
59210 			swap(p->uartclk, rate);
59211 	}
59212+#endif
59213 	clk_prepare_enable(d->clk);
59214 
59215 	p->status &= ~UPSTAT_AUTOCTS;
59216@@ -483,6 +529,9 @@ static int dw8250_probe(struct platform_device *pdev)
59217 	data->data.dma.fn = dw8250_fallback_dma_filter;
59218 	data->usr_reg = DW_UART_USR;
59219 	p->private_data = &data->data;
59220+#ifdef CONFIG_ARCH_ROCKCHIP
59221+	data->irq	= irq;
59222+#endif
59223 
59224 	data->uart_16550_compatible = device_property_read_bool(dev,
59225 						"snps,uart-16550-compatible");
59226@@ -522,6 +571,13 @@ static int dw8250_probe(struct platform_device *pdev)
59227 		data->msr_mask_off |= UART_MSR_TERI;
59228 	}
59229 
59230+#ifdef CONFIG_ARCH_ROCKCHIP
59231+	if (device_property_read_bool(p->dev, "wakeup-source"))
59232+		data->enable_wakeup = 1;
59233+	else
59234+		data->enable_wakeup = 0;
59235+#endif
59236+
59237 	/* Always ask for fixed clock rate from a property. */
59238 	device_property_read_u32(dev, "clock-frequency", &p->uartclk);
59239 
59240@@ -603,6 +659,10 @@ static int dw8250_probe(struct platform_device *pdev)
59241 			queue_work(system_unbound_wq, &data->clk_work);
59242 	}
59243 
59244+#ifdef CONFIG_ARCH_ROCKCHIP
59245+	if (data->enable_wakeup)
59246+		device_init_wakeup(&pdev->dev, true);
59247+#endif
59248 	platform_set_drvdata(pdev, data);
59249 
59250 	pm_runtime_set_active(dev);
59251@@ -645,6 +705,10 @@ static int dw8250_remove(struct platform_device *pdev)
59252 
59253 	pm_runtime_disable(dev);
59254 	pm_runtime_put_noidle(dev);
59255+#ifdef CONFIG_ARCH_ROCKCHIP
59256+	if (data->enable_wakeup)
59257+		device_init_wakeup(&pdev->dev, false);
59258+#endif
59259 
59260 	return 0;
59261 }
59262@@ -655,6 +719,13 @@ static int dw8250_suspend(struct device *dev)
59263 	struct dw8250_data *data = dev_get_drvdata(dev);
59264 
59265 	serial8250_suspend_port(data->data.line);
59266+#ifdef CONFIG_ARCH_ROCKCHIP
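+	/* Keep the UART interrupt armed as a wake source while suspended */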
59267+	if (device_may_wakeup(dev)) {
59268+		if (!enable_irq_wake(data->irq))
59269+			data->irq_wake = 1;
59270+		return 0;
59271+	}
59272+#endif
59273 
59274 	return 0;
59275 }
59276@@ -664,6 +735,15 @@ static int dw8250_resume(struct device *dev)
59277 	struct dw8250_data *data = dev_get_drvdata(dev);
59278 
59279 	serial8250_resume_port(data->data.line);
59280+#ifdef CONFIG_ARCH_ROCKCHIP
59281+	if (device_may_wakeup(dev)) {
59282+		if (data->irq_wake) {
59283+			disable_irq_wake(data->irq);
59284+			data->irq_wake = 0;
59285+		}
59286+		return 0;
59287+	}
59288+#endif
59289 
59290 	return 0;
59291 }
59292diff --git a/drivers/tty/serial/8250/8250_dwlib.c b/drivers/tty/serial/8250/8250_dwlib.c
59293index 6d6a78eea..fee4f2933 100644
59294--- a/drivers/tty/serial/8250/8250_dwlib.c
59295+++ b/drivers/tty/serial/8250/8250_dwlib.c
59296@@ -106,6 +106,15 @@ void dw8250_setup_port(struct uart_port *p)
59297 	}
59298 
59299 	reg = dw8250_readl_ext(p, DW_UART_CPR);
59300+
59301+#ifdef CONFIG_ARCH_ROCKCHIP
59302+	/*
59303+	 * The UART CPR register may read 0 on some Rockchip SoCs even though
59304+	 * the UART supports a FIFO and AFC; assume a 32-entry FIFO by default.
59305+	 */
59306+	if (reg == 0)
59307+		reg = 0x00023ff2;
59308+#endif
59309 	if (!reg)
59310 		return;
59311 
59312@@ -114,6 +123,9 @@ void dw8250_setup_port(struct uart_port *p)
59313 		p->type = PORT_16550A;
59314 		p->flags |= UPF_FIXED_TYPE;
59315 		p->fifosize = DW_UART_CPR_FIFO_SIZE(reg);
59316+#ifdef CONFIG_ARCH_ROCKCHIP
59317+		up->tx_loadsz = p->fifosize * 3 / 4;
59318+#endif
59319 		up->capabilities = UART_CAP_FIFO;
59320 	}
59321 
59322diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
59323index 1f231fcda..1cb78f8ea 100644
59324--- a/drivers/tty/serial/8250/8250_port.c
59325+++ b/drivers/tty/serial/8250/8250_port.c
59326@@ -122,8 +122,7 @@ static const struct serial8250_config uart_config[] = {
59327 		.name		= "16C950/954",
59328 		.fifo_size	= 128,
59329 		.tx_loadsz	= 128,
59330-		.fcr		= UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_01,
59331-		.rxtrig_bytes	= {16, 32, 112, 120},
59332+		.fcr		= UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
59333 		/* UART_CAP_EFR breaks billionon CF bluetooth card. */
59334 		.flags		= UART_CAP_FIFO | UART_CAP_SLEEP,
59335 	},
59336@@ -1537,8 +1536,13 @@ static inline void __start_tx(struct uart_port *port)
59337 {
59338 	struct uart_8250_port *up = up_to_u8250p(port);
59339 
59340+#ifdef CONFIG_ARCH_ROCKCHIP
59341+	if (up->dma && up->dma->txchan && !up->dma->tx_dma(up))
59342+		return;
59343+#else
59344 	if (up->dma && !up->dma->tx_dma(up))
59345 		return;
59346+#endif
59347 
59348 	if (serial8250_set_THRI(up)) {
59349 		if (up->bugs & UART_BUG_TXEN) {
59350@@ -1868,6 +1872,12 @@ EXPORT_SYMBOL_GPL(serial8250_modem_status);
59351 
59352 static bool handle_rx_dma(struct uart_8250_port *up, unsigned int iir)
59353 {
59354+#ifdef CONFIG_ARCH_ROCKCHIP
59355+	if ((iir & 0xf) != UART_IIR_RX_TIMEOUT)
59356+		return 0;
59357+	else
59358+		return up->dma->rx_dma(up);
59359+#else
59360 	switch (iir & 0x3f) {
59361 	case UART_IIR_RDI:
59362 		if (!up->dma->rx_running)
59363@@ -1879,6 +1889,7 @@ static bool handle_rx_dma(struct uart_8250_port *up, unsigned int iir)
59364 		return true;
59365 	}
59366 	return up->dma->rx_dma(up);
59367+#endif
59368 }
59369 
59370 /*
59371@@ -1889,7 +1900,9 @@ int serial8250_handle_irq(struct uart_port *port, unsigned int iir)
59372 	unsigned char status;
59373 	unsigned long flags;
59374 	struct uart_8250_port *up = up_to_u8250p(port);
59375+#ifndef CONFIG_ARCH_ROCKCHIP
59376 	bool skip_rx = false;
59377+#endif
59378 
59379 	if (iir & UART_IIR_NO_INT)
59380 		return 0;
59381@@ -1898,6 +1911,17 @@ int serial8250_handle_irq(struct uart_port *port, unsigned int iir)
59382 
59383 	status = serial_port_in(port, UART_LSR);
59384 
59385+#ifdef CONFIG_ARCH_ROCKCHIP
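+	/*
+	 * Prefer harvesting RX data through the DMA path; fall back to PIO
+	 * when no RX DMA channel is available or the DMA harvest reports an
+	 * error.
+	 */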
59386+	if (status & (UART_LSR_DR | UART_LSR_BI)) {
59387+		int dma_err = -1;
59388+
59389+		if (up->dma && up->dma->rxchan)
59390+			dma_err = handle_rx_dma(up, iir);
59391+
59392+		if (!up->dma || dma_err)
59393+			status = serial8250_rx_chars(up, status);
59394+	}
59395+#else
59396 	/*
59397 	 * If port is stopped and there are no error conditions in the
59398 	 * FIFO, then don't drain the FIFO, as this may lead to TTY buffer
59399@@ -1915,11 +1939,34 @@ int serial8250_handle_irq(struct uart_port *port, unsigned int iir)
59400 		if (!up->dma || handle_rx_dma(up, iir))
59401 			status = serial8250_rx_chars(up, status);
59402 	}
59403+#endif
59404 	serial8250_modem_status(up);
59405-	if ((!up->dma || up->dma->tx_err) && (status & UART_LSR_THRE) &&
59406-		(up->ier & UART_IER_THRI))
59407+#ifdef CONFIG_ARCH_ROCKCHIP
59408+	if ((!up->dma || (up->dma && (!up->dma->txchan || up->dma->tx_err))) &&
59409+	    ((iir & 0xf) == UART_IIR_THRI))
59410 		serial8250_tx_chars(up);
59411-
59412+#else
59413+	if ((!up->dma || (up->dma && up->dma->tx_err)) &&
59414+	    (status & UART_LSR_THRE))
59415+		serial8250_tx_chars(up);
59416+#endif
59417+
59418+#ifdef CONFIG_ARCH_ROCKCHIP
59419+	if (status & UART_LSR_BRK_ERROR_BITS) {
59420+
59421+		if (status & UART_LSR_OE)
59422+			pr_err("%s: Overrun error!\n", port->name);
59423+		if (status & UART_LSR_PE)
59424+			pr_err("%s: Parity error!\n", port->name);
59425+		if (status & UART_LSR_FE)
59426+			pr_err("%s: Frame error!\n", port->name);
59427+		if (status & UART_LSR_BI)
59428+			pr_err("%s: Break interrupt!\n", port->name);
59429+
59430+		pr_err("%s: the RX pin may be stuck low or the baud rate may be incorrect!\n",
59431+			port->name);
59432+	}
59433+#endif
59434 	uart_unlock_and_check_sysrq(port, flags);
59435 	return 1;
59436 }
59437@@ -2393,7 +2440,11 @@ int serial8250_do_startup(struct uart_port *port)
59438 		if (uart_console(port))
59439 			msg = "forbid DMA for kernel console";
59440 		else if (serial8250_request_dma(up))
59441+#ifdef CONFIG_ARCH_ROCKCHIP
59442+			msg = "failed to request DMA, falling back to interrupt mode";
59443+#else
59444 			msg = "failed to request DMA";
59445+#endif
59446 		if (msg) {
59447 			dev_warn_ratelimited(port->dev, "%s\n", msg);
59448 			up->dma = NULL;
59449@@ -2587,6 +2638,10 @@ void serial8250_do_set_divisor(struct uart_port *port, unsigned int baud,
59450 {
59451 	struct uart_8250_port *up = up_to_u8250p(port);
59452 
59453+#ifdef CONFIG_ARCH_ROCKCHIP
59454+	serial_port_out(port, UART_MCR, UART_MCR_LOOP);
59455+#endif
59456+
59457 	/* Workaround to enable 115200 baud on OMAP1510 internal ports */
59458 	if (is_omap1510_8250(up)) {
59459 		if (baud == 115200) {
59460@@ -2606,6 +2661,17 @@ void serial8250_do_set_divisor(struct uart_port *port, unsigned int baud,
59461 		serial_port_out(port, UART_LCR, up->lcr | UART_LCR_DLAB);
59462 
59463 	serial_dl_write(up, quot);
59464+#ifdef CONFIG_ARCH_ROCKCHIP
59465+	if (quot != serial_dl_read(up))
59466+		dev_warn_ratelimited(port->dev, "ttyS%d failed to set divisor, quot:%d != dll,dlh:%d\n",
59467+					serial_index(port), quot, serial_dl_read(up));
59468+#endif
59469+	if (port->type != PORT_16750)
59470+		serial_port_out(port, UART_LCR, up->lcr);	/* reset DLAB */
59471+
59472+#ifdef CONFIG_ARCH_ROCKCHIP
59473+	serial_port_out(port, UART_MCR, up->mcr);
59474+#endif
59475 }
59476 EXPORT_SYMBOL_GPL(serial8250_do_set_divisor);
59477 
59478@@ -2785,6 +2851,7 @@ serial8250_do_set_termios(struct uart_port *port, struct ktermios *termios,
59479 	if ((termios->c_cflag & CREAD) == 0)
59480 		port->ignore_status_mask |= UART_LSR_DR;
59481 
59482+#ifndef CONFIG_ARCH_ROCKCHIP
59483 	/*
59484 	 * CTS flow control flag and modem status interrupts
59485 	 */
59486@@ -2798,6 +2865,7 @@ serial8250_do_set_termios(struct uart_port *port, struct ktermios *termios,
59487 		up->ier |= UART_IER_RTOIE;
59488 
59489 	serial_port_out(port, UART_IER, up->ier);
59490+#endif
59491 
59492 	if (up->capabilities & UART_CAP_EFR) {
59493 		unsigned char efr = 0;
59494@@ -2816,16 +2884,25 @@ serial8250_do_set_termios(struct uart_port *port, struct ktermios *termios,
59495 			serial_port_out(port, UART_EFR, efr);
59496 	}
59497 
59498+#ifdef CONFIG_ARCH_ROCKCHIP
59499+	/* Reset the UART to make sure it is idle before setting the baud rate */
59500+	serial_port_out(port, 0x88 >> 2, 0x7);
59501+#endif
59502+
59503 	serial8250_set_divisor(port, baud, quot, frac);
59504 
59505+#ifdef CONFIG_ARCH_ROCKCHIP
59506+	up->fcr = UART_FCR_ENABLE_FIFO | UART_FCR_T_TRIG_10 | UART_FCR_R_TRIG_10;
59507+#endif
59508 	/*
59509 	 * LCR DLAB must be set to enable 64-byte FIFO mode. If the FCR
59510 	 * is written without DLAB set, this mode will be disabled.
59511 	 */
59512-	if (port->type == PORT_16750)
59513+	if (port->type == PORT_16750) {
59514 		serial_port_out(port, UART_FCR, up->fcr);
59515+		serial_port_out(port, UART_LCR, up->lcr);	/* reset DLAB */
59516+	}
59517 
59518-	serial_port_out(port, UART_LCR, up->lcr);	/* reset DLAB */
59519 	if (port->type != PORT_16750) {
59520 		/* emulated UARTs (Lucent Venus 167x) need two steps */
59521 		if (up->fcr & UART_FCR_ENABLE_FIFO)
59522@@ -2833,6 +2910,23 @@ serial8250_do_set_termios(struct uart_port *port, struct ktermios *termios,
59523 		serial_port_out(port, UART_FCR, up->fcr);	/* set fcr */
59524 	}
59525 	serial8250_set_mctrl(port, port->mctrl);
59526+
59527+#ifdef CONFIG_ARCH_ROCKCHIP
59528+	/*
59529+	 * CTS flow control flag and modem status interrupts
59530+	 */
59531+	up->ier &= ~UART_IER_MSI;
59532+	if (!(up->bugs & UART_BUG_NOMSR) &&
59533+			UART_ENABLE_MS(&up->port, termios->c_cflag))
59534+		up->ier |= UART_IER_MSI;
59535+	if (up->capabilities & UART_CAP_UUE)
59536+		up->ier |= UART_IER_UUE;
59537+	if (up->capabilities & UART_CAP_RTOIE)
59538+		up->ier |= UART_IER_RTOIE;
59539+
59540+	serial_port_out(port, UART_IER, up->ier);
59541+#endif
59542+
59543 	spin_unlock_irqrestore(&port->lock, flags);
59544 	serial8250_rpm_put(up);
59545 
59546diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
59547index 070b838c7..ab24ec3fb 100644
59548--- a/drivers/usb/class/cdc-acm.c
59549+++ b/drivers/usb/class/cdc-acm.c
59550@@ -340,9 +340,6 @@ static void acm_process_notification(struct acm *acm, unsigned char *buf)
59551 			acm->iocount.overrun++;
59552 		spin_unlock_irqrestore(&acm->read_lock, flags);
59553 
59554-		if (newctrl & ACM_CTRL_BRK)
59555-			tty_flip_buffer_push(&acm->port);
59556-
59557 		if (difference)
59558 			wake_up_all(&acm->wioctl);
59559 
59560@@ -478,16 +475,11 @@ static int acm_submit_read_urbs(struct acm *acm, gfp_t mem_flags)
59561 
59562 static void acm_process_read_urb(struct acm *acm, struct urb *urb)
59563 {
59564-	unsigned long flags;
59565-
59566 	if (!urb->actual_length)
59567 		return;
59568 
59569-	spin_lock_irqsave(&acm->read_lock, flags);
59570 	tty_insert_flip_string(&acm->port, urb->transfer_buffer,
59571 			urb->actual_length);
59572-	spin_unlock_irqrestore(&acm->read_lock, flags);
59573-
59574 	tty_flip_buffer_push(&acm->port);
59575 }
59576 
59577@@ -734,8 +726,7 @@ static void acm_port_destruct(struct tty_port *port)
59578 {
59579 	struct acm *acm = container_of(port, struct acm, port);
59580 
59581-	if (acm->minor != ACM_MINOR_INVALID)
59582-		acm_release_minor(acm);
59583+	acm_release_minor(acm);
59584 	usb_put_intf(acm->control);
59585 	kfree(acm->country_codes);
59586 	kfree(acm);
59587@@ -1352,10 +1343,8 @@ static int acm_probe(struct usb_interface *intf,
59588 	usb_get_intf(acm->control); /* undone in destruct() */
59589 
59590 	minor = acm_alloc_minor(acm);
59591-	if (minor < 0) {
59592-		acm->minor = ACM_MINOR_INVALID;
59593+	if (minor < 0)
59594 		goto alloc_fail1;
59595-	}
59596 
59597 	acm->minor = minor;
59598 	acm->dev = usb_dev;
59599diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h
59600index 3aa7f0a3a..8aef5eb76 100644
59601--- a/drivers/usb/class/cdc-acm.h
59602+++ b/drivers/usb/class/cdc-acm.h
59603@@ -22,8 +22,6 @@
59604 #define ACM_TTY_MAJOR		166
59605 #define ACM_TTY_MINORS		256
59606 
59607-#define ACM_MINOR_INVALID	ACM_TTY_MINORS
59608-
59609 /*
59610  * Requests.
59611  */
59612diff --git a/drivers/usb/common/Kconfig b/drivers/usb/common/Kconfig
59613index b85662243..5e8a04e3d 100644
59614--- a/drivers/usb/common/Kconfig
59615+++ b/drivers/usb/common/Kconfig
59616@@ -6,7 +6,8 @@ config USB_COMMON
59617 
59618 config USB_LED_TRIG
59619 	bool "USB LED Triggers"
59620-	depends on LEDS_CLASS && USB_COMMON && LEDS_TRIGGERS
59621+	depends on LEDS_CLASS && LEDS_TRIGGERS
59622+	select USB_COMMON
59623 	help
59624 	  This option adds LED triggers for USB host and/or gadget activity.
59625 
59626diff --git a/drivers/usb/common/debug.c b/drivers/usb/common/debug.c
59627index f0c0e8db7..a76a086b9 100644
59628--- a/drivers/usb/common/debug.c
59629+++ b/drivers/usb/common/debug.c
59630@@ -207,28 +207,30 @@ static void usb_decode_set_isoch_delay(__u8 wValue, char *str, size_t size)
59631 	snprintf(str, size, "Set Isochronous Delay(Delay = %d ns)", wValue);
59632 }
59633 
59634-static void usb_decode_ctrl_generic(char *str, size_t size, __u8 bRequestType,
59635-				    __u8 bRequest, __u16 wValue, __u16 wIndex,
59636-				    __u16 wLength)
59637-{
59638-	u8 recip = bRequestType & USB_RECIP_MASK;
59639-	u8 type = bRequestType & USB_TYPE_MASK;
59640-
59641-	snprintf(str, size,
59642-		 "Type=%s Recipient=%s Dir=%s bRequest=%u wValue=%u wIndex=%u wLength=%u",
59643-		 (type == USB_TYPE_STANDARD)    ? "Standard" :
59644-		 (type == USB_TYPE_VENDOR)      ? "Vendor" :
59645-		 (type == USB_TYPE_CLASS)       ? "Class" : "Unknown",
59646-		 (recip == USB_RECIP_DEVICE)    ? "Device" :
59647-		 (recip == USB_RECIP_INTERFACE) ? "Interface" :
59648-		 (recip == USB_RECIP_ENDPOINT)  ? "Endpoint" : "Unknown",
59649-		 (bRequestType & USB_DIR_IN)    ? "IN" : "OUT",
59650-		 bRequest, wValue, wIndex, wLength);
59651-}
59652-
59653-static void usb_decode_ctrl_standard(char *str, size_t size, __u8 bRequestType,
59654-				     __u8 bRequest, __u16 wValue, __u16 wIndex,
59655-				     __u16 wLength)
59656+/**
59657+ * usb_decode_ctrl - Returns human readable representation of control request.
59658+ * @str: buffer to return a human-readable representation of control request.
59659+ *       This buffer should have about 200 bytes.
59660+ * @size: size of str buffer.
59661+ * @bRequestType: matches the USB bmRequestType field
59662+ * @bRequest: matches the USB bRequest field
59663+ * @wValue: matches the USB wValue field (CPU byte order)
59664+ * @wIndex: matches the USB wIndex field (CPU byte order)
59665+ * @wLength: matches the USB wLength field (CPU byte order)
59666+ *
59667+ * Function returns decoded, formatted and human-readable description of
59668+ * control request packet.
59669+ *
59670+ * The usage scenario for this is for tracepoints, so function as a return
59671+ * use the same value as in parameters. This approach allows to use this
59672+ * function in TP_printk
59673+ *
59674+ * Important: wValue, wIndex, wLength parameters before invoking this function
59675+ * should be processed by le16_to_cpu macro.
59676+ */
59677+const char *usb_decode_ctrl(char *str, size_t size, __u8 bRequestType,
59678+			    __u8 bRequest, __u16 wValue, __u16 wIndex,
59679+			    __u16 wLength)
59680 {
59681 	switch (bRequest) {
59682 	case USB_REQ_GET_STATUS:
59683@@ -269,48 +271,14 @@ static void usb_decode_ctrl_standard(char *str, size_t size, __u8 bRequestType,
59684 		usb_decode_set_isoch_delay(wValue, str, size);
59685 		break;
59686 	default:
59687-		usb_decode_ctrl_generic(str, size, bRequestType, bRequest,
59688-					wValue, wIndex, wLength);
59689-		break;
59690-	}
59691-}
59692-
59693-/**
59694- * usb_decode_ctrl - Returns human readable representation of control request.
59695- * @str: buffer to return a human-readable representation of control request.
59696- *       This buffer should have about 200 bytes.
59697- * @size: size of str buffer.
59698- * @bRequestType: matches the USB bmRequestType field
59699- * @bRequest: matches the USB bRequest field
59700- * @wValue: matches the USB wValue field (CPU byte order)
59701- * @wIndex: matches the USB wIndex field (CPU byte order)
59702- * @wLength: matches the USB wLength field (CPU byte order)
59703- *
59704- * Function returns decoded, formatted and human-readable description of
59705- * control request packet.
59706- *
59707- * The usage scenario for this is for tracepoints, so function as a return
59708- * use the same value as in parameters. This approach allows to use this
59709- * function in TP_printk
59710- *
59711- * Important: wValue, wIndex, wLength parameters before invoking this function
59712- * should be processed by le16_to_cpu macro.
59713- */
59714-const char *usb_decode_ctrl(char *str, size_t size, __u8 bRequestType,
59715-			    __u8 bRequest, __u16 wValue, __u16 wIndex,
59716-			    __u16 wLength)
59717-{
59718-	switch (bRequestType & USB_TYPE_MASK) {
59719-	case USB_TYPE_STANDARD:
59720-		usb_decode_ctrl_standard(str, size, bRequestType, bRequest,
59721-					 wValue, wIndex, wLength);
59722-		break;
59723-	case USB_TYPE_VENDOR:
59724-	case USB_TYPE_CLASS:
59725-	default:
59726-		usb_decode_ctrl_generic(str, size, bRequestType, bRequest,
59727-					wValue, wIndex, wLength);
59728-		break;
59729+		snprintf(str, size, "%02x %02x %02x %02x %02x %02x %02x %02x",
59730+			 bRequestType, bRequest,
59731+			 (u8)(cpu_to_le16(wValue) & 0xff),
59732+			 (u8)(cpu_to_le16(wValue) >> 8),
59733+			 (u8)(cpu_to_le16(wIndex) & 0xff),
59734+			 (u8)(cpu_to_le16(wIndex) >> 8),
59735+			 (u8)(cpu_to_le16(wLength) & 0xff),
59736+			 (u8)(cpu_to_le16(wLength) >> 8));
59737 	}
59738 
59739 	return str;
59740diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
59741index 39f1eca60..562a730be 100644
59742--- a/drivers/usb/core/config.c
59743+++ b/drivers/usb/core/config.c
59744@@ -406,7 +406,7 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno,
59745 	 * the USB-2 spec requires such endpoints to have wMaxPacketSize = 0
59746 	 * (see the end of section 5.6.3), so don't warn about them.
59747 	 */
59748-	maxp = le16_to_cpu(endpoint->desc.wMaxPacketSize);
59749+	maxp = usb_endpoint_maxp(&endpoint->desc);
59750 	if (maxp == 0 && !(usb_endpoint_xfer_isoc(d) && asnum == 0)) {
59751 		dev_warn(ddev, "config %d interface %d altsetting %d endpoint 0x%X has invalid wMaxPacketSize 0\n",
59752 		    cfgno, inum, asnum, d->bEndpointAddress);
59753@@ -422,9 +422,9 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno,
59754 		maxpacket_maxes = full_speed_maxpacket_maxes;
59755 		break;
59756 	case USB_SPEED_HIGH:
59757-		/* Multiple-transactions bits are allowed only for HS periodic endpoints */
59758+		/* Bits 12..11 are allowed only for HS periodic endpoints */
59759 		if (usb_endpoint_xfer_int(d) || usb_endpoint_xfer_isoc(d)) {
59760-			i = maxp & USB_EP_MAXP_MULT_MASK;
59761+			i = maxp & (BIT(12) | BIT(11));
59762 			maxp &= ~i;
59763 		}
59764 		fallthrough;
59765diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
59766index 4dfa44d6c..6253dce08 100644
59767--- a/drivers/usb/core/driver.c
59768+++ b/drivers/usb/core/driver.c
59769@@ -34,6 +34,7 @@
59770 
59771 #include "usb.h"
59772 
59773+#include <trace/hooks/usb.h>
59774 
59775 /*
59776  * Adds a new dynamic USBdevice ID to this driver,
59777@@ -1403,11 +1404,16 @@ static int usb_suspend_both(struct usb_device *udev, pm_message_t msg)
59778 	int			status = 0;
59779 	int			i = 0, n = 0;
59780 	struct usb_interface	*intf;
59781+	int			bypass = 0;
59782 
59783 	if (udev->state == USB_STATE_NOTATTACHED ||
59784 			udev->state == USB_STATE_SUSPENDED)
59785 		goto done;
59786 
59787+	trace_android_vh_usb_dev_suspend(udev, msg, &bypass);
59788+	if (bypass)
59789+		goto done;
59790+
59791 	/* Suspend all the interfaces and then udev itself */
59792 	if (udev->actconfig) {
59793 		n = udev->actconfig->desc.bNumInterfaces;
59794@@ -1504,11 +1510,17 @@ static int usb_resume_both(struct usb_device *udev, pm_message_t msg)
59795 	int			status = 0;
59796 	int			i;
59797 	struct usb_interface	*intf;
59798+	int			bypass = 0;
59799 
59800 	if (udev->state == USB_STATE_NOTATTACHED) {
59801 		status = -ENODEV;
59802 		goto done;
59803 	}
59804+
59805+	trace_android_vh_usb_dev_resume(udev, msg, &bypass);
59806+	if (bypass)
59807+		goto done;
59808+
59809 	udev->can_submit = 1;
59810 
59811 	/* Resume the device */
59812diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
59813index ac347f9d5..ddd1d3eef 100644
59814--- a/drivers/usb/core/hcd.c
59815+++ b/drivers/usb/core/hcd.c
59816@@ -1692,6 +1692,7 @@ static void usb_giveback_urb_bh(struct tasklet_struct *t)
59817 
59818 	spin_lock_irq(&bh->lock);
59819 	bh->running = true;
59820+ restart:
59821 	list_replace_init(&bh->head, &local_list);
59822 	spin_unlock_irq(&bh->lock);
59823 
59824@@ -1705,17 +1706,10 @@ static void usb_giveback_urb_bh(struct tasklet_struct *t)
59825 		bh->completing_ep = NULL;
59826 	}
59827 
59828-	/*
59829-	 * giveback new URBs next time to prevent this function
59830-	 * from not exiting for a long time.
59831-	 */
59832+	/* check if there are new URBs to giveback */
59833 	spin_lock_irq(&bh->lock);
59834-	if (!list_empty(&bh->head)) {
59835-		if (bh->high_prio)
59836-			tasklet_hi_schedule(&bh->bh);
59837-		else
59838-			tasklet_schedule(&bh->bh);
59839-	}
59840+	if (!list_empty(&bh->head))
59841+		goto restart;
59842 	bh->running = false;
59843 	spin_unlock_irq(&bh->lock);
59844 }
59845@@ -1740,7 +1734,7 @@ static void usb_giveback_urb_bh(struct tasklet_struct *t)
59846 void usb_hcd_giveback_urb(struct usb_hcd *hcd, struct urb *urb, int status)
59847 {
59848 	struct giveback_urb_bh *bh;
59849-	bool running;
59850+	bool running, high_prio_bh;
59851 
59852 	/* pass status to tasklet via unlinked */
59853 	if (likely(!urb->unlinked))
59854@@ -1751,10 +1745,13 @@ void usb_hcd_giveback_urb(struct usb_hcd *hcd, struct urb *urb, int status)
59855 		return;
59856 	}
59857 
59858-	if (usb_pipeisoc(urb->pipe) || usb_pipeint(urb->pipe))
59859+	if (usb_pipeisoc(urb->pipe) || usb_pipeint(urb->pipe)) {
59860 		bh = &hcd->high_prio_bh;
59861-	else
59862+		high_prio_bh = true;
59863+	} else {
59864 		bh = &hcd->low_prio_bh;
59865+		high_prio_bh = false;
59866+	}
59867 
59868 	spin_lock(&bh->lock);
59869 	list_add_tail(&urb->urb_list, &bh->head);
59870@@ -1763,7 +1760,7 @@ void usb_hcd_giveback_urb(struct usb_hcd *hcd, struct urb *urb, int status)
59871 
59872 	if (running)
59873 		;
59874-	else if (bh->high_prio)
59875+	else if (high_prio_bh)
59876 		tasklet_hi_schedule(&bh->bh);
59877 	else
59878 		tasklet_schedule(&bh->bh);
59879@@ -2664,7 +2661,6 @@ int usb_add_hcd(struct usb_hcd *hcd,
59880 {
59881 	int retval;
59882 	struct usb_device *rhdev;
59883-	struct usb_hcd *shared_hcd;
59884 
59885 	if (!hcd->skip_phy_initialization && usb_hcd_is_primary_hcd(hcd)) {
59886 		hcd->phy_roothub = usb_phy_roothub_alloc(hcd->self.sysdev);
59887@@ -2803,7 +2799,6 @@ int usb_add_hcd(struct usb_hcd *hcd,
59888 
59889 	/* initialize tasklets */
59890 	init_giveback_urb_bh(&hcd->high_prio_bh);
59891-	hcd->high_prio_bh.high_prio = true;
59892 	init_giveback_urb_bh(&hcd->low_prio_bh);
59893 
59894 	/* enable irqs just before we start the controller,
59895@@ -2822,26 +2817,13 @@ int usb_add_hcd(struct usb_hcd *hcd,
59896 		goto err_hcd_driver_start;
59897 	}
59898 
59899-	/* starting here, usbcore will pay attention to the shared HCD roothub */
59900-	shared_hcd = hcd->shared_hcd;
59901-	if (!usb_hcd_is_primary_hcd(hcd) && shared_hcd && HCD_DEFER_RH_REGISTER(shared_hcd)) {
59902-		retval = register_root_hub(shared_hcd);
59903-		if (retval != 0)
59904-			goto err_register_root_hub;
59905-
59906-		if (shared_hcd->uses_new_polling && HCD_POLL_RH(shared_hcd))
59907-			usb_hcd_poll_rh_status(shared_hcd);
59908-	}
59909-
59910 	/* starting here, usbcore will pay attention to this root hub */
59911-	if (!HCD_DEFER_RH_REGISTER(hcd)) {
59912-		retval = register_root_hub(hcd);
59913-		if (retval != 0)
59914-			goto err_register_root_hub;
59915+	retval = register_root_hub(hcd);
59916+	if (retval != 0)
59917+		goto err_register_root_hub;
59918 
59919-		if (hcd->uses_new_polling && HCD_POLL_RH(hcd))
59920-			usb_hcd_poll_rh_status(hcd);
59921-	}
59922+	if (hcd->uses_new_polling && HCD_POLL_RH(hcd))
59923+		usb_hcd_poll_rh_status(hcd);
59924 
59925 	return retval;
59926 
59927@@ -2884,7 +2866,6 @@ EXPORT_SYMBOL_GPL(usb_add_hcd);
59928 void usb_remove_hcd(struct usb_hcd *hcd)
59929 {
59930 	struct usb_device *rhdev = hcd->self.root_hub;
59931-	bool rh_registered;
59932 
59933 	dev_info(hcd->self.controller, "remove, state %x\n", hcd->state);
59934 
59935@@ -2895,7 +2876,6 @@ void usb_remove_hcd(struct usb_hcd *hcd)
59936 
59937 	dev_dbg(hcd->self.controller, "roothub graceful disconnect\n");
59938 	spin_lock_irq (&hcd_root_hub_lock);
59939-	rh_registered = hcd->rh_registered;
59940 	hcd->rh_registered = 0;
59941 	spin_unlock_irq (&hcd_root_hub_lock);
59942 
59943@@ -2905,8 +2885,7 @@ void usb_remove_hcd(struct usb_hcd *hcd)
59944 	cancel_work_sync(&hcd->died_work);
59945 
59946 	mutex_lock(&usb_bus_idr_lock);
59947-	if (rh_registered)
59948-		usb_disconnect(&rhdev);		/* Sets rhdev to NULL */
59949+	usb_disconnect(&rhdev);		/* Sets rhdev to NULL */
59950 	mutex_unlock(&usb_bus_idr_lock);
59951 
59952 	/*
59953diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
59954index 7af2def63..28d472da8 100644
59955--- a/drivers/usb/core/hub.c
59956+++ b/drivers/usb/core/hub.c
59957@@ -1112,10 +1112,7 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
59958 		} else {
59959 			hub_power_on(hub, true);
59960 		}
59961-	/* Give some time on remote wakeup to let links to transit to U0 */
59962-	} else if (hub_is_superspeed(hub->hdev))
59963-		msleep(20);
59964-
59965+	}
59966  init2:
59967 
59968 	/*
59969@@ -1230,7 +1227,7 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
59970 			 */
59971 			if (portchange || (hub_is_superspeed(hub->hdev) &&
59972 						port_resumed))
59973-				set_bit(port1, hub->event_bits);
59974+				set_bit(port1, hub->change_bits);
59975 
59976 		} else if (udev->persist_enabled) {
59977 #ifdef CONFIG_PM
59978@@ -1832,7 +1829,8 @@ static int hub_probe(struct usb_interface *intf, const struct usb_device_id *id)
59979 	 * bus_resume methods.
59980 	 */
59981 	if (hdev->parent) {		/* normal device */
59982-		usb_enable_autosuspend(hdev);
59983+		if (!(hdev->parent->quirks & USB_QUIRK_AUTO_SUSPEND))
59984+			usb_enable_autosuspend(hdev);
59985 	} else {			/* root hub */
59986 		const struct hc_driver *drv = bus_to_hcd(hdev->bus)->driver;
59987 
59988@@ -4633,6 +4631,8 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
59989 	if (oldspeed == USB_SPEED_LOW)
59990 		delay = HUB_LONG_RESET_TIME;
59991 
59992+	mutex_lock(hcd->address0_mutex);
59993+
59994 	/* Reset the device; full speed may morph to high speed */
59995 	/* FIXME a USB 2.0 device may morph into SuperSpeed on reset. */
59996 	retval = hub_port_reset(hub, port1, udev, delay, false);
59997@@ -4943,6 +4943,7 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
59998 		hub_port_disable(hub, port1, 0);
59999 		update_devnum(udev, devnum);	/* for disconnect processing */
60000 	}
60001+	mutex_unlock(hcd->address0_mutex);
60002 	return retval;
60003 }
60004 
60005@@ -5117,7 +5118,6 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
60006 	struct usb_port *port_dev = hub->ports[port1 - 1];
60007 	struct usb_device *udev = port_dev->child;
60008 	static int unreliable_port = -1;
60009-	bool retry_locked;
60010 
60011 	/* Disconnect any existing devices under this port */
60012 	if (udev) {
60013@@ -5173,11 +5173,8 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
60014 		unit_load = 100;
60015 
60016 	status = 0;
60017-
60018 	for (i = 0; i < PORT_INIT_TRIES; i++) {
60019-		usb_lock_port(port_dev);
60020-		mutex_lock(hcd->address0_mutex);
60021-		retry_locked = true;
60022+
60023 		/* reallocate for each attempt, since references
60024 		 * to the previous one can escape in various ways
60025 		 */
60026@@ -5185,8 +5182,6 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
60027 		if (!udev) {
60028 			dev_err(&port_dev->dev,
60029 					"couldn't allocate usb_device\n");
60030-			mutex_unlock(hcd->address0_mutex);
60031-			usb_unlock_port(port_dev);
60032 			goto done;
60033 		}
60034 
60035@@ -5208,14 +5203,12 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
60036 		}
60037 
60038 		/* reset (non-USB 3.0 devices) and get descriptor */
60039+		usb_lock_port(port_dev);
60040 		status = hub_port_init(hub, udev, port1, i);
60041+		usb_unlock_port(port_dev);
60042 		if (status < 0)
60043 			goto loop;
60044 
60045-		mutex_unlock(hcd->address0_mutex);
60046-		usb_unlock_port(port_dev);
60047-		retry_locked = false;
60048-
60049 		if (udev->quirks & USB_QUIRK_DELAY_INIT)
60050 			msleep(2000);
60051 
60052@@ -5308,10 +5301,6 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
60053 		usb_ep0_reinit(udev);
60054 		release_devnum(udev);
60055 		hub_free_dev(udev);
60056-		if (retry_locked) {
60057-			mutex_unlock(hcd->address0_mutex);
60058-			usb_unlock_port(port_dev);
60059-		}
60060 		usb_put_dev(udev);
60061 		if ((status == -ENOTCONN) || (status == -ENOTSUPP))
60062 			break;
60063@@ -5336,7 +5325,8 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
60064 done:
60065 	hub_port_disable(hub, port1, 1);
60066 	if (hcd->driver->relinquish_port && !hub->hdev->parent) {
60067-		if (status != -ENOTCONN && status != -ENODEV)
60068+		if ((status != -ENOTCONN && status != -ENODEV) ||
60069+		    (status == -ENOTCONN && of_machine_is_compatible("rockchip,rk3288")))
60070 			hcd->driver->relinquish_port(hcd, port1);
60071 	}
60072 }
60073@@ -5863,8 +5853,6 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
60074 	bos = udev->bos;
60075 	udev->bos = NULL;
60076 
60077-	mutex_lock(hcd->address0_mutex);
60078-
60079 	for (i = 0; i < PORT_INIT_TRIES; ++i) {
60080 
60081 		/* ep0 maxpacket size may change; let the HCD know about it.
60082@@ -5874,7 +5862,6 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
60083 		if (ret >= 0 || ret == -ENOTCONN || ret == -ENODEV)
60084 			break;
60085 	}
60086-	mutex_unlock(hcd->address0_mutex);
60087 
60088 	if (ret < 0)
60089 		goto re_enumerate;
60090diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
60091index 4ac1c22f1..32141da82 100644
60092--- a/drivers/usb/core/quirks.c
60093+++ b/drivers/usb/core/quirks.c
60094@@ -322,6 +322,10 @@ static const struct usb_device_id usb_quirk_list[] = {
60095 	/* Alcor Micro Corp. Hub */
60096 	{ USB_DEVICE(0x058f, 0x9254), .driver_info = USB_QUIRK_RESET_RESUME },
60097 
60098+	/* HD Camera Manufacturer */
60099+	{ USB_DEVICE(0x05a3, 0x9230), .driver_info = USB_QUIRK_AUTO_SUSPEND },
60100+	{ USB_DEVICE(0x05a3, 0x9320), .driver_info = USB_QUIRK_AUTO_SUSPEND },
60101+
60102 	/* appletouch */
60103 	{ USB_DEVICE(0x05ac, 0x021a), .driver_info = USB_QUIRK_RESET_RESUME },
60104 
60105@@ -426,6 +430,10 @@ static const struct usb_device_id usb_quirk_list[] = {
60106 	/* Generic RTL8153 based ethernet adapters */
60107 	{ USB_DEVICE(0x0bda, 0x8153), .driver_info = USB_QUIRK_NO_LPM },
60108 
60109+	/* Sonix FaceBlack device */
60110+	{ USB_DEVICE(0x0c45, 0x64ab), .driver_info = USB_QUIRK_AUTO_SUSPEND },
60111+	{ USB_DEVICE(0x0c45, 0x64ac), .driver_info = USB_QUIRK_AUTO_SUSPEND },
60112+
60113 	/* SONiX USB DEVICE Touchpad */
60114 	{ USB_DEVICE(0x0c45, 0x7056), .driver_info =
60115 			USB_QUIRK_IGNORE_REMOTE_WAKEUP },
60116@@ -454,12 +462,6 @@ static const struct usb_device_id usb_quirk_list[] = {
60117 	{ USB_DEVICE(0x17ef, 0x1018), .driver_info = USB_QUIRK_RESET_RESUME },
60118 	{ USB_DEVICE(0x17ef, 0x1019), .driver_info = USB_QUIRK_RESET_RESUME },
60119 
60120-	/* Lenovo USB-C to Ethernet Adapter RTL8153-04 */
60121-	{ USB_DEVICE(0x17ef, 0x720c), .driver_info = USB_QUIRK_NO_LPM },
60122-
60123-	/* Lenovo Powered USB-C Travel Hub (4X90S92381, RTL8153 GigE) */
60124-	{ USB_DEVICE(0x17ef, 0x721e), .driver_info = USB_QUIRK_NO_LPM },
60125-
60126 	/* Lenovo ThinkCenter A630Z TI024Gen3 usb-audio */
60127 	{ USB_DEVICE(0x17ef, 0xa012), .driver_info =
60128 			USB_QUIRK_DISCONNECT_SUSPEND },
60129diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h
60130index 03d16a082..12630e4d8 100644
60131--- a/drivers/usb/dwc2/core.h
60132+++ b/drivers/usb/dwc2/core.h
60133@@ -1061,6 +1061,7 @@ struct dwc2_hsotg {
60134 	unsigned int hcd_enabled:1;
60135 	unsigned int gadget_enabled:1;
60136 	unsigned int ll_hw_enabled:1;
60137+	unsigned int ll_phy_enabled:1;
60138 	unsigned int hibernated:1;
60139 	unsigned int reset_phy_on_wake:1;
60140 	unsigned int need_phy_for_wake:1;
60141@@ -1078,6 +1079,8 @@ struct dwc2_hsotg {
60142 	void *priv;
60143 	int     irq;
60144 	struct clk *clk;
60145+	struct clk_bulk_data *clks;
60146+	int num_clks;
60147 	struct reset_control *reset;
60148 	struct reset_control *reset_ecc;
60149 
60150@@ -1345,6 +1348,9 @@ extern const struct of_device_id dwc2_of_match_table[];
60151 int dwc2_lowlevel_hw_enable(struct dwc2_hsotg *hsotg);
60152 int dwc2_lowlevel_hw_disable(struct dwc2_hsotg *hsotg);
60153 
60154+int dwc2_lowlevel_phy_enable(struct dwc2_hsotg *hsotg);
60155+int dwc2_lowlevel_phy_disable(struct dwc2_hsotg *hsotg);
60156+
60157 /* Common polling functions */
60158 int dwc2_hsotg_wait_bit_set(struct dwc2_hsotg *hs_otg, u32 reg, u32 bit,
60159 			    u32 timeout);
60160diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
60161index da0df69cc..99e04bf63 100644
60162--- a/drivers/usb/dwc2/gadget.c
60163+++ b/drivers/usb/dwc2/gadget.c
60164@@ -23,6 +23,7 @@
60165 #include <linux/io.h>
60166 #include <linux/slab.h>
60167 #include <linux/of_platform.h>
60168+#include <linux/platform_data/s3c-hsotg.h>
60169 
60170 #include <linux/usb/ch9.h>
60171 #include <linux/usb/gadget.h>
60172@@ -106,6 +107,23 @@ static inline bool using_desc_dma(struct dwc2_hsotg *hsotg)
60173 	return hsotg->params.g_dma_desc;
60174 }
60175 
60176+/**
60177+ * dwc2_hsotg_read_frameno - read current frame number
60178+ * @hsotg: The device instance
60179+ *
60180+ * Return the current frame number
60181+ */
60182+static u32 dwc2_hsotg_read_frameno(struct dwc2_hsotg *hsotg)
60183+{
60184+	u32 dsts;
60185+
60186+	dsts = dwc2_readl(hsotg, DSTS);
60187+	dsts &= DSTS_SOFFN_MASK;
60188+	dsts >>= DSTS_SOFFN_SHIFT;
60189+
60190+	return dsts;
60191+}
60192+
60193 /**
60194  * dwc2_gadget_incr_frame_num - Increments the targeted frame number.
60195  * @hs_ep: The endpoint
60196@@ -116,16 +134,13 @@ static inline bool using_desc_dma(struct dwc2_hsotg *hsotg)
60197 static inline void dwc2_gadget_incr_frame_num(struct dwc2_hsotg_ep *hs_ep)
60198 {
60199 	struct dwc2_hsotg *hsotg = hs_ep->parent;
60200-	u16 limit = DSTS_SOFFN_LIMIT;
60201-
60202-	if (hsotg->gadget.speed != USB_SPEED_HIGH)
60203-		limit >>= 3;
60204+	u32 current_frame = dwc2_hsotg_read_frameno(hsotg);
60205 
60206 	hs_ep->target_frame += hs_ep->interval;
60207-	if (hs_ep->target_frame > limit) {
60208+	if (hs_ep->target_frame > DSTS_SOFFN_LIMIT) {
60209 		hs_ep->frame_overrun = true;
60210-		hs_ep->target_frame &= limit;
60211-	} else {
60212+		hs_ep->target_frame &= DSTS_SOFFN_LIMIT;
60213+	} else if (current_frame <= hs_ep->target_frame) {
60214 		hs_ep->frame_overrun = false;
60215 	}
60216 }
60217@@ -142,16 +157,10 @@ static inline void dwc2_gadget_incr_frame_num(struct dwc2_hsotg_ep *hs_ep)
60218  */
60219 static inline void dwc2_gadget_dec_frame_num_by_one(struct dwc2_hsotg_ep *hs_ep)
60220 {
60221-	struct dwc2_hsotg *hsotg = hs_ep->parent;
60222-	u16 limit = DSTS_SOFFN_LIMIT;
60223-
60224-	if (hsotg->gadget.speed != USB_SPEED_HIGH)
60225-		limit >>= 3;
60226-
60227 	if (hs_ep->target_frame)
60228 		hs_ep->target_frame -= 1;
60229 	else
60230-		hs_ep->target_frame = limit;
60231+		hs_ep->target_frame = DSTS_SOFFN_LIMIT;
60232 }
60233 
60234 /**
60235@@ -697,23 +706,6 @@ static unsigned int get_ep_limit(struct dwc2_hsotg_ep *hs_ep)
60236 	return maxsize;
60237 }
60238 
60239-/**
60240- * dwc2_hsotg_read_frameno - read current frame number
60241- * @hsotg: The device instance
60242- *
60243- * Return the current frame number
60244- */
60245-static u32 dwc2_hsotg_read_frameno(struct dwc2_hsotg *hsotg)
60246-{
60247-	u32 dsts;
60248-
60249-	dsts = dwc2_readl(hsotg, DSTS);
60250-	dsts &= DSTS_SOFFN_MASK;
60251-	dsts >>= DSTS_SOFFN_SHIFT;
60252-
60253-	return dsts;
60254-}
60255-
60256 /**
60257  * dwc2_gadget_get_chain_limit - get the maximum data payload value of the
60258  * DMA descriptor chain prepared for specific endpoint
60259@@ -1030,12 +1022,6 @@ static void dwc2_gadget_start_isoc_ddma(struct dwc2_hsotg_ep *hs_ep)
60260 	dwc2_writel(hsotg, ctrl, depctl);
60261 }
60262 
60263-static bool dwc2_gadget_target_frame_elapsed(struct dwc2_hsotg_ep *hs_ep);
60264-static void dwc2_hsotg_complete_request(struct dwc2_hsotg *hsotg,
60265-					struct dwc2_hsotg_ep *hs_ep,
60266-				       struct dwc2_hsotg_req *hs_req,
60267-				       int result);
60268-
60269 /**
60270  * dwc2_hsotg_start_req - start a USB request from an endpoint's queue
60271  * @hsotg: The controller state.
60272@@ -1188,21 +1174,14 @@ static void dwc2_hsotg_start_req(struct dwc2_hsotg *hsotg,
60273 		}
60274 	}
60275 
60276-	if (hs_ep->isochronous) {
60277-		if (!dwc2_gadget_target_frame_elapsed(hs_ep)) {
60278-			if (hs_ep->interval == 1) {
60279-				if (hs_ep->target_frame & 0x1)
60280-					ctrl |= DXEPCTL_SETODDFR;
60281-				else
60282-					ctrl |= DXEPCTL_SETEVENFR;
60283-			}
60284-			ctrl |= DXEPCTL_CNAK;
60285-		} else {
60286-			hs_req->req.frame_number = hs_ep->target_frame;
60287-			hs_req->req.actual = 0;
60288-			dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, -ENODATA);
60289-			return;
60290-		}
60291+	if (hs_ep->isochronous && hs_ep->interval == 1) {
60292+		hs_ep->target_frame = dwc2_hsotg_read_frameno(hsotg);
60293+		dwc2_gadget_incr_frame_num(hs_ep);
60294+
60295+		if (hs_ep->target_frame & 0x1)
60296+			ctrl |= DXEPCTL_SETODDFR;
60297+		else
60298+			ctrl |= DXEPCTL_SETEVENFR;
60299 	}
60300 
60301 	ctrl |= DXEPCTL_EPENA;	/* ensure ep enabled */
60302@@ -1350,16 +1329,12 @@ static bool dwc2_gadget_target_frame_elapsed(struct dwc2_hsotg_ep *hs_ep)
60303 	u32 target_frame = hs_ep->target_frame;
60304 	u32 current_frame = hsotg->frame_number;
60305 	bool frame_overrun = hs_ep->frame_overrun;
60306-	u16 limit = DSTS_SOFFN_LIMIT;
60307-
60308-	if (hsotg->gadget.speed != USB_SPEED_HIGH)
60309-		limit >>= 3;
60310 
60311 	if (!frame_overrun && current_frame >= target_frame)
60312 		return true;
60313 
60314 	if (frame_overrun && current_frame >= target_frame &&
60315-	    ((current_frame - target_frame) < limit / 2))
60316+	    ((current_frame - target_frame) < DSTS_SOFFN_LIMIT / 2))
60317 		return true;
60318 
60319 	return false;
60320@@ -1742,9 +1717,11 @@ static struct dwc2_hsotg_req *get_ep_head(struct dwc2_hsotg_ep *hs_ep)
60321  */
60322 static void dwc2_gadget_start_next_request(struct dwc2_hsotg_ep *hs_ep)
60323 {
60324+	u32 mask;
60325 	struct dwc2_hsotg *hsotg = hs_ep->parent;
60326 	int dir_in = hs_ep->dir_in;
60327 	struct dwc2_hsotg_req *hs_req;
60328+	u32 epmsk_reg = dir_in ? DIEPMSK : DOEPMSK;
60329 
60330 	if (!list_empty(&hs_ep->queue)) {
60331 		hs_req = get_ep_head(hs_ep);
60332@@ -1760,6 +1737,9 @@ static void dwc2_gadget_start_next_request(struct dwc2_hsotg_ep *hs_ep)
60333 	} else {
60334 		dev_dbg(hsotg->dev, "%s: No more ISOC-OUT requests\n",
60335 			__func__);
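+		/*
+		 * Unmask OUTTKNEPDIS so the next OUT token can restart
+		 * the ISOC-OUT stream (hedged: inferred from the matching
+		 * mask-off in the out-token handler below).
+		 */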
60336+		mask = dwc2_readl(hsotg, epmsk_reg);
60337+		mask |= DOEPMSK_OUTTKNEPDISMSK;
60338+		dwc2_writel(hsotg, mask, epmsk_reg);
60339 	}
60340 }
60341 
60342@@ -2329,6 +2309,19 @@ static void dwc2_hsotg_ep0_zlp(struct dwc2_hsotg *hsotg, bool dir_in)
60343 	dwc2_hsotg_program_zlp(hsotg, hsotg->eps_out[0]);
60344 }
60345 
60346+static void dwc2_hsotg_change_ep_iso_parity(struct dwc2_hsotg *hsotg,
60347+					    u32 epctl_reg)
60348+{
60349+	u32 ctrl;
60350+
60351+	ctrl = dwc2_readl(hsotg, epctl_reg);
60352+	if (ctrl & DXEPCTL_EOFRNUM)
60353+		ctrl |= DXEPCTL_SETEVENFR;
60354+	else
60355+		ctrl |= DXEPCTL_SETODDFR;
60356+	dwc2_writel(hsotg, ctrl, epctl_reg);
60357+}
60358+
60359 /*
60360  * dwc2_gadget_get_xfersize_ddma - get transferred bytes amount from desc
60361  * @hs_ep - The endpoint on which transfer went
60362@@ -2449,12 +2442,21 @@ static void dwc2_hsotg_handle_outdone(struct dwc2_hsotg *hsotg, int epnum)
60363 			dwc2_hsotg_ep0_zlp(hsotg, true);
60364 	}
60365 
60366-	/* Set actual frame number for completed transfers */
60367-	if (!using_desc_dma(hsotg) && hs_ep->isochronous) {
60368-		req->frame_number = hs_ep->target_frame;
60369-		dwc2_gadget_incr_frame_num(hs_ep);
60370+	/*
60371+	 * Slave mode OUT transfers do not go through XferComplete so
60372+	 * adjust the ISOC parity here.
60373+	 */
60374+	if (!using_dma(hsotg)) {
60375+		if (hs_ep->isochronous && hs_ep->interval == 1)
60376+			dwc2_hsotg_change_ep_iso_parity(hsotg, DOEPCTL(epnum));
60377+		else if (hs_ep->isochronous && hs_ep->interval > 1)
60378+			dwc2_gadget_incr_frame_num(hs_ep);
60379 	}
60380 
60381+	/* Set actual frame number for completed transfers */
60382+	if (!using_desc_dma(hsotg) && hs_ep->isochronous)
60383+		req->frame_number = hsotg->frame_number;
60384+
60385 	dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, result);
60386 }
60387 
60388@@ -2767,12 +2769,6 @@ static void dwc2_hsotg_complete_in(struct dwc2_hsotg *hsotg,
60389 		return;
60390 	}
60391 
60392-	/* Set actual frame number for completed transfers */
60393-	if (!using_desc_dma(hsotg) && hs_ep->isochronous) {
60394-		hs_req->req.frame_number = hs_ep->target_frame;
60395-		dwc2_gadget_incr_frame_num(hs_ep);
60396-	}
60397-
60398 	dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, 0);
60399 }
60400 
60401@@ -2833,18 +2829,23 @@ static void dwc2_gadget_handle_ep_disabled(struct dwc2_hsotg_ep *hs_ep)
60402 
60403 		dwc2_hsotg_txfifo_flush(hsotg, hs_ep->fifo_index);
60404 
60405+		if (hs_ep->isochronous) {
60406+			dwc2_hsotg_complete_in(hsotg, hs_ep);
60407+			return;
60408+		}
60409+
60410 		if ((epctl & DXEPCTL_STALL) && (epctl & DXEPCTL_EPTYPE_BULK)) {
60411 			int dctl = dwc2_readl(hsotg, DCTL);
60412 
60413 			dctl |= DCTL_CGNPINNAK;
60414 			dwc2_writel(hsotg, dctl, DCTL);
60415 		}
60416-	} else {
60417+		return;
60418+	}
60419 
60420-		if (dctl & DCTL_GOUTNAKSTS) {
60421-			dctl |= DCTL_CGOUTNAK;
60422-			dwc2_writel(hsotg, dctl, DCTL);
60423-		}
60424+	if (dctl & DCTL_GOUTNAKSTS) {
60425+		dctl |= DCTL_CGOUTNAK;
60426+		dwc2_writel(hsotg, dctl, DCTL);
60427 	}
60428 
60429 	if (!hs_ep->isochronous)
60430@@ -2858,16 +2859,15 @@ static void dwc2_gadget_handle_ep_disabled(struct dwc2_hsotg_ep *hs_ep)
60431 
60432 	do {
60433 		hs_req = get_ep_head(hs_ep);
60434-		if (hs_req) {
60435-			hs_req->req.frame_number = hs_ep->target_frame;
60436-			hs_req->req.actual = 0;
60437+		if (hs_req)
60438 			dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req,
60439 						    -ENODATA);
60440-		}
60441 		dwc2_gadget_incr_frame_num(hs_ep);
60442 		/* Update current frame number value. */
60443 		hsotg->frame_number = dwc2_hsotg_read_frameno(hsotg);
60444 	} while (dwc2_gadget_target_frame_elapsed(hs_ep));
60445+
60446+	dwc2_gadget_start_next_request(hs_ep);
60447 }
60448 
60449 /**
60450@@ -2884,8 +2884,8 @@ static void dwc2_gadget_handle_ep_disabled(struct dwc2_hsotg_ep *hs_ep)
60451 static void dwc2_gadget_handle_out_token_ep_disabled(struct dwc2_hsotg_ep *ep)
60452 {
60453 	struct dwc2_hsotg *hsotg = ep->parent;
60454-	struct dwc2_hsotg_req *hs_req;
60455 	int dir_in = ep->dir_in;
60456+	u32 doepmsk;
60457 
60458 	if (dir_in || !ep->isochronous)
60459 		return;
60460@@ -2899,42 +2899,28 @@ static void dwc2_gadget_handle_out_token_ep_disabled(struct dwc2_hsotg_ep *ep)
60461 		return;
60462 	}
60463 
60464-	if (ep->target_frame == TARGET_FRAME_INITIAL) {
60465+	if (ep->interval > 1 &&
60466+	    ep->target_frame == TARGET_FRAME_INITIAL) {
60467 		u32 ctrl;
60468 
60469 		ep->target_frame = hsotg->frame_number;
60470-		if (ep->interval > 1) {
60471-			ctrl = dwc2_readl(hsotg, DOEPCTL(ep->index));
60472-			if (ep->target_frame & 0x1)
60473-				ctrl |= DXEPCTL_SETODDFR;
60474-			else
60475-				ctrl |= DXEPCTL_SETEVENFR;
60476-
60477-			dwc2_writel(hsotg, ctrl, DOEPCTL(ep->index));
60478-		}
60479-	}
60480+		dwc2_gadget_incr_frame_num(ep);
60481 
60482-	while (dwc2_gadget_target_frame_elapsed(ep)) {
60483-		hs_req = get_ep_head(ep);
60484-		if (hs_req) {
60485-			hs_req->req.frame_number = ep->target_frame;
60486-			hs_req->req.actual = 0;
60487-			dwc2_hsotg_complete_request(hsotg, ep, hs_req, -ENODATA);
60488-		}
60489+		ctrl = dwc2_readl(hsotg, DOEPCTL(ep->index));
60490+		if (ep->target_frame & 0x1)
60491+			ctrl |= DXEPCTL_SETODDFR;
60492+		else
60493+			ctrl |= DXEPCTL_SETEVENFR;
60494 
60495-		dwc2_gadget_incr_frame_num(ep);
60496-		/* Update current frame number value. */
60497-		hsotg->frame_number = dwc2_hsotg_read_frameno(hsotg);
60498+		dwc2_writel(hsotg, ctrl, DOEPCTL(ep->index));
60499 	}
60500 
60501-	if (!ep->req)
60502-		dwc2_gadget_start_next_request(ep);
60503-
60504+	dwc2_gadget_start_next_request(ep);
60505+	doepmsk = dwc2_readl(hsotg, DOEPMSK);
60506+	doepmsk &= ~DOEPMSK_OUTTKNEPDISMSK;
60507+	dwc2_writel(hsotg, doepmsk, DOEPMSK);
60508 }
60509 
60510-static void dwc2_hsotg_ep_stop_xfr(struct dwc2_hsotg *hsotg,
60511-				   struct dwc2_hsotg_ep *hs_ep);
60512-
60513 /**
60514  * dwc2_gadget_handle_nak - handle NAK interrupt
60515  * @hs_ep: The endpoint on which interrupt is asserted.
60516@@ -2952,9 +2938,7 @@ static void dwc2_hsotg_ep_stop_xfr(struct dwc2_hsotg *hsotg,
60517 static void dwc2_gadget_handle_nak(struct dwc2_hsotg_ep *hs_ep)
60518 {
60519 	struct dwc2_hsotg *hsotg = hs_ep->parent;
60520-	struct dwc2_hsotg_req *hs_req;
60521 	int dir_in = hs_ep->dir_in;
60522-	u32 ctrl;
60523 
60524 	if (!dir_in || !hs_ep->isochronous)
60525 		return;
60526@@ -2996,32 +2980,13 @@ static void dwc2_gadget_handle_nak(struct dwc2_hsotg_ep *hs_ep)
60527 
60528 			dwc2_writel(hsotg, ctrl, DIEPCTL(hs_ep->index));
60529 		}
60530-	}
60531 
60532-	if (using_desc_dma(hsotg))
60533-		return;
60534-
60535-	ctrl = dwc2_readl(hsotg, DIEPCTL(hs_ep->index));
60536-	if (ctrl & DXEPCTL_EPENA)
60537-		dwc2_hsotg_ep_stop_xfr(hsotg, hs_ep);
60538-	else
60539-		dwc2_hsotg_txfifo_flush(hsotg, hs_ep->fifo_index);
60540-
60541-	while (dwc2_gadget_target_frame_elapsed(hs_ep)) {
60542-		hs_req = get_ep_head(hs_ep);
60543-		if (hs_req) {
60544-			hs_req->req.frame_number = hs_ep->target_frame;
60545-			hs_req->req.actual = 0;
60546-			dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, -ENODATA);
60547-		}
60548-
60549-		dwc2_gadget_incr_frame_num(hs_ep);
60550-		/* Update current frame number value. */
60551-		hsotg->frame_number = dwc2_hsotg_read_frameno(hsotg);
60552+		dwc2_hsotg_complete_request(hsotg, hs_ep,
60553+					    get_ep_head(hs_ep), 0);
60554 	}
60555 
60556-	if (!hs_ep->req)
60557-		dwc2_gadget_start_next_request(hs_ep);
60558+	if (!using_desc_dma(hsotg))
60559+		dwc2_gadget_incr_frame_num(hs_ep);
60560 }
60561 
60562 /**
60563@@ -3077,15 +3042,21 @@ static void dwc2_hsotg_epint(struct dwc2_hsotg *hsotg, unsigned int idx,
60564 
60565 		/* In DDMA handle isochronous requests separately */
60566 		if (using_desc_dma(hsotg) && hs_ep->isochronous) {
60567-			dwc2_gadget_complete_isoc_request_ddma(hs_ep);
60568+			/* XferCompl set along with BNA */
60569+			if (!(ints & DXEPINT_BNAINTR))
60570+				dwc2_gadget_complete_isoc_request_ddma(hs_ep);
60571 		} else if (dir_in) {
60572 			/*
60573 			 * We get OutDone from the FIFO, so we only
60574 			 * need to look at completing IN requests here
60575 			 * if operating slave mode
60576 			 */
60577-			if (!hs_ep->isochronous || !(ints & DXEPINT_NAKINTRPT))
60578-				dwc2_hsotg_complete_in(hsotg, hs_ep);
60579+			if (hs_ep->isochronous && hs_ep->interval > 1)
60580+				dwc2_gadget_incr_frame_num(hs_ep);
60581+
60582+			dwc2_hsotg_complete_in(hsotg, hs_ep);
60583+			if (ints & DXEPINT_NAKINTRPT)
60584+				ints &= ~DXEPINT_NAKINTRPT;
60585 
60586 			if (idx == 0 && !hs_ep->req)
60587 				dwc2_hsotg_enqueue_setup(hsotg);
60588@@ -3094,8 +3065,10 @@ static void dwc2_hsotg_epint(struct dwc2_hsotg *hsotg, unsigned int idx,
60589 			 * We're using DMA, we need to fire an OutDone here
60590 			 * as we ignore the RXFIFO.
60591 			 */
60592-			if (!hs_ep->isochronous || !(ints & DXEPINT_OUTTKNEPDIS))
60593-				dwc2_hsotg_handle_outdone(hsotg, idx);
60594+			if (hs_ep->isochronous && hs_ep->interval > 1)
60595+				dwc2_gadget_incr_frame_num(hs_ep);
60596+
60597+			dwc2_hsotg_handle_outdone(hsotg, idx);
60598 		}
60599 	}
60600 
60601@@ -4115,7 +4088,6 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep,
60602 			mask |= DIEPMSK_NAKMSK;
60603 			dwc2_writel(hsotg, mask, DIEPMSK);
60604 		} else {
60605-			epctrl |= DXEPCTL_SNAK;
60606 			mask = dwc2_readl(hsotg, DOEPMSK);
60607 			mask |= DOEPMSK_OUTTKNEPDISMSK;
60608 			dwc2_writel(hsotg, mask, DOEPMSK);
60609@@ -4529,6 +4501,14 @@ static int dwc2_hsotg_udc_start(struct usb_gadget *gadget,
60610 			goto err;
60611 	}
60612 
60613+	if (hsotg->dr_mode == USB_DR_MODE_OTG && dwc2_is_device_mode(hsotg)) {
60614+		if (!hsotg->ll_phy_enabled) {
60615+			ret = dwc2_lowlevel_phy_enable(hsotg);
60616+			if (ret)
60617+				goto err;
60618+		}
60619+	}
60620+
60621 	if (!IS_ERR_OR_NULL(hsotg->uphy))
60622 		otg_set_peripheral(hsotg->uphy->otg, &hsotg->gadget);
60623 
60624@@ -4588,6 +4568,11 @@ static int dwc2_hsotg_udc_stop(struct usb_gadget *gadget)
60625 	if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL)
60626 		dwc2_lowlevel_hw_disable(hsotg);
60627 
60628+	if (hsotg->dr_mode == USB_DR_MODE_OTG && dwc2_is_device_mode(hsotg)) {
60629+		if (hsotg->ll_phy_enabled)
60630+			dwc2_lowlevel_phy_disable(hsotg);
60631+	}
60632+
60633 	return 0;
60634 }
60635 
60636@@ -5032,7 +5017,7 @@ int dwc2_hsotg_suspend(struct dwc2_hsotg *hsotg)
60637 		hsotg->gadget.speed = USB_SPEED_UNKNOWN;
60638 		spin_unlock_irqrestore(&hsotg->lock, flags);
60639 
60640-		for (ep = 1; ep < hsotg->num_of_eps; ep++) {
60641+		for (ep = 0; ep < hsotg->num_of_eps; ep++) {
60642 			if (hsotg->eps_in[ep])
60643 				dwc2_hsotg_ep_disable_lock(&hsotg->eps_in[ep]->ep);
60644 			if (hsotg->eps_out[ep])
60645diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
60646index 9279d3d36..e6fadfd68 100644
60647--- a/drivers/usb/dwc2/hcd.c
60648+++ b/drivers/usb/dwc2/hcd.c
60649@@ -812,11 +812,13 @@ void dwc2_hc_halt(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan,
60650 	 * uframe/frame (in the worst case), the core generates a channel
60651 	 * halted and disables the channel automatically.
60652 	 */
60653-	if ((hsotg->params.g_dma && !hsotg->params.g_dma_desc) ||
60654+	if ((hsotg->params.host_dma && !hsotg->params.dma_desc_enable) ||
60655 	    hsotg->hw_params.arch == GHWCFG2_EXT_DMA_ARCH) {
60656 		if (!chan->do_split &&
60657 		    (chan->ep_type == USB_ENDPOINT_XFER_ISOC ||
60658-		     chan->ep_type == USB_ENDPOINT_XFER_INT)) {
60659+		     chan->ep_type == USB_ENDPOINT_XFER_INT) &&
60660+		    (halt_status == DWC2_HC_XFER_URB_DEQUEUE)) {
60661+			chan->halt_status = halt_status;
60662 			dev_err(hsotg->dev, "%s() Channel can't be halted\n",
60663 				__func__);
60664 			return;
60665@@ -1827,11 +1829,13 @@ void dwc2_hcd_disconnect(struct dwc2_hsotg *hsotg, bool force)
60666 	 * Without the extra check here we will end calling disconnect
60667 	 * and won't get any future interrupts to handle the connect.
60668 	 */
60669-	if (!force) {
60670-		hprt0 = dwc2_readl(hsotg, HPRT0);
60671-		if (!(hprt0 & HPRT0_CONNDET) && (hprt0 & HPRT0_CONNSTS))
60672-			dwc2_hcd_connect(hsotg);
60673-	}
60674+	hprt0 = dwc2_readl(hsotg, HPRT0);
60675+
60676+	if (!force && !(hprt0 & HPRT0_CONNDET) &&
60677+	    (hprt0 & HPRT0_CONNSTS))
60678+		dwc2_hcd_connect(hsotg);
60679+	else if (hsotg->lx_state != DWC2_L0)
60680+		usb_hcd_resume_root_hub(hsotg->priv);
60681 }
60682 
60683 /**
60684@@ -2441,10 +2445,13 @@ static void dwc2_hc_init_xfer(struct dwc2_hsotg *hsotg,
60685 	}
60686 }
60687 
60688-static int dwc2_alloc_split_dma_aligned_buf(struct dwc2_hsotg *hsotg,
60689-					    struct dwc2_qh *qh,
60690-					    struct dwc2_host_chan *chan)
60691+static int dwc2_alloc_qh_dma_aligned_buf(struct dwc2_hsotg *hsotg,
60692+					 struct dwc2_qh *qh,
60693+					 struct dwc2_qtd *qtd,
60694+					 struct dwc2_host_chan *chan)
60695 {
60696+	u32 offset;
60697+
60698 	if (!hsotg->unaligned_cache ||
60699 	    chan->max_packet > DWC2_KMEM_UNALIGNED_BUF_SIZE)
60700 		return -ENOMEM;
60701@@ -2456,6 +2463,18 @@ static int dwc2_alloc_split_dma_aligned_buf(struct dwc2_hsotg *hsotg,
60702 			return -ENOMEM;
60703 	}
60704 
60705+	if (!chan->ep_is_in) {
60706+		if (qh->do_split) {
60707+			offset = chan->xfer_dma - qtd->urb->dma;
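+			/* a periodic SSPLIT carries at most 188 data bytes per microframe */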
60708+			memcpy(qh->dw_align_buf, (u8 *)qtd->urb->buf + offset,
60709+			       (chan->xfer_len > 188 ? 188 : chan->xfer_len));
60710+		} else {
60711+			offset = chan->xfer_dma - qtd->urb->dma;
60712+			memcpy(qh->dw_align_buf, (u8 *)qtd->urb->buf + offset,
60713+			       chan->xfer_len);
60714+		}
60715+	}
60716+
60717 	qh->dw_align_buf_dma = dma_map_single(hsotg->dev, qh->dw_align_buf,
60718 					      DWC2_KMEM_UNALIGNED_BUF_SIZE,
60719 					      DMA_FROM_DEVICE);
60720@@ -2660,10 +2679,10 @@ static int dwc2_assign_and_init_hc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
60721 	dwc2_hc_init_xfer(hsotg, chan, qtd);
60722 
60723 	/* For non-dword aligned buffers */
60724-	if (hsotg->params.host_dma && qh->do_split &&
60725-	    chan->ep_is_in && (chan->xfer_dma & 0x3)) {
60726+	if (hsotg->params.host_dma && (chan->xfer_dma & 0x3) &&
60727+	    chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
60728 		dev_vdbg(hsotg->dev, "Non-aligned buffer\n");
60729-		if (dwc2_alloc_split_dma_aligned_buf(hsotg, qh, chan)) {
60730+		if (dwc2_alloc_qh_dma_aligned_buf(hsotg, qh, qtd, chan)) {
60731 			dev_err(hsotg->dev,
60732 				"Failed to allocate memory to handle non-aligned buffer\n");
60733 			/* Add channel back to free list */
60734@@ -2677,8 +2696,8 @@ static int dwc2_assign_and_init_hc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
60735 		}
60736 	} else {
60737 		/*
60738-		 * We assume that DMA is always aligned in non-split
60739-		 * case or split out case. Warn if not.
60740+		 * We assume that DMA is always aligned in the other cases.
60741+		 * Warn if not.
60742 		 */
60743 		WARN_ON_ONCE(hsotg->params.host_dma &&
60744 			     (chan->xfer_dma & 0x3));
60745@@ -3173,6 +3192,9 @@ static void dwc2_conn_id_status_change(struct work_struct *work)
60746 
60747 	dev_dbg(hsotg->dev, "%s()\n", __func__);
60748 
60749+	if (!hsotg->ll_phy_enabled && dwc2_is_host_mode(hsotg))
60750+		dwc2_lowlevel_phy_enable(hsotg);
60751+
60752 	gotgctl = dwc2_readl(hsotg, GOTGCTL);
60753 	dev_dbg(hsotg->dev, "gotgctl=%0x\n", gotgctl);
60754 	dev_dbg(hsotg->dev, "gotgctl.b.conidsts=%d\n",
60755@@ -5074,10 +5096,6 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg)
60756 	hcd->has_tt = 1;
60757 
60758 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
60759-	if (!res) {
60760-		retval = -EINVAL;
60761-		goto error2;
60762-	}
60763 	hcd->rsrc_start = res->start;
60764 	hcd->rsrc_len = resource_size(res);
60765 
60766@@ -5228,6 +5246,13 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg)
60767 	if (!IS_ERR_OR_NULL(hsotg->uphy))
60768 		otg_set_host(hsotg->uphy->otg, &hcd->self);
60769 
60770+	/*
60771+	 * do not manage the PHY state in the HCD core, instead let the driver
60772+	 * handle this (for example if the PHY can only be turned on after a
60773+	 * specific event)
60774+	 */
60775+	hcd->skip_phy_initialization = 1;
60776+
60777 	/*
60778 	 * Finish generic HCD initialization and start the HCD. This function
60779 	 * allocates the DMA buffer pool, registers the USB bus, requests the
60780diff --git a/drivers/usb/dwc2/hcd_intr.c b/drivers/usb/dwc2/hcd_intr.c
60781index d5f4ec1b7..4989ec761 100644
60782--- a/drivers/usb/dwc2/hcd_intr.c
60783+++ b/drivers/usb/dwc2/hcd_intr.c
60784@@ -1180,7 +1180,10 @@ static void dwc2_update_urb_state_abn(struct dwc2_hsotg *hsotg,
60785 
60786 	if (urb->actual_length + xfer_length > urb->length) {
60787 		dev_warn(hsotg->dev, "%s(): trimming xfer length\n", __func__);
60788-		xfer_length = urb->length - urb->actual_length;
60789+		if (urb->length & 0x3)
60790+			xfer_length = 0;
60791+		else
60792+			xfer_length = urb->length - urb->actual_length;
60793 	}
60794 
60795 	urb->actual_length += xfer_length;
60796@@ -2065,8 +2068,6 @@ static void dwc2_hc_n_intr(struct dwc2_hsotg *hsotg, int chnum)
60797 			 hcint, hcintmsk, hcint & hcintmsk);
60798 	}
60799 
60800-	dwc2_writel(hsotg, hcint, HCINT(chnum));
60801-
60802 	/*
60803 	 * If we got an interrupt after someone called
60804 	 * dwc2_hcd_endpoint_disable() we don't want to crash below
60805@@ -2079,6 +2080,8 @@ static void dwc2_hc_n_intr(struct dwc2_hsotg *hsotg, int chnum)
60806 	chan->hcint = hcint;
60807 	hcint &= hcintmsk;
60808 
60809+	dwc2_writel(hsotg, hcint, HCINT(chnum));
60810+
60811 	/*
60812 	 * If the channel was halted due to a dequeue, the qtd list might
60813 	 * be empty or at least the first entry will not be the active qtd.
60814diff --git a/drivers/usb/dwc2/hcd_queue.c b/drivers/usb/dwc2/hcd_queue.c
60815index 94af71e98..9d2b92a37 100644
60816--- a/drivers/usb/dwc2/hcd_queue.c
60817+++ b/drivers/usb/dwc2/hcd_queue.c
60818@@ -59,7 +59,7 @@
60819 #define DWC2_UNRESERVE_DELAY (msecs_to_jiffies(5))
60820 
60821 /* If we get a NAK, wait this long before retrying */
60822-#define DWC2_RETRY_WAIT_DELAY (1 * NSEC_PER_MSEC)
60823+#define DWC2_RETRY_WAIT_DELAY (1 * 1E6L)
60824 
60825 /**
60826  * dwc2_periodic_channel_available() - Checks that a channel is available for a
60827@@ -730,8 +730,14 @@ static int dwc2_uframe_schedule_split(struct dwc2_hsotg *hsotg,
60828 	 * Note that this will tend to front-load the high speed schedule.
60829 	 * We may eventually want to try to avoid this by either considering
60830 	 * both schedules together or doing some sort of round robin.
60831+	 *
60832+	 * For an isochronous split OUT, start the schedule at 2 * DWC2_SLICES_PER_UFRAME
60833+	 * so the SSPLIT-begin OUT transaction is issued the way an EHCI controller would.
60834 	 */
60835-	ls_search_slice = 0;
60836+	if (qh->ep_type == USB_ENDPOINT_XFER_ISOC && !qh->ep_is_in)
60837+		ls_search_slice = 2 * DWC2_SLICES_PER_UFRAME;
60838+	else
60839+		ls_search_slice = 0;
60840 
60841 	while (ls_search_slice < DWC2_LS_SCHEDULE_SLICES) {
60842 		int start_s_uframe;
60843diff --git a/drivers/usb/dwc2/params.c b/drivers/usb/dwc2/params.c
60844index 267543c3d..195cca1fe 100644
60845--- a/drivers/usb/dwc2/params.c
60846+++ b/drivers/usb/dwc2/params.c
60847@@ -90,6 +90,8 @@ static void dwc2_set_rk_params(struct dwc2_hsotg *hsotg)
60848 	p->ahbcfg = GAHBCFG_HBSTLEN_INCR16 <<
60849 		GAHBCFG_HBSTLEN_SHIFT;
60850 	p->power_down = DWC2_POWER_DOWN_PARAM_NONE;
60851+	p->lpm = false;
60852+	p->g_dma_desc = false;
60853 }
60854 
60855 static void dwc2_set_ltq_params(struct dwc2_hsotg *hsotg)
60856@@ -177,7 +179,10 @@ static void dwc2_set_stm32mp15_fsotg_params(struct dwc2_hsotg *hsotg)
60857 	p->i2c_enable = false;
60858 	p->activate_stm_fs_transceiver = true;
60859 	p->activate_stm_id_vb_detection = true;
60860+	p->ahbcfg = GAHBCFG_HBSTLEN_INCR16 << GAHBCFG_HBSTLEN_SHIFT;
60861 	p->power_down = DWC2_POWER_DOWN_PARAM_NONE;
60862+	p->host_support_fs_ls_low_power = true;
60863+	p->host_ls_low_power_phy_clk = true;
60864 }
60865 
60866 static void dwc2_set_stm32mp15_hsotg_params(struct dwc2_hsotg *hsotg)
60867@@ -189,7 +194,12 @@ static void dwc2_set_stm32mp15_hsotg_params(struct dwc2_hsotg *hsotg)
60868 	p->host_rx_fifo_size = 440;
60869 	p->host_nperio_tx_fifo_size = 256;
60870 	p->host_perio_tx_fifo_size = 256;
60871+	p->ahbcfg = GAHBCFG_HBSTLEN_INCR16 << GAHBCFG_HBSTLEN_SHIFT;
60872 	p->power_down = DWC2_POWER_DOWN_PARAM_NONE;
60873+	p->lpm = false;
60874+	p->lpm_clock_gating = false;
60875+	p->besl = false;
60876+	p->hird_threshold_en = false;
60877 }
60878 
60879 const struct of_device_id dwc2_of_match_table[] = {
60880diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c
60881index 8851db646..f658e3cdc 100644
60882--- a/drivers/usb/dwc2/platform.c
60883+++ b/drivers/usb/dwc2/platform.c
60884@@ -128,27 +128,11 @@ static void __dwc2_disable_regulators(void *data)
60885 	regulator_bulk_disable(ARRAY_SIZE(hsotg->supplies), hsotg->supplies);
60886 }
60887 
60888-static int __dwc2_lowlevel_hw_enable(struct dwc2_hsotg *hsotg)
60889+static int __dwc2_lowlevel_phy_enable(struct dwc2_hsotg *hsotg)
60890 {
60891 	struct platform_device *pdev = to_platform_device(hsotg->dev);
60892 	int ret;
60893 
60894-	ret = regulator_bulk_enable(ARRAY_SIZE(hsotg->supplies),
60895-				    hsotg->supplies);
60896-	if (ret)
60897-		return ret;
60898-
60899-	ret = devm_add_action_or_reset(&pdev->dev,
60900-				       __dwc2_disable_regulators, hsotg);
60901-	if (ret)
60902-		return ret;
60903-
60904-	if (hsotg->clk) {
60905-		ret = clk_prepare_enable(hsotg->clk);
60906-		if (ret)
60907-			return ret;
60908-	}
60909-
60910 	if (hsotg->uphy) {
60911 		ret = usb_phy_init(hsotg->uphy);
60912 	} else if (hsotg->plat && hsotg->plat->phy_init) {
60913@@ -163,22 +147,22 @@ static int __dwc2_lowlevel_hw_enable(struct dwc2_hsotg *hsotg)
60914 }
60915 
60916 /**
60917- * dwc2_lowlevel_hw_enable - enable platform lowlevel hw resources
60918+ * dwc2_lowlevel_phy_enable - enable lowlevel PHY resources
60919  * @hsotg: The driver state
60920  *
60921  * A wrapper for platform code responsible for controlling
60922- * low-level USB platform resources (phy, clock, regulators)
60923+ * low-level PHY resources.
60924  */
60925-int dwc2_lowlevel_hw_enable(struct dwc2_hsotg *hsotg)
60926+int dwc2_lowlevel_phy_enable(struct dwc2_hsotg *hsotg)
60927 {
60928-	int ret = __dwc2_lowlevel_hw_enable(hsotg);
60929+	int ret = __dwc2_lowlevel_phy_enable(hsotg);
60930 
60931 	if (ret == 0)
60932-		hsotg->ll_hw_enabled = true;
60933+		hsotg->ll_phy_enabled = true;
60934 	return ret;
60935 }
60936 
60937-static int __dwc2_lowlevel_hw_disable(struct dwc2_hsotg *hsotg)
60938+static int __dwc2_lowlevel_phy_disable(struct dwc2_hsotg *hsotg)
60939 {
60940 	struct platform_device *pdev = to_platform_device(hsotg->dev);
60941 	int ret = 0;
60942@@ -192,11 +176,78 @@ static int __dwc2_lowlevel_hw_disable(struct dwc2_hsotg *hsotg)
60943 		if (ret == 0)
60944 			ret = phy_exit(hsotg->phy);
60945 	}
60946+
60947+	return ret;
60948+}
60949+
60950+/**
60951+ * dwc2_lowlevel_phy_disable - disable lowlevel PHY resources
60952+ * @hsotg: The driver state
60953+ *
60954+ * A wrapper for platform code responsible for controlling
60955+ * low-level PHY platform resources.
60956+ */
60957+int dwc2_lowlevel_phy_disable(struct dwc2_hsotg *hsotg)
60958+{
60959+	int ret = __dwc2_lowlevel_phy_disable(hsotg);
60960+
60961+	if (ret == 0)
60962+		hsotg->ll_phy_enabled = false;
60963+	return ret;
60964+}
60965+
60966+static int __dwc2_lowlevel_hw_enable(struct dwc2_hsotg *hsotg)
60967+{
60968+	struct platform_device *pdev = to_platform_device(hsotg->dev);
60969+	int ret;
60970+
60971+	ret = regulator_bulk_enable(ARRAY_SIZE(hsotg->supplies),
60972+				    hsotg->supplies);
60973+	if (ret)
60974+		return ret;
60975+
60976+	ret = devm_add_action_or_reset(&pdev->dev,
60977+				       __dwc2_disable_regulators, hsotg);
60978+	if (ret)
60979+		return ret;
60980+
60981+	ret = clk_bulk_prepare_enable(hsotg->num_clks, hsotg->clks);
60982 	if (ret)
60983 		return ret;
60984 
60985-	if (hsotg->clk)
60986-		clk_disable_unprepare(hsotg->clk);
60987+	if (!hsotg->ll_phy_enabled)
60988+		ret = dwc2_lowlevel_phy_enable(hsotg);
60989+
60990+	return ret;
60991+}
60992+
60993+/**
60994+ * dwc2_lowlevel_hw_enable - enable platform lowlevel hw resources
60995+ * @hsotg: The driver state
60996+ *
60997+ * A wrapper for platform code responsible for controlling
60998+ * low-level USB platform resources (phy, clock, regulators)
60999+ */
61000+int dwc2_lowlevel_hw_enable(struct dwc2_hsotg *hsotg)
61001+{
61002+	int ret = __dwc2_lowlevel_hw_enable(hsotg);
61003+
61004+	if (ret == 0)
61005+		hsotg->ll_hw_enabled = true;
61006+	return ret;
61007+}
61008+
61009+static int __dwc2_lowlevel_hw_disable(struct dwc2_hsotg *hsotg)
61010+{
61011+	int ret = 0;
61012+
61013+	if (hsotg->ll_phy_enabled)
61014+		ret = dwc2_lowlevel_phy_disable(hsotg);
61015+
61016+	if (ret)
61017+		return ret;
61018+
61019+	clk_bulk_disable_unprepare(hsotg->num_clks, hsotg->clks);
61020 
61021 	return 0;
61022 }
61023@@ -281,10 +332,18 @@ static int dwc2_lowlevel_hw_init(struct dwc2_hsotg *hsotg)
61024 	hsotg->plat = dev_get_platdata(hsotg->dev);
61025 
61026 	/* Clock */
61027-	hsotg->clk = devm_clk_get_optional(hsotg->dev, "otg");
61028-	if (IS_ERR(hsotg->clk)) {
61029-		dev_err(hsotg->dev, "cannot get otg clock\n");
61030-		return PTR_ERR(hsotg->clk);
61031+	if (hsotg->dev->of_node) {
61032+		ret = devm_clk_bulk_get_all(hsotg->dev, &hsotg->clks);
61033+		if (ret == -EPROBE_DEFER)
61034+			return ret;
61035+		/*
61036+		 * Clocks are optional, but new DT platforms should support all
61037+		 * clocks as required by the DT-binding.
61038+		 */
61039+		if (ret < 0)
61040+			hsotg->num_clks = 0;
61041+		else
61042+			hsotg->num_clks = ret;
61043 	}
61044 
61045 	/* Regulators */
61046@@ -328,6 +387,9 @@ static int dwc2_driver_remove(struct platform_device *dev)
61047 	if (hsotg->params.activate_stm_id_vb_detection)
61048 		regulator_disable(hsotg->usb33d);
61049 
61050+	pm_runtime_put_sync(hsotg->dev);
61051+	pm_runtime_disable(hsotg->dev);
61052+
61053 	if (hsotg->ll_hw_enabled)
61054 		dwc2_lowlevel_hw_disable(hsotg);
61055 
61056@@ -479,6 +541,11 @@ static int dwc2_driver_probe(struct platform_device *dev)
61057 
61058 	hsotg->needs_byte_swap = dwc2_check_core_endianness(hsotg);
61059 
61060+	pm_runtime_enable(hsotg->dev);
61061+	retval = pm_runtime_get_sync(hsotg->dev);
61062+	if (retval < 0)
61063+		goto error;
61064+
61065 	retval = dwc2_get_dr_mode(hsotg);
61066 	if (retval)
61067 		goto error;
61068@@ -542,9 +609,6 @@ static int dwc2_driver_probe(struct platform_device *dev)
61069 		ggpio |= GGPIO_STM32_OTG_GCCFG_IDEN;
61070 		ggpio |= GGPIO_STM32_OTG_GCCFG_VBDEN;
61071 		dwc2_writel(hsotg, ggpio, GGPIO);
61072-
61073-		/* ID/VBUS detection startup time */
61074-		usleep_range(5000, 7000);
61075 	}
61076 
61077 	retval = dwc2_drd_init(hsotg);
61078@@ -597,6 +661,11 @@ static int dwc2_driver_probe(struct platform_device *dev)
61079 	if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL)
61080 		dwc2_lowlevel_hw_disable(hsotg);
61081 
61082+	if (hsotg->dr_mode == USB_DR_MODE_OTG && dwc2_is_device_mode(hsotg)) {
61083+		if (hsotg->ll_phy_enabled)
61084+			dwc2_lowlevel_phy_disable(hsotg);
61085+	}
61086+
61087 #if IS_ENABLED(CONFIG_USB_DWC2_PERIPHERAL) || \
61088 	IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)
61089 	/* Postponed adding a new gadget to the udc class driver list */
61090@@ -625,6 +694,8 @@ static int dwc2_driver_probe(struct platform_device *dev)
61091 	if (hsotg->params.activate_stm_id_vb_detection)
61092 		regulator_disable(hsotg->usb33d);
61093 error:
61094+	pm_runtime_put_sync(hsotg->dev);
61095+	pm_runtime_disable(hsotg->dev);
61096 	if (hsotg->dr_mode != USB_DR_MODE_PERIPHERAL)
61097 		dwc2_lowlevel_hw_disable(hsotg);
61098 	return retval;
61099@@ -684,6 +755,7 @@ static int __maybe_unused dwc2_suspend(struct device *dev)
61100 static int __maybe_unused dwc2_resume(struct device *dev)
61101 {
61102 	struct dwc2_hsotg *dwc2 = dev_get_drvdata(dev);
61103+	unsigned long flags;
61104 	int ret = 0;
61105 
61106 	if (dwc2->phy_off_for_suspend && dwc2->ll_hw_enabled) {
61107@@ -723,6 +795,17 @@ static int __maybe_unused dwc2_resume(struct device *dev)
61108 
61109 	dwc2_drd_resume(dwc2);
61110 
61111+	/* Stop the HCD if dr_mode is host and the power domain was powered off during suspend */
61112+	if (dwc2->op_state == OTG_STATE_A_HOST && dwc2_is_device_mode(dwc2)) {
61113+		spin_lock_irqsave(&dwc2->lock, flags);
61114+		dwc2_hcd_disconnect(dwc2, true);
61115+		dwc2->op_state = OTG_STATE_B_PERIPHERAL;
61116+		dwc2->lx_state = DWC2_L3;
61117+		if (!dwc2->driver)
61118+			dwc2_hsotg_core_init_disconnected(dwc2, false);
61119+		spin_unlock_irqrestore(&dwc2->lock, flags);
61120+	}
61121+
61122 	if (dwc2_is_device_mode(dwc2))
61123 		ret = dwc2_hsotg_resume(dwc2);
61124 
61125diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
61126index d73f624ed..7078a4912 100644
61127--- a/drivers/usb/dwc3/core.c
61128+++ b/drivers/usb/dwc3/core.c
61129@@ -119,26 +119,35 @@ static void __dwc3_set_mode(struct work_struct *work)
61130 	struct dwc3 *dwc = work_to_dwc(work);
61131 	unsigned long flags;
61132 	int ret;
61133+	int retries = 1000;
61134 	u32 reg;
61135-	u32 desired_dr_role;
61136 
61137 	mutex_lock(&dwc->mutex);
61138-	spin_lock_irqsave(&dwc->lock, flags);
61139-	desired_dr_role = dwc->desired_dr_role;
61140-	spin_unlock_irqrestore(&dwc->lock, flags);
61141 
61142 	pm_runtime_get_sync(dwc->dev);
61143 
61144+#if defined(CONFIG_ARCH_ROCKCHIP) && defined(CONFIG_NO_GKI)
61145+	if (dwc->desired_role_sw_mode == USB_DR_MODE_PERIPHERAL &&
61146+	    dwc->desired_role_sw_mode != dwc->current_role_sw_mode)
61147+		pm_runtime_get(dwc->dev);
61148+	else if ((dwc->desired_role_sw_mode == USB_DR_MODE_UNKNOWN ||
61149+		  dwc->desired_role_sw_mode == USB_DR_MODE_HOST) &&
61150+		  dwc->current_role_sw_mode == USB_DR_MODE_PERIPHERAL)
61151+		pm_runtime_put(dwc->dev);
61152+
61153+	dwc->current_role_sw_mode = dwc->desired_role_sw_mode;
61154+#endif
61155+
61156 	if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_OTG)
61157 		dwc3_otg_update(dwc, 0);
61158 
61159-	if (!desired_dr_role)
61160+	if (!dwc->desired_dr_role)
61161 		goto out;
61162 
61163-	if (desired_dr_role == dwc->current_dr_role)
61164+	if (dwc->desired_dr_role == dwc->current_dr_role)
61165 		goto out;
61166 
61167-	if (desired_dr_role == DWC3_GCTL_PRTCAP_OTG && dwc->edev)
61168+	if (dwc->desired_dr_role == DWC3_GCTL_PRTCAP_OTG && dwc->edev)
61169 		goto out;
61170 
61171 	switch (dwc->current_dr_role) {
61172@@ -160,13 +169,8 @@ static void __dwc3_set_mode(struct work_struct *work)
61173 		break;
61174 	}
61175 
61176-	/*
61177-	 * When current_dr_role is not set, there's no role switching.
61178-	 * Only perform GCTL.CoreSoftReset when there's DRD role switching.
61179-	 */
61180-	if (dwc->current_dr_role && ((DWC3_IP_IS(DWC3) ||
61181-			DWC3_VER_IS_PRIOR(DWC31, 190A)) &&
61182-			desired_dr_role != DWC3_GCTL_PRTCAP_OTG)) {
61183+	/* For DRD host or device mode only */
61184+	if (dwc->desired_dr_role != DWC3_GCTL_PRTCAP_OTG) {
61185 		reg = dwc3_readl(dwc->regs, DWC3_GCTL);
61186 		reg |= DWC3_GCTL_CORESOFTRESET;
61187 		dwc3_writel(dwc->regs, DWC3_GCTL, reg);
61188@@ -186,11 +190,11 @@ static void __dwc3_set_mode(struct work_struct *work)
61189 
61190 	spin_lock_irqsave(&dwc->lock, flags);
61191 
61192-	dwc3_set_prtcap(dwc, desired_dr_role);
61193+	dwc3_set_prtcap(dwc, dwc->desired_dr_role);
61194 
61195 	spin_unlock_irqrestore(&dwc->lock, flags);
61196 
61197-	switch (desired_dr_role) {
61198+	switch (dwc->desired_dr_role) {
61199 	case DWC3_GCTL_PRTCAP_HOST:
61200 		ret = dwc3_host_init(dwc);
61201 		if (ret) {
61202@@ -208,7 +212,26 @@ static void __dwc3_set_mode(struct work_struct *work)
61203 		}
61204 		break;
61205 	case DWC3_GCTL_PRTCAP_DEVICE:
61206-		dwc3_core_soft_reset(dwc);
61207+		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
61208+		reg |= DWC3_DCTL_CSFTRST;
61209+		dwc3_writel(dwc->regs, DWC3_DCTL, reg);
61210+
61211+		if (DWC3_VER_IS_WITHIN(DWC31, 190A, ANY) || DWC3_IP_IS(DWC32))
61212+			retries = 10;
61213+
61214+		do {
61215+			reg = dwc3_readl(dwc->regs, DWC3_DCTL);
61216+			if (!(reg & DWC3_DCTL_CSFTRST))
61217+				goto done;
61218+
61219+			if (DWC3_VER_IS_WITHIN(DWC31, 190A, ANY) || DWC3_IP_IS(DWC32))
61220+				msleep(20);
61221+			else
61222+				udelay(1);
61223+		} while (--retries);
61224+done:
61225+		if (DWC3_VER_IS_WITHIN(DWC31, ANY, 180A))
61226+			msleep(50);
61227 
61228 		dwc3_event_buffers_setup(dwc);
61229 
61230@@ -267,10 +290,23 @@ u32 dwc3_core_fifo_space(struct dwc3_ep *dep, u8 type)
61231  * dwc3_core_soft_reset - Issues core soft reset and PHY reset
61232  * @dwc: pointer to our context structure
61233  */
61234-int dwc3_core_soft_reset(struct dwc3 *dwc)
61235+static int dwc3_core_soft_reset(struct dwc3 *dwc)
61236 {
61237 	u32		reg;
61238 	int		retries = 1000;
61239+	int		ret;
61240+
61241+	usb_phy_init(dwc->usb2_phy);
61242+	usb_phy_init(dwc->usb3_phy);
61243+	ret = phy_init(dwc->usb2_generic_phy);
61244+	if (ret < 0)
61245+		return ret;
61246+
61247+	ret = phy_init(dwc->usb3_generic_phy);
61248+	if (ret < 0) {
61249+		phy_exit(dwc->usb2_generic_phy);
61250+		return ret;
61251+	}
61252 
61253 	/*
61254 	 * We're resetting only the device side because, if we're in host mode,
61255@@ -282,8 +318,7 @@ int dwc3_core_soft_reset(struct dwc3 *dwc)
61256 
61257 	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
61258 	reg |= DWC3_DCTL_CSFTRST;
61259-	reg &= ~DWC3_DCTL_RUN_STOP;
61260-	dwc3_gadget_dctl_write_safe(dwc, reg);
61261+	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
61262 
61263 	/*
61264 	 * For DWC_usb31 controller 1.90a and later, the DCTL.CSFRST bit
61265@@ -305,6 +340,9 @@ int dwc3_core_soft_reset(struct dwc3 *dwc)
61266 			udelay(1);
61267 	} while (--retries);
61268 
61269+	phy_exit(dwc->usb3_generic_phy);
61270+	phy_exit(dwc->usb2_generic_phy);
61271+
61272 	return -ETIMEDOUT;
61273 
61274 done:
61275@@ -562,6 +600,9 @@ static void dwc3_cache_hwparams(struct dwc3 *dwc)
61276 	parms->hwparams6 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS6);
61277 	parms->hwparams7 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS7);
61278 	parms->hwparams8 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS8);
61279+
61280+	if (DWC3_IP_IS(DWC32))
61281+		parms->hwparams9 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS9);
61282 }
61283 
61284 static int dwc3_core_ulpi_init(struct dwc3 *dwc)
61285@@ -730,16 +771,15 @@ static void dwc3_core_exit(struct dwc3 *dwc)
61286 {
61287 	dwc3_event_buffers_cleanup(dwc);
61288 
61289-	usb_phy_set_suspend(dwc->usb2_phy, 1);
61290-	usb_phy_set_suspend(dwc->usb3_phy, 1);
61291-	phy_power_off(dwc->usb2_generic_phy);
61292-	phy_power_off(dwc->usb3_generic_phy);
61293-
61294 	usb_phy_shutdown(dwc->usb2_phy);
61295 	usb_phy_shutdown(dwc->usb3_phy);
61296 	phy_exit(dwc->usb2_generic_phy);
61297 	phy_exit(dwc->usb3_generic_phy);
61298 
61299+	usb_phy_set_suspend(dwc->usb2_phy, 1);
61300+	usb_phy_set_suspend(dwc->usb3_phy, 1);
61301+	phy_power_off(dwc->usb2_generic_phy);
61302+	phy_power_off(dwc->usb3_generic_phy);
61303 	clk_bulk_disable_unprepare(dwc->num_clks, dwc->clks);
61304 	reset_control_assert(dwc->reset);
61305 }
61306@@ -960,13 +1000,8 @@ static int dwc3_core_init(struct dwc3 *dwc)
61307 
61308 	if (!dwc->ulpi_ready) {
61309 		ret = dwc3_core_ulpi_init(dwc);
61310-		if (ret) {
61311-			if (ret == -ETIMEDOUT) {
61312-				dwc3_core_soft_reset(dwc);
61313-				ret = -EPROBE_DEFER;
61314-			}
61315+		if (ret)
61316 			goto err0;
61317-		}
61318 		dwc->ulpi_ready = true;
61319 	}
61320 
61321@@ -977,21 +1012,9 @@ static int dwc3_core_init(struct dwc3 *dwc)
61322 		dwc->phys_ready = true;
61323 	}
61324 
61325-	usb_phy_init(dwc->usb2_phy);
61326-	usb_phy_init(dwc->usb3_phy);
61327-	ret = phy_init(dwc->usb2_generic_phy);
61328-	if (ret < 0)
61329-		goto err0a;
61330-
61331-	ret = phy_init(dwc->usb3_generic_phy);
61332-	if (ret < 0) {
61333-		phy_exit(dwc->usb2_generic_phy);
61334-		goto err0a;
61335-	}
61336-
61337 	ret = dwc3_core_soft_reset(dwc);
61338 	if (ret)
61339-		goto err1;
61340+		goto err0a;
61341 
61342 	if (hw_mode == DWC3_GHWPARAMS0_MODE_DRD &&
61343 	    !DWC3_VER_IS_WITHIN(DWC3, ANY, 194A)) {
61344@@ -1057,12 +1080,25 @@ static int dwc3_core_init(struct dwc3 *dwc)
61345 		if (!DWC3_VER_IS_PRIOR(DWC3, 290A))
61346 			reg |= DWC3_GUCTL1_DEV_L1_EXIT_BY_HW;
61347 
61348+		/*
61349+		 * Decouple USB 2.0 L1 & L2 events which will allow for
61350+		 * the gadget driver to only receive U3/L2 suspend & wakeup
61351+		 * events and prevent the more frequent L1 LPM transitions
61352+		 * from interrupting the driver.
61353+		 */
61354+		if (!DWC3_VER_IS_PRIOR(DWC3, 300A))
61355+			reg |= DWC3_GUCTL1_DEV_DECOUPLE_L1L2_EVT;
61356+
61357 		if (dwc->dis_tx_ipgap_linecheck_quirk)
61358 			reg |= DWC3_GUCTL1_TX_IPGAP_LINECHECK_DIS;
61359 
61360 		if (dwc->parkmode_disable_ss_quirk)
61361 			reg |= DWC3_GUCTL1_PARKMODE_DISABLE_SS;
61362 
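+		/*
+		 * Gadget capped at high/full speed: clock the core from the
+		 * USB 2.0 PHY clock so the USB 3.0 PIPE clock is not needed
+		 * (assumed from the FORCE_20_CLK_FOR_30_CLK bit name).
+		 */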
61363+		if (dwc->maximum_speed == USB_SPEED_HIGH ||
61364+		    dwc->maximum_speed == USB_SPEED_FULL)
61365+			reg |= DWC3_GUCTL1_DEV_FORCE_20_CLK_FOR_30_CLK;
61366+
61367 		dwc3_writel(dwc->regs, DWC3_GUCTL1, reg);
61368 	}
61369 
61370@@ -1162,11 +1198,8 @@ static int dwc3_core_get_phy(struct dwc3 *dwc)
61371 		ret = PTR_ERR(dwc->usb2_phy);
61372 		if (ret == -ENXIO || ret == -ENODEV) {
61373 			dwc->usb2_phy = NULL;
61374-		} else if (ret == -EPROBE_DEFER) {
61375-			return ret;
61376 		} else {
61377-			dev_err(dev, "no usb2 phy configured\n");
61378-			return ret;
61379+			return dev_err_probe(dev, ret, "no usb2 phy configured\n");
61380 		}
61381 	}
61382 
61383@@ -1174,11 +1207,8 @@ static int dwc3_core_get_phy(struct dwc3 *dwc)
61384 		ret = PTR_ERR(dwc->usb3_phy);
61385 		if (ret == -ENXIO || ret == -ENODEV) {
61386 			dwc->usb3_phy = NULL;
61387-		} else if (ret == -EPROBE_DEFER) {
61388-			return ret;
61389 		} else {
61390-			dev_err(dev, "no usb3 phy configured\n");
61391-			return ret;
61392+			return dev_err_probe(dev, ret, "no usb3 phy configured\n");
61393 		}
61394 	}
61395 
61396@@ -1187,11 +1217,8 @@ static int dwc3_core_get_phy(struct dwc3 *dwc)
61397 		ret = PTR_ERR(dwc->usb2_generic_phy);
61398 		if (ret == -ENOSYS || ret == -ENODEV) {
61399 			dwc->usb2_generic_phy = NULL;
61400-		} else if (ret == -EPROBE_DEFER) {
61401-			return ret;
61402 		} else {
61403-			dev_err(dev, "no usb2 phy configured\n");
61404-			return ret;
61405+			return dev_err_probe(dev, ret, "no usb2 phy configured\n");
61406 		}
61407 	}
61408 
61409@@ -1200,11 +1227,8 @@ static int dwc3_core_get_phy(struct dwc3 *dwc)
61410 		ret = PTR_ERR(dwc->usb3_generic_phy);
61411 		if (ret == -ENOSYS || ret == -ENODEV) {
61412 			dwc->usb3_generic_phy = NULL;
61413-		} else if (ret == -EPROBE_DEFER) {
61414-			return ret;
61415 		} else {
61416-			dev_err(dev, "no usb3 phy configured\n");
61417-			return ret;
61418+			return dev_err_probe(dev, ret, "no usb3 phy configured\n");
61419 		}
61420 	}
61421 
61422@@ -1226,11 +1250,8 @@ static int dwc3_core_init_mode(struct dwc3 *dwc)
61423 		phy_set_mode(dwc->usb3_generic_phy, PHY_MODE_USB_DEVICE);
61424 
61425 		ret = dwc3_gadget_init(dwc);
61426-		if (ret) {
61427-			if (ret != -EPROBE_DEFER)
61428-				dev_err(dev, "failed to initialize gadget\n");
61429-			return ret;
61430-		}
61431+		if (ret)
61432+			return dev_err_probe(dev, ret, "failed to initialize gadget\n");
61433 		break;
61434 	case USB_DR_MODE_HOST:
61435 		dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_HOST);
61436@@ -1241,20 +1262,14 @@ static int dwc3_core_init_mode(struct dwc3 *dwc)
61437 		phy_set_mode(dwc->usb3_generic_phy, PHY_MODE_USB_HOST);
61438 
61439 		ret = dwc3_host_init(dwc);
61440-		if (ret) {
61441-			if (ret != -EPROBE_DEFER)
61442-				dev_err(dev, "failed to initialize host\n");
61443-			return ret;
61444-		}
61445+		if (ret)
61446+			return dev_err_probe(dev, ret, "failed to initialize host\n");
61447 		break;
61448 	case USB_DR_MODE_OTG:
61449 		INIT_WORK(&dwc->drd_work, __dwc3_set_mode);
61450 		ret = dwc3_drd_init(dwc);
61451-		if (ret) {
61452-			if (ret != -EPROBE_DEFER)
61453-				dev_err(dev, "failed to initialize dual-role\n");
61454-			return ret;
61455-		}
61456+		if (ret)
61457+			return dev_err_probe(dev, ret, "failed to initialize dual-role\n");
61458 		break;
61459 	default:
61460 		dev_err(dev, "Unsupported mode of operation %d\n", dwc->dr_mode);
61461@@ -1291,10 +1306,13 @@ static void dwc3_get_properties(struct dwc3 *dwc)
61462 	u8			lpm_nyet_threshold;
61463 	u8			tx_de_emphasis;
61464 	u8			hird_threshold;
61465-	u8			rx_thr_num_pkt_prd = 0;
61466-	u8			rx_max_burst_prd = 0;
61467-	u8			tx_thr_num_pkt_prd = 0;
61468-	u8			tx_max_burst_prd = 0;
61469+	u8			rx_thr_num_pkt_prd;
61470+	u8			rx_max_burst_prd;
61471+	u8			tx_thr_num_pkt_prd;
61472+	u8			tx_max_burst_prd;
61473+	u8			tx_fifo_resize_max_num;
61474+	const char		*usb_psy_name;
61475+	int			ret;
61476 
61477 	/* default to highest possible threshold */
61478 	lpm_nyet_threshold = 0xf;
61479@@ -1308,7 +1326,15 @@ static void dwc3_get_properties(struct dwc3 *dwc)
61480 	 */
61481 	hird_threshold = 12;
61482 
61483+	/*
61484+	 * default to a TXFIFO size large enough to fit 6 max packets.  This
61485+	 * allows for systems with larger bus latencies to have some headroom
61486+	 * for endpoints that have a large bMaxBurst value.
61487+	 */
61488+	tx_fifo_resize_max_num = 6;
61489+
61490 	dwc->maximum_speed = usb_get_maximum_speed(dev);
61491+	dwc->max_ssp_rate = usb_get_maximum_ssp_rate(dev);
61492 	dwc->dr_mode = usb_get_dr_mode(dev);
61493 	dwc->hsphy_mode = of_usb_get_phy_mode(dev->of_node);
61494 
61495@@ -1319,6 +1345,13 @@ static void dwc3_get_properties(struct dwc3 *dwc)
61496 	else
61497 		dwc->sysdev = dwc->dev;
61498 
61499+	ret = device_property_read_string(dev, "usb-psy-name", &usb_psy_name);
61500+	if (ret >= 0) {
61501+		dwc->usb_psy = power_supply_get_by_name(usb_psy_name);
61502+		if (!dwc->usb_psy)
61503+			dev_err(dev, "couldn't get usb power supply\n");
61504+	}
61505+
61506 	dwc->has_lpm_erratum = device_property_read_bool(dev,
61507 				"snps,has-lpm-erratum");
61508 	device_property_read_u8(dev, "snps,lpm-nyet-threshold",
61509@@ -1343,6 +1376,11 @@ static void dwc3_get_properties(struct dwc3 *dwc)
61510 				&tx_thr_num_pkt_prd);
61511 	device_property_read_u8(dev, "snps,tx-max-burst-prd",
61512 				&tx_max_burst_prd);
61513+	dwc->do_fifo_resize = device_property_read_bool(dev,
61514+							"tx-fifo-resize");
61515+	if (dwc->do_fifo_resize)
61516+		device_property_read_u8(dev, "tx-fifo-max-num",
61517+					&tx_fifo_resize_max_num);
61518 
61519 	dwc->disable_scramble_quirk = device_property_read_bool(dev,
61520 				"snps,disable_scramble_quirk");
61521@@ -1408,6 +1446,8 @@ static void dwc3_get_properties(struct dwc3 *dwc)
61522 	dwc->tx_max_burst_prd = tx_max_burst_prd;
61523 
61524 	dwc->imod_interval = 0;
61525+
61526+	dwc->tx_fifo_resize_max_num = tx_fifo_resize_max_num;
61527 }
61528 
61529 /* check whether the core supports IMOD */
61530@@ -1482,21 +1522,59 @@ static void dwc3_check_params(struct dwc3 *dwc)
61531 		}
61532 		break;
61533 	}
61534+
61535+	/*
61536+	 * Currently the controller does not have visibility into the HW
61537+	 * parameter to determine the maximum number of lanes the HW supports.
61538+	 * If the number of lanes is not specified in the device property, then
61539+	 * set the default to support dual-lane for DWC_usb32 and single-lane
61540+	 * for DWC_usb31 for super-speed-plus.
61541+	 */
61542+	if (dwc->maximum_speed == USB_SPEED_SUPER_PLUS) {
61543+		switch (dwc->max_ssp_rate) {
61544+		case USB_SSP_GEN_2x1:
61545+			if (hwparam_gen == DWC3_GHWPARAMS3_SSPHY_IFC_GEN1)
61546+				dev_warn(dev, "UDC only supports Gen 1\n");
61547+			break;
61548+		case USB_SSP_GEN_1x2:
61549+		case USB_SSP_GEN_2x2:
61550+			if (DWC3_IP_IS(DWC31))
61551+				dev_warn(dev, "UDC only supports single lane\n");
61552+			break;
61553+		case USB_SSP_GEN_UNKNOWN:
61554+		default:
61555+			switch (hwparam_gen) {
61556+			case DWC3_GHWPARAMS3_SSPHY_IFC_GEN2:
61557+				if (DWC3_IP_IS(DWC32))
61558+					dwc->max_ssp_rate = USB_SSP_GEN_2x2;
61559+				else
61560+					dwc->max_ssp_rate = USB_SSP_GEN_2x1;
61561+				break;
61562+			case DWC3_GHWPARAMS3_SSPHY_IFC_GEN1:
61563+				if (DWC3_IP_IS(DWC32))
61564+					dwc->max_ssp_rate = USB_SSP_GEN_1x2;
61565+				break;
61566+			}
61567+			break;
61568+		}
61569+	}
61570 }
61571 
61572 static int dwc3_probe(struct platform_device *pdev)
61573 {
61574 	struct device		*dev = &pdev->dev;
61575 	struct resource		*res, dwc_res;
61576+	struct dwc3_vendor	*vdwc;
61577 	struct dwc3		*dwc;
61578 
61579 	int			ret;
61580 
61581 	void __iomem		*regs;
61582 
61583-	dwc = devm_kzalloc(dev, sizeof(*dwc), GFP_KERNEL);
61584-	if (!dwc)
61585+	vdwc = devm_kzalloc(dev, sizeof(*vdwc), GFP_KERNEL);
61586+	if (!vdwc)
61587 		return -ENOMEM;
61588+	dwc = &vdwc->dwc;
61589 
61590 	dwc->dev = dev;
61591 
61592@@ -1528,7 +1606,7 @@ static int dwc3_probe(struct platform_device *pdev)
61593 
61594 	dwc3_get_properties(dwc);
61595 
61596-	dwc->reset = devm_reset_control_array_get(dev, true, true);
61597+	dwc->reset = devm_reset_control_array_get_optional_shared(dev);
61598 	if (IS_ERR(dwc->reset))
61599 		return PTR_ERR(dwc->reset);
61600 
61601@@ -1594,8 +1672,7 @@ static int dwc3_probe(struct platform_device *pdev)
61602 
61603 	ret = dwc3_core_init(dwc);
61604 	if (ret) {
61605-		if (ret != -EPROBE_DEFER)
61606-			dev_err(dev, "failed to initialize core: %d\n", ret);
61607+		dev_err_probe(dev, ret, "failed to initialize core\n");
61608 		goto err4;
61609 	}
61610 
61611@@ -1606,24 +1683,36 @@ static int dwc3_probe(struct platform_device *pdev)
61612 	if (ret)
61613 		goto err5;
61614 
61615-	pm_runtime_put(dev);
61616-
61617+#if 0
61618+	if (dwc->dr_mode == USB_DR_MODE_OTG &&
61619+	    of_device_is_compatible(dev->parent->of_node,
61620+				    "rockchip,rk3399-dwc3")) {
61621+#if defined(CONFIG_ARCH_ROCKCHIP) && defined(CONFIG_NO_GKI)
61622+		pm_runtime_set_autosuspend_delay(dev, 100);
61623+#endif
61624+		pm_runtime_allow(dev);
61625+		pm_runtime_put_sync_suspend(dev);
61626+	} else {
61627+		pm_runtime_put(dev);
61628+	}
61629+#endif
61630+	pm_runtime_put(dev);
61631 	return 0;
61632 
61633 err5:
61634 	dwc3_debugfs_exit(dwc);
61635 	dwc3_event_buffers_cleanup(dwc);
61636 
61637-	usb_phy_set_suspend(dwc->usb2_phy, 1);
61638-	usb_phy_set_suspend(dwc->usb3_phy, 1);
61639-	phy_power_off(dwc->usb2_generic_phy);
61640-	phy_power_off(dwc->usb3_generic_phy);
61641-
61642 	usb_phy_shutdown(dwc->usb2_phy);
61643 	usb_phy_shutdown(dwc->usb3_phy);
61644 	phy_exit(dwc->usb2_generic_phy);
61645 	phy_exit(dwc->usb3_generic_phy);
61646 
61647+	usb_phy_set_suspend(dwc->usb2_phy, 1);
61648+	usb_phy_set_suspend(dwc->usb3_phy, 1);
61649+	phy_power_off(dwc->usb2_generic_phy);
61650+	phy_power_off(dwc->usb3_generic_phy);
61651+
61652 	dwc3_ulpi_exit(dwc);
61653 
61654 err4:
61655@@ -1644,6 +1733,9 @@ static int dwc3_probe(struct platform_device *pdev)
61656 assert_reset:
61657 	reset_control_assert(dwc->reset);
61658 
61659+	if (dwc->usb_psy)
61660+		power_supply_put(dwc->usb_psy);
61661+
61662 	return ret;
61663 }
61664 
61665@@ -1666,6 +1758,9 @@ static int dwc3_remove(struct platform_device *pdev)
61666 	dwc3_free_event_buffers(dwc);
61667 	dwc3_free_scratch_buffers(dwc);
61668 
61669+	if (dwc->usb_psy)
61670+		power_supply_put(dwc->usb_psy);
61671+
61672 	return 0;
61673 }
61674 
61675@@ -1850,7 +1945,7 @@ static int dwc3_runtime_suspend(struct device *dev)
61676 	if (ret)
61677 		return ret;
61678 
61679-	device_init_wakeup(dev, true);
61680+	device_init_wakeup(dev, false);
61681 
61682 	return 0;
61683 }
61684@@ -1860,7 +1955,7 @@ static int dwc3_runtime_resume(struct device *dev)
61685 	struct dwc3     *dwc = dev_get_drvdata(dev);
61686 	int		ret;
61687 
61688-	device_init_wakeup(dev, false);
61689+	device_init_wakeup(dev, true);
61690 
61691 	ret = dwc3_resume_common(dwc, PMSG_AUTO_RESUME);
61692 	if (ret)
61693@@ -1909,6 +2004,9 @@ static int dwc3_suspend(struct device *dev)
61694 	struct dwc3	*dwc = dev_get_drvdata(dev);
61695 	int		ret;
61696 
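+	/* nothing to do if the controller is already runtime suspended */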
61697+	if (pm_runtime_suspended(dwc->dev))
61698+		return 0;
61699+
61700 	ret = dwc3_suspend_common(dwc, PMSG_SUSPEND);
61701 	if (ret)
61702 		return ret;
61703@@ -1923,6 +2021,9 @@ static int dwc3_resume(struct device *dev)
61704 	struct dwc3	*dwc = dev_get_drvdata(dev);
61705 	int		ret;
61706 
61707+	if (pm_runtime_suspended(dwc->dev))
61708+		return 0;
61709+
61710 	pinctrl_pm_select_default_state(dev);
61711 
61712 	ret = dwc3_resume_common(dwc, PMSG_RESUME);
61713diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
61714index cbebe541f..ab09e6668 100644
61715--- a/drivers/usb/dwc3/core.h
61716+++ b/drivers/usb/dwc3/core.h
61717@@ -22,6 +22,7 @@
61718 #include <linux/debugfs.h>
61719 #include <linux/wait.h>
61720 #include <linux/workqueue.h>
61721+#include <linux/android_kabi.h>
61722 
61723 #include <linux/usb/ch9.h>
61724 #include <linux/usb/gadget.h>
61725@@ -31,6 +32,8 @@
61726 
61727 #include <linux/phy/phy.h>
61728 
61729+#include <linux/power_supply.h>
61730+
61731 #define DWC3_MSG_MAX	500
61732 
61733 /* Global constants */
61734@@ -55,7 +58,7 @@
61735 #define DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE	3
61736 #define DWC3_DEVICE_EVENT_WAKEUP		4
61737 #define DWC3_DEVICE_EVENT_HIBER_REQ		5
61738-#define DWC3_DEVICE_EVENT_EOPF			6
61739+#define DWC3_DEVICE_EVENT_SUSPEND		6
61740 #define DWC3_DEVICE_EVENT_SOF			7
61741 #define DWC3_DEVICE_EVENT_ERRATIC_ERROR		9
61742 #define DWC3_DEVICE_EVENT_CMD_CMPL		10
61743@@ -141,6 +144,7 @@
61744 #define DWC3_GHWPARAMS8		0xc600
61745 #define DWC3_GUCTL3		0xc60c
61746 #define DWC3_GFLADJ		0xc630
61747+#define DWC3_GHWPARAMS9		0xc680
61748 
61749 /* Device Registers */
61750 #define DWC3_DCFG		0xc700
61751@@ -253,9 +257,11 @@
61752 #define DWC3_GUCTL_HSTINAUTORETRY	BIT(14)
61753 
61754 /* Global User Control 1 Register */
61755-#define DWC3_GUCTL1_PARKMODE_DISABLE_SS	BIT(17)
61756+#define DWC3_GUCTL1_DEV_DECOUPLE_L1L2_EVT	BIT(31)
61757 #define DWC3_GUCTL1_TX_IPGAP_LINECHECK_DIS	BIT(28)
61758-#define DWC3_GUCTL1_DEV_L1_EXIT_BY_HW	BIT(24)
61759+#define DWC3_GUCTL1_DEV_FORCE_20_CLK_FOR_30_CLK	BIT(26)
61760+#define DWC3_GUCTL1_DEV_L1_EXIT_BY_HW		BIT(24)
61761+#define DWC3_GUCTL1_PARKMODE_DISABLE_SS		BIT(17)
61762 
61763 /* Global Status Register */
61764 #define DWC3_GSTS_OTG_IP	BIT(10)
61765@@ -376,6 +382,9 @@
61766 #define DWC3_GHWPARAMS7_RAM1_DEPTH(n)	((n) & 0xffff)
61767 #define DWC3_GHWPARAMS7_RAM2_DEPTH(n)	(((n) >> 16) & 0xffff)
61768 
61769+/* Global HWPARAMS9 Register */
61770+#define DWC3_GHWPARAMS9_DEV_TXF_FLUSH_BYPASS	BIT(0)
61771+
61772 /* Global Frame Length Adjustment Register */
61773 #define DWC3_GFLADJ_30MHZ_SDBND_SEL		BIT(7)
61774 #define DWC3_GFLADJ_30MHZ_MASK			0x3f
61775@@ -387,6 +396,8 @@
61776 #define DWC3_GUCTL3_SPLITDISABLE		BIT(14)
61777 
61778 /* Device Configuration Register */
61779+#define DWC3_DCFG_NUMLANES(n)	(((n) & 0x3) << 30) /* DWC_usb32 only */
61780+
61781 #define DWC3_DCFG_DEVADDR(addr)	((addr) << 3)
61782 #define DWC3_DCFG_DEVADDR_MASK	DWC3_DCFG_DEVADDR(0x7f)
61783 
61784@@ -401,6 +412,7 @@
61785 #define DWC3_DCFG_NUMP(n)	(((n) >> DWC3_DCFG_NUMP_SHIFT) & 0x1f)
61786 #define DWC3_DCFG_NUMP_MASK	(0x1f << DWC3_DCFG_NUMP_SHIFT)
61787 #define DWC3_DCFG_LPM_CAP	BIT(22)
61788+#define DWC3_DCFG_IGNSTRMPP	BIT(23)
61789 
61790 /* Device Control Register */
61791 #define DWC3_DCTL_RUN_STOP	BIT(31)
61792@@ -452,7 +464,7 @@
61793 #define DWC3_DEVTEN_CMDCMPLTEN		BIT(10)
61794 #define DWC3_DEVTEN_ERRTICERREN		BIT(9)
61795 #define DWC3_DEVTEN_SOFEN		BIT(7)
61796-#define DWC3_DEVTEN_EOPFEN		BIT(6)
61797+#define DWC3_DEVTEN_U3L2L1SUSPEN	BIT(6)
61798 #define DWC3_DEVTEN_HIBERNATIONREQEVTEN	BIT(5)
61799 #define DWC3_DEVTEN_WKUPEVTEN		BIT(4)
61800 #define DWC3_DEVTEN_ULSTCNGEN		BIT(3)
61801@@ -460,6 +472,8 @@
61802 #define DWC3_DEVTEN_USBRSTEN		BIT(1)
61803 #define DWC3_DEVTEN_DISCONNEVTEN	BIT(0)
61804 
61805+#define DWC3_DSTS_CONNLANES(n)		(((n) >> 30) & 0x3) /* DWC_usb32 only */
61806+
61807 /* Device Status Register */
61808 #define DWC3_DSTS_DCNRD			BIT(29)
61809 
61810@@ -650,6 +664,8 @@ struct dwc3_event_buffer {
61811 	dma_addr_t		dma;
61812 
61813 	struct dwc3		*dwc;
61814+
61815+	ANDROID_KABI_RESERVE(1);
61816 };
61817 
61818 #define DWC3_EP_FLAG_STALLED	BIT(0)
61819@@ -713,6 +729,7 @@ struct dwc3_ep {
61820 #define DWC3_EP_FORCE_RESTART_STREAM	BIT(9)
61821 #define DWC3_EP_FIRST_STREAM_PRIMED	BIT(10)
61822 #define DWC3_EP_PENDING_CLEAR_STALL	BIT(11)
61823+#define DWC3_EP_TXFIFO_RESIZED		BIT(12)
61824 
61825 	/* This last one is specific to EP0 */
61826 #define DWC3_EP0_DIR_IN		BIT(31)
61827@@ -743,6 +760,9 @@ struct dwc3_ep {
61828 	/* For isochronous START TRANSFER workaround only */
61829 	u8			combo_num;
61830 	int			start_cmd_status;
61831+
61832+	ANDROID_KABI_RESERVE(1);
61833+	ANDROID_KABI_RESERVE(2);
61834 };
61835 
61836 enum dwc3_phy {
61837@@ -841,6 +861,7 @@ struct dwc3_trb {
61838  * @hwparams6: GHWPARAMS6
61839  * @hwparams7: GHWPARAMS7
61840  * @hwparams8: GHWPARAMS8
61841+ * @hwparams9: GHWPARAMS9
61842  */
61843 struct dwc3_hwparams {
61844 	u32	hwparams0;
61845@@ -852,13 +873,15 @@ struct dwc3_hwparams {
61846 	u32	hwparams6;
61847 	u32	hwparams7;
61848 	u32	hwparams8;
61849+	u32	hwparams9;
61850+
61851+	ANDROID_KABI_RESERVE(1);
61852+	ANDROID_KABI_RESERVE(2);
61853 };
61854 
61855 /* HWPARAMS0 */
61856 #define DWC3_MODE(n)		((n) & 0x7)
61857 
61858-#define DWC3_MDWIDTH(n)		(((n) & 0xff00) >> 8)
61859-
61860 /* HWPARAMS1 */
61861 #define DWC3_NUM_INT(n)		(((n) & (0x3f << 15)) >> 15)
61862 
61863@@ -905,11 +928,13 @@ struct dwc3_request {
61864 	unsigned int		remaining;
61865 
61866 	unsigned int		status;
61867-#define DWC3_REQUEST_STATUS_QUEUED	0
61868-#define DWC3_REQUEST_STATUS_STARTED	1
61869-#define DWC3_REQUEST_STATUS_CANCELLED	2
61870-#define DWC3_REQUEST_STATUS_COMPLETED	3
61871-#define DWC3_REQUEST_STATUS_UNKNOWN	-1
61872+#define DWC3_REQUEST_STATUS_QUEUED		0
61873+#define DWC3_REQUEST_STATUS_STARTED		1
61874+#define DWC3_REQUEST_STATUS_DISCONNECTED	2
61875+#define DWC3_REQUEST_STATUS_DEQUEUED		3
61876+#define DWC3_REQUEST_STATUS_STALLED		4
61877+#define DWC3_REQUEST_STATUS_COMPLETED		5
61878+#define DWC3_REQUEST_STATUS_UNKNOWN		-1
61879 
61880 	u8			epnum;
61881 	struct dwc3_trb		*trb;
61882@@ -920,6 +945,9 @@ struct dwc3_request {
61883 	unsigned int		needs_extra_trb:1;
61884 	unsigned int		direction:1;
61885 	unsigned int		mapped:1;
61886+
61887+	ANDROID_KABI_RESERVE(1);
61888+	ANDROID_KABI_RESERVE(2);
61889 };
61890 
61891 /*
61892@@ -966,6 +994,10 @@ struct dwc3_scratchpad_array {
61893  * @nr_scratch: number of scratch buffers
61894  * @u1u2: only used on revisions <1.83a for workaround
61895  * @maximum_speed: maximum speed requested (mainly for testing purposes)
61896+ * @max_ssp_rate: SuperSpeed Plus maximum signaling rate and lane count
61897+ * @gadget_max_speed: maximum gadget speed requested
61898+ * @gadget_ssp_rate: Gadget driver's maximum supported SuperSpeed Plus signaling
61899+ *			rate and lane count.
61900  * @ip: controller's ID
61901  * @revision: controller's version of an IP
61902  * @version_type: VERSIONTYPE register contents, a sub release of a revision
61903@@ -980,6 +1012,9 @@ struct dwc3_scratchpad_array {
61904  * @role_sw: usb_role_switch handle
61905  * @role_switch_default_mode: default operation mode of controller while
61906  *			usb role is USB_ROLE_NONE.
61907+ * @current_role_sw_mode: current usb role switch mode.
61908+ * @desired_role_sw_mode: desired usb role switch mode.
61909+ * @usb_psy: pointer to power supply interface.
61910  * @usb2_phy: pointer to USB2 PHY
61911  * @usb3_phy: pointer to USB3 PHY
61912  * @usb2_generic_phy: pointer to USB2 PHY
61913@@ -1008,9 +1043,9 @@ struct dwc3_scratchpad_array {
61914  * @rx_max_burst_prd: max periodic ESS receive burst size
61915  * @tx_thr_num_pkt_prd: periodic ESS transmit packet count
61916  * @tx_max_burst_prd: max periodic ESS transmit burst size
61917+ * @tx_fifo_resize_max_num: max number of fifos allocated during txfifo resize
61918  * @hsphy_interface: "utmi" or "ulpi"
61919  * @connected: true when we're connected to a host, false otherwise
61920- * @softconnect: true when gadget connect is called, false when disconnect runs
61921  * @delayed_status: true when gadget driver asks for delayed status
61922  * @ep0_bounced: true when we used bounce buffer
61923  * @ep0_expect_in: true when we expect a DATA IN transfer
61924@@ -1023,6 +1058,7 @@ struct dwc3_scratchpad_array {
61925  *	1	- utmi_l1_suspend_n
61926  * @is_fpga: true when we are using the FPGA board
61927  * @pending_events: true when we have pending IRQs to be handled
61928+ * @do_fifo_resize: true when txfifo resizing is enabled for dwc3 endpoints
61929  * @pullups_connected: true when Run/Stop bit is set
61930  * @setup_packet_pending: true when there's a Setup Packet in FIFO. Workaround
61931  * @three_stage_setup: set if we perform a three phase setup
61932@@ -1065,6 +1101,11 @@ struct dwc3_scratchpad_array {
61933  * @dis_split_quirk: set to disable split boundary.
61934  * @imod_interval: set the interrupt moderation interval in 250ns
61935  *			increments or 0 to disable.
61936+ * @max_cfg_eps: current max number of IN eps used across all USB configs.
61937+ * @last_fifo_depth: last fifo depth used to determine next fifo ram start
61938+ *		     address.
61939+ * @num_ep_resized: carries the current number of endpoints which have had
61940+ *		    their tx fifo resized.
61941  */
61942 struct dwc3 {
61943 	struct work_struct	drd_work;
61944@@ -1123,6 +1164,12 @@ struct dwc3 {
61945 	enum usb_phy_interface	hsphy_mode;
61946 	struct usb_role_switch	*role_sw;
61947 	enum usb_dr_mode	role_switch_default_mode;
61948+#if defined(CONFIG_ARCH_ROCKCHIP) && defined(CONFIG_NO_GKI)
61949+	u32			current_role_sw_mode;
61950+	u32			desired_role_sw_mode;
61951+#endif
61952+
61953+	struct power_supply	*usb_psy;
61954 
61955 	u32			fladj;
61956 	u32			irq_gadget;
61957@@ -1133,6 +1180,9 @@ struct dwc3 {
61958 	u32			nr_scratch;
61959 	u32			u1u2;
61960 	u32			maximum_speed;
61961+	u32			gadget_max_speed;
61962+	enum usb_ssp_rate	max_ssp_rate;
61963+	enum usb_ssp_rate	gadget_ssp_rate;
61964 
61965 	u32			ip;
61966 
61967@@ -1215,11 +1265,11 @@ struct dwc3 {
61968 	u8			rx_max_burst_prd;
61969 	u8			tx_thr_num_pkt_prd;
61970 	u8			tx_max_burst_prd;
61971+	u8			tx_fifo_resize_max_num;
61972 
61973 	const char		*hsphy_interface;
61974 
61975 	unsigned		connected:1;
61976-	unsigned		softconnect:1;
61977 	unsigned		delayed_status:1;
61978 	unsigned		ep0_bounced:1;
61979 	unsigned		ep0_expect_in:1;
61980@@ -1229,6 +1279,7 @@ struct dwc3 {
61981 	unsigned		is_utmi_l1_suspend:1;
61982 	unsigned		is_fpga:1;
61983 	unsigned		pending_events:1;
61984+	unsigned		do_fifo_resize:1;
61985 	unsigned		pullups_connected:1;
61986 	unsigned		setup_packet_pending:1;
61987 	unsigned		three_stage_setup:1;
61988@@ -1262,8 +1313,28 @@ struct dwc3 {
61989 	unsigned		dis_metastability_quirk:1;
61990 
61991 	unsigned		dis_split_quirk:1;
61992+	unsigned		async_callbacks:1;
61993 
61994 	u16			imod_interval;
61995+
61996+	int			max_cfg_eps;
61997+	int			last_fifo_depth;
61998+	int			num_ep_resized;
61999+
62000+	ANDROID_KABI_RESERVE(1);
62001+	ANDROID_KABI_RESERVE(2);
62002+	ANDROID_KABI_RESERVE(3);
62003+	ANDROID_KABI_RESERVE(4);
62004+};
62005+
62006+/**
62007+ * struct dwc3_vendor - holds vendor parameters without changing the layout of struct dwc3
62008+ * @dwc: contains dwc3 core reference
62009+ * @softconnect: true when gadget connect is called, false when disconnect runs
62010+ */
62011+struct dwc3_vendor {
62012+	struct dwc3	dwc;
62013+	unsigned	softconnect:1;
62014 };
62015 
62016 #define INCRX_BURST_MODE 0
62017@@ -1356,7 +1427,7 @@ struct dwc3_event_depevt {
62018  *	3	- ULStChng
62019  *	4	- WkUpEvt
62020  *	5	- Reserved
62021- *	6	- EOPF
62022+ *	6	- Suspend (EOPF on revisions 2.10a and prior)
62023  *	7	- SOF
62024  *	8	- Reserved
62025  *	9	- ErrticErr
62026@@ -1453,13 +1524,28 @@ u32 dwc3_core_fifo_space(struct dwc3_ep *dep, u8 type);
62027 	 (!(_ip##_VERSIONTYPE_##_to) ||					\
62028 	  dwc->version_type <= _ip##_VERSIONTYPE_##_to))
62029 
62030+/**
62031+ * dwc3_mdwidth - get MDWIDTH value in bits
62032+ * @dwc: pointer to our context structure
62033+ *
62034+ * Return MDWIDTH configuration value in bits.
62035+ */
62036+static inline u32 dwc3_mdwidth(struct dwc3 *dwc)
62037+{
62038+	u32 mdwidth;
62039+
62040+	mdwidth = DWC3_GHWPARAMS0_MDWIDTH(dwc->hwparams.hwparams0);
62041+	if (DWC3_IP_IS(DWC32))
62042+		mdwidth += DWC3_GHWPARAMS6_MDWIDTH(dwc->hwparams.hwparams6);
62043+
62044+	return mdwidth;
62045+}
62046+
62047 bool dwc3_has_imod(struct dwc3 *dwc);
62048 
62049 int dwc3_event_buffers_setup(struct dwc3 *dwc);
62050 void dwc3_event_buffers_cleanup(struct dwc3 *dwc);
62051 
62052-int dwc3_core_soft_reset(struct dwc3 *dwc);
62053-
62054 #if IS_ENABLED(CONFIG_USB_DWC3_HOST) || IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE)
62055 int dwc3_host_init(struct dwc3 *dwc);
62056 void dwc3_host_exit(struct dwc3 *dwc);
62057@@ -1480,6 +1566,8 @@ int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned int cmd,
62058 		struct dwc3_gadget_ep_cmd_params *params);
62059 int dwc3_send_gadget_generic_command(struct dwc3 *dwc, unsigned int cmd,
62060 		u32 param);
62061+void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force, bool interrupt);
62062+void dwc3_gadget_clear_tx_fifos(struct dwc3 *dwc);
62063 #else
62064 static inline int dwc3_gadget_init(struct dwc3 *dwc)
62065 { return 0; }
62066@@ -1499,6 +1587,11 @@ static inline int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned int cmd,
62067 static inline int dwc3_send_gadget_generic_command(struct dwc3 *dwc,
62068 		int cmd, u32 param)
62069 { return 0; }
62070+static inline void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force,
62071+					     bool interrupt)
62072+{ }
62073+static inline void dwc3_gadget_clear_tx_fifos(struct dwc3 *dwc)
62074+{ }
62075 #endif
62076 
62077 #if IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE)
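
The new struct dwc3_vendor above embeds struct dwc3 as its first member so that vendor-only state (here, softconnect) can be recovered from a plain struct dwc3 pointer with container_of(), without touching the KABI-reserved layout of struct dwc3 itself. A minimal sketch of that wrapper pattern with illustrative type names, not taken from the patch:

#include <linux/kernel.h>

struct core_state {
	int			connected;
};

struct vendor_state {
	struct core_state	core;		/* embedded first, like dwc in struct dwc3_vendor */
	unsigned		softconnect:1;	/* vendor-only field, invisible to core users */
};

static unsigned int vendor_get_softconnect(struct core_state *c)
{
	/* 'core' is the first member, so the computed offset is zero. */
	struct vendor_state *v = container_of(c, struct vendor_state, core);

	return v->softconnect;
}
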
62078diff --git a/drivers/usb/dwc3/debug.h b/drivers/usb/dwc3/debug.h
62079index 74d9c2c38..b2aa7272f 100644
62080--- a/drivers/usb/dwc3/debug.h
62081+++ b/drivers/usb/dwc3/debug.h
62082@@ -221,8 +221,8 @@ static inline const char *dwc3_gadget_event_string(char *str, size_t size,
62083 		snprintf(str, size, "WakeUp [%s]",
62084 				dwc3_gadget_link_string(state));
62085 		break;
62086-	case DWC3_DEVICE_EVENT_EOPF:
62087-		snprintf(str, size, "End-Of-Frame [%s]",
62088+	case DWC3_DEVICE_EVENT_SUSPEND:
62089+		snprintf(str, size, "Suspend [%s]",
62090 				dwc3_gadget_link_string(state));
62091 		break;
62092 	case DWC3_DEVICE_EVENT_SOF:
62093@@ -353,8 +353,8 @@ static inline const char *dwc3_gadget_event_type_string(u8 event)
62094 		return "Wake-Up";
62095 	case DWC3_DEVICE_EVENT_HIBER_REQ:
62096 		return "Hibernation";
62097-	case DWC3_DEVICE_EVENT_EOPF:
62098-		return "End of Periodic Frame";
62099+	case DWC3_DEVICE_EVENT_SUSPEND:
62100+		return "Suspend";
62101 	case DWC3_DEVICE_EVENT_SOF:
62102 		return "Start of Frame";
62103 	case DWC3_DEVICE_EVENT_ERRATIC_ERROR:
62104diff --git a/drivers/usb/dwc3/debugfs.c b/drivers/usb/dwc3/debugfs.c
62105index 3ebe3e6c2..dc81e8349 100644
62106--- a/drivers/usb/dwc3/debugfs.c
62107+++ b/drivers/usb/dwc3/debugfs.c
62108@@ -440,6 +440,10 @@ static ssize_t dwc3_mode_write(struct file *file,
62109 	if (!strncmp(buf, "otg", 3))
62110 		mode = DWC3_GCTL_PRTCAP_OTG;
62111 
62112+#if defined(CONFIG_ARCH_ROCKCHIP) && defined(CONFIG_NO_GKI)
62113+	dwc->desired_role_sw_mode = mode;
62114+#endif
62115+
62116 	dwc3_set_mode(dwc, mode);
62117 
62118 	return count;
62119@@ -638,16 +642,14 @@ static int dwc3_tx_fifo_size_show(struct seq_file *s, void *unused)
62120 	struct dwc3_ep		*dep = s->private;
62121 	struct dwc3		*dwc = dep->dwc;
62122 	unsigned long		flags;
62123-	int			mdwidth;
62124+	u32			mdwidth;
62125 	u32			val;
62126 
62127 	spin_lock_irqsave(&dwc->lock, flags);
62128 	val = dwc3_core_fifo_space(dep, DWC3_TXFIFO);
62129 
62130 	/* Convert to bytes */
62131-	mdwidth = DWC3_MDWIDTH(dwc->hwparams.hwparams0);
62132-	if (DWC3_IP_IS(DWC32))
62133-		mdwidth += DWC3_GHWPARAMS6_MDWIDTH(dwc->hwparams.hwparams6);
62134+	mdwidth = dwc3_mdwidth(dwc);
62135 
62136 	val *= mdwidth;
62137 	val >>= 3;
62138@@ -662,16 +664,14 @@ static int dwc3_rx_fifo_size_show(struct seq_file *s, void *unused)
62139 	struct dwc3_ep		*dep = s->private;
62140 	struct dwc3		*dwc = dep->dwc;
62141 	unsigned long		flags;
62142-	int			mdwidth;
62143+	u32			mdwidth;
62144 	u32			val;
62145 
62146 	spin_lock_irqsave(&dwc->lock, flags);
62147 	val = dwc3_core_fifo_space(dep, DWC3_RXFIFO);
62148 
62149 	/* Convert to bytes */
62150-	mdwidth = DWC3_MDWIDTH(dwc->hwparams.hwparams0);
62151-	if (DWC3_IP_IS(DWC32))
62152-		mdwidth += DWC3_GHWPARAMS6_MDWIDTH(dwc->hwparams.hwparams6);
62153+	mdwidth = dwc3_mdwidth(dwc);
62154 
62155 	val *= mdwidth;
62156 	val >>= 3;
62157diff --git a/drivers/usb/dwc3/drd.c b/drivers/usb/dwc3/drd.c
62158index 0a96f44cc..347795a8b 100644
62159--- a/drivers/usb/dwc3/drd.c
62160+++ b/drivers/usb/dwc3/drd.c
62161@@ -420,6 +420,12 @@ static void dwc3_drd_update(struct dwc3 *dwc)
62162 		id = extcon_get_state(dwc->edev, EXTCON_USB_HOST);
62163 		if (id < 0)
62164 			id = 0;
62165+
62166+#if defined(CONFIG_ARCH_ROCKCHIP) && defined(CONFIG_NO_GKI)
62167+		dwc->desired_role_sw_mode = (id ? USB_DR_MODE_HOST :
62168+					     USB_DR_MODE_PERIPHERAL);
62169+#endif
62170+
62171 		dwc3_set_mode(dwc, id ?
62172 			      DWC3_GCTL_PRTCAP_HOST :
62173 			      DWC3_GCTL_PRTCAP_DEVICE);
62174@@ -431,6 +437,15 @@ static int dwc3_drd_notifier(struct notifier_block *nb,
62175 {
62176 	struct dwc3 *dwc = container_of(nb, struct dwc3, edev_nb);
62177 
62178+#if defined(CONFIG_ARCH_ROCKCHIP) && defined(CONFIG_NO_GKI)
62179+	if (extcon_get_state(dwc->edev, EXTCON_USB))
62180+		dwc->desired_role_sw_mode = USB_DR_MODE_PERIPHERAL;
62181+	else if (extcon_get_state(dwc->edev, EXTCON_USB_HOST))
62182+		dwc->desired_role_sw_mode = USB_DR_MODE_HOST;
62183+	else
62184+		dwc->desired_role_sw_mode = USB_DR_MODE_UNKNOWN;
62185+#endif
62186+
62187 	dwc3_set_mode(dwc, event ?
62188 		      DWC3_GCTL_PRTCAP_HOST :
62189 		      DWC3_GCTL_PRTCAP_DEVICE);
62190@@ -441,8 +456,8 @@ static int dwc3_drd_notifier(struct notifier_block *nb,
62191 static struct extcon_dev *dwc3_get_extcon(struct dwc3 *dwc)
62192 {
62193 	struct device *dev = dwc->dev;
62194-	struct device_node *np_phy, *np_conn;
62195-	struct extcon_dev *edev;
62196+	struct device_node *np_phy;
62197+	struct extcon_dev *edev = NULL;
62198 	const char *name;
62199 
62200 	if (device_property_read_bool(dev, "extcon"))
62201@@ -462,15 +477,22 @@ static struct extcon_dev *dwc3_get_extcon(struct dwc3 *dwc)
62202 		return edev;
62203 	}
62204 
62205+	/*
62206+	 * Try to get an extcon device from the USB PHY controller's "port"
62207+	 * node. Check if it has the "port" node first, to avoid printing the
62208+	 * error message from underlying code, as it's a valid case: extcon
62209+	 * device (and "port" node) may be missing in case of "usb-role-switch"
62210+	 * or OTG mode.
62211+	 */
62212 	np_phy = of_parse_phandle(dev->of_node, "phys", 0);
62213-	np_conn = of_graph_get_remote_node(np_phy, -1, -1);
62214-
62215-	if (np_conn)
62216-		edev = extcon_find_edev_by_node(np_conn);
62217-	else
62218-		edev = NULL;
62219+	if (of_graph_is_present(np_phy)) {
62220+		struct device_node *np_conn;
62221 
62222-	of_node_put(np_conn);
62223+		np_conn = of_graph_get_remote_node(np_phy, -1, -1);
62224+		if (np_conn)
62225+			edev = extcon_find_edev_by_node(np_conn);
62226+		of_node_put(np_conn);
62227+	}
62228 	of_node_put(np_phy);
62229 
62230 	return edev;
62231@@ -484,6 +506,10 @@ static int dwc3_usb_role_switch_set(struct usb_role_switch *sw,
62232 	struct dwc3 *dwc = usb_role_switch_get_drvdata(sw);
62233 	u32 mode;
62234 
62235+#if defined(CONFIG_ARCH_ROCKCHIP) && defined(CONFIG_NO_GKI)
62236+	dwc->desired_role_sw_mode = role;
62237+#endif
62238+
62239 	switch (role) {
62240 	case USB_ROLE_HOST:
62241 		mode = DWC3_GCTL_PRTCAP_HOST;
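
The ep0.c and gadget.c hunks below only invoke the function driver's setup/disconnect/suspend/resume callbacks once dwc->async_callbacks has been set through the new udc_async_callbacks gadget op (dwc3_gadget_async_callbacks, registered later in this patch). A minimal sketch of that gating idea with a made-up UDC structure; the names below are illustrative only:

#include <linux/spinlock.h>

struct example_udc {
	spinlock_t	lock;
	bool		async_callbacks;	/* set once the gadget core allows callbacks */
	void		(*driver_disconnect)(void *ctx);
	void		*ctx;
};

static void example_udc_disconnect_event(struct example_udc *udc)
{
	spin_lock(&udc->lock);
	if (udc->async_callbacks && udc->driver_disconnect) {
		/* Drop the lock while calling out, as the dwc3 helpers do. */
		spin_unlock(&udc->lock);
		udc->driver_disconnect(udc->ctx);
		spin_lock(&udc->lock);
	}
	spin_unlock(&udc->lock);
}
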
62242diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
62243index 3cd294264..658739410 100644
62244--- a/drivers/usb/dwc3/ep0.c
62245+++ b/drivers/usb/dwc3/ep0.c
62246@@ -597,11 +597,13 @@ static int dwc3_ep0_set_address(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
62247 
62248 static int dwc3_ep0_delegate_req(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
62249 {
62250-	int ret;
62251+	int ret = -EINVAL;
62252 
62253-	spin_unlock(&dwc->lock);
62254-	ret = dwc->gadget_driver->setup(dwc->gadget, ctrl);
62255-	spin_lock(&dwc->lock);
62256+	if (dwc->async_callbacks) {
62257+		spin_unlock(&dwc->lock);
62258+		ret = dwc->gadget_driver->setup(dwc->gadget, ctrl);
62259+		spin_lock(&dwc->lock);
62260+	}
62261 	return ret;
62262 }
62263 
62264@@ -619,6 +621,8 @@ static int dwc3_ep0_set_config(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
62265 		return -EINVAL;
62266 
62267 	case USB_STATE_ADDRESS:
62268+		dwc3_gadget_clear_tx_fifos(dwc);
62269+
62270 		ret = dwc3_ep0_delegate_req(dwc, ctrl);
62271 		/* if the cfg matches and the cfg is non zero */
62272 		if (cfg && (!ret || (ret == USB_GADGET_DELAYED_STATUS))) {
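
The gadget.c hunk below introduces TXFIFO resizing built around the size equation fifo_size = mult * ((max_packet + mdwidth) / mdwidth + 1) + 1 for cores at or after revision 2.90a. A small worked check of that arithmetic, assuming a 64-bit MDWIDTH (8 bytes), which is common but hardware-specific, and the 1024-byte max packet used by the patch:

#include <stdio.h>

/* Mirrors the >= 2.90a branch of dwc3_gadget_calc_tx_fifo_size() below. */
static int calc_tx_fifo_size(int mult, int max_packet, int mdwidth_bytes)
{
	return mult * ((max_packet + mdwidth_bytes) / mdwidth_bytes + 1) + 1;
}

int main(void)
{
	printf("%d\n", calc_tx_fifo_size(1, 1024, 8));	/* (1032/8 + 1) + 1 = 131 */
	printf("%d\n", calc_tx_fifo_size(3, 1024, 8));	/* 3 * 130 + 1 = 391 */
	return 0;
}
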
62273diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
62274index 28a1194f8..8e9a2622d 100644
62275--- a/drivers/usb/dwc3/gadget.c
62276+++ b/drivers/usb/dwc3/gadget.c
62277@@ -311,24 +311,13 @@ int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned int cmd,
62278 	if (DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_STARTTRANSFER) {
62279 		int link_state;
62280 
62281-		/*
62282-		 * Initiate remote wakeup if the link state is in U3 when
62283-		 * operating in SS/SSP or L1/L2 when operating in HS/FS. If the
62284-		 * link state is in U1/U2, no remote wakeup is needed. The Start
62285-		 * Transfer command will initiate the link recovery.
62286-		 */
62287 		link_state = dwc3_gadget_get_link_state(dwc);
62288-		switch (link_state) {
62289-		case DWC3_LINK_STATE_U2:
62290-			if (dwc->gadget->speed >= USB_SPEED_SUPER)
62291-				break;
62292-
62293-			fallthrough;
62294-		case DWC3_LINK_STATE_U3:
62295+		if (link_state == DWC3_LINK_STATE_U1 ||
62296+		    link_state == DWC3_LINK_STATE_U2 ||
62297+		    link_state == DWC3_LINK_STATE_U3) {
62298 			ret = __dwc3_gadget_wakeup(dwc);
62299 			dev_WARN_ONCE(dwc->dev, ret, "wakeup failed --> %d\n",
62300 					ret);
62301-			break;
62302 		}
62303 	}
62304 
62305@@ -417,6 +406,7 @@ int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned int cmd,
62306 
62307 	return ret;
62308 }
62309+EXPORT_SYMBOL_GPL(dwc3_send_gadget_ep_cmd);
62310 
62311 static int dwc3_send_clear_stall_ep_cmd(struct dwc3_ep *dep)
62312 {
62313@@ -640,8 +630,192 @@ static int dwc3_gadget_set_ep_config(struct dwc3_ep *dep, unsigned int action)
62314 	return dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETEPCONFIG, &params);
62315 }
62316 
62317-static void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force,
62318-		bool interrupt);
62319+/**
62320+ * dwc3_gadget_calc_tx_fifo_size - calculates the txfifo size value
62321+ * @dwc: pointer to the DWC3 context
62322+ * @mult: number of fifos to calculate for
62323+ *
62324+ * Calculates the size value based on the equation below:
62325+ *
62326+ * DWC3 revision 280A and prior:
62327+ * fifo_size = mult * (max_packet / mdwidth) + 1;
62328+ *
62329+ * DWC3 revision 290A and onwards:
62330+ * fifo_size = mult * ((max_packet + mdwidth)/mdwidth + 1) + 1
62331+ *
62332+ * The max packet size is set to 1024, as the txfifo requirements mainly apply
62333+ * to super speed USB use cases.  However, it is safe to overestimate the fifo
62334+ * allocations for other scenarios, e.g. high speed USB.
62335+ */
62336+static int dwc3_gadget_calc_tx_fifo_size(struct dwc3 *dwc, int mult)
62337+{
62338+	int max_packet = 1024;
62339+	int fifo_size;
62340+	int mdwidth;
62341+
62342+	mdwidth = dwc3_mdwidth(dwc);
62343+
62344+	/* MDWIDTH is represented in bits, we need it in bytes */
62345+	mdwidth >>= 3;
62346+
62347+	if (DWC3_VER_IS_PRIOR(DWC3, 290A))
62348+		fifo_size = mult * (max_packet / mdwidth) + 1;
62349+	else
62350+		fifo_size = mult * ((max_packet + mdwidth) / mdwidth) + 1;
62351+	return fifo_size;
62352+}
62353+
62354+/**
62355+ * dwc3_gadget_clear_tx_fifos - Clears txfifo allocations
62356+ * @dwc: pointer to the DWC3 context
62357+ *
62358+ * Iterates through all the endpoint registers and clears the previous txfifo
62359+ * allocations.
62360+ */
62361+void dwc3_gadget_clear_tx_fifos(struct dwc3 *dwc)
62362+{
62363+	struct dwc3_ep *dep;
62364+	int fifo_depth;
62365+	int size;
62366+	int num;
62367+
62368+	if (!dwc->do_fifo_resize)
62369+		return;
62370+
62371+	/* Read ep0IN related TXFIFO size */
62372+	dep = dwc->eps[1];
62373+	size = dwc3_readl(dwc->regs, DWC3_GTXFIFOSIZ(0));
62374+	if (DWC3_IP_IS(DWC3))
62375+		fifo_depth = DWC3_GTXFIFOSIZ_TXFDEP(size);
62376+	else
62377+		fifo_depth = DWC31_GTXFIFOSIZ_TXFDEP(size);
62378+
62379+	dwc->last_fifo_depth = fifo_depth;
62380+	/* Clear existing TXFIFO for all IN eps except ep0 */
62381+	for (num = 3; num < min_t(int, dwc->num_eps, DWC3_ENDPOINTS_NUM);
62382+	     num += 2) {
62383+		dep = dwc->eps[num];
62384+		/* Don't change TXFRAMNUM on usb31 version */
62385+		size = DWC3_IP_IS(DWC3) ? 0 :
62386+			dwc3_readl(dwc->regs, DWC3_GTXFIFOSIZ(num >> 1)) &
62387+				   DWC31_GTXFIFOSIZ_TXFRAMNUM;
62388+
62389+		dwc3_writel(dwc->regs, DWC3_GTXFIFOSIZ(num >> 1), size);
62390+		dep->flags &= ~DWC3_EP_TXFIFO_RESIZED;
62391+	}
62392+	dwc->num_ep_resized = 0;
62393+}
62394+
62395+/**
62396+ * dwc3_gadget_resize_tx_fifos - reallocate fifo spaces for current use-case
62397+ * @dep: endpoint whose TX FIFO is being resized
62398+ *
62399+ * This function will perform a best effort FIFO allocation in order
62400+ * to improve FIFO usage and throughput, while still allowing
62401+ * us to enable as many endpoints as possible.
62402+ *
62403+ * Keep in mind that this operation will be highly dependent
62404+ * on the configured size for RAM1 (which contains the TxFifo),
62405+ * the number of endpoints enabled in the coreConsultant tool, and
62406+ * the width of the Master Bus.
62407+ *
62408+ * In general, FIFO depths are represented with the following equation:
62409+ *
62410+ * fifo_size = mult * ((max_packet + mdwidth)/mdwidth + 1) + 1
62411+ *
62412+ * In conjunction with dwc3_gadget_check_config(), this resizing logic will
62413+ * ensure that all endpoints will have enough internal memory for one max
62414+ * packet per endpoint.
62415+ */
62416+static int dwc3_gadget_resize_tx_fifos(struct dwc3_ep *dep)
62417+{
62418+	struct dwc3 *dwc = dep->dwc;
62419+	int fifo_0_start;
62420+	int ram1_depth;
62421+	int fifo_size;
62422+	int min_depth;
62423+	int num_in_ep;
62424+	int remaining;
62425+	int num_fifos = 1;
62426+	int fifo;
62427+	int tmp;
62428+
62429+	if (!dwc->do_fifo_resize)
62430+		return 0;
62431+
62432+	/* resize IN endpoints except ep0 */
62433+	if (!usb_endpoint_dir_in(dep->endpoint.desc) || dep->number <= 1)
62434+		return 0;
62435+
62436+	/* bail if already resized */
62437+	if (dep->flags & DWC3_EP_TXFIFO_RESIZED)
62438+		return 0;
62439+
62440+	ram1_depth = DWC3_RAM1_DEPTH(dwc->hwparams.hwparams7);
62441+
62442+	if ((dep->endpoint.maxburst > 1 &&
62443+	     usb_endpoint_xfer_bulk(dep->endpoint.desc)) ||
62444+	    usb_endpoint_xfer_isoc(dep->endpoint.desc))
62445+		num_fifos = 3;
62446+
62447+	if (dep->endpoint.maxburst > 6 &&
62448+	    usb_endpoint_xfer_bulk(dep->endpoint.desc) && DWC3_IP_IS(DWC31))
62449+		num_fifos = dwc->tx_fifo_resize_max_num;
62450+
62451+	/* FIFO size for a single buffer */
62452+	fifo = dwc3_gadget_calc_tx_fifo_size(dwc, 1);
62453+
62454+	/* Calculate the number of remaining EPs w/o any FIFO */
62455+	num_in_ep = dwc->max_cfg_eps;
62456+	num_in_ep -= dwc->num_ep_resized;
62457+
62458+	/* Reserve at least one FIFO for the number of IN EPs */
62459+	min_depth = num_in_ep * (fifo + 1);
62460+	remaining = ram1_depth - min_depth - dwc->last_fifo_depth;
62461+	remaining = max_t(int, 0, remaining);
62462+	/*
62463+	 * We've already reserved 1 FIFO per EP, so check what we can fit in
62464+	 * addition to it.  If there is not enough remaining space, allocate
62465+	 * all the remaining space to the EP.
62466+	 */
62467+	fifo_size = (num_fifos - 1) * fifo;
62468+	if (remaining < fifo_size)
62469+		fifo_size = remaining;
62470+
62471+	fifo_size += fifo;
62472+	/* Last increment according to the TX FIFO size equation */
62473+	fifo_size++;
62474+
62475+	/* Check if TXFIFOs start at non-zero addr */
62476+	tmp = dwc3_readl(dwc->regs, DWC3_GTXFIFOSIZ(0));
62477+	fifo_0_start = DWC3_GTXFIFOSIZ_TXFSTADDR(tmp);
62478+
62479+	fifo_size |= (fifo_0_start + (dwc->last_fifo_depth << 16));
62480+	if (DWC3_IP_IS(DWC3))
62481+		dwc->last_fifo_depth += DWC3_GTXFIFOSIZ_TXFDEP(fifo_size);
62482+	else
62483+		dwc->last_fifo_depth += DWC31_GTXFIFOSIZ_TXFDEP(fifo_size);
62484+
62485+	/* Check fifo size allocation doesn't exceed available RAM size. */
62486+	if (dwc->last_fifo_depth >= ram1_depth) {
62487+		dev_err(dwc->dev, "Fifosize(%d) > RAM size(%d) %s depth:%d\n",
62488+			dwc->last_fifo_depth, ram1_depth,
62489+			dep->endpoint.name, fifo_size);
62490+		if (DWC3_IP_IS(DWC3))
62491+			fifo_size = DWC3_GTXFIFOSIZ_TXFDEP(fifo_size);
62492+		else
62493+			fifo_size = DWC31_GTXFIFOSIZ_TXFDEP(fifo_size);
62494+
62495+		dwc->last_fifo_depth -= fifo_size;
62496+		return -ENOMEM;
62497+	}
62498+
62499+	dwc3_writel(dwc->regs, DWC3_GTXFIFOSIZ(dep->number >> 1), fifo_size);
62500+	dep->flags |= DWC3_EP_TXFIFO_RESIZED;
62501+	dwc->num_ep_resized++;
62502+
62503+	return 0;
62504+}
62505 
62506 /**
62507  * __dwc3_gadget_ep_enable - initializes a hw endpoint
62508@@ -660,6 +834,10 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep, unsigned int action)
62509 	int			ret;
62510 
62511 	if (!(dep->flags & DWC3_EP_ENABLED)) {
62512+		ret = dwc3_gadget_resize_tx_fifos(dep);
62513+		if (ret)
62514+			return ret;
62515+
62516 		ret = dwc3_gadget_start_config(dep);
62517 		if (ret)
62518 			return ret;
62519@@ -742,8 +920,16 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep, unsigned int action)
62520 			 * All stream eps will reinitiate stream on NoStream
62521 			 * rejection until we can determine that the host can
62522 			 * prime after the first transfer.
62523+			 *
62524+			 * However, if the controller is capable of
62525+			 * TXF_FLUSH_BYPASS, then IN direction endpoints will
62526+			 * automatically restart the stream without the driver
62527+			 * initiation.
62528 			 */
62529-			dep->flags |= DWC3_EP_FORCE_RESTART_STREAM;
62530+			if (!dep->direction ||
62531+			    !(dwc->hwparams.hwparams9 &
62532+			      DWC3_GHWPARAMS9_DEV_TXF_FLUSH_BYPASS))
62533+				dep->flags |= DWC3_EP_FORCE_RESTART_STREAM;
62534 		}
62535 	}
62536 
62537@@ -808,7 +994,7 @@ static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
62538 
62539 	dep->stream_capable = false;
62540 	dep->type = 0;
62541-	dep->flags = 0;
62542+	dep->flags &= DWC3_EP_TXFIFO_RESIZED;
62543 
62544 	/* Clear out the ep descriptors for non-ep0 */
62545 	if (dep->number > 1) {
62546@@ -971,49 +1157,17 @@ static u32 dwc3_calc_trbs_left(struct dwc3_ep *dep)
62547 	return trbs_left;
62548 }
62549 
62550-/**
62551- * dwc3_prepare_one_trb - setup one TRB from one request
62552- * @dep: endpoint for which this request is prepared
62553- * @req: dwc3_request pointer
62554- * @trb_length: buffer size of the TRB
62555- * @chain: should this TRB be chained to the next?
62556- * @node: only for isochronous endpoints. First TRB needs different type.
62557- * @use_bounce_buffer: set to use bounce buffer
62558- * @must_interrupt: set to interrupt on TRB completion
62559- */
62560-static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
62561-		struct dwc3_request *req, unsigned int trb_length,
62562-		unsigned int chain, unsigned int node, bool use_bounce_buffer,
62563-		bool must_interrupt)
62564+static void __dwc3_prepare_one_trb(struct dwc3_ep *dep, struct dwc3_trb *trb,
62565+		dma_addr_t dma, unsigned int length, unsigned int chain,
62566+		unsigned int node, unsigned int stream_id,
62567+		unsigned int short_not_ok, unsigned int no_interrupt,
62568+		unsigned int is_last, bool must_interrupt)
62569 {
62570-	struct dwc3_trb		*trb;
62571-	dma_addr_t		dma;
62572-	unsigned int		stream_id = req->request.stream_id;
62573-	unsigned int		short_not_ok = req->request.short_not_ok;
62574-	unsigned int		no_interrupt = req->request.no_interrupt;
62575-	unsigned int		is_last = req->request.is_last;
62576 	struct dwc3		*dwc = dep->dwc;
62577 	struct usb_gadget	*gadget = dwc->gadget;
62578 	enum usb_device_speed	speed = gadget->speed;
62579 
62580-	if (use_bounce_buffer)
62581-		dma = dep->dwc->bounce_addr;
62582-	else if (req->request.num_sgs > 0)
62583-		dma = sg_dma_address(req->start_sg);
62584-	else
62585-		dma = req->request.dma;
62586-
62587-	trb = &dep->trb_pool[dep->trb_enqueue];
62588-
62589-	if (!req->trb) {
62590-		dwc3_gadget_move_started_request(req);
62591-		req->trb = trb;
62592-		req->trb_dma = dwc3_trb_dma_offset(dep, trb);
62593-	}
62594-
62595-	req->num_trbs++;
62596-
62597-	trb->size = DWC3_TRB_SIZE_LENGTH(trb_length);
62598+	trb->size = DWC3_TRB_SIZE_LENGTH(length);
62599 	trb->bpl = lower_32_bits(dma);
62600 	trb->bph = upper_32_bits(dma);
62601 
62602@@ -1053,10 +1207,10 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
62603 				unsigned int mult = 2;
62604 				unsigned int maxp = usb_endpoint_maxp(ep->desc);
62605 
62606-				if (req->request.length <= (2 * maxp))
62607+				if (length <= (2 * maxp))
62608 					mult--;
62609 
62610-				if (req->request.length <= maxp)
62611+				if (length <= maxp)
62612 					mult--;
62613 
62614 				trb->size |= DWC3_TRB_SIZE_PCM1(mult);
62615@@ -1105,19 +1259,6 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
62616 	if (usb_endpoint_xfer_bulk(dep->endpoint.desc) && dep->stream_capable)
62617 		trb->ctrl |= DWC3_TRB_CTRL_SID_SOFN(stream_id);
62618 
62619-	/*
62620-	 * As per data book 4.2.3.2TRB Control Bit Rules section
62621-	 *
62622-	 * The controller autonomously checks the HWO field of a TRB to determine if the
62623-	 * entire TRB is valid. Therefore, software must ensure that the rest of the TRB
62624-	 * is valid before setting the HWO field to '1'. In most systems, this means that
62625-	 * software must update the fourth DWORD of a TRB last.
62626-	 *
62627-	 * However there is a possibility of CPU re-ordering here which can cause
62628-	 * controller to observe the HWO bit set prematurely.
62629-	 * Add a write memory barrier to prevent CPU re-ordering.
62630-	 */
62631-	wmb();
62632 	trb->ctrl |= DWC3_TRB_CTRL_HWO;
62633 
62634 	dwc3_ep_inc_enq(dep);
62635@@ -1125,6 +1266,50 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
62636 	trace_dwc3_prepare_trb(dep, trb);
62637 }
62638 
62639+/**
62640+ * dwc3_prepare_one_trb - setup one TRB from one request
62641+ * @dep: endpoint for which this request is prepared
62642+ * @req: dwc3_request pointer
62643+ * @trb_length: buffer size of the TRB
62644+ * @chain: should this TRB be chained to the next?
62645+ * @node: only for isochronous endpoints. First TRB needs different type.
62646+ * @use_bounce_buffer: set to use bounce buffer
62647+ * @must_interrupt: set to interrupt on TRB completion
62648+ */
62649+static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
62650+		struct dwc3_request *req, unsigned int trb_length,
62651+		unsigned int chain, unsigned int node, bool use_bounce_buffer,
62652+		bool must_interrupt)
62653+{
62654+	struct dwc3_trb		*trb;
62655+	dma_addr_t		dma;
62656+	unsigned int		stream_id = req->request.stream_id;
62657+	unsigned int		short_not_ok = req->request.short_not_ok;
62658+	unsigned int		no_interrupt = req->request.no_interrupt;
62659+	unsigned int		is_last = req->request.is_last;
62660+
62661+	if (use_bounce_buffer)
62662+		dma = dep->dwc->bounce_addr;
62663+	else if (req->request.num_sgs > 0)
62664+		dma = sg_dma_address(req->start_sg);
62665+	else
62666+		dma = req->request.dma;
62667+
62668+	trb = &dep->trb_pool[dep->trb_enqueue];
62669+
62670+	if (!req->trb) {
62671+		dwc3_gadget_move_started_request(req);
62672+		req->trb = trb;
62673+		req->trb_dma = dwc3_trb_dma_offset(dep, trb);
62674+	}
62675+
62676+	req->num_trbs++;
62677+
62678+	__dwc3_prepare_one_trb(dep, trb, dma, trb_length, chain, node,
62679+			stream_id, short_not_ok, no_interrupt, is_last,
62680+			must_interrupt);
62681+}
62682+
62683 static bool dwc3_needs_extra_trb(struct dwc3_ep *dep, struct dwc3_request *req)
62684 {
62685 	unsigned int maxp = usb_endpoint_maxp(dep->endpoint.desc);
62686@@ -1417,7 +1602,7 @@ static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep)
62687 		dwc3_stop_active_transfer(dep, true, true);
62688 
62689 		list_for_each_entry_safe(req, tmp, &dep->started_list, list)
62690-			dwc3_gadget_move_cancelled_request(req);
62691+			dwc3_gadget_move_cancelled_request(req, DWC3_REQUEST_STATUS_DEQUEUED);
62692 
62693 		/* If ep isn't started, then there's no end transfer pending */
62694 		if (!(dep->flags & DWC3_EP_END_TRANSFER_PENDING))
62695@@ -1633,7 +1818,7 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
62696 	struct dwc3		*dwc = dep->dwc;
62697 
62698 	if (!dep->endpoint.desc || !dwc->pullups_connected || !dwc->connected) {
62699-		dev_err(dwc->dev, "%s: can't queue to disabled endpoint\n",
62700+		dev_dbg(dwc->dev, "%s: can't queue to disabled endpoint\n",
62701 				dep->name);
62702 		return -ESHUTDOWN;
62703 	}
62704@@ -1746,10 +1931,25 @@ static void dwc3_gadget_ep_cleanup_cancelled_requests(struct dwc3_ep *dep)
62705 {
62706 	struct dwc3_request		*req;
62707 	struct dwc3_request		*tmp;
62708+	struct dwc3			*dwc = dep->dwc;
62709 
62710 	list_for_each_entry_safe(req, tmp, &dep->cancelled_list, list) {
62711 		dwc3_gadget_ep_skip_trbs(dep, req);
62712-		dwc3_gadget_giveback(dep, req, -ECONNRESET);
62713+		switch (req->status) {
62714+		case DWC3_REQUEST_STATUS_DISCONNECTED:
62715+			dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
62716+			break;
62717+		case DWC3_REQUEST_STATUS_DEQUEUED:
62718+			dwc3_gadget_giveback(dep, req, -ECONNRESET);
62719+			break;
62720+		case DWC3_REQUEST_STATUS_STALLED:
62721+			dwc3_gadget_giveback(dep, req, -EPIPE);
62722+			break;
62723+		default:
62724+			dev_err(dwc->dev, "request cancelled with wrong reason:%d\n", req->status);
62725+			dwc3_gadget_giveback(dep, req, -ECONNRESET);
62726+			break;
62727+		}
62728 	}
62729 }
62730 
62731@@ -1793,7 +1993,8 @@ static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
62732 			 * cancelled.
62733 			 */
62734 			list_for_each_entry_safe(r, t, &dep->started_list, list)
62735-				dwc3_gadget_move_cancelled_request(r);
62736+				dwc3_gadget_move_cancelled_request(r,
62737+						DWC3_REQUEST_STATUS_DEQUEUED);
62738 
62739 			dep->flags &= ~DWC3_EP_WAIT_TRANSFER_COMPLETE;
62740 
62741@@ -1814,8 +2015,6 @@ int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol)
62742 {
62743 	struct dwc3_gadget_ep_cmd_params	params;
62744 	struct dwc3				*dwc = dep->dwc;
62745-	struct dwc3_request			*req;
62746-	struct dwc3_request			*tmp;
62747 	int					ret;
62748 
62749 	if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
62750@@ -1864,16 +2063,14 @@ int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol)
62751 
62752 		dwc3_stop_active_transfer(dep, true, true);
62753 
62754-		list_for_each_entry_safe(req, tmp, &dep->started_list, list)
62755-			dwc3_gadget_move_cancelled_request(req);
62756+		if (!list_empty(&dep->started_list))
62757+			dep->flags |= DWC3_EP_DELAY_START;
62758 
62759 		if (dep->flags & DWC3_EP_END_TRANSFER_PENDING) {
62760 			dep->flags |= DWC3_EP_PENDING_CLEAR_STALL;
62761 			return 0;
62762 		}
62763 
62764-		dwc3_gadget_ep_cleanup_cancelled_requests(dep);
62765-
62766 		ret = dwc3_send_clear_stall_ep_cmd(dep);
62767 		if (ret) {
62768 			dev_err(dwc->dev, "failed to clear STALL on %s\n",
62769@@ -2072,6 +2269,102 @@ static void dwc3_stop_active_transfers(struct dwc3 *dwc)
62770 	}
62771 }
62772 
62773+static void __dwc3_gadget_set_ssp_rate(struct dwc3 *dwc)
62774+{
62775+	enum usb_ssp_rate	ssp_rate = dwc->gadget_ssp_rate;
62776+	u32			reg;
62777+
62778+	if (ssp_rate == USB_SSP_GEN_UNKNOWN)
62779+		ssp_rate = dwc->max_ssp_rate;
62780+
62781+	reg = dwc3_readl(dwc->regs, DWC3_DCFG);
62782+	reg &= ~DWC3_DCFG_SPEED_MASK;
62783+	reg &= ~DWC3_DCFG_NUMLANES(~0);
62784+
62785+	if (ssp_rate == USB_SSP_GEN_1x2)
62786+		reg |= DWC3_DCFG_SUPERSPEED;
62787+	else if (dwc->max_ssp_rate != USB_SSP_GEN_1x2)
62788+		reg |= DWC3_DCFG_SUPERSPEED_PLUS;
62789+
62790+	if (ssp_rate != USB_SSP_GEN_2x1 &&
62791+	    dwc->max_ssp_rate != USB_SSP_GEN_2x1)
62792+		reg |= DWC3_DCFG_NUMLANES(1);
62793+
62794+	dwc3_writel(dwc->regs, DWC3_DCFG, reg);
62795+}
62796+
62797+static void __dwc3_gadget_set_speed(struct dwc3 *dwc)
62798+{
62799+	enum usb_device_speed	speed;
62800+	u32			reg;
62801+
62802+	speed = dwc->gadget_max_speed;
62803+	if (speed == USB_SPEED_UNKNOWN || speed > dwc->maximum_speed)
62804+		speed = dwc->maximum_speed;
62805+
62806+	if (speed == USB_SPEED_SUPER_PLUS &&
62807+	    DWC3_IP_IS(DWC32)) {
62808+		__dwc3_gadget_set_ssp_rate(dwc);
62809+		return;
62810+	}
62811+
62812+	reg = dwc3_readl(dwc->regs, DWC3_DCFG);
62813+	reg &= ~(DWC3_DCFG_SPEED_MASK);
62814+
62815+	/*
62816+	 * WORKAROUND: DWC3 revision < 2.20a have an issue
62817+	 * which would cause metastability state on Run/Stop
62818+	 * bit if we try to force the IP to USB2-only mode.
62819+	 *
62820+	 * Because of that, we cannot configure the IP to any
62821+	 * speed other than the SuperSpeed
62822+	 *
62823+	 * Refers to:
62824+	 *
62825+	 * STAR#9000525659: Clock Domain Crossing on DCTL in
62826+	 * USB 2.0 Mode
62827+	 */
62828+	if (DWC3_VER_IS_PRIOR(DWC3, 220A) &&
62829+	    !dwc->dis_metastability_quirk) {
62830+		reg |= DWC3_DCFG_SUPERSPEED;
62831+	} else {
62832+		switch (speed) {
62833+		case USB_SPEED_LOW:
62834+			reg |= DWC3_DCFG_LOWSPEED;
62835+			break;
62836+		case USB_SPEED_FULL:
62837+			reg |= DWC3_DCFG_FULLSPEED;
62838+			break;
62839+		case USB_SPEED_HIGH:
62840+			reg |= DWC3_DCFG_HIGHSPEED;
62841+			break;
62842+		case USB_SPEED_SUPER:
62843+			reg |= DWC3_DCFG_SUPERSPEED;
62844+			break;
62845+		case USB_SPEED_SUPER_PLUS:
62846+			if (DWC3_IP_IS(DWC3))
62847+				reg |= DWC3_DCFG_SUPERSPEED;
62848+			else
62849+				reg |= DWC3_DCFG_SUPERSPEED_PLUS;
62850+			break;
62851+		default:
62852+			dev_err(dwc->dev, "invalid speed (%d)\n", speed);
62853+
62854+			if (DWC3_IP_IS(DWC3))
62855+				reg |= DWC3_DCFG_SUPERSPEED;
62856+			else
62857+				reg |= DWC3_DCFG_SUPERSPEED_PLUS;
62858+		}
62859+	}
62860+
62861+	if (DWC3_IP_IS(DWC32) &&
62862+	    speed > USB_SPEED_UNKNOWN &&
62863+	    speed < USB_SPEED_SUPER_PLUS)
62864+		reg &= ~DWC3_DCFG_NUMLANES(~0);
62865+
62866+	dwc3_writel(dwc->regs, DWC3_DCFG, reg);
62867+}
62868+
62869 static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend)
62870 {
62871 	u32			reg;
62872@@ -2094,6 +2387,7 @@ static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend)
62873 		if (dwc->has_hibernation)
62874 			reg |= DWC3_DCTL_KEEP_CONNECT;
62875 
62876+		__dwc3_gadget_set_speed(dwc);
62877 		dwc->pullups_connected = true;
62878 	} else {
62879 		reg &= ~DWC3_DCTL_RUN_STOP;
62880@@ -2121,42 +2415,16 @@ static void dwc3_gadget_disable_irq(struct dwc3 *dwc);
62881 static void __dwc3_gadget_stop(struct dwc3 *dwc);
62882 static int __dwc3_gadget_start(struct dwc3 *dwc);
62883 
62884-static int dwc3_gadget_soft_disconnect(struct dwc3 *dwc)
62885-{
62886-	unsigned long flags;
62887-
62888-	spin_lock_irqsave(&dwc->lock, flags);
62889-	dwc->connected = false;
62890-
62891-	/*
62892-	 * In the Synopsys DesignWare Cores USB3 Databook Rev. 3.30a
62893-	 * Section 4.1.8 Table 4-7, it states that for a device-initiated
62894-	 * disconnect, the SW needs to ensure that it sends "a DEPENDXFER
62895-	 * command for any active transfers" before clearing the RunStop
62896-	 * bit.
62897-	 */
62898-	dwc3_stop_active_transfers(dwc);
62899-	__dwc3_gadget_stop(dwc);
62900-	spin_unlock_irqrestore(&dwc->lock, flags);
62901-
62902-	/*
62903-	 * Note: if the GEVNTCOUNT indicates events in the event buffer, the
62904-	 * driver needs to acknowledge them before the controller can halt.
62905-	 * Simply let the interrupt handler acknowledges and handle the
62906-	 * remaining event generated by the controller while polling for
62907-	 * DSTS.DEVCTLHLT.
62908-	 */
62909-	return dwc3_gadget_run_stop(dwc, false, false);
62910-}
62911-
62912 static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
62913 {
62914 	struct dwc3		*dwc = gadget_to_dwc(g);
62915+	struct dwc3_vendor	*vdwc = container_of(dwc, struct dwc3_vendor, dwc);
62916+	unsigned long		flags;
62917 	int			ret;
62918 
62919 	is_on = !!is_on;
62920+	vdwc->softconnect = is_on;
62921 
62922-	dwc->softconnect = is_on;
62923 	/*
62924 	 * Per databook, when we want to stop the gadget, if a control transfer
62925 	 * is still in process, complete it and get the core into setup phase.
62926@@ -2192,27 +2460,50 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
62927 		return 0;
62928 	}
62929 
62930-	if (dwc->pullups_connected == is_on) {
62931-		pm_runtime_put(dwc->dev);
62932-		return 0;
62933-	}
62934+	/*
62935+	 * Synchronize and disable any further event handling while controller
62936+	 * is being enabled/disabled.
62937+	 */
62938+	disable_irq(dwc->irq_gadget);
62939+
62940+	spin_lock_irqsave(&dwc->lock, flags);
62941 
62942 	if (!is_on) {
62943-		ret = dwc3_gadget_soft_disconnect(dwc);
62944-	} else {
62945+		u32 count;
62946+
62947+		dwc->connected = false;
62948 		/*
62949-		 * In the Synopsys DWC_usb31 1.90a programming guide section
62950-		 * 4.1.9, it specifies that for a reconnect after a
62951-		 * device-initiated disconnect requires a core soft reset
62952-		 * (DCTL.CSftRst) before enabling the run/stop bit.
62953+		 * In the Synopsys DesignWare Cores USB3 Databook Rev. 3.30a
62954+		 * Section 4.1.8 Table 4-7, it states that for a device-initiated
62955+		 * disconnect, the SW needs to ensure that it sends "a DEPENDXFER
62956+		 * command for any active transfers" before clearing the RunStop
62957+		 * bit.
62958 		 */
62959-		dwc3_core_soft_reset(dwc);
62960+		dwc3_stop_active_transfers(dwc);
62961+		__dwc3_gadget_stop(dwc);
62962 
62963-		dwc3_event_buffers_setup(dwc);
62964+		/*
62965+		 * In the Synopsys DesignWare Cores USB3 Databook Rev. 3.30a
62966+		 * Section 1.3.4, it mentions that for the DEVCTRLHLT bit, the
62967+		 * "software needs to acknowledge the events that are generated
62968+		 * (by writing to GEVNTCOUNTn) while it is waiting for this bit
62969+		 * to be set to '1'."
62970+		 */
62971+		count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(0));
62972+		count &= DWC3_GEVNTCOUNT_MASK;
62973+		if (count > 0) {
62974+			dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), count);
62975+			dwc->ev_buf->lpos = (dwc->ev_buf->lpos + count) %
62976+						dwc->ev_buf->length;
62977+		}
62978+	} else {
62979 		__dwc3_gadget_start(dwc);
62980-		ret = dwc3_gadget_run_stop(dwc, true, false);
62981 	}
62982 
62983+	ret = dwc3_gadget_run_stop(dwc, is_on, false);
62984+	spin_unlock_irqrestore(&dwc->lock, flags);
62985+	enable_irq(dwc->irq_gadget);
62986+
62987 	pm_runtime_put(dwc->dev);
62988 
62989 	return ret;
62990@@ -2223,8 +2514,7 @@ static void dwc3_gadget_enable_irq(struct dwc3 *dwc)
62991 	u32			reg;
62992 
62993 	/* Enable all but Start and End of Frame IRQs */
62994-	reg = (DWC3_DEVTEN_VNDRDEVTSTRCVEDEN |
62995-			DWC3_DEVTEN_EVNTOVERFLOWEN |
62996+	reg = (DWC3_DEVTEN_EVNTOVERFLOWEN |
62997 			DWC3_DEVTEN_CMDCMPLTEN |
62998 			DWC3_DEVTEN_ERRTICERREN |
62999 			DWC3_DEVTEN_WKUPEVTEN |
63000@@ -2237,7 +2527,7 @@ static void dwc3_gadget_enable_irq(struct dwc3 *dwc)
63001 
63002 	/* On 2.30a and above this bit enables U3/L2-L1 Suspend Events */
63003 	if (!DWC3_VER_IS_PRIOR(DWC3, 230A))
63004-		reg |= DWC3_DEVTEN_EOPFEN;
63005+		reg |= DWC3_DEVTEN_U3L2L1SUSPEN;
63006 
63007 	dwc3_writel(dwc->regs, DWC3_DEVTEN, reg);
63008 }
63009@@ -2280,9 +2570,7 @@ static void dwc3_gadget_setup_nump(struct dwc3 *dwc)
63010 	u32 reg;
63011 
63012 	ram2_depth = DWC3_GHWPARAMS7_RAM2_DEPTH(dwc->hwparams.hwparams7);
63013-	mdwidth = DWC3_GHWPARAMS0_MDWIDTH(dwc->hwparams.hwparams0);
63014-	if (DWC3_IP_IS(DWC32))
63015-		mdwidth += DWC3_GHWPARAMS6_MDWIDTH(dwc->hwparams.hwparams6);
63016+	mdwidth = dwc3_mdwidth(dwc);
63017 
63018 	nump = ((ram2_depth * mdwidth / 8) - 24 - 16) / 1024;
63019 	nump = min_t(u32, nump, 16);
63020@@ -2300,6 +2588,15 @@ static int __dwc3_gadget_start(struct dwc3 *dwc)
63021 	int			ret = 0;
63022 	u32			reg;
63023 
63024+	/*
63025+	 * If the DWC3 is in runtime suspend, the clocks may be
63026+	 * disabled, so avoid enabling the DWC3 endpoints here.
63027+	 * The DWC3 runtime PM resume routine will handle the
63028+	 * gadget start sequence.
63029+	 */
63030+	if (pm_runtime_suspended(dwc->dev))
63031+		return ret;
63032+
63033 	/*
63034 	 * Use IMOD if enabled via dwc->imod_interval. Otherwise, if
63035 	 * the core supports IMOD, disable it.
63036@@ -2328,6 +2625,17 @@ static int __dwc3_gadget_start(struct dwc3 *dwc)
63037 
63038 	dwc3_gadget_setup_nump(dwc);
63039 
63040+	/*
63041+	 * Currently the controller handles single stream only. So, ignore the
63042+	 * Packet Pending bit for stream selection and don't search for another
63043+	 * stream if the host sends Data Packet with PP=0 (for OUT direction) or
63044+	 * ACK with NumP=0 and PP=0 (for IN direction). This slightly improves
63045+	 * the stream performance.
63046+	 */
63047+	reg = dwc3_readl(dwc->regs, DWC3_DCFG);
63048+	reg |= DWC3_DCFG_IGNSTRMPP;
63049+	dwc3_writel(dwc->regs, DWC3_DCFG, reg);
63050+
63051 	/* Start with SuperSpeed Default */
63052 	dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
63053 
63054@@ -2415,6 +2723,7 @@ static int dwc3_gadget_stop(struct usb_gadget *g)
63055 
63056 	spin_lock_irqsave(&dwc->lock, flags);
63057 	dwc->gadget_driver	= NULL;
63058+	dwc->max_cfg_eps = 0;
63059 	spin_unlock_irqrestore(&dwc->lock, flags);
63060 
63061 	free_irq(dwc->irq_gadget, dwc->ev_buf);
63062@@ -2466,59 +2775,94 @@ static void dwc3_gadget_set_speed(struct usb_gadget *g,
63063 {
63064 	struct dwc3		*dwc = gadget_to_dwc(g);
63065 	unsigned long		flags;
63066-	u32			reg;
63067 
63068 	spin_lock_irqsave(&dwc->lock, flags);
63069-	reg = dwc3_readl(dwc->regs, DWC3_DCFG);
63070-	reg &= ~(DWC3_DCFG_SPEED_MASK);
63071+	dwc->gadget_max_speed = speed;
63072+	spin_unlock_irqrestore(&dwc->lock, flags);
63073+}
63074 
63075-	/*
63076-	 * WORKAROUND: DWC3 revision < 2.20a have an issue
63077-	 * which would cause metastability state on Run/Stop
63078-	 * bit if we try to force the IP to USB2-only mode.
63079-	 *
63080-	 * Because of that, we cannot configure the IP to any
63081-	 * speed other than the SuperSpeed
63082-	 *
63083-	 * Refers to:
63084-	 *
63085-	 * STAR#9000525659: Clock Domain Crossing on DCTL in
63086-	 * USB 2.0 Mode
63087-	 */
63088-	if (DWC3_VER_IS_PRIOR(DWC3, 220A) &&
63089-	    !dwc->dis_metastability_quirk) {
63090-		reg |= DWC3_DCFG_SUPERSPEED;
63091-	} else {
63092-		switch (speed) {
63093-		case USB_SPEED_LOW:
63094-			reg |= DWC3_DCFG_LOWSPEED;
63095-			break;
63096-		case USB_SPEED_FULL:
63097-			reg |= DWC3_DCFG_FULLSPEED;
63098-			break;
63099-		case USB_SPEED_HIGH:
63100-			reg |= DWC3_DCFG_HIGHSPEED;
63101-			break;
63102-		case USB_SPEED_SUPER:
63103-			reg |= DWC3_DCFG_SUPERSPEED;
63104-			break;
63105-		case USB_SPEED_SUPER_PLUS:
63106-			if (DWC3_IP_IS(DWC3))
63107-				reg |= DWC3_DCFG_SUPERSPEED;
63108-			else
63109-				reg |= DWC3_DCFG_SUPERSPEED_PLUS;
63110-			break;
63111-		default:
63112-			dev_err(dwc->dev, "invalid speed (%d)\n", speed);
63113+static void dwc3_gadget_set_ssp_rate(struct usb_gadget *g,
63114+				     enum usb_ssp_rate rate)
63115+{
63116+	struct dwc3		*dwc = gadget_to_dwc(g);
63117+	unsigned long		flags;
63118 
63119-			if (DWC3_IP_IS(DWC3))
63120-				reg |= DWC3_DCFG_SUPERSPEED;
63121-			else
63122-				reg |= DWC3_DCFG_SUPERSPEED_PLUS;
63123-		}
63124+	spin_lock_irqsave(&dwc->lock, flags);
63125+	dwc->gadget_max_speed = USB_SPEED_SUPER_PLUS;
63126+	dwc->gadget_ssp_rate = rate;
63127+	spin_unlock_irqrestore(&dwc->lock, flags);
63128+}
63129+
63130+static int dwc3_gadget_vbus_draw(struct usb_gadget *g, unsigned int mA)
63131+{
63132+	struct dwc3		*dwc = gadget_to_dwc(g);
63133+	union power_supply_propval	val = {0};
63134+	int				ret;
63135+
63136+	if (dwc->usb2_phy)
63137+		return usb_phy_set_power(dwc->usb2_phy, mA);
63138+
63139+	if (!dwc->usb_psy)
63140+		return -EOPNOTSUPP;
63141+
63142+	val.intval = 1000 * mA;
63143+	ret = power_supply_set_property(dwc->usb_psy, POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT, &val);
63144+
63145+	return ret;
63146+}
63147+
63148+/**
63149+ * dwc3_gadget_check_config - ensure dwc3 can support the USB configuration
63150+ * @g: pointer to the USB gadget
63151+ *
63152+ * Used to record the maximum number of endpoints being used in a USB composite
63153+ * device. (across all configurations)  This is to be used in the calculation
63154+ * of the TXFIFO sizes when resizing internal memory for individual endpoints.
63155+ * It will help ensured that the resizing logic reserves enough space for at
63156+ * least one max packet.
63157+ */
63158+static int dwc3_gadget_check_config(struct usb_gadget *g)
63159+{
63160+	struct dwc3 *dwc = gadget_to_dwc(g);
63161+	struct usb_ep *ep;
63162+	int fifo_size = 0;
63163+	int ram1_depth;
63164+	int ep_num = 0;
63165+
63166+	if (!dwc->do_fifo_resize)
63167+		return 0;
63168+
63169+	list_for_each_entry(ep, &g->ep_list, ep_list) {
63170+		/* Only interested in the IN endpoints */
63171+		if (ep->claimed && (ep->address & USB_DIR_IN))
63172+			ep_num++;
63173 	}
63174-	dwc3_writel(dwc->regs, DWC3_DCFG, reg);
63175 
63176+	if (ep_num <= dwc->max_cfg_eps)
63177+		return 0;
63178+
63179+	/* Update the max number of eps in the composition */
63180+	dwc->max_cfg_eps = ep_num;
63181+
63182+	fifo_size = dwc3_gadget_calc_tx_fifo_size(dwc, dwc->max_cfg_eps);
63183+	/* Based on the equation, increment by one for every ep */
63184+	fifo_size += dwc->max_cfg_eps;
63185+
63186+	/* Check if we can fit a single fifo per endpoint */
63187+	ram1_depth = DWC3_RAM1_DEPTH(dwc->hwparams.hwparams7);
63188+	if (fifo_size > ram1_depth)
63189+		return -ENOMEM;
63190+
63191+	return 0;
63192+}
63193+
63194+static void dwc3_gadget_async_callbacks(struct usb_gadget *g, bool enable)
63195+{
63196+	struct dwc3		*dwc = gadget_to_dwc(g);
63197+	unsigned long		flags;
63198+
63199+	spin_lock_irqsave(&dwc->lock, flags);
63200+	dwc->async_callbacks = enable;
63201 	spin_unlock_irqrestore(&dwc->lock, flags);
63202 }
63203 
63204@@ -2530,7 +2874,11 @@ static const struct usb_gadget_ops dwc3_gadget_ops = {
63205 	.udc_start		= dwc3_gadget_start,
63206 	.udc_stop		= dwc3_gadget_stop,
63207 	.udc_set_speed		= dwc3_gadget_set_speed,
63208+	.udc_set_ssp_rate	= dwc3_gadget_set_ssp_rate,
63209 	.get_config_params	= dwc3_gadget_config_params,
63210+	.vbus_draw		= dwc3_gadget_vbus_draw,
63211+	.check_config		= dwc3_gadget_check_config,
63212+	.udc_async_callbacks	= dwc3_gadget_async_callbacks,
63213 };
63214 
63215 /* -------------------------------------------------------------------------- */
63216@@ -2553,12 +2901,10 @@ static int dwc3_gadget_init_control_endpoint(struct dwc3_ep *dep)
63217 static int dwc3_gadget_init_in_endpoint(struct dwc3_ep *dep)
63218 {
63219 	struct dwc3 *dwc = dep->dwc;
63220-	int mdwidth;
63221+	u32 mdwidth;
63222 	int size;
63223 
63224-	mdwidth = DWC3_MDWIDTH(dwc->hwparams.hwparams0);
63225-	if (DWC3_IP_IS(DWC32))
63226-		mdwidth += DWC3_GHWPARAMS6_MDWIDTH(dwc->hwparams.hwparams6);
63227+	mdwidth = dwc3_mdwidth(dwc);
63228 
63229 	/* MDWIDTH is represented in bits, we need it in bytes */
63230 	mdwidth /= 8;
63231@@ -2600,12 +2946,10 @@ static int dwc3_gadget_init_in_endpoint(struct dwc3_ep *dep)
63232 static int dwc3_gadget_init_out_endpoint(struct dwc3_ep *dep)
63233 {
63234 	struct dwc3 *dwc = dep->dwc;
63235-	int mdwidth;
63236+	u32 mdwidth;
63237 	int size;
63238 
63239-	mdwidth = DWC3_MDWIDTH(dwc->hwparams.hwparams0);
63240-	if (DWC3_IP_IS(DWC32))
63241-		mdwidth += DWC3_GHWPARAMS6_MDWIDTH(dwc->hwparams.hwparams6);
63242+	mdwidth = dwc3_mdwidth(dwc);
63243 
63244 	/* MDWIDTH is represented in bits, convert to bytes */
63245 	mdwidth /= 8;
63246@@ -2857,7 +3201,6 @@ static int dwc3_gadget_ep_cleanup_completed_request(struct dwc3_ep *dep,
63247 		const struct dwc3_event_depevt *event,
63248 		struct dwc3_request *req, int status)
63249 {
63250-	int request_status;
63251 	int ret;
63252 
63253 	if (req->request.num_mapped_sgs)
63254@@ -2878,35 +3221,7 @@ static int dwc3_gadget_ep_cleanup_completed_request(struct dwc3_ep *dep,
63255 		req->needs_extra_trb = false;
63256 	}
63257 
63258-	/*
63259-	 * The event status only reflects the status of the TRB with IOC set.
63260-	 * For the requests that don't set interrupt on completion, the driver
63261-	 * needs to check and return the status of the completed TRBs associated
63262-	 * with the request. Use the status of the last TRB of the request.
63263-	 */
63264-	if (req->request.no_interrupt) {
63265-		struct dwc3_trb *trb;
63266-
63267-		trb = dwc3_ep_prev_trb(dep, dep->trb_dequeue);
63268-		switch (DWC3_TRB_SIZE_TRBSTS(trb->size)) {
63269-		case DWC3_TRBSTS_MISSED_ISOC:
63270-			/* Isoc endpoint only */
63271-			request_status = -EXDEV;
63272-			break;
63273-		case DWC3_TRB_STS_XFER_IN_PROG:
63274-			/* Applicable when End Transfer with ForceRM=0 */
63275-		case DWC3_TRBSTS_SETUP_PENDING:
63276-			/* Control endpoint only */
63277-		case DWC3_TRBSTS_OK:
63278-		default:
63279-			request_status = 0;
63280-			break;
63281-		}
63282-	} else {
63283-		request_status = status;
63284-	}
63285-
63286-	dwc3_gadget_giveback(dep, req, request_status);
63287+	dwc3_gadget_giveback(dep, req, status);
63288 
63289 out:
63290 	return ret;
63291@@ -2931,6 +3246,11 @@ static void dwc3_gadget_ep_cleanup_completed_requests(struct dwc3_ep *dep,
63292 static bool dwc3_gadget_ep_should_continue(struct dwc3_ep *dep)
63293 {
63294 	struct dwc3_request	*req;
63295+	struct dwc3		*dwc = dep->dwc;
63296+
63297+	if (!dep->endpoint.desc || !dwc->pullups_connected ||
63298+	    !dwc->connected)
63299+		return false;
63300 
63301 	if (!list_empty(&dep->pending_list))
63302 		return true;
63303@@ -2958,14 +3278,14 @@ static bool dwc3_gadget_endpoint_trbs_complete(struct dwc3_ep *dep,
63304 	struct dwc3		*dwc = dep->dwc;
63305 	bool			no_started_trb = true;
63306 
63307+	if (!dep->endpoint.desc)
63308+		return no_started_trb;
63309+
63310 	dwc3_gadget_ep_cleanup_completed_requests(dep, event, status);
63311 
63312 	if (dep->flags & DWC3_EP_END_TRANSFER_PENDING)
63313 		goto out;
63314 
63315-	if (!dep->endpoint.desc)
63316-		return no_started_trb;
63317-
63318 	if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
63319 		list_empty(&dep->started_list) &&
63320 		(list_empty(&dep->pending_list) || status == -EXDEV))
63321@@ -3064,14 +3384,6 @@ static void dwc3_gadget_endpoint_command_complete(struct dwc3_ep *dep,
63322 	if (cmd != DWC3_DEPCMD_ENDTRANSFER)
63323 		return;
63324 
63325-	/*
63326-	 * The END_TRANSFER command will cause the controller to generate a
63327-	 * NoStream Event, and it's not due to the host DP NoStream rejection.
63328-	 * Ignore the next NoStream event.
63329-	 */
63330-	if (dep->stream_capable)
63331-		dep->flags |= DWC3_EP_IGNORE_NEXT_NOSTREAM;
63332-
63333 	dep->flags &= ~DWC3_EP_END_TRANSFER_PENDING;
63334 	dep->flags &= ~DWC3_EP_TRANSFER_STARTED;
63335 	dwc3_gadget_ep_cleanup_cancelled_requests(dep);
63336@@ -3210,7 +3522,7 @@ static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
63337 
63338 static void dwc3_disconnect_gadget(struct dwc3 *dwc)
63339 {
63340-	if (dwc->gadget_driver && dwc->gadget_driver->disconnect) {
63341+	if (dwc->async_callbacks && dwc->gadget_driver->disconnect) {
63342 		spin_unlock(&dwc->lock);
63343 		dwc->gadget_driver->disconnect(dwc->gadget);
63344 		spin_lock(&dwc->lock);
63345@@ -3219,7 +3531,7 @@ static void dwc3_disconnect_gadget(struct dwc3 *dwc)
63346 
63347 static void dwc3_suspend_gadget(struct dwc3 *dwc)
63348 {
63349-	if (dwc->gadget_driver && dwc->gadget_driver->suspend) {
63350+	if (dwc->async_callbacks && dwc->gadget_driver->suspend) {
63351 		spin_unlock(&dwc->lock);
63352 		dwc->gadget_driver->suspend(dwc->gadget);
63353 		spin_lock(&dwc->lock);
63354@@ -3228,7 +3540,7 @@ static void dwc3_suspend_gadget(struct dwc3 *dwc)
63355 
63356 static void dwc3_resume_gadget(struct dwc3 *dwc)
63357 {
63358-	if (dwc->gadget_driver && dwc->gadget_driver->resume) {
63359+	if (dwc->async_callbacks && dwc->gadget_driver->resume) {
63360 		spin_unlock(&dwc->lock);
63361 		dwc->gadget_driver->resume(dwc->gadget);
63362 		spin_lock(&dwc->lock);
63363@@ -3240,14 +3552,14 @@ static void dwc3_reset_gadget(struct dwc3 *dwc)
63364 	if (!dwc->gadget_driver)
63365 		return;
63366 
63367-	if (dwc->gadget->speed != USB_SPEED_UNKNOWN) {
63368+	if (dwc->async_callbacks && dwc->gadget->speed != USB_SPEED_UNKNOWN) {
63369 		spin_unlock(&dwc->lock);
63370 		usb_gadget_udc_reset(dwc->gadget, dwc->gadget_driver);
63371 		spin_lock(&dwc->lock);
63372 	}
63373 }
63374 
63375-static void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force,
63376+void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force,
63377 	bool interrupt)
63378 {
63379 	struct dwc3_gadget_ep_cmd_params params;
63380@@ -3294,11 +3606,20 @@ static void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force,
63381 	WARN_ON_ONCE(ret);
63382 	dep->resource_index = 0;
63383 
63384+	/*
63385+	 * The END_TRANSFER command will cause the controller to generate a
63386+	 * NoStream Event, and it's not due to the host DP NoStream rejection.
63387+	 * Ignore the next NoStream event.
63388+	 */
63389+	if (dep->stream_capable)
63390+		dep->flags |= DWC3_EP_IGNORE_NEXT_NOSTREAM;
63391+
63392 	if (!interrupt)
63393 		dep->flags &= ~DWC3_EP_TRANSFER_STARTED;
63394 	else
63395 		dep->flags |= DWC3_EP_END_TRANSFER_PENDING;
63396 }
63397+EXPORT_SYMBOL_GPL(dwc3_stop_active_transfer);
63398 
63399 static void dwc3_clear_stall_all_ep(struct dwc3 *dwc)
63400 {
63401@@ -3413,12 +3734,18 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
63402 	struct dwc3_ep		*dep;
63403 	int			ret;
63404 	u32			reg;
63405+	u8			lanes = 1;
63406 	u8			speed;
63407 
63408 	reg = dwc3_readl(dwc->regs, DWC3_DSTS);
63409 	speed = reg & DWC3_DSTS_CONNECTSPD;
63410 	dwc->speed = speed;
63411 
63412+	if (DWC3_IP_IS(DWC32))
63413+		lanes = DWC3_DSTS_CONNLANES(reg) + 1;
63414+
63415+	dwc->gadget->ssp_rate = USB_SSP_GEN_UNKNOWN;
63416+
63417 	/*
63418 	 * RAMClkSel is reset to 0 after USB reset, so it must be reprogrammed
63419 	 * each time on Connect Done.
63420@@ -3433,6 +3760,11 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
63421 		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
63422 		dwc->gadget->ep0->maxpacket = 512;
63423 		dwc->gadget->speed = USB_SPEED_SUPER_PLUS;
63424+
63425+		if (lanes > 1)
63426+			dwc->gadget->ssp_rate = USB_SSP_GEN_2x2;
63427+		else
63428+			dwc->gadget->ssp_rate = USB_SSP_GEN_2x1;
63429 		break;
63430 	case DWC3_DSTS_SUPERSPEED:
63431 		/*
63432@@ -3454,6 +3786,11 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
63433 		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
63434 		dwc->gadget->ep0->maxpacket = 512;
63435 		dwc->gadget->speed = USB_SPEED_SUPER;
63436+
63437+		if (lanes > 1) {
63438+			dwc->gadget->speed = USB_SPEED_SUPER_PLUS;
63439+			dwc->gadget->ssp_rate = USB_SSP_GEN_1x2;
63440+		}
63441 		break;
63442 	case DWC3_DSTS_HIGHSPEED:
63443 		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
63444@@ -3545,7 +3882,7 @@ static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc)
63445 	 * implemented.
63446 	 */
63447 
63448-	if (dwc->gadget_driver && dwc->gadget_driver->resume) {
63449+	if (dwc->async_callbacks && dwc->gadget_driver->resume) {
63450 		spin_unlock(&dwc->lock);
63451 		dwc->gadget_driver->resume(dwc->gadget);
63452 		spin_lock(&dwc->lock);
63453@@ -3690,6 +4027,7 @@ static void dwc3_gadget_interrupt(struct dwc3 *dwc,
63454 {
63455 	switch (event->type) {
63456 	case DWC3_DEVICE_EVENT_DISCONNECT:
63457+		usb_notify_online_status(false);
63458 		dwc3_gadget_disconnect_interrupt(dwc);
63459 		break;
63460 	case DWC3_DEVICE_EVENT_RESET:
63461@@ -3697,6 +4035,7 @@ static void dwc3_gadget_interrupt(struct dwc3 *dwc,
63462 		break;
63463 	case DWC3_DEVICE_EVENT_CONNECT_DONE:
63464 		dwc3_gadget_conndone_interrupt(dwc);
63465+		usb_notify_online_status(true);
63466 		break;
63467 	case DWC3_DEVICE_EVENT_WAKEUP:
63468 		dwc3_gadget_wakeup_interrupt(dwc);
63469@@ -3711,7 +4050,7 @@ static void dwc3_gadget_interrupt(struct dwc3 *dwc,
63470 	case DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE:
63471 		dwc3_gadget_linksts_change_interrupt(dwc, event->event_info);
63472 		break;
63473-	case DWC3_DEVICE_EVENT_EOPF:
63474+	case DWC3_DEVICE_EVENT_SUSPEND:
63475 		/* It changed to be suspend event for version 2.30a and above */
63476 		if (!DWC3_VER_IS_PRIOR(DWC3, 230A)) {
63477 			/*
63478@@ -3779,6 +4118,7 @@ static irqreturn_t dwc3_process_event_buf(struct dwc3_event_buffer *evt)
63479 	}
63480 
63481 	evt->count = 0;
63482+	evt->flags &= ~DWC3_EVENT_PENDING;
63483 	ret = IRQ_HANDLED;
63484 
63485 	/* Unmask interrupt */
63486@@ -3791,9 +4131,6 @@ static irqreturn_t dwc3_process_event_buf(struct dwc3_event_buffer *evt)
63487 		dwc3_writel(dwc->regs, DWC3_DEV_IMOD(0), dwc->imod_interval);
63488 	}
63489 
63490-	/* Keep the clearing of DWC3_EVENT_PENDING at the end */
63491-	evt->flags &= ~DWC3_EVENT_PENDING;
63492-
63493 	return ret;
63494 }
63495 
63496@@ -3804,11 +4141,9 @@ static irqreturn_t dwc3_thread_interrupt(int irq, void *_evt)
63497 	unsigned long flags;
63498 	irqreturn_t ret = IRQ_NONE;
63499 
63500-	local_bh_disable();
63501 	spin_lock_irqsave(&dwc->lock, flags);
63502 	ret = dwc3_process_event_buf(evt);
63503 	spin_unlock_irqrestore(&dwc->lock, flags);
63504-	local_bh_enable();
63505 
63506 	return ret;
63507 }
63508@@ -3959,6 +4294,7 @@ int dwc3_gadget_init(struct dwc3 *dwc)
63509 	dev->platform_data		= dwc;
63510 	dwc->gadget->ops		= &dwc3_gadget_ops;
63511 	dwc->gadget->speed		= USB_SPEED_UNKNOWN;
63512+	dwc->gadget->ssp_rate		= USB_SSP_GEN_UNKNOWN;
63513 	dwc->gadget->sg_supported	= true;
63514 	dwc->gadget->name		= "dwc3-gadget";
63515 	dwc->gadget->lpm_capable	= !dwc->usb2_gadget_lpm_disable;
63516@@ -3985,6 +4321,7 @@ int dwc3_gadget_init(struct dwc3 *dwc)
63517 				dwc->revision);
63518 
63519 	dwc->gadget->max_speed		= dwc->maximum_speed;
63520+	dwc->gadget->max_ssp_rate	= dwc->max_ssp_rate;
63521 
63522 	/*
63523 	 * REVISIT: Here we should clear all pending IRQs to be
63524@@ -4001,7 +4338,10 @@ int dwc3_gadget_init(struct dwc3 *dwc)
63525 		goto err5;
63526 	}
63527 
63528-	dwc3_gadget_set_speed(dwc->gadget, dwc->maximum_speed);
63529+	if (DWC3_IP_IS(DWC32) && dwc->maximum_speed == USB_SPEED_SUPER_PLUS)
63530+		dwc3_gadget_set_ssp_rate(dwc->gadget, dwc->max_ssp_rate);
63531+	else
63532+		dwc3_gadget_set_speed(dwc->gadget, dwc->maximum_speed);
63533 
63534 	return 0;
63535 
63536@@ -4056,9 +4396,10 @@ int dwc3_gadget_suspend(struct dwc3 *dwc)
63537 
63538 int dwc3_gadget_resume(struct dwc3 *dwc)
63539 {
63540+	struct dwc3_vendor	*vdwc = container_of(dwc, struct dwc3_vendor, dwc);
63541 	int			ret;
63542 
63543-	if (!dwc->gadget_driver || !dwc->softconnect)
63544+	if (!dwc->gadget_driver || !vdwc->softconnect)
63545 		return 0;
63546 
63547 	ret = __dwc3_gadget_start(dwc);
63548diff --git a/drivers/usb/dwc3/gadget.h b/drivers/usb/dwc3/gadget.h
63549index 0cd281949..77df4b6d6 100644
63550--- a/drivers/usb/dwc3/gadget.h
63551+++ b/drivers/usb/dwc3/gadget.h
63552@@ -90,15 +90,17 @@ static inline void dwc3_gadget_move_started_request(struct dwc3_request *req)
63553 /**
63554  * dwc3_gadget_move_cancelled_request - move @req to the cancelled_list
63555  * @req: the request to be moved
63556+ * @reason: reason for cancelling the dwc3 request
63557  *
63558  * Caller should take care of locking. This function will move @req from its
63559  * current list to the endpoint's cancelled_list.
63560  */
63561-static inline void dwc3_gadget_move_cancelled_request(struct dwc3_request *req)
63562+static inline void dwc3_gadget_move_cancelled_request(struct dwc3_request *req,
63563+		unsigned int reason)
63564 {
63565 	struct dwc3_ep		*dep = req->dep;
63566 
63567-	req->status = DWC3_REQUEST_STATUS_CANCELLED;
63568+	req->status = reason;
63569 	list_move_tail(&req->list, &dep->cancelled_list);
63570 }
63571 
63572diff --git a/drivers/usb/dwc3/trace.c b/drivers/usb/dwc3/trace.c
63573index 1b45a9723..8a4fe12bb 100644
63574--- a/drivers/usb/dwc3/trace.c
63575+++ b/drivers/usb/dwc3/trace.c
63576@@ -9,3 +9,10 @@
63577 
63578 #define CREATE_TRACE_POINTS
63579 #include "trace.h"
63580+
63581+EXPORT_TRACEPOINT_SYMBOL_GPL(dwc3_ep_queue);
63582+EXPORT_TRACEPOINT_SYMBOL_GPL(dwc3_readl);
63583+EXPORT_TRACEPOINT_SYMBOL_GPL(dwc3_writel);
63584+EXPORT_TRACEPOINT_SYMBOL_GPL(dwc3_event);
63585+EXPORT_TRACEPOINT_SYMBOL_GPL(dwc3_ctrl_req);
63586+EXPORT_TRACEPOINT_SYMBOL_GPL(dwc3_complete_trb);
63587diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
63588index a98079990..504c1cbc2 100644
63589--- a/drivers/usb/gadget/composite.c
63590+++ b/drivers/usb/gadget/composite.c
63591@@ -13,6 +13,7 @@
63592 #include <linux/module.h>
63593 #include <linux/device.h>
63594 #include <linux/utsname.h>
63595+#include <linux/bitfield.h>
63596 
63597 #include <linux/usb/composite.h>
63598 #include <linux/usb/otg.h>
63599@@ -734,47 +735,77 @@ static int bos_desc(struct usb_composite_dev *cdev)
63600 	/* The SuperSpeedPlus USB Device Capability descriptor */
63601 	if (gadget_is_superspeed_plus(cdev->gadget)) {
63602 		struct usb_ssp_cap_descriptor *ssp_cap;
63603+		u8 ssac = 1;
63604+		u8 ssic;
63605+		int i;
63606 
63607-		ssp_cap = cdev->req->buf + le16_to_cpu(bos->wTotalLength);
63608-		bos->bNumDeviceCaps++;
63609+		if (cdev->gadget->max_ssp_rate == USB_SSP_GEN_2x2)
63610+			ssac = 3;
63611 
63612 		/*
63613-		 * Report typical values.
63614+		 * Paired RX and TX sublink speed attributes share
63615+		 * the same SSID.
63616 		 */
63617+		ssic = (ssac + 1) / 2 - 1;
63618+
63619+		ssp_cap = cdev->req->buf + le16_to_cpu(bos->wTotalLength);
63620+		bos->bNumDeviceCaps++;
63621 
63622-		le16_add_cpu(&bos->wTotalLength, USB_DT_USB_SSP_CAP_SIZE(1));
63623-		ssp_cap->bLength = USB_DT_USB_SSP_CAP_SIZE(1);
63624+		le16_add_cpu(&bos->wTotalLength, USB_DT_USB_SSP_CAP_SIZE(ssac));
63625+		ssp_cap->bLength = USB_DT_USB_SSP_CAP_SIZE(ssac);
63626 		ssp_cap->bDescriptorType = USB_DT_DEVICE_CAPABILITY;
63627 		ssp_cap->bDevCapabilityType = USB_SSP_CAP_TYPE;
63628 		ssp_cap->bReserved = 0;
63629 		ssp_cap->wReserved = 0;
63630 
63631-		/* SSAC = 1 (2 attributes) */
63632-		ssp_cap->bmAttributes = cpu_to_le32(1);
63633+		ssp_cap->bmAttributes =
63634+			cpu_to_le32(FIELD_PREP(USB_SSP_SUBLINK_SPEED_ATTRIBS, ssac) |
63635+				    FIELD_PREP(USB_SSP_SUBLINK_SPEED_IDS, ssic));
63636 
63637-		/* Min RX/TX Lane Count = 1 */
63638 		ssp_cap->wFunctionalitySupport =
63639-			cpu_to_le16((1 << 8) | (1 << 12));
63640+			cpu_to_le16(FIELD_PREP(USB_SSP_MIN_SUBLINK_SPEED_ATTRIBUTE_ID, 0) |
63641+				    FIELD_PREP(USB_SSP_MIN_RX_LANE_COUNT, 1) |
63642+				    FIELD_PREP(USB_SSP_MIN_TX_LANE_COUNT, 1));
63643 
63644 		/*
63645-		 * bmSublinkSpeedAttr[0]:
63646-		 *   ST  = Symmetric, RX
63647-		 *   LSE =  3 (Gbps)
63648-		 *   LP  =  1 (SuperSpeedPlus)
63649-		 *   LSM = 10 (10 Gbps)
63650-		 */
63651-		ssp_cap->bmSublinkSpeedAttr[0] =
63652-			cpu_to_le32((3 << 4) | (1 << 14) | (0xa << 16));
63653-		/*
63654-		 * bmSublinkSpeedAttr[1] =
63655-		 *   ST  = Symmetric, TX
63656-		 *   LSE =  3 (Gbps)
63657-		 *   LP  =  1 (SuperSpeedPlus)
63658-		 *   LSM = 10 (10 Gbps)
63659+		 * Use 1 SSID if the gadget supports up to gen2x1 or not
63660+		 * specified:
63661+		 * - SSID 0 for symmetric RX/TX sublink speed of 10 Gbps.
63662+		 *
63663+		 * Use 1 SSID if the gadget supports up to gen1x2:
63664+		 * - SSID 0 for symmetric RX/TX sublink speed of 5 Gbps.
63665+		 *
63666+		 * Use 2 SSIDs if the gadget supports up to gen2x2:
63667+		 * - SSID 0 for symmetric RX/TX sublink speed of 5 Gbps.
63668+		 * - SSID 1 for symmetric RX/TX sublink speed of 10 Gbps.
63669 		 */
63670-		ssp_cap->bmSublinkSpeedAttr[1] =
63671-			cpu_to_le32((3 << 4) | (1 << 14) |
63672-				    (0xa << 16) | (1 << 7));
63673+		for (i = 0; i < ssac + 1; i++) {
63674+			u8 ssid;
63675+			u8 mantissa;
63676+			u8 type;
63677+
63678+			ssid = i >> 1;
63679+
63680+			if (cdev->gadget->max_ssp_rate == USB_SSP_GEN_2x1 ||
63681+			    cdev->gadget->max_ssp_rate == USB_SSP_GEN_UNKNOWN)
63682+				mantissa = 10;
63683+			else
63684+				mantissa = 5 << ssid;
63685+
63686+			if (i % 2)
63687+				type = USB_SSP_SUBLINK_SPEED_ST_SYM_TX;
63688+			else
63689+				type = USB_SSP_SUBLINK_SPEED_ST_SYM_RX;
63690+
63691+			ssp_cap->bmSublinkSpeedAttr[i] =
63692+				cpu_to_le32(FIELD_PREP(USB_SSP_SUBLINK_SPEED_SSID, ssid) |
63693+					    FIELD_PREP(USB_SSP_SUBLINK_SPEED_LSE,
63694+						       USB_SSP_SUBLINK_SPEED_LSE_GBPS) |
63695+					    FIELD_PREP(USB_SSP_SUBLINK_SPEED_ST, type) |
63696+					    FIELD_PREP(USB_SSP_SUBLINK_SPEED_LP,
63697+						       USB_SSP_SUBLINK_SPEED_LP_SSP) |
63698+					    FIELD_PREP(USB_SSP_SUBLINK_SPEED_LSM, mantissa));
63699+		}
63700 	}
63701 
63702 	return le16_to_cpu(bos->wTotalLength);
63703@@ -1648,18 +1679,6 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
63704 	struct usb_function		*f = NULL;
63705 	u8				endp;
63706 
63707-	if (w_length > USB_COMP_EP0_BUFSIZ) {
63708-		if (ctrl->bRequestType & USB_DIR_IN) {
63709-			/* Cast away the const, we are going to overwrite on purpose. */
63710-			__le16 *temp = (__le16 *)&ctrl->wLength;
63711-
63712-			*temp = cpu_to_le16(USB_COMP_EP0_BUFSIZ);
63713-			w_length = USB_COMP_EP0_BUFSIZ;
63714-		} else {
63715-			goto done;
63716-		}
63717-	}
63718-
63719 	/* partial re-init of the response message; the function or the
63720 	 * gadget might need to intercept e.g. a control-OUT completion
63721 	 * when we delegate to it.
63722@@ -1944,9 +1963,6 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
63723 				if (w_index != 0x5 || (w_value >> 8))
63724 					break;
63725 				interface = w_value & 0xFF;
63726-				if (interface >= MAX_CONFIG_INTERFACES ||
63727-				    !os_desc_cfg->interface[interface])
63728-					break;
63729 				buf[6] = w_index;
63730 				count = count_ext_prop(os_desc_cfg,
63731 					interface);
63732@@ -2061,7 +2077,7 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
63733 	return value;
63734 }
63735 
63736-void composite_disconnect(struct usb_gadget *gadget)
63737+static void __composite_disconnect(struct usb_gadget *gadget)
63738 {
63739 	struct usb_composite_dev	*cdev = get_gadget_data(gadget);
63740 	unsigned long			flags;
63741@@ -2078,6 +2094,23 @@ void composite_disconnect(struct usb_gadget *gadget)
63742 	spin_unlock_irqrestore(&cdev->lock, flags);
63743 }
63744 
63745+void composite_disconnect(struct usb_gadget *gadget)
63746+{
63747+	usb_gadget_vbus_draw(gadget, 0);
63748+	__composite_disconnect(gadget);
63749+}
63750+
63751+void composite_reset(struct usb_gadget *gadget)
63752+{
63753+	/*
63754+	 * Section 1.4.13 Standard Downstream Port of the USB battery charging
63755+	 * specification v1.2 states that a device connected on an SDP shall only
63756+	 * draw at max 100mA while in a connected, but unconfigured state.
63757+	 */
63758+	usb_gadget_vbus_draw(gadget, 100);
63759+	__composite_disconnect(gadget);
63760+}
63761+
63762 /*-------------------------------------------------------------------------*/
63763 
63764 static ssize_t suspended_show(struct device *dev, struct device_attribute *attr,
63765@@ -2176,7 +2209,7 @@ int composite_dev_prepare(struct usb_composite_driver *composite,
63766 	if (!cdev->req)
63767 		return -ENOMEM;
63768 
63769-	cdev->req->buf = kzalloc(USB_COMP_EP0_BUFSIZ, GFP_KERNEL);
63770+	cdev->req->buf = kmalloc(USB_COMP_EP0_BUFSIZ, GFP_KERNEL);
63771 	if (!cdev->req->buf)
63772 		goto fail;
63773 
63774@@ -2398,7 +2431,7 @@ static const struct usb_gadget_driver composite_driver_template = {
63775 	.unbind		= composite_unbind,
63776 
63777 	.setup		= composite_setup,
63778-	.reset		= composite_disconnect,
63779+	.reset		= composite_reset,
63780 	.disconnect	= composite_disconnect,
63781 
63782 	.suspend	= composite_suspend,
63783diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
63784index d51ea1c05..f2a2f1193 100644
63785--- a/drivers/usb/gadget/configfs.c
63786+++ b/drivers/usb/gadget/configfs.c
63787@@ -10,6 +10,32 @@
63788 #include "u_f.h"
63789 #include "u_os_desc.h"
63790 
63791+#ifdef CONFIG_USB_CONFIGFS_UEVENT
63792+#include <linux/platform_device.h>
63793+#include <linux/kdev_t.h>
63794+#include <linux/usb/ch9.h>
63795+
63796+#ifdef CONFIG_USB_CONFIGFS_F_ACC
63797+extern int acc_ctrlrequest(struct usb_composite_dev *cdev,
63798+				const struct usb_ctrlrequest *ctrl);
63799+void acc_disconnect(void);
63800+#endif
63801+static struct class *android_class;
63802+static struct device *android_device;
63803+static int index;
63804+static int gadget_index;
63805+
63806+struct device *create_function_device(char *name)
63807+{
63808+	if (android_device && !IS_ERR(android_device))
63809+		return device_create(android_class, android_device,
63810+			MKDEV(0, index++), NULL, name);
63811+	else
63812+		return ERR_PTR(-EINVAL);
63813+}
63814+EXPORT_SYMBOL_GPL(create_function_device);
63815+#endif
63816+
63817 int check_user_usb_string(const char *name,
63818 		struct usb_gadget_strings *stringtab_dev)
63819 {
63820@@ -51,6 +77,12 @@ struct gadget_info {
63821 	char qw_sign[OS_STRING_QW_SIGN_LEN];
63822 	spinlock_t spinlock;
63823 	bool unbind;
63824+#ifdef CONFIG_USB_CONFIGFS_UEVENT
63825+	bool connected;
63826+	bool sw_connected;
63827+	struct work_struct work;
63828+	struct device *dev;
63829+#endif
63830 };
63831 
63832 static inline struct gadget_info *to_gadget_info(struct config_item *item)
63833@@ -272,7 +304,7 @@ static ssize_t gadget_dev_desc_UDC_store(struct config_item *item,
63834 
63835 	mutex_lock(&gi->lock);
63836 
63837-	if (!strlen(name)) {
63838+	if (!strlen(name) || strcmp(name, "none") == 0) {
63839 		ret = unregister_gadget(gi);
63840 		if (ret)
63841 			goto err;
63842@@ -1270,6 +1302,9 @@ static void purge_configs_funcs(struct gadget_info *gi)
63843 					f->name, f);
63844 				f->unbind(c, f);
63845 			}
63846+
63847+			if (f->bind_deactivated)
63848+				usb_function_activate(f);
63849 		}
63850 		c->next_interface_id = 0;
63851 		memset(c->interface, 0, sizeof(c->interface));
63852@@ -1404,6 +1439,10 @@ static int configfs_composite_bind(struct usb_gadget *gadget,
63853 				goto err_purge_funcs;
63854 			}
63855 		}
63856+		ret = usb_gadget_check_config(cdev->gadget);
63857+		if (ret)
63858+			goto err_purge_funcs;
63859+
63860 		usb_ep_autoconfig_reset(cdev->gadget);
63861 	}
63862 	if (cdev->use_os_string) {
63863@@ -1422,6 +1461,57 @@ static int configfs_composite_bind(struct usb_gadget *gadget,
63864 	return ret;
63865 }
63866 
63867+#ifdef CONFIG_USB_CONFIGFS_UEVENT
63868+static void android_work(struct work_struct *data)
63869+{
63870+	struct gadget_info *gi = container_of(data, struct gadget_info, work);
63871+	struct usb_composite_dev *cdev = &gi->cdev;
63872+	char *disconnected[2] = { "USB_STATE=DISCONNECTED", NULL };
63873+	char *connected[2]    = { "USB_STATE=CONNECTED", NULL };
63874+	char *configured[2]   = { "USB_STATE=CONFIGURED", NULL };
63875+	/* 0-connected 1-configured 2-disconnected */
63876+	bool status[3] = { false, false, false };
63877+	unsigned long flags;
63878+	bool uevent_sent = false;
63879+
63880+	spin_lock_irqsave(&cdev->lock, flags);
63881+	if (cdev->config)
63882+		status[1] = true;
63883+
63884+	if (gi->connected != gi->sw_connected) {
63885+		if (gi->connected)
63886+			status[0] = true;
63887+		else
63888+			status[2] = true;
63889+		gi->sw_connected = gi->connected;
63890+	}
63891+	spin_unlock_irqrestore(&cdev->lock, flags);
63892+
63893+	if (status[0]) {
63894+		kobject_uevent_env(&gi->dev->kobj, KOBJ_CHANGE, connected);
63895+		pr_info("%s: sent uevent %s\n", __func__, connected[0]);
63896+		uevent_sent = true;
63897+	}
63898+
63899+	if (status[1]) {
63900+		kobject_uevent_env(&gi->dev->kobj, KOBJ_CHANGE, configured);
63901+		pr_info("%s: sent uevent %s\n", __func__, configured[0]);
63902+		uevent_sent = true;
63903+	}
63904+
63905+	if (status[2]) {
63906+		kobject_uevent_env(&gi->dev->kobj, KOBJ_CHANGE, disconnected);
63907+		pr_info("%s: sent uevent %s\n", __func__, disconnected[0]);
63908+		uevent_sent = true;
63909+	}
63910+
63911+	if (!uevent_sent) {
63912+		pr_info("%s: did not send uevent (%d %d %p)\n", __func__,
63913+			gi->connected, gi->sw_connected, cdev->config);
63914+	}
63915+}
63916+#endif
63917+
63918 static void configfs_composite_unbind(struct usb_gadget *gadget)
63919 {
63920 	struct usb_composite_dev	*cdev;
63921@@ -1449,6 +1539,60 @@ static void configfs_composite_unbind(struct usb_gadget *gadget)
63922 	spin_unlock_irqrestore(&gi->spinlock, flags);
63923 }
63924 
63925+#ifdef CONFIG_USB_CONFIGFS_UEVENT
63926+static int android_setup(struct usb_gadget *gadget,
63927+			const struct usb_ctrlrequest *c)
63928+{
63929+	struct usb_composite_dev *cdev;
63930+	unsigned long flags;
63931+	struct gadget_info *gi;
63932+	int value = -EOPNOTSUPP;
63933+	struct usb_function_instance *fi;
63934+
63935+	if (!android_device)
63936+		return 0;
63937+
63938+	gi = dev_get_drvdata(android_device);
63939+	spin_lock_irqsave(&gi->spinlock, flags);
63940+	cdev = get_gadget_data(gadget);
63941+	if (!cdev || gi->unbind) {
63942+		spin_unlock_irqrestore(&gi->spinlock, flags);
63943+		return 0;
63944+	}
63945+
63946+	if (c->bRequest == USB_REQ_GET_DESCRIPTOR &&
63947+	    (c->wValue >> 8) == USB_DT_CONFIG && !gi->connected) {
63948+		gi->connected = 1;
63949+		schedule_work(&gi->work);
63950+	}
63951+
63952+	list_for_each_entry(fi, &gi->available_func, cfs_list) {
63953+		if (fi != NULL && fi->f != NULL && fi->f->setup != NULL) {
63954+			value = fi->f->setup(fi->f, c);
63955+			if (value >= 0)
63956+				break;
63957+		}
63958+	}
63959+
63960+#ifdef CONFIG_USB_CONFIGFS_F_ACC
63961+	if (value < 0)
63962+		value = acc_ctrlrequest(cdev, c);
63963+#endif
63964+
63965+	if (value < 0)
63966+		value = composite_setup(gadget, c);
63967+
63968+	if (c->bRequest == USB_REQ_SET_CONFIGURATION &&
63969+						cdev->config) {
63970+		schedule_work(&gi->work);
63971+	}
63972+	spin_unlock_irqrestore(&gi->spinlock, flags);
63973+
63974+	return value;
63975+}
63976+
63977+#else // CONFIG_USB_CONFIGFS_UEVENT
63978+
63979 static int configfs_composite_setup(struct usb_gadget *gadget,
63980 		const struct usb_ctrlrequest *ctrl)
63981 {
63982@@ -1474,6 +1618,8 @@ static int configfs_composite_setup(struct usb_gadget *gadget,
63983 	return ret;
63984 }
63985 
63986+#endif // CONFIG_USB_CONFIGFS_UEVENT
63987+
63988 static void configfs_composite_disconnect(struct usb_gadget *gadget)
63989 {
63990 	struct usb_composite_dev *cdev;
63991@@ -1484,6 +1630,14 @@ static void configfs_composite_disconnect(struct usb_gadget *gadget)
63992 	if (!cdev)
63993 		return;
63994 
63995+#ifdef CONFIG_USB_CONFIGFS_F_ACC
63996+	/*
63997+	 * accessory HID support can be active while the
63998+	 * accessory function is not actually enabled,
63999+	 * so we need to inform it when we are disconnected.
64000+	 */
64001+	acc_disconnect();
64002+#endif
64003 	gi = container_of(cdev, struct gadget_info, cdev);
64004 	spin_lock_irqsave(&gi->spinlock, flags);
64005 	cdev = get_gadget_data(gadget);
64006@@ -1492,10 +1646,36 @@ static void configfs_composite_disconnect(struct usb_gadget *gadget)
64007 		return;
64008 	}
64009 
64010+#ifdef CONFIG_USB_CONFIGFS_UEVENT
64011+	gi->connected = 0;
64012+	schedule_work(&gi->work);
64013+#endif
64014 	composite_disconnect(gadget);
64015 	spin_unlock_irqrestore(&gi->spinlock, flags);
64016 }
64017 
64018+static void configfs_composite_reset(struct usb_gadget *gadget)
64019+{
64020+	struct usb_composite_dev *cdev;
64021+	struct gadget_info *gi;
64022+	unsigned long flags;
64023+
64024+	cdev = get_gadget_data(gadget);
64025+	if (!cdev)
64026+		return;
64027+
64028+	gi = container_of(cdev, struct gadget_info, cdev);
64029+	spin_lock_irqsave(&gi->spinlock, flags);
64030+	cdev = get_gadget_data(gadget);
64031+	if (!cdev || gi->unbind) {
64032+		spin_unlock_irqrestore(&gi->spinlock, flags);
64033+		return;
64034+	}
64035+
64036+	composite_reset(gadget);
64037+	spin_unlock_irqrestore(&gi->spinlock, flags);
64038+}
64039+
64040 static void configfs_composite_suspend(struct usb_gadget *gadget)
64041 {
64042 	struct usb_composite_dev *cdev;
64043@@ -1544,10 +1724,13 @@ static const struct usb_gadget_driver configfs_driver_template = {
64044 	.bind           = configfs_composite_bind,
64045 	.unbind         = configfs_composite_unbind,
64046 
64047+#ifdef CONFIG_USB_CONFIGFS_UEVENT
64048+	.setup          = android_setup,
64049+#else
64050 	.setup          = configfs_composite_setup,
64051-	.reset          = configfs_composite_disconnect,
64052+#endif
64053+	.reset          = configfs_composite_reset,
64054 	.disconnect     = configfs_composite_disconnect,
64055-
64056 	.suspend	= configfs_composite_suspend,
64057 	.resume		= configfs_composite_resume,
64058 
64059@@ -1559,6 +1742,91 @@ static const struct usb_gadget_driver configfs_driver_template = {
64060 	.match_existing_only = 1,
64061 };
64062 
64063+#ifdef CONFIG_USB_CONFIGFS_UEVENT
64064+static ssize_t state_show(struct device *pdev, struct device_attribute *attr,
64065+			char *buf)
64066+{
64067+	struct gadget_info *dev = dev_get_drvdata(pdev);
64068+	struct usb_composite_dev *cdev;
64069+	char *state = "DISCONNECTED";
64070+	unsigned long flags;
64071+
64072+	if (!dev)
64073+		goto out;
64074+
64075+	cdev = &dev->cdev;
64076+
64077+	if (!cdev)
64078+		goto out;
64079+
64080+	spin_lock_irqsave(&cdev->lock, flags);
64081+	if (cdev->config)
64082+		state = "CONFIGURED";
64083+	else if (dev->connected)
64084+		state = "CONNECTED";
64085+	spin_unlock_irqrestore(&cdev->lock, flags);
64086+out:
64087+	return sprintf(buf, "%s\n", state);
64088+}
64089+
64090+static DEVICE_ATTR(state, S_IRUGO, state_show, NULL);
64091+
64092+static struct device_attribute *android_usb_attributes[] = {
64093+	&dev_attr_state,
64094+	NULL
64095+};
64096+
64097+static int android_device_create(struct gadget_info *gi)
64098+{
64099+	struct device_attribute **attrs;
64100+	struct device_attribute *attr;
64101+
64102+	INIT_WORK(&gi->work, android_work);
64103+	gi->dev = device_create(android_class, NULL,
64104+			MKDEV(0, 0), NULL, "android%d", gadget_index++);
64105+	if (IS_ERR(gi->dev))
64106+		return PTR_ERR(gi->dev);
64107+
64108+	dev_set_drvdata(gi->dev, gi);
64109+	if (!android_device)
64110+		android_device = gi->dev;
64111+
64112+	attrs = android_usb_attributes;
64113+	while ((attr = *attrs++)) {
64114+		int err;
64115+
64116+		err = device_create_file(gi->dev, attr);
64117+		if (err) {
64118+			device_destroy(gi->dev->class,
64119+				       gi->dev->devt);
64120+			return err;
64121+		}
64122+	}
64123+
64124+	return 0;
64125+}
64126+
64127+static void android_device_destroy(struct gadget_info *gi)
64128+{
64129+	struct device_attribute **attrs;
64130+	struct device_attribute *attr;
64131+
64132+	attrs = android_usb_attributes;
64133+	while ((attr = *attrs++))
64134+		device_remove_file(gi->dev, attr);
64135+	device_destroy(gi->dev->class, gi->dev->devt);
64136+}
64137+#else
64138+static inline int android_device_create(struct gadget_info *gi)
64139+{
64140+	return 0;
64141+}
64142+
64143+static inline void android_device_destroy(struct gadget_info *gi)
64144+{
64145+}
64146+#endif
64147+
64148 static struct config_group *gadgets_make(
64149 		struct config_group *group,
64150 		const char *name)
64151@@ -1611,7 +1879,11 @@ static struct config_group *gadgets_make(
64152 	if (!gi->composite.gadget_driver.function)
64153 		goto err;
64154 
64155+	if (android_device_create(gi) < 0)
64156+		goto err;
64157+
64158 	return &gi->group;
64159+
64160 err:
64161 	kfree(gi);
64162 	return ERR_PTR(-ENOMEM);
64163@@ -1619,7 +1891,11 @@ static struct config_group *gadgets_make(
64164 
64165 static void gadgets_drop(struct config_group *group, struct config_item *item)
64166 {
64167+	struct gadget_info *gi;
64168+
64169+	gi = container_of(to_config_group(item), struct gadget_info, group);
64170 	config_item_put(item);
64171+	android_device_destroy(gi);
64172 }
64173 
64174 static struct configfs_group_operations gadgets_ops = {
64175@@ -1659,6 +1935,13 @@ static int __init gadget_cfs_init(void)
64176 	config_group_init(&gadget_subsys.su_group);
64177 
64178 	ret = configfs_register_subsystem(&gadget_subsys);
64179+
64180+#ifdef CONFIG_USB_CONFIGFS_UEVENT
64181+	android_class = class_create(THIS_MODULE, "android_usb");
64182+	if (IS_ERR(android_class))
64183+		return PTR_ERR(android_class);
64184+#endif
64185+
64186 	return ret;
64187 }
64188 module_init(gadget_cfs_init);
64189@@ -1666,5 +1949,10 @@ module_init(gadget_cfs_init);
64190 static void __exit gadget_cfs_exit(void)
64191 {
64192 	configfs_unregister_subsystem(&gadget_subsys);
64193+#ifdef CONFIG_USB_CONFIGFS_UEVENT
64194+	if (!IS_ERR(android_class))
64195+		class_destroy(android_class);
64196+#endif
64197+
64198 }
64199 module_exit(gadget_cfs_exit);
64200diff --git a/drivers/usb/gadget/epautoconf.c b/drivers/usb/gadget/epautoconf.c
64201index 1eb4fa2e6..177645f32 100644
64202--- a/drivers/usb/gadget/epautoconf.c
64203+++ b/drivers/usb/gadget/epautoconf.c
64204@@ -67,6 +67,9 @@ struct usb_ep *usb_ep_autoconfig_ss(
64205 )
64206 {
64207 	struct usb_ep	*ep;
64208+#if defined(CONFIG_ARCH_ROCKCHIP) && defined(CONFIG_NO_GKI)
64209+	u8 type = desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
64210+#endif
64211 
64212 	if (gadget->ops->match_ep) {
64213 		ep = gadget->ops->match_ep(gadget, desc, ep_comp);
64214@@ -110,6 +113,27 @@ struct usb_ep *usb_ep_autoconfig_ss(
64215 	ep->desc = NULL;
64216 	ep->comp_desc = NULL;
64217 	ep->claimed = true;
64218+#if defined(CONFIG_ARCH_ROCKCHIP) && defined(CONFIG_NO_GKI)
64219+	ep->transfer_type = type;
64220+	if (gadget_is_superspeed(gadget) && ep_comp) {
64221+		switch (type) {
64222+		case USB_ENDPOINT_XFER_ISOC:
64223+			/* mult: bits 1:0 of bmAttributes */
64224+			ep->mult = (ep_comp->bmAttributes & 0x3) + 1;
64225+			fallthrough;
64226+		case USB_ENDPOINT_XFER_BULK:
64227+		case USB_ENDPOINT_XFER_INT:
64228+			ep->maxburst = ep_comp->bMaxBurst + 1;
64229+			break;
64230+		default:
64231+			break;
64232+		}
64233+	} else if (gadget_is_dualspeed(gadget) &&
64234+		   (type == USB_ENDPOINT_XFER_ISOC ||
64235+		    type == USB_ENDPOINT_XFER_INT)) {
64236+		ep->mult = usb_endpoint_maxp_mult(desc);
64237+	}
64238+#endif
64239 	return ep;
64240 }
64241 EXPORT_SYMBOL_GPL(usb_ep_autoconfig_ss);
64242diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
64243index 8c48c9f80..1e3acc3e3 100644
64244--- a/drivers/usb/gadget/function/f_fs.c
64245+++ b/drivers/usb/gadget/function/f_fs.c
64246@@ -39,7 +39,7 @@
64247 #include "u_fs.h"
64248 #include "u_f.h"
64249 #include "u_os_desc.h"
64250-#include "configfs.h"
64251+#include "../configfs.h"
64252 
64253 #define FUNCTIONFS_MAGIC	0xa647361 /* Chosen by a honest dice roll ;) */
64254 
64255@@ -71,7 +71,7 @@ struct ffs_function {
64256 	struct ffs_data			*ffs;
64257 
64258 	struct ffs_ep			*eps;
64259-	u8				eps_revmap[16];
64260+	u8				eps_revmap[32];
64261 	short				*interfaces_nums;
64262 
64263 	struct usb_function		function;
64264@@ -1715,24 +1715,16 @@ static void ffs_data_put(struct ffs_data *ffs)
64265 
64266 static void ffs_data_closed(struct ffs_data *ffs)
64267 {
64268-	struct ffs_epfile *epfiles;
64269-	unsigned long flags;
64270-
64271 	ENTER();
64272 
64273 	if (atomic_dec_and_test(&ffs->opened)) {
64274 		if (ffs->no_disconnect) {
64275 			ffs->state = FFS_DEACTIVATED;
64276-			spin_lock_irqsave(&ffs->eps_lock, flags);
64277-			epfiles = ffs->epfiles;
64278-			ffs->epfiles = NULL;
64279-			spin_unlock_irqrestore(&ffs->eps_lock,
64280-							flags);
64281-
64282-			if (epfiles)
64283-				ffs_epfiles_destroy(epfiles,
64284-						 ffs->eps_count);
64285-
64286+			if (ffs->epfiles) {
64287+				ffs_epfiles_destroy(ffs->epfiles,
64288+						   ffs->eps_count);
64289+				ffs->epfiles = NULL;
64290+			}
64291 			if (ffs->setup_state == FFS_SETUP_PENDING)
64292 				__ffs_ep0_stall(ffs);
64293 		} else {
64294@@ -1779,34 +1771,17 @@ static struct ffs_data *ffs_data_new(const char *dev_name)
64295 
64296 static void ffs_data_clear(struct ffs_data *ffs)
64297 {
64298-	struct ffs_epfile *epfiles;
64299-	unsigned long flags;
64300-
64301 	ENTER();
64302 
64303 	ffs_closed(ffs);
64304 
64305 	BUG_ON(ffs->gadget);
64306 
64307-	spin_lock_irqsave(&ffs->eps_lock, flags);
64308-	epfiles = ffs->epfiles;
64309-	ffs->epfiles = NULL;
64310-	spin_unlock_irqrestore(&ffs->eps_lock, flags);
64311-
64312-	/*
64313-	 * potential race possible between ffs_func_eps_disable
64314-	 * & ffs_epfile_release therefore maintaining a local
64315-	 * copy of epfile will save us from use-after-free.
64316-	 */
64317-	if (epfiles) {
64318-		ffs_epfiles_destroy(epfiles, ffs->eps_count);
64319-		ffs->epfiles = NULL;
64320-	}
64321+	if (ffs->epfiles)
64322+		ffs_epfiles_destroy(ffs->epfiles, ffs->eps_count);
64323 
64324-	if (ffs->ffs_eventfd) {
64325+	if (ffs->ffs_eventfd)
64326 		eventfd_ctx_put(ffs->ffs_eventfd);
64327-		ffs->ffs_eventfd = NULL;
64328-	}
64329 
64330 	kfree(ffs->raw_descs_data);
64331 	kfree(ffs->raw_strings);
64332@@ -1819,6 +1794,7 @@ static void ffs_data_reset(struct ffs_data *ffs)
64333 
64334 	ffs_data_clear(ffs);
64335 
64336+	ffs->epfiles = NULL;
64337 	ffs->raw_descs_data = NULL;
64338 	ffs->raw_descs = NULL;
64339 	ffs->raw_strings = NULL;
64340@@ -1951,15 +1927,12 @@ static void ffs_epfiles_destroy(struct ffs_epfile *epfiles, unsigned count)
64341 
64342 static void ffs_func_eps_disable(struct ffs_function *func)
64343 {
64344-	struct ffs_ep *ep;
64345-	struct ffs_epfile *epfile;
64346-	unsigned short count;
64347+	struct ffs_ep *ep         = func->eps;
64348+	struct ffs_epfile *epfile = func->ffs->epfiles;
64349+	unsigned count            = func->ffs->eps_count;
64350 	unsigned long flags;
64351 
64352 	spin_lock_irqsave(&func->ffs->eps_lock, flags);
64353-	count = func->ffs->eps_count;
64354-	epfile = func->ffs->epfiles;
64355-	ep = func->eps;
64356 	while (count--) {
64357 		/* pending requests get nuked */
64358 		if (likely(ep->ep))
64359@@ -1977,18 +1950,14 @@ static void ffs_func_eps_disable(struct ffs_function *func)
64360 
64361 static int ffs_func_eps_enable(struct ffs_function *func)
64362 {
64363-	struct ffs_data *ffs;
64364-	struct ffs_ep *ep;
64365-	struct ffs_epfile *epfile;
64366-	unsigned short count;
64367+	struct ffs_data *ffs      = func->ffs;
64368+	struct ffs_ep *ep         = func->eps;
64369+	struct ffs_epfile *epfile = ffs->epfiles;
64370+	unsigned count            = ffs->eps_count;
64371 	unsigned long flags;
64372 	int ret = 0;
64373 
64374 	spin_lock_irqsave(&func->ffs->eps_lock, flags);
64375-	ffs = func->ffs;
64376-	ep = func->eps;
64377-	epfile = ffs->epfiles;
64378-	count = ffs->eps_count;
64379 	while(count--) {
64380 		ep->ep->driver_data = ep;
64381 
64382@@ -2836,7 +2805,7 @@ static int __ffs_func_bind_do_descs(enum ffs_entity_type type, u8 *valuep,
64383 	struct ffs_function *func = priv;
64384 	struct ffs_ep *ffs_ep;
64385 	unsigned ep_desc_id;
64386-	int idx;
64387+	int idx, ep_num;
64388 	static const char *speed_names[] = { "full", "high", "super" };
64389 
64390 	if (type != FFS_DESCRIPTOR)
64391@@ -2909,8 +2878,9 @@ static int __ffs_func_bind_do_descs(enum ffs_entity_type type, u8 *valuep,
64392 
64393 		ffs_ep->ep  = ep;
64394 		ffs_ep->req = req;
64395-		func->eps_revmap[ds->bEndpointAddress &
64396-				 USB_ENDPOINT_NUMBER_MASK] = idx + 1;
64397+		ep_num = ((ds->bEndpointAddress & USB_ENDPOINT_DIR_MASK) >> 3) |
64398+			 (ds->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
64399+		func->eps_revmap[ep_num] = idx + 1;
64400 		/*
64401 		 * If we use virtual address mapping, we restore
64402 		 * original bEndpointAddress value.
64403@@ -3445,7 +3415,10 @@ static void ffs_func_resume(struct usb_function *f)
64404 
64405 static int ffs_func_revmap_ep(struct ffs_function *func, u8 num)
64406 {
64407-	num = func->eps_revmap[num & USB_ENDPOINT_NUMBER_MASK];
64408+	int ep_num = ((num & USB_ENDPOINT_DIR_MASK) >> 3) |
64409+		     (num & USB_ENDPOINT_NUMBER_MASK);
64410+
64411+	num = func->eps_revmap[ep_num];
64412 	return num ? num : -EDOM;
64413 }
64414 
64415diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c
64416index 950c9435b..86bc2bacc 100644
64417--- a/drivers/usb/gadget/function/f_mass_storage.c
64418+++ b/drivers/usb/gadget/function/f_mass_storage.c
64419@@ -224,7 +224,7 @@
64420 
64421 #include <linux/nospec.h>
64422 
64423-#include "configfs.h"
64424+#include "../configfs.h"
64425 
64426 
64427 /*------------------------------------------------------------------------*/
64428@@ -2301,6 +2301,16 @@ static void fsg_disable(struct usb_function *f)
64429 {
64430 	struct fsg_dev *fsg = fsg_from_func(f);
64431 
64432+	/* Disable the endpoints */
64433+	if (fsg->bulk_in_enabled) {
64434+		usb_ep_disable(fsg->bulk_in);
64435+		fsg->bulk_in_enabled = 0;
64436+	}
64437+	if (fsg->bulk_out_enabled) {
64438+		usb_ep_disable(fsg->bulk_out);
64439+		fsg->bulk_out_enabled = 0;
64440+	}
64441+
64442 	__raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE, NULL);
64443 }
64444 
64445diff --git a/drivers/usb/gadget/function/f_uvc.c b/drivers/usb/gadget/function/f_uvc.c
64446index 5d39aff26..c7dea38d9 100644
64447--- a/drivers/usb/gadget/function/f_uvc.c
64448+++ b/drivers/usb/gadget/function/f_uvc.c
64449@@ -124,6 +124,18 @@ static struct usb_interface_descriptor uvc_streaming_intf_alt0 = {
64450 	.iInterface		= 0,
64451 };
64452 
64453+static struct usb_interface_descriptor uvc_bulk_streaming_intf_alt0 = {
64454+	.bLength		= USB_DT_INTERFACE_SIZE,
64455+	.bDescriptorType	= USB_DT_INTERFACE,
64456+	.bInterfaceNumber	= UVC_INTF_VIDEO_STREAMING,
64457+	.bAlternateSetting	= 0,
64458+	.bNumEndpoints		= 1,
64459+	.bInterfaceClass	= USB_CLASS_VIDEO,
64460+	.bInterfaceSubClass	= UVC_SC_VIDEOSTREAMING,
64461+	.bInterfaceProtocol	= 0x00,
64462+	.iInterface		= 0,
64463+};
64464+
64465 static struct usb_interface_descriptor uvc_streaming_intf_alt1 = {
64466 	.bLength		= USB_DT_INTERFACE_SIZE,
64467 	.bDescriptorType	= USB_DT_INTERFACE,
64468@@ -147,6 +159,16 @@ static struct usb_endpoint_descriptor uvc_fs_streaming_ep = {
64469 	 */
64470 };
64471 
64472+static struct usb_endpoint_descriptor uvc_fs_bulk_streaming_ep = {
64473+	.bLength		= USB_DT_ENDPOINT_SIZE,
64474+	.bDescriptorType	= USB_DT_ENDPOINT,
64475+	.bEndpointAddress	= USB_DIR_IN,
64476+	.bmAttributes		= USB_ENDPOINT_XFER_BULK,
64477+	/* The wMaxPacketSize and bInterval values will be initialized from
64478+	 * module parameters.
64479+	 */
64480+};
64481+
64482 static struct usb_endpoint_descriptor uvc_hs_streaming_ep = {
64483 	.bLength		= USB_DT_ENDPOINT_SIZE,
64484 	.bDescriptorType	= USB_DT_ENDPOINT,
64485@@ -158,6 +180,16 @@ static struct usb_endpoint_descriptor uvc_hs_streaming_ep = {
64486 	 */
64487 };
64488 
64489+static struct usb_endpoint_descriptor uvc_hs_bulk_streaming_ep = {
64490+	.bLength		= USB_DT_ENDPOINT_SIZE,
64491+	.bDescriptorType	= USB_DT_ENDPOINT,
64492+	.bEndpointAddress	= USB_DIR_IN,
64493+	.bmAttributes		= USB_ENDPOINT_XFER_BULK,
64494+	/* The wMaxPacketSize and bInterval values will be initialized from
64495+	 * module parameters.
64496+	 */
64497+};
64498+
64499 static struct usb_endpoint_descriptor uvc_ss_streaming_ep = {
64500 	.bLength		= USB_DT_ENDPOINT_SIZE,
64501 	.bDescriptorType	= USB_DT_ENDPOINT,
64502@@ -170,6 +202,17 @@ static struct usb_endpoint_descriptor uvc_ss_streaming_ep = {
64503 	 */
64504 };
64505 
64506+static struct usb_endpoint_descriptor uvc_ss_bulk_streaming_ep = {
64507+	.bLength		= USB_DT_ENDPOINT_SIZE,
64508+	.bDescriptorType	= USB_DT_ENDPOINT,
64509+
64510+	.bEndpointAddress	= USB_DIR_IN,
64511+	.bmAttributes		= USB_ENDPOINT_XFER_BULK,
64512+	/* The wMaxPacketSize and bInterval values will be initialized from
64513+	 * module parameters.
64514+	 */
64515+};
64516+
64517 static struct usb_ss_ep_comp_descriptor uvc_ss_streaming_comp = {
64518 	.bLength		= sizeof(uvc_ss_streaming_comp),
64519 	.bDescriptorType	= USB_DT_SS_ENDPOINT_COMP,
64520@@ -178,18 +221,36 @@ static struct usb_ss_ep_comp_descriptor uvc_ss_streaming_comp = {
64521 	 */
64522 };
64523 
64524+static struct usb_ss_ep_comp_descriptor uvc_ss_bulk_streaming_comp = {
64525+	.bLength		= sizeof(uvc_ss_bulk_streaming_comp),
64526+	.bDescriptorType	= USB_DT_SS_ENDPOINT_COMP,
64527+	/* The bMaxBurst, bmAttributes and wBytesPerInterval values will be
64528+	 * initialized from module parameters.
64529+	 */
64530+};
64531+
64532 static const struct usb_descriptor_header * const uvc_fs_streaming[] = {
64533 	(struct usb_descriptor_header *) &uvc_streaming_intf_alt1,
64534 	(struct usb_descriptor_header *) &uvc_fs_streaming_ep,
64535 	NULL,
64536 };
64537 
64538+static const struct usb_descriptor_header * const uvc_fs_bulk_streaming[] = {
64539+	(struct usb_descriptor_header *)&uvc_fs_bulk_streaming_ep,
64540+	NULL,
64541+};
64542+
64543 static const struct usb_descriptor_header * const uvc_hs_streaming[] = {
64544 	(struct usb_descriptor_header *) &uvc_streaming_intf_alt1,
64545 	(struct usb_descriptor_header *) &uvc_hs_streaming_ep,
64546 	NULL,
64547 };
64548 
64549+static const struct usb_descriptor_header * const uvc_hs_bulk_streaming[] = {
64550+	(struct usb_descriptor_header *)&uvc_hs_bulk_streaming_ep,
64551+	NULL,
64552+};
64553+
64554 static const struct usb_descriptor_header * const uvc_ss_streaming[] = {
64555 	(struct usb_descriptor_header *) &uvc_streaming_intf_alt1,
64556 	(struct usb_descriptor_header *) &uvc_ss_streaming_ep,
64557@@ -197,6 +258,12 @@ static const struct usb_descriptor_header * const uvc_ss_streaming[] = {
64558 	NULL,
64559 };
64560 
64561+static const struct usb_descriptor_header * const uvc_ss_bulk_streaming[] = {
64562+	(struct usb_descriptor_header *)&uvc_ss_bulk_streaming_ep,
64563+	(struct usb_descriptor_header *)&uvc_ss_bulk_streaming_comp,
64564+	NULL,
64565+};
64566+
64567 /* --------------------------------------------------------------------------
64568  * Control requests
64569  */
64570@@ -208,6 +275,10 @@ uvc_function_ep0_complete(struct usb_ep *ep, struct usb_request *req)
64571 	struct v4l2_event v4l2_event;
64572 	struct uvc_event *uvc_event = (void *)&v4l2_event.u.data;
64573 
64574+	uvc_trace(UVC_TRACE_CONTROL,
64575+		  "event_setup_out %d, data len %d\n",
64576+		  uvc->event_setup_out, req->actual);
64577+
64578 	if (uvc->event_setup_out) {
64579 		uvc->event_setup_out = 0;
64580 
64581@@ -227,6 +298,11 @@ uvc_function_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
64582 	struct v4l2_event v4l2_event;
64583 	struct uvc_event *uvc_event = (void *)&v4l2_event.u.data;
64584 
64585+	uvc_trace(UVC_TRACE_CONTROL,
64586+		  "setup request %02x %02x value %04x index %04x %04x\n",
64587+		  ctrl->bRequestType, ctrl->bRequest, le16_to_cpu(ctrl->wValue),
64588+		  le16_to_cpu(ctrl->wIndex), le16_to_cpu(ctrl->wLength));
64589+
64590 	if ((ctrl->bRequestType & USB_TYPE_MASK) != USB_TYPE_CLASS) {
64591 		uvcg_info(f, "invalid request type\n");
64592 		return -EINVAL;
64593@@ -261,15 +337,27 @@ static int
64594 uvc_function_get_alt(struct usb_function *f, unsigned interface)
64595 {
64596 	struct uvc_device *uvc = to_uvc(f);
64597+	struct f_uvc_opts *opts;
64598 
64599 	uvcg_info(f, "%s(%u)\n", __func__, interface);
64600 
64601+	opts = fi_to_f_uvc_opts(f->fi);
64602+
64603 	if (interface == uvc->control_intf)
64604 		return 0;
64605 	else if (interface != uvc->streaming_intf)
64606 		return -EINVAL;
64607-	else
64608+	else if (!opts->streaming_bulk)
64609 		return uvc->video.ep->enabled ? 1 : 0;
64610+	else
64611+		/*
64612+		 * Alt settings in an interface are supported only for
64613+		 * ISOC endpoints as there are different alt-settings for
64614+		 * zero-bandwidth and full-bandwidth cases, but the same
64615+		 * is not true for BULK endpoints, as they have a single
64616+		 * alt-setting.
64617+		 */
64618+		return 0;
64619 }
64620 
64621 static int
64622@@ -279,10 +367,13 @@ uvc_function_set_alt(struct usb_function *f, unsigned interface, unsigned alt)
64623 	struct usb_composite_dev *cdev = f->config->cdev;
64624 	struct v4l2_event v4l2_event;
64625 	struct uvc_event *uvc_event = (void *)&v4l2_event.u.data;
64626+	struct f_uvc_opts *opts;
64627 	int ret;
64628 
64629 	uvcg_info(f, "%s(%u, %u)\n", __func__, interface, alt);
64630 
64631+	opts = fi_to_f_uvc_opts(f->fi);
64632+
64633 	if (interface == uvc->control_intf) {
64634 		if (alt)
64635 			return -EINVAL;
64636@@ -296,6 +387,14 @@ uvc_function_set_alt(struct usb_function *f, unsigned interface, unsigned alt)
64637 
64638 		usb_ep_enable(uvc->control_ep);
64639 
64640+		if (uvc->event_suspend) {
64641+			memset(&v4l2_event, 0, sizeof(v4l2_event));
64642+			v4l2_event.type = UVC_EVENT_RESUME;
64643+			v4l2_event_queue(&uvc->vdev, &v4l2_event);
64644+			uvc->event_suspend = 0;
64645+			uvc_trace(UVC_TRACE_SUSPEND, "send UVC_EVENT_RESUME\n");
64646+		}
64647+
64648 		if (uvc->state == UVC_STATE_DISCONNECTED) {
64649 			memset(&v4l2_event, 0, sizeof(v4l2_event));
64650 			v4l2_event.type = UVC_EVENT_CONNECT;
64651@@ -311,49 +410,94 @@ uvc_function_set_alt(struct usb_function *f, unsigned interface, unsigned alt)
64652 	if (interface != uvc->streaming_intf)
64653 		return -EINVAL;
64654 
64655-	/* TODO
64656-	if (usb_endpoint_xfer_bulk(&uvc->desc.vs_ep))
64657-		return alt ? -EINVAL : 0;
64658-	*/
64659+	if (!opts->streaming_bulk) {
64660+		switch (alt) {
64661+		case 0:
64662+			if (uvc->state != UVC_STATE_STREAMING)
64663+				return 0;
64664 
64665-	switch (alt) {
64666-	case 0:
64667-		if (uvc->state != UVC_STATE_STREAMING)
64668+			if (uvc->video.ep)
64669+				usb_ep_disable(uvc->video.ep);
64670+
64671+			memset(&v4l2_event, 0, sizeof(v4l2_event));
64672+			v4l2_event.type = UVC_EVENT_STREAMOFF;
64673+			v4l2_event_queue(&uvc->vdev, &v4l2_event);
64674+
64675+			uvc->state = UVC_STATE_CONNECTED;
64676 			return 0;
64677 
64678-		if (uvc->video.ep)
64679-			usb_ep_disable(uvc->video.ep);
64680+		case 1:
64681+			if (uvc->state != UVC_STATE_CONNECTED)
64682+				return 0;
64683 
64684-		memset(&v4l2_event, 0, sizeof(v4l2_event));
64685-		v4l2_event.type = UVC_EVENT_STREAMOFF;
64686-		v4l2_event_queue(&uvc->vdev, &v4l2_event);
64687+			if (!uvc->video.ep)
64688+				return -EINVAL;
64689 
64690-		uvc->state = UVC_STATE_CONNECTED;
64691-		return 0;
64692+			INFO(cdev, "reset UVC\n");
64693+			usb_ep_disable(uvc->video.ep);
64694 
64695-	case 1:
64696-		if (uvc->state != UVC_STATE_CONNECTED)
64697-			return 0;
64698+			ret = config_ep_by_speed(f->config->cdev->gadget,
64699+						 &uvc->func, uvc->video.ep);
64700+			if (ret)
64701+				return ret;
64702+			usb_ep_enable(uvc->video.ep);
64703+
64704+			memset(&v4l2_event, 0, sizeof(v4l2_event));
64705+			v4l2_event.type = UVC_EVENT_STREAMON;
64706+			v4l2_event_queue(&uvc->vdev, &v4l2_event);
64707+			return USB_GADGET_DELAYED_STATUS;
64708 
64709-		if (!uvc->video.ep)
64710+		default:
64711 			return -EINVAL;
64712+		}
64713+	} else {
64714+		switch (uvc->state) {
64715+		case UVC_STATE_CONNECTED:
64716+			if (uvc->video.ep &&
64717+			    !uvc->video.ep->enabled) {
64718+				/*
64719+				 * Enable the video streaming endpoint,
64720+				 * but don't change the 'uvc->state'.
64721+				 */
64722+				ret = config_ep_by_speed(cdev->gadget,
64723+							 &uvc->func,
64724+							 uvc->video.ep);
64725+				if (ret)
64726+					return ret;
64727+				ret = usb_ep_enable(uvc->video.ep);
64728+				if (ret)
64729+					return ret;
64730+			} else {
64731+				memset(&v4l2_event, 0, sizeof(v4l2_event));
64732+				v4l2_event.type = UVC_EVENT_STREAMON;
64733+				v4l2_event_queue(&uvc->vdev, &v4l2_event);
64734+
64735+				uvc->state = UVC_STATE_STREAMING;
64736+			}
64737+			return 0;
64738 
64739-		uvcg_info(f, "reset UVC\n");
64740-		usb_ep_disable(uvc->video.ep);
64741+		case UVC_STATE_STREAMING:
64742+			if (!alt) {
64743+				INFO(cdev, "bulk streaming intf not support alt 0\n");
64744+				return 0;
64745+			}
64746 
64747-		ret = config_ep_by_speed(f->config->cdev->gadget,
64748-				&(uvc->func), uvc->video.ep);
64749-		if (ret)
64750-			return ret;
64751-		usb_ep_enable(uvc->video.ep);
64752+			if (uvc->video.ep &&
64753+			    uvc->video.ep->enabled) {
64754+				ret = usb_ep_disable(uvc->video.ep);
64755+				if (ret)
64756+					return ret;
64757+			}
64758 
64759-		memset(&v4l2_event, 0, sizeof(v4l2_event));
64760-		v4l2_event.type = UVC_EVENT_STREAMON;
64761-		v4l2_event_queue(&uvc->vdev, &v4l2_event);
64762-		return USB_GADGET_DELAYED_STATUS;
64763+			memset(&v4l2_event, 0, sizeof(v4l2_event));
64764+			v4l2_event.type = UVC_EVENT_STREAMOFF;
64765+			v4l2_event_queue(&uvc->vdev, &v4l2_event);
64766+			uvc->state = UVC_STATE_CONNECTED;
64767+			return 0;
64768 
64769-	default:
64770-		return -EINVAL;
64771+		default:
64772+			return -EINVAL;
64773+		}
64774 	}
64775 }
64776 
64777@@ -375,6 +519,30 @@ uvc_function_disable(struct usb_function *f)
64778 	usb_ep_disable(uvc->control_ep);
64779 }
64780 
64781+static void uvc_function_suspend(struct usb_function *f)
64782+{
64783+	struct uvc_device *uvc = to_uvc(f);
64784+	struct v4l2_event v4l2_event;
64785+
64786+	memset(&v4l2_event, 0, sizeof(v4l2_event));
64787+	v4l2_event.type = UVC_EVENT_SUSPEND;
64788+	v4l2_event_queue(&uvc->vdev, &v4l2_event);
64789+	uvc->event_suspend = 1;
64790+	uvc_trace(UVC_TRACE_SUSPEND, "send UVC_EVENT_SUSPEND\n");
64791+}
64792+
64793+static void uvc_function_resume(struct usb_function *f)
64794+{
64795+	struct uvc_device *uvc = to_uvc(f);
64796+	struct v4l2_event v4l2_event;
64797+
64798+	memset(&v4l2_event, 0, sizeof(v4l2_event));
64799+	v4l2_event.type = UVC_EVENT_RESUME;
64800+	v4l2_event_queue(&uvc->vdev, &v4l2_event);
64801+	uvc->event_suspend = 0;
64802+	uvc_trace(UVC_TRACE_SUSPEND, "send UVC_EVENT_RESUME\n");
64803+}
64804+
64805 /* --------------------------------------------------------------------------
64806  * Connection / disconnection
64807  */
64808@@ -468,32 +636,45 @@ uvc_copy_descriptors(struct uvc_device *uvc, enum usb_device_speed speed)
64809 	const struct uvc_descriptor_header * const *uvc_streaming_cls;
64810 	const struct usb_descriptor_header * const *uvc_streaming_std;
64811 	const struct usb_descriptor_header * const *src;
64812+	struct usb_interface_descriptor *streaming_intf_alt0;
64813 	struct usb_descriptor_header **dst;
64814 	struct usb_descriptor_header **hdr;
64815+	struct f_uvc_opts *opts;
64816 	unsigned int control_size;
64817 	unsigned int streaming_size;
64818 	unsigned int n_desc;
64819 	unsigned int bytes;
64820 	void *mem;
64821 
64822+	opts = fi_to_f_uvc_opts(uvc->func.fi);
64823+
64824 	switch (speed) {
64825 	case USB_SPEED_SUPER:
64826 		uvc_control_desc = uvc->desc.ss_control;
64827 		uvc_streaming_cls = uvc->desc.ss_streaming;
64828-		uvc_streaming_std = uvc_ss_streaming;
64829+		if (!opts->streaming_bulk)
64830+			uvc_streaming_std = uvc_ss_streaming;
64831+		else
64832+			uvc_streaming_std = uvc_ss_bulk_streaming;
64833 		break;
64834 
64835 	case USB_SPEED_HIGH:
64836 		uvc_control_desc = uvc->desc.fs_control;
64837 		uvc_streaming_cls = uvc->desc.hs_streaming;
64838-		uvc_streaming_std = uvc_hs_streaming;
64839+		if (!opts->streaming_bulk)
64840+			uvc_streaming_std = uvc_hs_streaming;
64841+		else
64842+			uvc_streaming_std = uvc_hs_bulk_streaming;
64843 		break;
64844 
64845 	case USB_SPEED_FULL:
64846 	default:
64847 		uvc_control_desc = uvc->desc.fs_control;
64848 		uvc_streaming_cls = uvc->desc.fs_streaming;
64849-		uvc_streaming_std = uvc_fs_streaming;
64850+		if (!opts->streaming_bulk)
64851+			uvc_streaming_std = uvc_fs_streaming;
64852+		else
64853+			uvc_streaming_std = uvc_fs_bulk_streaming;
64854 		break;
64855 	}
64856 
64857@@ -513,12 +694,17 @@ uvc_copy_descriptors(struct uvc_device *uvc, enum usb_device_speed speed)
64858 	 * uvc_{fs|hs}_streaming
64859 	 */
64860 
64861+	if (!opts->streaming_bulk)
64862+		streaming_intf_alt0 = &uvc_streaming_intf_alt0;
64863+	else
64864+		streaming_intf_alt0 = &uvc_bulk_streaming_intf_alt0;
64865+
64866 	/* Count descriptors and compute their size. */
64867 	control_size = 0;
64868 	streaming_size = 0;
64869 	bytes = uvc_iad.bLength + uvc_control_intf.bLength
64870 	      + uvc_control_ep.bLength + uvc_control_cs_ep.bLength
64871-	      + uvc_streaming_intf_alt0.bLength;
64872+	      + streaming_intf_alt0->bLength;
64873 
64874 	if (speed == USB_SPEED_SUPER) {
64875 		bytes += uvc_ss_control_comp.bLength;
64876@@ -568,7 +754,7 @@ uvc_copy_descriptors(struct uvc_device *uvc, enum usb_device_speed speed)
64877 		UVC_COPY_DESCRIPTOR(mem, dst, &uvc_ss_control_comp);
64878 
64879 	UVC_COPY_DESCRIPTOR(mem, dst, &uvc_control_cs_ep);
64880-	UVC_COPY_DESCRIPTOR(mem, dst, &uvc_streaming_intf_alt0);
64881+	UVC_COPY_DESCRIPTOR(mem, dst, streaming_intf_alt0);
64882 
64883 	uvc_streaming_header = mem;
64884 	UVC_COPY_DESCRIPTORS(mem, dst,
64885@@ -593,15 +779,24 @@ uvc_function_bind(struct usb_configuration *c, struct usb_function *f)
64886 	struct usb_ep *ep;
64887 	struct f_uvc_opts *opts;
64888 	int ret = -EINVAL;
64889+	u8 address;
64890 
64891 	uvcg_info(f, "%s()\n", __func__);
64892 
64893 	opts = fi_to_f_uvc_opts(f->fi);
64894 	/* Sanity check the streaming endpoint module parameters.
64895 	 */
64896-	opts->streaming_interval = clamp(opts->streaming_interval, 1U, 16U);
64897-	opts->streaming_maxpacket = clamp(opts->streaming_maxpacket, 1U, 3072U);
64898-	opts->streaming_maxburst = min(opts->streaming_maxburst, 15U);
64899+	if (!opts->streaming_bulk) {
64900+		opts->streaming_interval = clamp(opts->streaming_interval,
64901+						 1U, 16U);
64902+		opts->streaming_maxpacket = clamp(opts->streaming_maxpacket,
64903+						  1U, 3072U);
64904+		opts->streaming_maxburst = min(opts->streaming_maxburst, 15U);
64905+	} else {
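+		/*
+		 * Bulk endpoints have no service interval; the packet size
+		 * is capped at the 1024-byte SuperSpeed bulk maximum.
+		 */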
64906+		opts->streaming_maxpacket = clamp(opts->streaming_maxpacket,
64907+						  1U, 1024U);
64908+		opts->streaming_maxburst = min(opts->streaming_maxburst, 15U);
64909+	}
64910 
64911 	/* For SS, wMaxPacketSize has to be 1024 if bMaxBurst is not 0 */
64912 	if (opts->streaming_maxburst &&
64913@@ -628,26 +823,46 @@ uvc_function_bind(struct usb_configuration *c, struct usb_function *f)
64914 		max_packet_size = opts->streaming_maxpacket / 3;
64915 	}
64916 
64917-	uvc_fs_streaming_ep.wMaxPacketSize =
64918-		cpu_to_le16(min(opts->streaming_maxpacket, 1023U));
64919-	uvc_fs_streaming_ep.bInterval = opts->streaming_interval;
64920-
64921-	uvc_hs_streaming_ep.wMaxPacketSize =
64922-		cpu_to_le16(max_packet_size | ((max_packet_mult - 1) << 11));
64923-
64924-	/* A high-bandwidth endpoint must specify a bInterval value of 1 */
64925-	if (max_packet_mult > 1)
64926-		uvc_hs_streaming_ep.bInterval = 1;
64927-	else
64928-		uvc_hs_streaming_ep.bInterval = opts->streaming_interval;
64929-
64930-	uvc_ss_streaming_ep.wMaxPacketSize = cpu_to_le16(max_packet_size);
64931-	uvc_ss_streaming_ep.bInterval = opts->streaming_interval;
64932-	uvc_ss_streaming_comp.bmAttributes = max_packet_mult - 1;
64933-	uvc_ss_streaming_comp.bMaxBurst = opts->streaming_maxburst;
64934-	uvc_ss_streaming_comp.wBytesPerInterval =
64935-		cpu_to_le16(max_packet_size * max_packet_mult *
64936-			    (opts->streaming_maxburst + 1));
64937+	if (!opts->streaming_bulk) {
64938+		uvc_fs_streaming_ep.wMaxPacketSize =
64939+			cpu_to_le16(min(opts->streaming_maxpacket, 1023U));
64940+		uvc_fs_streaming_ep.bInterval = opts->streaming_interval;
64941+
64942+		uvc_hs_streaming_ep.wMaxPacketSize =
64943+			cpu_to_le16(max_packet_size |
64944+				    ((max_packet_mult - 1) << 11));
64945+
64946+		/* A high-bandwidth endpoint must specify a bInterval value of 1 */
64947+		if (max_packet_mult > 1)
64948+			uvc_hs_streaming_ep.bInterval = 1;
64949+		else
64950+			uvc_hs_streaming_ep.bInterval = opts->streaming_interval;
64951+
64952+		uvc_ss_streaming_ep.wMaxPacketSize =
64953+			cpu_to_le16(max_packet_size);
64954+		uvc_ss_streaming_ep.bInterval = opts->streaming_interval;
64955+		uvc_ss_streaming_comp.bmAttributes = max_packet_mult - 1;
64956+		uvc_ss_streaming_comp.bMaxBurst = opts->streaming_maxburst;
64957+		uvc_ss_streaming_comp.wBytesPerInterval =
64958+			cpu_to_le16(max_packet_size * max_packet_mult *
64959+				    (opts->streaming_maxburst + 1));
64960+	} else {
64961+		uvc_fs_bulk_streaming_ep.wMaxPacketSize =
64962+			cpu_to_le16(min(opts->streaming_maxpacket, 64U));
64963+
64964+		uvc_hs_bulk_streaming_ep.wMaxPacketSize =
64965+			cpu_to_le16(min(opts->streaming_maxpacket, 512U));
64966+
64967+		uvc_ss_bulk_streaming_ep.wMaxPacketSize =
64968+			cpu_to_le16(max_packet_size);
64969+		uvc_ss_bulk_streaming_comp.bMaxBurst = opts->streaming_maxburst;
64970+		/*
64971+		 * As per USB 3.1 spec "Table 9-26. SuperSpeed Endpoint
+		 * Companion Descriptor", wBytesPerInterval must be set
+		 * to zero for bulk endpoints.
64974+		 */
64975+		uvc_ss_bulk_streaming_comp.wBytesPerInterval = 0;
64976+	}
64977 
64978 	/* Allocate endpoints. */
64979 	ep = usb_ep_autoconfig(cdev->gadget, &uvc_control_ep);
64980@@ -657,23 +872,57 @@ uvc_function_bind(struct usb_configuration *c, struct usb_function *f)
64981 	}
64982 	uvc->control_ep = ep;
64983 
64984-	if (gadget_is_superspeed(c->cdev->gadget))
64985-		ep = usb_ep_autoconfig_ss(cdev->gadget, &uvc_ss_streaming_ep,
64986-					  &uvc_ss_streaming_comp);
64987-	else if (gadget_is_dualspeed(cdev->gadget))
64988-		ep = usb_ep_autoconfig(cdev->gadget, &uvc_hs_streaming_ep);
64989-	else
64990-		ep = usb_ep_autoconfig(cdev->gadget, &uvc_fs_streaming_ep);
64991+	if (gadget_is_superspeed(c->cdev->gadget)) {
64992+		if (!opts->streaming_bulk)
64993+			ep = usb_ep_autoconfig_ss(cdev->gadget,
64994+						  &uvc_ss_streaming_ep,
64995+						  &uvc_ss_streaming_comp);
64996+		else
64997+			ep = usb_ep_autoconfig_ss(cdev->gadget,
64998+						  &uvc_ss_bulk_streaming_ep,
64999+						  &uvc_ss_bulk_streaming_comp);
65000+	} else if (gadget_is_dualspeed(cdev->gadget)) {
65001+		if (!opts->streaming_bulk) {
65002+			ep = usb_ep_autoconfig(cdev->gadget,
65003+					       &uvc_hs_streaming_ep);
65004+		} else {
65005+			ep = usb_ep_autoconfig(cdev->gadget,
65006+					       &uvc_hs_bulk_streaming_ep);
65007+			/*
+			 * ep_matches() resets wMaxPacketSize to 64 bytes when
+			 * the endpoint is bulk and no companion descriptor is
+			 * supplied, so restore the high-speed bulk maximum of
+			 * 512 bytes here.
65012+			 */
65013+			uvc_hs_bulk_streaming_ep.wMaxPacketSize =
65014+				cpu_to_le16(min(opts->streaming_maxpacket,
65015+						512U));
65016+		}
65017+	} else {
65018+		if (!opts->streaming_bulk)
65019+			ep = usb_ep_autoconfig(cdev->gadget,
65020+					       &uvc_fs_streaming_ep);
65021+		else
65022+			ep = usb_ep_autoconfig(cdev->gadget,
65023+					       &uvc_fs_bulk_streaming_ep);
65024+	}
65025 
65026 	if (!ep) {
65027 		uvcg_info(f, "Unable to allocate streaming EP\n");
65028 		goto error;
65029 	}
65030 	uvc->video.ep = ep;
65031+	address = uvc->video.ep->address;
65032 
65033-	uvc_fs_streaming_ep.bEndpointAddress = uvc->video.ep->address;
65034-	uvc_hs_streaming_ep.bEndpointAddress = uvc->video.ep->address;
65035-	uvc_ss_streaming_ep.bEndpointAddress = uvc->video.ep->address;
65036+	if (!opts->streaming_bulk) {
65037+		uvc_fs_streaming_ep.bEndpointAddress = address;
65038+		uvc_hs_streaming_ep.bEndpointAddress = address;
65039+		uvc_ss_streaming_ep.bEndpointAddress = address;
65040+	} else {
65041+		uvc_fs_bulk_streaming_ep.bEndpointAddress = address;
65042+		uvc_hs_bulk_streaming_ep.bEndpointAddress = address;
65043+		uvc_ss_bulk_streaming_ep.bEndpointAddress = address;
65044+	}
65045 
65046 	us = usb_gstrings_attach(cdev, uvc_function_strings,
65047 				 ARRAY_SIZE(uvc_en_us_strings));
65048@@ -684,8 +933,12 @@ uvc_function_bind(struct usb_configuration *c, struct usb_function *f)
65049 	uvc_iad.iFunction = us[UVC_STRING_CONTROL_IDX].id;
65050 	uvc_control_intf.iInterface = us[UVC_STRING_CONTROL_IDX].id;
65051 	ret = us[UVC_STRING_STREAMING_IDX].id;
65052-	uvc_streaming_intf_alt0.iInterface = ret;
65053-	uvc_streaming_intf_alt1.iInterface = ret;
65054+	if (!opts->streaming_bulk) {
65055+		uvc_streaming_intf_alt0.iInterface = ret;
65056+		uvc_streaming_intf_alt1.iInterface = ret;
65057+	} else {
65058+		uvc_bulk_streaming_intf_alt0.iInterface = ret;
65059+	}
65060 
65061 	/* Allocate interface IDs. */
65062 	if ((ret = usb_interface_id(c, f)) < 0)
65063@@ -697,8 +950,14 @@ uvc_function_bind(struct usb_configuration *c, struct usb_function *f)
65064 
65065 	if ((ret = usb_interface_id(c, f)) < 0)
65066 		goto error;
65067-	uvc_streaming_intf_alt0.bInterfaceNumber = ret;
65068-	uvc_streaming_intf_alt1.bInterfaceNumber = ret;
65069+
65070+	if (!opts->streaming_bulk) {
65071+		uvc_streaming_intf_alt0.bInterfaceNumber = ret;
65072+		uvc_streaming_intf_alt1.bInterfaceNumber = ret;
65073+	} else {
65074+		uvc_bulk_streaming_intf_alt0.bInterfaceNumber = ret;
65075+	}
65076+
65077 	uvc->streaming_intf = ret;
65078 	opts->streaming_interface = ret;
65079 
65080@@ -748,6 +1007,8 @@ uvc_function_bind(struct usb_configuration *c, struct usb_function *f)
65081 	if (ret < 0)
65082 		goto v4l2_error;
65083 
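+	/*
+	 * In bulk mode the payload is not bounded by an isochronous
+	 * interval, so allow a whole image to be sent as a single payload.
+	 */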
65084+	if (opts->streaming_bulk)
65085+		uvc->video.max_payload_size = uvc->video.imagesize;
65086 	/* Register a V4L2 device. */
65087 	ret = uvc_register_video(uvc);
65088 	if (ret < 0) {
65089@@ -786,6 +1047,7 @@ static struct usb_function_instance *uvc_alloc_inst(void)
65090 	struct uvc_camera_terminal_descriptor *cd;
65091 	struct uvc_processing_unit_descriptor *pd;
65092 	struct uvc_output_terminal_descriptor *od;
65093+	struct UVC_EXTENSION_UNIT_DESCRIPTOR(1, 1) *ed;
65094 	struct uvc_color_matching_descriptor *md;
65095 	struct uvc_descriptor_header **ctl_cls;
65096 	int ret;
65097@@ -835,6 +1097,34 @@ static struct usb_function_instance *uvc_alloc_inst(void)
65098 	od->bSourceID			= 2;
65099 	od->iTerminal			= 0;
65100 
65101+	ed = &opts->uvc_extension;
65102+	ed->bLength = UVC_DT_EXTENSION_UNIT_SIZE(1, 1);
65103+	ed->bDescriptorType = USB_DT_CS_INTERFACE;
65104+	ed->bDescriptorSubType = UVC_VC_EXTENSION_UNIT;
65105+	ed->bUnitID = 6;
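+	/* 16-byte GUID identifying this vendor-specific extension unit */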
65106+	ed->guidExtensionCode[0] = 0xa2;
65107+	ed->guidExtensionCode[1] = 0x9e;
65108+	ed->guidExtensionCode[2] = 0x76;
65109+	ed->guidExtensionCode[3] = 0x41;
65110+	ed->guidExtensionCode[4] = 0xde;
65111+	ed->guidExtensionCode[5] = 0x04;
65112+	ed->guidExtensionCode[6] = 0x47;
65113+	ed->guidExtensionCode[7] = 0xe3;
65114+	ed->guidExtensionCode[8] = 0x8b;
65115+	ed->guidExtensionCode[9] = 0x2b;
65116+	ed->guidExtensionCode[10] = 0xf4;
65117+	ed->guidExtensionCode[11] = 0x34;
65118+	ed->guidExtensionCode[12] = 0x1a;
65119+	ed->guidExtensionCode[13] = 0xff;
65120+	ed->guidExtensionCode[14] = 0x00;
65121+	ed->guidExtensionCode[15] = 0x3b;
65122+	ed->bNumControls = 3;
65123+	ed->bNrInPins = 1;
65124+	ed->baSourceID[0] = 2;
65125+	ed->bControlSize = 1;
65126+	ed->bmControls[0] = 7;
65127+	ed->iExtension = 0;
65128+
65129 	md = &opts->uvc_color_matching;
65130 	md->bLength			= UVC_DT_COLOR_MATCHING_SIZE;
65131 	md->bDescriptorType		= USB_DT_CS_INTERFACE;
65132@@ -849,7 +1139,8 @@ static struct usb_function_instance *uvc_alloc_inst(void)
65133 	ctl_cls[1] = (struct uvc_descriptor_header *)cd;
65134 	ctl_cls[2] = (struct uvc_descriptor_header *)pd;
65135 	ctl_cls[3] = (struct uvc_descriptor_header *)od;
65136-	ctl_cls[4] = NULL;	/* NULL-terminate */
65137+	ctl_cls[4] = (struct uvc_descriptor_header *)ed;
65138+	ctl_cls[5] = NULL;	/* NULL-terminate */
65139 	opts->fs_control =
65140 		(const struct uvc_descriptor_header * const *)ctl_cls;
65141 
65142@@ -859,12 +1150,15 @@ static struct usb_function_instance *uvc_alloc_inst(void)
65143 	ctl_cls[1] = (struct uvc_descriptor_header *)cd;
65144 	ctl_cls[2] = (struct uvc_descriptor_header *)pd;
65145 	ctl_cls[3] = (struct uvc_descriptor_header *)od;
65146-	ctl_cls[4] = NULL;	/* NULL-terminate */
65147+	ctl_cls[4] = (struct uvc_descriptor_header *)ed;
65148+	ctl_cls[5] = NULL;	/* NULL-terminate */
65149 	opts->ss_control =
65150 		(const struct uvc_descriptor_header * const *)ctl_cls;
65151 
65152 	opts->streaming_interval = 1;
65153 	opts->streaming_maxpacket = 1024;
65154+	opts->uvc_num_request = UVC_NUM_REQUESTS;
65155+	opts->pm_qos_latency = 0;
65156 
65157 	ret = uvcg_attach_configfs(opts);
65158 	if (ret < 0) {
65159@@ -884,42 +1178,17 @@ static void uvc_free(struct usb_function *f)
65160 	kfree(uvc);
65161 }
65162 
65163-static void uvc_function_unbind(struct usb_configuration *c,
65164-				struct usb_function *f)
65165+static void uvc_unbind(struct usb_configuration *c, struct usb_function *f)
65166 {
65167 	struct usb_composite_dev *cdev = c->cdev;
65168 	struct uvc_device *uvc = to_uvc(f);
65169-	long wait_ret = 1;
65170 
65171-	uvcg_info(f, "%s()\n", __func__);
65172-
65173-	/* If we know we're connected via v4l2, then there should be a cleanup
65174-	 * of the device from userspace either via UVC_EVENT_DISCONNECT or
65175-	 * though the video device removal uevent. Allow some time for the
65176-	 * application to close out before things get deleted.
65177-	 */
65178-	if (uvc->func_connected) {
65179-		uvcg_dbg(f, "waiting for clean disconnect\n");
65180-		wait_ret = wait_event_interruptible_timeout(uvc->func_connected_queue,
65181-				uvc->func_connected == false, msecs_to_jiffies(500));
65182-		uvcg_dbg(f, "done waiting with ret: %ld\n", wait_ret);
65183-	}
65184+	uvcg_info(f, "%s\n", __func__);
65185 
65186 	device_remove_file(&uvc->vdev.dev, &dev_attr_function_name);
65187 	video_unregister_device(&uvc->vdev);
65188 	v4l2_device_unregister(&uvc->v4l2_dev);
65189 
65190-	if (uvc->func_connected) {
65191-		/* Wait for the release to occur to ensure there are no longer any
65192-		 * pending operations that may cause panics when resources are cleaned
65193-		 * up.
65194-		 */
65195-		uvcg_warn(f, "%s no clean disconnect, wait for release\n", __func__);
65196-		wait_ret = wait_event_interruptible_timeout(uvc->func_connected_queue,
65197-				uvc->func_connected == false, msecs_to_jiffies(1000));
65198-		uvcg_dbg(f, "done waiting for release with ret: %ld\n", wait_ret);
65199-	}
65200-
65201 	usb_ep_free_request(cdev->gadget->ep0, uvc->control_req);
65202 	kfree(uvc->control_buf);
65203 
65204@@ -938,7 +1207,6 @@ static struct usb_function *uvc_alloc(struct usb_function_instance *fi)
65205 
65206 	mutex_init(&uvc->video.mutex);
65207 	uvc->state = UVC_STATE_DISCONNECTED;
65208-	init_waitqueue_head(&uvc->func_connected_queue);
65209 	opts = fi_to_f_uvc_opts(fi);
65210 
65211 	mutex_lock(&opts->lock);
65212@@ -969,12 +1237,14 @@ static struct usb_function *uvc_alloc(struct usb_function_instance *fi)
65213 	/* Register the function. */
65214 	uvc->func.name = "uvc";
65215 	uvc->func.bind = uvc_function_bind;
65216-	uvc->func.unbind = uvc_function_unbind;
65217+	uvc->func.unbind = uvc_unbind;
65218 	uvc->func.get_alt = uvc_function_get_alt;
65219 	uvc->func.set_alt = uvc_function_set_alt;
65220 	uvc->func.disable = uvc_function_disable;
65221 	uvc->func.setup = uvc_function_setup;
65222 	uvc->func.free_func = uvc_free;
65223+	uvc->func.suspend = uvc_function_suspend;
65224+	uvc->func.resume = uvc_function_resume;
65225 	uvc->func.bind_deactivated = true;
65226 
65227 	return &uvc->func;
65228diff --git a/drivers/usb/gadget/function/u_audio.h b/drivers/usb/gadget/function/u_audio.h
65229index 5ea6b86f1..a218cdf77 100644
65230--- a/drivers/usb/gadget/function/u_audio.h
65231+++ b/drivers/usb/gadget/function/u_audio.h
65232@@ -11,6 +11,14 @@
65233 
65234 #include <linux/usb/composite.h>
65235 
65236+/*
65237+ * Same maximum frequency deviation on the slower side as in
65238+ * sound/usb/endpoint.c. Value is expressed in per-mil deviation.
65239+ * The maximum deviation on the faster side will be provided as
65240+ * parameter, as it impacts the endpoint required bandwidth.
65241+ */
65242+#define FBACK_SLOW_MAX	250
65243+
65244 struct uac_params {
65245 	/* playback */
65246 	int p_chmask;	/* channel mask */
65247@@ -23,6 +31,7 @@ struct uac_params {
65248 	int c_ssize;	/* sample size */
65249 
65250 	int req_number; /* number of preallocated requests */
65251+	int fb_max;	/* upper frequency drift feedback limit per-mil */
65252 };
65253 
65254 struct g_audio {
65255@@ -30,7 +39,10 @@ struct g_audio {
65256 	struct usb_gadget *gadget;
65257 
65258 	struct usb_ep *in_ep;
65259+
65260 	struct usb_ep *out_ep;
65261+	/* feedback IN endpoint corresponding to out_ep */
65262+	struct usb_ep *in_ep_fback;
65263 
65264 	/* Max packet size for all in_ep possible speeds */
65265 	unsigned int in_ep_maxpsize;
65266diff --git a/drivers/usb/gadget/function/u_ether.h b/drivers/usb/gadget/function/u_ether.h
65267index 10dd64068..40144546d 100644
65268--- a/drivers/usb/gadget/function/u_ether.h
65269+++ b/drivers/usb/gadget/function/u_ether.h
65270@@ -244,6 +244,18 @@ unsigned gether_get_qmult(struct net_device *net);
65271  */
65272 int gether_get_ifname(struct net_device *net, char *name, int len);
65273 
65274+/**
65275+ * gether_set_ifname - set an ethernet-over-usb link interface name
65276+ * @net: device representing this link
65277+ * @name: new interface name
65278+ * @len: length of @name
65279+ *
65280+ * This sets the interface name of this ethernet-over-usb link.
65281+ * A single terminating newline, if any, is ignored.
65282+ * Returns zero on success, else negative errno.
65283+ */
65284+int gether_set_ifname(struct net_device *net, const char *name, int len);
65285+
65286 void gether_cleanup(struct eth_dev *dev);
65287 
65288 /* connect/disconnect is handled by individual functions */
65289diff --git a/drivers/usb/gadget/function/u_ether_configfs.h b/drivers/usb/gadget/function/u_ether_configfs.h
65290index f982e18a5..f558c3139 100644
65291--- a/drivers/usb/gadget/function/u_ether_configfs.h
65292+++ b/drivers/usb/gadget/function/u_ether_configfs.h
65293@@ -148,7 +148,20 @@ out:									\
65294 		return ret;						\
65295 	}								\
65296 									\
65297-	CONFIGFS_ATTR_RO(_f_##_opts_, ifname)
65298+	static ssize_t _f_##_opts_ifname_store(struct config_item *item, \
65299+					       const char *page, size_t len)\
65300+	{								\
65301+		struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item);	\
65302+		int ret = -EBUSY;					\
65303+									\
65304+		mutex_lock(&opts->lock);				\
65305+		if (!opts->refcnt)					\
65306+			ret = gether_set_ifname(opts->net, page, len);	\
65307+		mutex_unlock(&opts->lock);				\
65308+		return ret ?: len;					\
65309+	}								\
65310+									\
65311+	CONFIGFS_ATTR(_f_##_opts_, ifname)
65312 
65313 #define USB_ETHER_CONFIGFS_ITEM_ATTR_U8_RW(_f_, _n_)			\
65314 	static ssize_t _f_##_opts_##_n_##_show(struct config_item *item,\
65315diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c
65316index 7b54e814a..948eabf3c 100644
65317--- a/drivers/usb/gadget/function/u_serial.c
65318+++ b/drivers/usb/gadget/function/u_serial.c
65319@@ -261,9 +261,7 @@ __acquires(&port->port_lock)
65320 		list_del(&req->list);
65321 		req->zero = kfifo_is_empty(&port->port_write_buf);
65322 
65323-		pr_vdebug("ttyGS%d: tx len=%d, 0x%02x 0x%02x 0x%02x ...\n",
65324-			  port->port_num, len, *((u8 *)req->buf),
65325-			  *((u8 *)req->buf+1), *((u8 *)req->buf+2));
65326+		pr_vdebug("ttyGS%d: tx len=%d, %3ph ...\n", port->port_num, len, req->buf);
65327 
65328 		/* Drop lock while we call out of driver; completions
65329 		 * could be issued while we do so.  Disconnection may
65330@@ -349,7 +347,7 @@ __acquires(&port->port_lock)
65331 }
65332 
65333 /*
65334- * RX tasklet takes data out of the RX queue and hands it up to the TTY
65335+ * RX work takes data out of the RX queue and hands it up to the TTY
65336  * layer until it refuses to take any more data (or is throttled back).
65337  * Then it issues reads for any further data.
65338  *
65339@@ -712,7 +710,7 @@ static void gs_close(struct tty_struct *tty, struct file *file)
65340 
65341 	/* Iff we're disconnected, there can be no I/O in flight so it's
65342 	 * ok to free the circular buffer; else just scrub it.  And don't
65343-	 * let the push tasklet fire again until we're re-opened.
65344+	 * let the push async work fire again until we're re-opened.
65345 	 */
65346 	if (gser == NULL)
65347 		kfifo_free(&port->port_write_buf);
65348diff --git a/drivers/usb/gadget/function/u_uac2.h b/drivers/usb/gadget/function/u_uac2.h
65349index b50357111..179d3ef6a 100644
65350--- a/drivers/usb/gadget/function/u_uac2.h
65351+++ b/drivers/usb/gadget/function/u_uac2.h
65352@@ -21,7 +21,9 @@
65353 #define UAC2_DEF_CCHMASK 0x3
65354 #define UAC2_DEF_CSRATE 64000
65355 #define UAC2_DEF_CSSIZE 2
65356+#define UAC2_DEF_CSYNC		USB_ENDPOINT_SYNC_ASYNC
65357 #define UAC2_DEF_REQ_NUM 2
65358+#define UAC2_DEF_FB_MAX 5
65359 
65360 struct f_uac2_opts {
65361 	struct usb_function_instance	func_inst;
65362@@ -31,7 +33,9 @@ struct f_uac2_opts {
65363 	int				c_chmask;
65364 	int				c_srate;
65365 	int				c_ssize;
65366+	int				c_sync;
65367 	int				req_number;
65368+	int				fb_max;
65369 	bool				bound;
65370 
65371 	struct mutex			lock;
65372diff --git a/drivers/usb/gadget/function/u_uvc.h b/drivers/usb/gadget/function/u_uvc.h
65373index 9a01a7d4f..3e92a28d8 100644
65374--- a/drivers/usb/gadget/function/u_uvc.h
65375+++ b/drivers/usb/gadget/function/u_uvc.h
65376@@ -18,15 +18,18 @@
65377 #include <linux/usb/video.h>
65378 
65379 #define fi_to_f_uvc_opts(f)	container_of(f, struct f_uvc_opts, func_inst)
65380+DECLARE_UVC_EXTENSION_UNIT_DESCRIPTOR(1, 1);
65381 
65382 struct f_uvc_opts {
65383 	struct usb_function_instance			func_inst;
65384+	bool						streaming_bulk;
65385 	unsigned int					streaming_interval;
65386 	unsigned int					streaming_maxpacket;
65387 	unsigned int					streaming_maxburst;
65388 
65389 	unsigned int					control_interface;
65390 	unsigned int					streaming_interface;
65391+	unsigned int					uvc_num_request;
65392 
65393 	/*
65394 	 * Control descriptors array pointers for full-/high-speed and
65395@@ -51,6 +54,7 @@ struct f_uvc_opts {
65396 	struct uvc_camera_terminal_descriptor		uvc_camera_terminal;
65397 	struct uvc_processing_unit_descriptor		uvc_processing;
65398 	struct uvc_output_terminal_descriptor		uvc_output_terminal;
65399+	struct UVC_EXTENSION_UNIT_DESCRIPTOR(1, 1)	uvc_extension;
65400 	struct uvc_color_matching_descriptor		uvc_color_matching;
65401 
65402 	/*
65403@@ -60,8 +64,8 @@ struct f_uvc_opts {
65404 	 * descriptors. Used by configfs only, must not be touched by legacy
65405 	 * gadgets.
65406 	 */
65407-	struct uvc_descriptor_header			*uvc_fs_control_cls[5];
65408-	struct uvc_descriptor_header			*uvc_ss_control_cls[5];
65409+	struct uvc_descriptor_header			*uvc_fs_control_cls[6];
65410+	struct uvc_descriptor_header			*uvc_ss_control_cls[6];
65411 
65412 	/*
65413 	 * Streaming descriptors for full-speed, high-speed and super-speed.
65414@@ -81,6 +85,7 @@ struct f_uvc_opts {
65415 	 */
65416 	struct mutex			lock;
65417 	int				refcnt;
65418+	int				pm_qos_latency;
65419 };
65420 
65421 #endif /* U_UVC_H */
65422diff --git a/drivers/usb/gadget/function/uvc.h b/drivers/usb/gadget/function/uvc.h
65423index 6c4fc4913..8fb61023d 100644
65424--- a/drivers/usb/gadget/function/uvc.h
65425+++ b/drivers/usb/gadget/function/uvc.h
65426@@ -14,7 +14,7 @@
65427 #include <linux/spinlock.h>
65428 #include <linux/usb/composite.h>
65429 #include <linux/videodev2.h>
65430-#include <linux/wait.h>
65431+#include <linux/pm_qos.h>
65432 
65433 #include <media/v4l2-device.h>
65434 #include <media/v4l2-dev.h>
65435@@ -69,6 +69,7 @@ extern unsigned int uvc_gadget_trace_param;
65436 #define UVC_NUM_REQUESTS			4
65437 #define UVC_MAX_REQUEST_SIZE			64
65438 #define UVC_MAX_EVENTS				4
65439+#define UVC_MAX_NUM_REQUESTS			8
65440 
65441 /* ------------------------------------------------------------------------
65442  * Structures
65443@@ -90,8 +91,8 @@ struct uvc_video {
65444 
65445 	/* Requests */
65446 	unsigned int req_size;
65447-	struct usb_request *req[UVC_NUM_REQUESTS];
65448-	__u8 *req_buffer[UVC_NUM_REQUESTS];
65449+	struct usb_request *req[UVC_MAX_NUM_REQUESTS];
65450+	__u8 *req_buffer[UVC_MAX_NUM_REQUESTS];
65451 	struct list_head req_free;
65452 	spinlock_t req_lock;
65453 
65454@@ -118,8 +119,8 @@ struct uvc_device {
65455 	enum uvc_state state;
65456 	struct usb_function func;
65457 	struct uvc_video video;
65458-	bool func_connected;
65459-	wait_queue_head_t func_connected_queue;
65460+	/* for creating and issuing QoS requests */
65461+	struct pm_qos_request pm_qos;
65462 
65463 	/* Descriptors */
65464 	struct {
65465@@ -140,6 +141,7 @@ struct uvc_device {
65466 	/* Events */
65467 	unsigned int event_length;
65468 	unsigned int event_setup_out : 1;
65469+	unsigned int event_suspend : 1;
65470 };
65471 
65472 static inline struct uvc_device *to_uvc(struct usb_function *f)
65473@@ -150,7 +152,6 @@ static inline struct uvc_device *to_uvc(struct usb_function *f)
65474 struct uvc_file_handle {
65475 	struct v4l2_fh vfh;
65476 	struct uvc_video *device;
65477-	bool is_uvc_app_handle;
65478 };
65479 
65480 #define to_uvc_file_handle(handle) \
65481diff --git a/drivers/usb/gadget/function/uvc_configfs.c b/drivers/usb/gadget/function/uvc_configfs.c
65482index 00fb58e50..62c20c0bc 100644
65483--- a/drivers/usb/gadget/function/uvc_configfs.c
65484+++ b/drivers/usb/gadget/function/uvc_configfs.c
65485@@ -12,6 +12,7 @@
65486 
65487 #include <linux/sort.h>
65488 
65489+#include "uvc.h"
65490 #include "u_uvc.h"
65491 #include "uvc_configfs.h"
65492 
65493@@ -767,11 +768,13 @@ static const struct uvcg_config_group_type uvcg_control_grp_type = {
65494 static const char * const uvcg_format_names[] = {
65495 	"uncompressed",
65496 	"mjpeg",
65497+	"framebased",
65498 };
65499 
65500 enum uvcg_format_type {
65501 	UVCG_UNCOMPRESSED = 0,
65502 	UVCG_MJPEG,
65503+	UVCG_FRAMEBASED,
65504 };
65505 
65506 struct uvcg_format {
65507@@ -1077,9 +1080,15 @@ struct uvcg_frame {
65508 		u16	w_height;
65509 		u32	dw_min_bit_rate;
65510 		u32	dw_max_bit_rate;
65511+		/*
+		 * dw_max_video_frame_buffer_size is used only by the
+		 * uncompressed and MJPEG formats
65514+		 */
65515 		u32	dw_max_video_frame_buffer_size;
65516 		u32	dw_default_frame_interval;
65517 		u8	b_frame_interval_type;
+		/* dw_bytes_perline is used only by the frame-based format */
65519+		u32	dw_bytes_perline;
65520 	} __attribute__((packed)) frame;
65521 	u32 *dw_frame_interval;
65522 };
65523@@ -1190,6 +1199,7 @@ UVCG_FRAME_ATTR(dw_min_bit_rate, dwMinBitRate, 32);
65524 UVCG_FRAME_ATTR(dw_max_bit_rate, dwMaxBitRate, 32);
65525 UVCG_FRAME_ATTR(dw_max_video_frame_buffer_size, dwMaxVideoFrameBufferSize, 32);
65526 UVCG_FRAME_ATTR(dw_default_frame_interval, dwDefaultFrameInterval, 32);
65527+UVCG_FRAME_ATTR(dw_bytes_perline, dwBytesPerLine, 32);
65528 
65529 #undef UVCG_FRAME_ATTR
65530 
65531@@ -1324,7 +1334,7 @@ static ssize_t uvcg_frame_dw_frame_interval_store(struct config_item *item,
65532 
65533 UVC_ATTR(uvcg_frame_, dw_frame_interval, dwFrameInterval);
65534 
65535-static struct configfs_attribute *uvcg_frame_attrs[] = {
65536+static struct configfs_attribute *uvcg_frame_attrs1[] = {
65537 	&uvcg_frame_attr_b_frame_index,
65538 	&uvcg_frame_attr_bm_capabilities,
65539 	&uvcg_frame_attr_w_width,
65540@@ -1337,9 +1347,28 @@ static struct configfs_attribute *uvcg_frame_attrs[] = {
65541 	NULL,
65542 };
65543 
65544-static const struct config_item_type uvcg_frame_type = {
65545+static struct configfs_attribute *uvcg_frame_attrs2[] = {
65546+	&uvcg_frame_attr_b_frame_index,
65547+	&uvcg_frame_attr_bm_capabilities,
65548+	&uvcg_frame_attr_w_width,
65549+	&uvcg_frame_attr_w_height,
65550+	&uvcg_frame_attr_dw_min_bit_rate,
65551+	&uvcg_frame_attr_dw_max_bit_rate,
65552+	&uvcg_frame_attr_dw_default_frame_interval,
65553+	&uvcg_frame_attr_dw_frame_interval,
65554+	&uvcg_frame_attr_dw_bytes_perline,
65555+	NULL,
65556+};
65557+
65558+static const struct config_item_type uvcg_frame_type1 = {
65559 	.ct_item_ops	= &uvcg_config_item_ops,
65560-	.ct_attrs	= uvcg_frame_attrs,
65561+	.ct_attrs	= uvcg_frame_attrs1,
65562+	.ct_owner	= THIS_MODULE,
65563+};
65564+
65565+static const struct config_item_type uvcg_frame_type2 = {
65566+	.ct_item_ops	= &uvcg_config_item_ops,
65567+	.ct_attrs	= uvcg_frame_attrs2,
65568 	.ct_owner	= THIS_MODULE,
65569 };
65570 
65571@@ -1363,6 +1392,7 @@ static struct config_item *uvcg_frame_make(struct config_group *group,
65572 	h->frame.dw_max_bit_rate		= 55296000;
65573 	h->frame.dw_max_video_frame_buffer_size	= 460800;
65574 	h->frame.dw_default_frame_interval	= 666666;
65575+	h->frame.dw_bytes_perline = 0;
65576 
65577 	opts_item = group->cg_item.ci_parent->ci_parent->ci_parent;
65578 	opts = to_f_uvc_opts(opts_item);
65579@@ -1375,6 +1405,9 @@ static struct config_item *uvcg_frame_make(struct config_group *group,
65580 	} else if (fmt->type == UVCG_MJPEG) {
65581 		h->frame.b_descriptor_subtype = UVC_VS_FRAME_MJPEG;
65582 		h->fmt_type = UVCG_MJPEG;
65583+	} else if (fmt->type == UVCG_FRAMEBASED) {
65584+		h->frame.b_descriptor_subtype = UVC_VS_FRAME_FRAME_BASED;
65585+		h->fmt_type = UVCG_FRAMEBASED;
65586 	} else {
65587 		mutex_unlock(&opts->lock);
65588 		kfree(h);
65589@@ -1383,7 +1416,10 @@ static struct config_item *uvcg_frame_make(struct config_group *group,
65590 	++fmt->num_frames;
65591 	mutex_unlock(&opts->lock);
65592 
65593-	config_item_init_type_name(&h->item, name, &uvcg_frame_type);
65594+	if (fmt->type == UVCG_FRAMEBASED)
65595+		config_item_init_type_name(&h->item, name, &uvcg_frame_type2);
65596+	else
65597+		config_item_init_type_name(&h->item, name, &uvcg_frame_type1);
65598 
65599 	return &h->item;
65600 }
65601@@ -1413,7 +1449,8 @@ static void uvcg_format_set_indices(struct config_group *fmt)
65602 	list_for_each_entry(ci, &fmt->cg_children, ci_entry) {
65603 		struct uvcg_frame *frm;
65604 
65605-		if (ci->ci_type != &uvcg_frame_type)
65606+		if (ci->ci_type != &uvcg_frame_type1 &&
65607+		    ci->ci_type != &uvcg_frame_type2)
65608 			continue;
65609 
65610 		frm = to_uvcg_frame(ci);
65611@@ -1856,6 +1893,261 @@ static const struct uvcg_config_group_type uvcg_mjpeg_grp_type = {
65612 	.name = "mjpeg",
65613 };
65614 
65615+/* -----------------------------------------------------------------------------
65616+ * streaming/framebased/<NAME>
65617+ */
65618+
65619+struct uvcg_framebased {
65620+	struct uvcg_format		fmt;
65621+	struct uvc_format_framebased	desc;
65622+};
65623+
65624+static struct uvcg_framebased *to_uvcg_framebased(struct config_item *item)
65625+{
65626+	return container_of(
65627+		container_of(to_config_group(item), struct uvcg_format, group),
65628+		struct uvcg_framebased, fmt);
65629+}
65630+
65631+static struct configfs_group_operations uvcg_framebased_group_ops = {
65632+	.make_item		= uvcg_frame_make,
65633+	.drop_item		= uvcg_frame_drop,
65634+};
65635+
65636+#define UVCG_FRAMEBASED_ATTR_RO(cname, aname, bits)			\
65637+static ssize_t uvcg_framebased_##cname##_show(struct config_item *item,\
65638+					char *page)			\
65639+{									\
65640+	struct uvcg_framebased *u = to_uvcg_framebased(item);		\
65641+	struct f_uvc_opts *opts;					\
65642+	struct config_item *opts_item;					\
65643+	struct mutex *su_mutex = &u->fmt.group.cg_subsys->su_mutex;	\
65644+	int result;							\
65645+									\
65646+	mutex_lock(su_mutex); /* for navigating configfs hierarchy */	\
65647+									\
65648+	opts_item = u->fmt.group.cg_item.ci_parent->ci_parent->ci_parent;\
65649+	opts = to_f_uvc_opts(opts_item);				\
65650+									\
65651+	mutex_lock(&opts->lock);					\
65652+	result = sprintf(page, "%u\n", le##bits##_to_cpu(u->desc.aname));\
65653+	mutex_unlock(&opts->lock);					\
65654+									\
65655+	mutex_unlock(su_mutex);						\
65656+	return result;							\
65657+}									\
65658+									\
65659+UVC_ATTR_RO(uvcg_framebased_, cname, aname)
65660+
65661+#define UVCG_FRAMEBASED_ATTR(cname, aname, bits)			\
65662+static ssize_t uvcg_framebased_##cname##_show(struct config_item *item,\
65663+				char *page)\
65664+{									\
65665+	struct uvcg_framebased *u = to_uvcg_framebased(item);		\
65666+	struct f_uvc_opts *opts;					\
65667+	struct config_item *opts_item;					\
65668+	struct mutex *su_mutex = &u->fmt.group.cg_subsys->su_mutex;	\
65669+	int result;							\
65670+									\
65671+	mutex_lock(su_mutex); /* for navigating configfs hierarchy */	\
65672+									\
65673+	opts_item = u->fmt.group.cg_item.ci_parent->ci_parent->ci_parent;\
65674+	opts = to_f_uvc_opts(opts_item);				\
65675+									\
65676+	mutex_lock(&opts->lock);					\
65677+	result = sprintf(page, "%u\n", le##bits##_to_cpu(u->desc.aname));\
65678+	mutex_unlock(&opts->lock);					\
65679+									\
65680+	mutex_unlock(su_mutex);						\
65681+	return result;							\
65682+}									\
65683+									\
65684+static ssize_t								\
65685+uvcg_framebased_##cname##_store(struct config_item *item,		\
65686+			   const char *page, size_t len)		\
65687+{									\
65688+	struct uvcg_framebased *u = to_uvcg_framebased(item);		\
65689+	struct f_uvc_opts *opts;					\
65690+	struct config_item *opts_item;					\
65691+	struct mutex *su_mutex = &u->fmt.group.cg_subsys->su_mutex;	\
65692+	int ret;							\
65693+	u8 num;								\
65694+									\
65695+	mutex_lock(su_mutex); /* for navigating configfs hierarchy */	\
65696+									\
65697+	opts_item = u->fmt.group.cg_item.ci_parent->ci_parent->ci_parent;\
65698+	opts = to_f_uvc_opts(opts_item);				\
65699+									\
65700+	mutex_lock(&opts->lock);					\
65701+	if (u->fmt.linked || opts->refcnt) {				\
65702+		ret = -EBUSY;						\
65703+		goto end;						\
65704+	}								\
65705+									\
65706+	ret = kstrtou8(page, 0, &num);					\
65707+	if (ret)							\
65708+		goto end;						\
65709+									\
65710+	if (num > 255) {						\
65711+		ret = -EINVAL;						\
65712+		goto end;						\
65713+	}								\
65714+	u->desc.aname = num;						\
65715+	ret = len;							\
65716+end:									\
65717+	mutex_unlock(&opts->lock);					\
65718+	mutex_unlock(su_mutex);						\
65719+	return ret;							\
65720+}									\
65721+									\
65722+UVC_ATTR(uvcg_framebased_, cname, aname)
65723+
65724+UVCG_FRAMEBASED_ATTR_RO(b_format_index, bFormatIndex, 8);
65725+UVCG_FRAMEBASED_ATTR_RO(b_bits_per_pixel, bBitsPerPixel, 8);
65726+UVCG_FRAMEBASED_ATTR(b_default_frame_index, bDefaultFrameIndex, 8);
65727+UVCG_FRAMEBASED_ATTR_RO(b_aspect_ratio_x, bAspectRatioX, 8);
65728+UVCG_FRAMEBASED_ATTR_RO(b_aspect_ratio_y, bAspectRatioY, 8);
65729+UVCG_FRAMEBASED_ATTR_RO(bm_interface_flags, bmInterfaceFlags, 8);
65730+
65731+#undef UVCG_FRAMEBASED_ATTR
65732+#undef UVCG_FRAMEBASED_ATTR_RO
65733+
65734+static ssize_t uvcg_framebased_guid_format_show(struct config_item *item,
65735+							char *page)
65736+{
65737+	struct uvcg_framebased *ch = to_uvcg_framebased(item);
65738+	struct f_uvc_opts *opts;
65739+	struct config_item *opts_item;
65740+	struct mutex *su_mutex = &ch->fmt.group.cg_subsys->su_mutex;
65741+
65742+	mutex_lock(su_mutex); /* for navigating configfs hierarchy */
65743+
65744+	opts_item = ch->fmt.group.cg_item.ci_parent->ci_parent->ci_parent;
65745+	opts = to_f_uvc_opts(opts_item);
65746+
65747+	mutex_lock(&opts->lock);
65748+	memcpy(page, ch->desc.guidFormat, sizeof(ch->desc.guidFormat));
65749+	mutex_unlock(&opts->lock);
65750+
65751+	mutex_unlock(su_mutex);
65752+
65753+	return sizeof(ch->desc.guidFormat);
65754+}
65755+
65756+static ssize_t uvcg_framebased_guid_format_store(struct config_item *item,
65757+						   const char *page, size_t len)
65758+{
65759+	struct uvcg_framebased *ch = to_uvcg_framebased(item);
65760+	struct f_uvc_opts *opts;
65761+	struct config_item *opts_item;
65762+	struct mutex *su_mutex = &ch->fmt.group.cg_subsys->su_mutex;
65763+	int ret;
65764+
65765+	mutex_lock(su_mutex); /* for navigating configfs hierarchy */
65766+
65767+	opts_item = ch->fmt.group.cg_item.ci_parent->ci_parent->ci_parent;
65768+	opts = to_f_uvc_opts(opts_item);
65769+
65770+	mutex_lock(&opts->lock);
65771+	if (ch->fmt.linked || opts->refcnt) {
65772+		ret = -EBUSY;
65773+		goto end;
65774+	}
65775+
65776+	memcpy(ch->desc.guidFormat, page,
65777+	       min(sizeof(ch->desc.guidFormat), len));
65778+	ret = sizeof(ch->desc.guidFormat);
65779+
65780+end:
65781+	mutex_unlock(&opts->lock);
65782+	mutex_unlock(su_mutex);
65783+	return ret;
65784+}
65785+
65786+UVC_ATTR(uvcg_framebased_, guid_format, guidFormat);
65787+
65788+static inline ssize_t
65789+uvcg_framebased_bma_controls_show(struct config_item *item, char *page)
65790+{
65791+	struct uvcg_framebased *u = to_uvcg_framebased(item);
65792+
65793+	return uvcg_format_bma_controls_show(&u->fmt, page);
65794+}
65795+
65796+static inline ssize_t
65797+uvcg_framebased_bma_controls_store(struct config_item *item,
65798+				     const char *page, size_t len)
65799+{
65800+	struct uvcg_framebased *u = to_uvcg_framebased(item);
65801+
65802+	return uvcg_format_bma_controls_store(&u->fmt, page, len);
65803+}
65804+
65805+UVC_ATTR(uvcg_framebased_, bma_controls, bmaControls);
65806+
65807+static struct configfs_attribute *uvcg_framebased_attrs[] = {
65808+	&uvcg_framebased_attr_b_format_index,
65809+	&uvcg_framebased_attr_b_default_frame_index,
65810+	&uvcg_framebased_attr_b_bits_per_pixel,
65811+	&uvcg_framebased_attr_b_aspect_ratio_x,
65812+	&uvcg_framebased_attr_b_aspect_ratio_y,
65813+	&uvcg_framebased_attr_bm_interface_flags,
65814+	&uvcg_framebased_attr_bma_controls,
65815+	&uvcg_framebased_attr_guid_format,
65816+	NULL,
65817+};
65818+
65819+static const struct config_item_type uvcg_framebased_type = {
65820+	.ct_item_ops	= &uvcg_config_item_ops,
65821+	.ct_group_ops	= &uvcg_framebased_group_ops,
65822+	.ct_attrs	= uvcg_framebased_attrs,
65823+	.ct_owner	= THIS_MODULE,
65824+};
65825+
65826+static struct config_group *uvcg_framebased_make(struct config_group *group,
65827+						   const char *name)
65828+{
+	static char guid[] = { /* Declare the frame-based format GUID as H.264 */
65830+		'H',  '2',  '6',  '4', 0x00, 0x00, 0x10, 0x00,
65831+		0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71
65832+	};
65833+	struct uvcg_framebased *f;
65834+
65835+	f = kzalloc(sizeof(*f), GFP_KERNEL);
65836+	if (!f)
65837+		return ERR_PTR(-ENOMEM);
65838+
65839+	f->desc.bLength			= UVC_DT_FORMAT_FRAMEBASED_SIZE;
65840+	f->desc.bDescriptorType		= USB_DT_CS_INTERFACE;
65841+	f->desc.bDescriptorSubType	= UVC_VS_FORMAT_FRAME_BASED;
65842+	memcpy(f->desc.guidFormat, guid, sizeof(guid));
65843+	f->desc.bBitsPerPixel		= 16;
65844+	f->desc.bDefaultFrameIndex	= 1;
65845+	f->desc.bAspectRatioX		= 0;
65846+	f->desc.bAspectRatioY		= 0;
65847+	f->desc.bmInterfaceFlags	= 0;
65848+	f->desc.bCopyProtect		= 0;
65849+	f->desc.bVariableSize		= 1;
65850+
65851+	f->fmt.type = UVCG_FRAMEBASED;
65852+	config_group_init_type_name(&f->fmt.group, name,
65853+				    &uvcg_framebased_type);
65854+
65855+	return &f->fmt.group;
65856+}
65857+
65858+static struct configfs_group_operations uvcg_framebased_grp_ops = {
65859+	.make_group		= uvcg_framebased_make,
65860+};
65861+static const struct uvcg_config_group_type uvcg_framebased_grp_type = {
65862+	.type = {
65863+		.ct_item_ops	= &uvcg_config_item_ops,
65864+		.ct_group_ops	= &uvcg_framebased_grp_ops,
65865+		.ct_owner	= THIS_MODULE,
65866+	},
65867+	.name = "framebased",
65868+};
65869+
65870 /* -----------------------------------------------------------------------------
65871  * streaming/color_matching/default
65872  */
65873@@ -2049,6 +2341,10 @@ static int __uvcg_cnt_strm(void *priv1, void *priv2, void *priv3, int n,
65874 				container_of(fmt, struct uvcg_mjpeg, fmt);
65875 
65876 			*size += sizeof(m->desc);
65877+		} else if (fmt->type == UVCG_FRAMEBASED) {
65878+			struct uvcg_framebased *f =
65879+				container_of(fmt, struct uvcg_framebased, fmt);
65880+			*size += sizeof(f->desc);
65881 		} else {
65882 			return -EINVAL;
65883 		}
65884@@ -2059,6 +2355,11 @@ static int __uvcg_cnt_strm(void *priv1, void *priv2, void *priv3, int n,
65885 		int sz = sizeof(frm->dw_frame_interval);
65886 
65887 		*size += sizeof(frm->frame);
65888+		/*
+		 * frm->frame carries both dw_max_video_frame_buffer_size and
+		 * dw_bytes_perline, but each descriptor uses only one of
+		 * them, so subtract the extra u32
65891+		 */
65892+		*size -= sizeof(u32);
65893 		*size += frm->frame.b_frame_interval_type * sz;
65894 	}
65895 	break;
65896@@ -2069,6 +2370,27 @@ static int __uvcg_cnt_strm(void *priv1, void *priv2, void *priv3, int n,
65897 	return 0;
65898 }
65899 
65900+static int __uvcg_copy_framebased_desc(void *dest, struct uvcg_frame *frm,
65901+				       int sz)
65902+{
65903+	struct uvc_frame_framebased *desc = dest;
65904+
65905+	desc->bLength = frm->frame.b_length;
65906+	desc->bDescriptorType = frm->frame.b_descriptor_type;
65907+	desc->bDescriptorSubType = frm->frame.b_descriptor_subtype;
65908+	desc->bFrameIndex = frm->frame.b_frame_index;
65909+	desc->bmCapabilities = frm->frame.bm_capabilities;
65910+	desc->wWidth = frm->frame.w_width;
65911+	desc->wHeight = frm->frame.w_height;
65912+	desc->dwMinBitRate = frm->frame.dw_min_bit_rate;
65913+	desc->dwMaxBitRate = frm->frame.dw_max_bit_rate;
65914+	desc->dwDefaultFrameInterval = frm->frame.dw_default_frame_interval;
65915+	desc->bFrameIntervalType = frm->frame.b_frame_interval_type;
65916+	desc->dwBytesPerLine = frm->frame.dw_bytes_perline;
65917+
65918+	return 0;
65919+}
65920+
65921 /*
65922  * Fill an array of streaming descriptors.
65923  *
65924@@ -2123,6 +2445,15 @@ static int __uvcg_fill_strm(void *priv1, void *priv2, void *priv3, int n,
65925 			m->desc.bNumFrameDescriptors = fmt->num_frames;
65926 			memcpy(*dest, &m->desc, sizeof(m->desc));
65927 			*dest += sizeof(m->desc);
65928+		} else if (fmt->type == UVCG_FRAMEBASED) {
65929+			struct uvcg_framebased *f =
65930+				container_of(fmt, struct uvcg_framebased,
65931+					     fmt);
65932+
65933+			f->desc.bFormatIndex = n + 1;
65934+			f->desc.bNumFrameDescriptors = fmt->num_frames;
65935+			memcpy(*dest, &f->desc, sizeof(f->desc));
65936+			*dest += sizeof(f->desc);
65937 		} else {
65938 			return -EINVAL;
65939 		}
65940@@ -2132,8 +2463,11 @@ static int __uvcg_fill_strm(void *priv1, void *priv2, void *priv3, int n,
65941 		struct uvcg_frame *frm = priv1;
65942 		struct uvc_descriptor_header *h = *dest;
65943 
65944-		sz = sizeof(frm->frame);
65945-		memcpy(*dest, &frm->frame, sz);
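+		/* frm->frame is 4 bytes larger than the wire descriptor layout */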
65946+		sz = sizeof(frm->frame) - 4;
65947+		if (frm->fmt_type != UVCG_FRAMEBASED)
65948+			memcpy(*dest, &frm->frame, sz);
65949+		else
65950+			__uvcg_copy_framebased_desc(*dest, frm, sz);
65951 		*dest += sz;
65952 		sz = frm->frame.b_frame_interval_type *
65953 			sizeof(*frm->dw_frame_interval);
65954@@ -2145,6 +2479,9 @@ static int __uvcg_fill_strm(void *priv1, void *priv2, void *priv3, int n,
65955 		else if (frm->fmt_type == UVCG_MJPEG)
65956 			h->bLength = UVC_DT_FRAME_MJPEG_SIZE(
65957 				frm->frame.b_frame_interval_type);
65958+		else if (frm->fmt_type == UVCG_FRAMEBASED)
65959+			h->bLength = UVC_DT_FRAME_FRAMEBASED_SIZE(
65960+				 frm->frame.b_frame_interval_type);
65961 	}
65962 	break;
65963 	}
65964@@ -2357,6 +2694,7 @@ static const struct uvcg_config_group_type uvcg_streaming_grp_type = {
65965 		&uvcg_streaming_header_grp_type,
65966 		&uvcg_uncompressed_grp_type,
65967 		&uvcg_mjpeg_grp_type,
65968+		&uvcg_framebased_grp_type,
65969 		&uvcg_color_matching_grp_type,
65970 		&uvcg_streaming_class_grp_type,
65971 		NULL,
65972@@ -2424,16 +2762,22 @@ end:									\
65973 									\
65974 UVC_ATTR(f_uvc_opts_, cname, cname)
65975 
65976+UVCG_OPTS_ATTR(streaming_bulk, streaming_bulk, 1);
65977 UVCG_OPTS_ATTR(streaming_interval, streaming_interval, 16);
65978 UVCG_OPTS_ATTR(streaming_maxpacket, streaming_maxpacket, 3072);
65979 UVCG_OPTS_ATTR(streaming_maxburst, streaming_maxburst, 15);
65980+UVCG_OPTS_ATTR(uvc_num_request, uvc_num_request, UVC_MAX_NUM_REQUESTS);
65981+UVCG_OPTS_ATTR(pm_qos_latency, pm_qos_latency, PM_QOS_LATENCY_ANY);
65982 
65983 #undef UVCG_OPTS_ATTR
65984 
65985 static struct configfs_attribute *uvc_attrs[] = {
65986+	&f_uvc_opts_attr_streaming_bulk,
65987 	&f_uvc_opts_attr_streaming_interval,
65988 	&f_uvc_opts_attr_streaming_maxpacket,
65989 	&f_uvc_opts_attr_streaming_maxburst,
65990+	&f_uvc_opts_attr_uvc_num_request,
65991+	&f_uvc_opts_attr_pm_qos_latency,
65992 	NULL,
65993 };
65994 
65995diff --git a/drivers/usb/gadget/function/uvc_queue.c b/drivers/usb/gadget/function/uvc_queue.c
65996index cab1e3046..0f5e6fb93 100644
65997--- a/drivers/usb/gadget/function/uvc_queue.c
65998+++ b/drivers/usb/gadget/function/uvc_queue.c
65999@@ -124,6 +124,14 @@ int uvcg_queue_init(struct uvc_video_queue *queue, enum v4l2_buf_type type,
66000 	queue->queue.mem_ops = &vb2_vmalloc_memops;
66001 	queue->queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC
66002 				     | V4L2_BUF_FLAG_TSTAMP_SRC_EOF;
66003+	/*
+	 * On the Rockchip platform the userspace UVC application
+	 * uses bytesused == 0 to indicate that the buffer data is
+	 * all zero and unused.
66007+	 */
66008+#ifdef CONFIG_ARCH_ROCKCHIP
66009+	queue->queue.allow_zero_bytesused = 1;
66010+#endif
66011 	ret = vb2_queue_init(&queue->queue);
66012 	if (ret)
66013 		return ret;
66014diff --git a/drivers/usb/gadget/function/uvc_v4l2.c b/drivers/usb/gadget/function/uvc_v4l2.c
66015index 65abd55ce..2528c8942 100644
66016--- a/drivers/usb/gadget/function/uvc_v4l2.c
66017+++ b/drivers/usb/gadget/function/uvc_v4l2.c
66018@@ -41,6 +41,7 @@ uvc_send_response(struct uvc_device *uvc, struct uvc_request_data *data)
66019 	req->length = min_t(unsigned int, uvc->event_length, data->length);
66020 	req->zero = data->length < uvc->event_length;
66021 
66022+	uvc_trace(UVC_TRACE_CONTROL, "%s: req len %d\n", __func__, req->length);
66023 	memcpy(req->buf, data->data, req->length);
66024 
66025 	return usb_ep_queue(cdev->gadget->ep0, req, GFP_KERNEL);
66026@@ -58,6 +59,8 @@ struct uvc_format {
66027 static struct uvc_format uvc_formats[] = {
66028 	{ 16, V4L2_PIX_FMT_YUYV  },
66029 	{ 0,  V4L2_PIX_FMT_MJPEG },
66030+	{ 0,  V4L2_PIX_FMT_H264  },
66031+	{ 0,  V4L2_PIX_FMT_H265  },
66032 };
66033 
66034 static int
66035@@ -201,11 +204,21 @@ uvc_v4l2_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
66036 		return ret;
66037 
66038 	/*
66039-	 * Complete the alternate setting selection setup phase now that
66040-	 * userspace is ready to provide video frames.
+	 * Alternate settings in an interface are supported only for
+	 * isochronous endpoints, which need separate zero-bandwidth and
+	 * full-bandwidth settings; bulk endpoints have a single
+	 * alternate setting.
66046 	 */
66047-	uvc_function_setup_continue(uvc);
66048-	uvc->state = UVC_STATE_STREAMING;
66049+	if (!usb_endpoint_xfer_bulk(video->ep->desc)) {
66050+		/*
66051+		 * Complete the alternate setting selection
66052+		 * setup phase now that userspace is ready
66053+		 * to provide video frames.
66054+		 */
66055+		uvc_function_setup_continue(uvc);
66056+		uvc->state = UVC_STATE_STREAMING;
66057+	}
66058 
66059 	return 0;
66060 }
66061@@ -227,56 +240,17 @@ static int
66062 uvc_v4l2_subscribe_event(struct v4l2_fh *fh,
66063 			 const struct v4l2_event_subscription *sub)
66064 {
66065-	struct uvc_device *uvc = video_get_drvdata(fh->vdev);
66066-	struct uvc_file_handle *handle = to_uvc_file_handle(fh);
66067-	int ret;
66068-
66069 	if (sub->type < UVC_EVENT_FIRST || sub->type > UVC_EVENT_LAST)
66070 		return -EINVAL;
66071 
66072-	if (sub->type == UVC_EVENT_SETUP && uvc->func_connected)
66073-		return -EBUSY;
66074-
66075-	ret = v4l2_event_subscribe(fh, sub, 2, NULL);
66076-	if (ret < 0)
66077-		return ret;
66078-
66079-	if (sub->type == UVC_EVENT_SETUP) {
66080-		uvc->func_connected = true;
66081-		handle->is_uvc_app_handle = true;
66082-		uvc_function_connect(uvc);
66083-	}
66084-
66085-	return 0;
66086-}
66087-
66088-static void uvc_v4l2_disable(struct uvc_device *uvc)
66089-{
66090-	uvc_function_disconnect(uvc);
66091-	uvcg_video_enable(&uvc->video, 0);
66092-	uvcg_free_buffers(&uvc->video.queue);
66093-	uvc->func_connected = false;
66094-	wake_up_interruptible(&uvc->func_connected_queue);
66095+	return v4l2_event_subscribe(fh, sub, 2, NULL);
66096 }
66097 
66098 static int
66099 uvc_v4l2_unsubscribe_event(struct v4l2_fh *fh,
66100 			   const struct v4l2_event_subscription *sub)
66101 {
66102-	struct uvc_device *uvc = video_get_drvdata(fh->vdev);
66103-	struct uvc_file_handle *handle = to_uvc_file_handle(fh);
66104-	int ret;
66105-
66106-	ret = v4l2_event_unsubscribe(fh, sub);
66107-	if (ret < 0)
66108-		return ret;
66109-
66110-	if (sub->type == UVC_EVENT_SETUP && handle->is_uvc_app_handle) {
66111-		uvc_v4l2_disable(uvc);
66112-		handle->is_uvc_app_handle = false;
66113-	}
66114-
66115-	return 0;
66116+	return v4l2_event_unsubscribe(fh, sub);
66117 }
66118 
66119 static long
66120@@ -331,6 +305,7 @@ uvc_v4l2_open(struct file *file)
66121 	handle->device = &uvc->video;
66122 	file->private_data = &handle->vfh;
66123 
66124+	uvc_function_connect(uvc);
66125 	return 0;
66126 }
66127 
66128@@ -342,9 +317,11 @@ uvc_v4l2_release(struct file *file)
66129 	struct uvc_file_handle *handle = to_uvc_file_handle(file->private_data);
66130 	struct uvc_video *video = handle->device;
66131 
66132+	uvc_function_disconnect(uvc);
66133+
66134 	mutex_lock(&video->mutex);
66135-	if (handle->is_uvc_app_handle)
66136-		uvc_v4l2_disable(uvc);
66137+	uvcg_video_enable(video, 0);
66138+	uvcg_free_buffers(&video->queue);
66139 	mutex_unlock(&video->mutex);
66140 
66141 	file->private_data = NULL;
66142@@ -390,6 +367,9 @@ const struct v4l2_file_operations uvc_v4l2_fops = {
66143 	.open		= uvc_v4l2_open,
66144 	.release	= uvc_v4l2_release,
66145 	.unlocked_ioctl	= video_ioctl2,
66146+#ifdef CONFIG_COMPAT
66147+	.compat_ioctl32	= video_ioctl2,
66148+#endif
66149 	.mmap		= uvc_v4l2_mmap,
66150 	.poll		= uvc_v4l2_poll,
66151 #ifndef CONFIG_MMU
66152diff --git a/drivers/usb/gadget/function/uvc_video.c b/drivers/usb/gadget/function/uvc_video.c
66153index 5ce548c23..cae5c2b62 100644
66154--- a/drivers/usb/gadget/function/uvc_video.c
66155+++ b/drivers/usb/gadget/function/uvc_video.c
66156@@ -12,12 +12,14 @@
66157 #include <linux/usb/ch9.h>
66158 #include <linux/usb/gadget.h>
66159 #include <linux/usb/video.h>
66160+#include <linux/pm_qos.h>
66161 
66162 #include <media/v4l2-dev.h>
66163 
66164 #include "uvc.h"
66165 #include "uvc_queue.h"
66166 #include "uvc_video.h"
66167+#include "u_uvc.h"
66168 
66169 /* --------------------------------------------------------------------------
66170  * Video codecs
66171@@ -87,6 +89,7 @@ uvc_video_encode_bulk(struct usb_request *req, struct uvc_video *video,
66172 		video->fid ^= UVC_STREAM_FID;
66173 
66174 		video->payload_size = 0;
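+		/*
+		 * Request a zero-length packet if this transfer ends on a
+		 * wMaxPacketSize boundary, so the payload always terminates
+		 * with a short packet.
+		 */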
66175+		req->zero = 1;
66176 	}
66177 
66178 	if (video->payload_size == video->max_payload_size ||
66179@@ -135,7 +138,7 @@ static int uvcg_video_ep_queue(struct uvc_video *video, struct usb_request *req)
66180 			 ret);
66181 
66182 		/* Isochronous endpoints can't be halted. */
66183-		if (usb_endpoint_xfer_bulk(video->ep->desc))
66184+		if (video->ep->desc && usb_endpoint_xfer_bulk(video->ep->desc))
66185 			usb_ep_set_halt(video->ep);
66186 	}
66187 
66188@@ -176,8 +179,13 @@ static int
66189 uvc_video_free_requests(struct uvc_video *video)
66190 {
66191 	unsigned int i;
66192+	struct uvc_device *uvc;
66193+	struct f_uvc_opts *opts;
66194 
66195-	for (i = 0; i < UVC_NUM_REQUESTS; ++i) {
66196+	uvc = container_of(video, struct uvc_device, video);
66197+	opts = fi_to_f_uvc_opts(uvc->func.fi);
66198+
66199+	for (i = 0; i < opts->uvc_num_request; ++i) {
66200 		if (video->req[i]) {
66201 			usb_ep_free_request(video->ep, video->req[i]);
66202 			video->req[i] = NULL;
66203@@ -200,14 +208,24 @@ uvc_video_alloc_requests(struct uvc_video *video)
66204 	unsigned int req_size;
66205 	unsigned int i;
66206 	int ret = -ENOMEM;
66207+	struct uvc_device *uvc;
66208+	struct f_uvc_opts *opts;
66209+
66210+	uvc = container_of(video, struct uvc_device, video);
66211+	opts = fi_to_f_uvc_opts(uvc->func.fi);
66212 
66213 	BUG_ON(video->req_size);
66214 
66215-	req_size = video->ep->maxpacket
66216-		 * max_t(unsigned int, video->ep->maxburst, 1)
66217-		 * (video->ep->mult);
66218+	if (!usb_endpoint_xfer_bulk(video->ep->desc)) {
66219+		req_size = video->ep->maxpacket
66220+			 * max_t(unsigned int, video->ep->maxburst, 1)
66221+			 * (video->ep->mult);
66222+	} else {
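+		/* Bulk endpoints have no high-bandwidth mult factor */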
66223+		req_size = video->ep->maxpacket
66224+			 * max_t(unsigned int, video->ep->maxburst, 1);
66225+	}
66226 
66227-	for (i = 0; i < UVC_NUM_REQUESTS; ++i) {
66228+	for (i = 0; i < opts->uvc_num_request; ++i) {
66229 		video->req_buffer[i] = kmalloc(req_size, GFP_KERNEL);
66230 		if (video->req_buffer[i] == NULL)
66231 			goto error;
66232@@ -301,6 +319,8 @@ int uvcg_video_enable(struct uvc_video *video, int enable)
66233 {
66234 	unsigned int i;
66235 	int ret;
66236+	struct uvc_device *uvc;
66237+	struct f_uvc_opts *opts;
66238 
66239 	if (video->ep == NULL) {
66240 		uvcg_info(&video->uvc->func,
66241@@ -308,19 +328,25 @@ int uvcg_video_enable(struct uvc_video *video, int enable)
66242 		return -ENODEV;
66243 	}
66244 
66245+	uvc = container_of(video, struct uvc_device, video);
66246+	opts = fi_to_f_uvc_opts(uvc->func.fi);
66247+
66248 	if (!enable) {
66249 		cancel_work_sync(&video->pump);
66250 		uvcg_queue_cancel(&video->queue, 0);
66251 
66252-		for (i = 0; i < UVC_NUM_REQUESTS; ++i)
66253+		for (i = 0; i < opts->uvc_num_request; ++i)
66254 			if (video->req[i])
66255 				usb_ep_dequeue(video->ep, video->req[i]);
66256 
66257 		uvc_video_free_requests(video);
66258 		uvcg_queue_enable(&video->queue, 0);
66259+		if (cpu_latency_qos_request_active(&uvc->pm_qos))
66260+			cpu_latency_qos_remove_request(&uvc->pm_qos);
66261 		return 0;
66262 	}
66263 
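+	/*
+	 * Hold a CPU latency QoS request for the duration of streaming; it
+	 * is removed again in the disable path above.
+	 */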
66264+	cpu_latency_qos_add_request(&uvc->pm_qos, opts->pm_qos_latency);
66265 	if ((ret = uvcg_queue_enable(&video->queue, 1)) < 0)
66266 		return ret;
66267 
66268diff --git a/drivers/usb/gadget/udc/Kconfig b/drivers/usb/gadget/udc/Kconfig
66269index f28e1bbd5..6c8842e54 100644
66270--- a/drivers/usb/gadget/udc/Kconfig
66271+++ b/drivers/usb/gadget/udc/Kconfig
66272@@ -330,7 +330,6 @@ config USB_AMD5536UDC
66273 config USB_FSL_QE
66274 	tristate "Freescale QE/CPM USB Device Controller"
66275 	depends on FSL_SOC && (QUICC_ENGINE || CPM)
66276-	depends on !64BIT || BROKEN
66277 	help
66278 	   Some of Freescale PowerPC processors have a Full Speed
66279 	   QE/CPM2 USB controller, which support device mode with 4
66280diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
66281index 3a3b5a03d..a0c281293 100644
66282--- a/drivers/usb/gadget/udc/core.c
66283+++ b/drivers/usb/gadget/udc/core.c
66284@@ -29,6 +29,7 @@
66285  * @list: for use by the udc class driver
66286  * @vbus: for udcs who care about vbus status, this value is real vbus status;
66287  * for udcs who do not care about vbus status, this value is always true
+ * @started: the UDC's started state. True if the UDC has been started.
66289  *
66290  * This represents the internal data structure which is used by the UDC-class
66291  * to hold information about udc driver and gadget together.
66292@@ -39,6 +40,7 @@ struct usb_udc {
66293 	struct device			dev;
66294 	struct list_head		list;
66295 	bool				vbus;
66296+	bool				started;
66297 };
66298 
66299 static struct class *udc_class;
66300@@ -87,7 +89,7 @@ EXPORT_SYMBOL_GPL(usb_ep_set_maxpacket_limit);
66301  * configurable, with more generic names like "ep-a".  (remember that for
66302  * USB, "in" means "towards the USB host".)
66303  *
66304- * This routine must be called in process context.
66305+ * This routine may be called in an atomic (interrupt) context.
66306  *
66307  * returns zero, or a negative error code.
66308  */
66309@@ -132,7 +134,7 @@ EXPORT_SYMBOL_GPL(usb_ep_enable);
66310  * gadget drivers must call usb_ep_enable() again before queueing
66311  * requests to the endpoint.
66312  *
66313- * This routine must be called in process context.
66314+ * This routine may be called in an atomic (interrupt) context.
66315  *
66316  * returns zero, or a negative error code.
66317  */
66318@@ -755,7 +757,7 @@ int usb_gadget_deactivate(struct usb_gadget *gadget)
66319 {
66320 	int ret = 0;
66321 
66322-	if (gadget->deactivated)
66323+	if (!gadget || gadget->deactivated)
66324 		goto out;
66325 
66326 	if (gadget->connected) {
66327@@ -1004,6 +1006,25 @@ int usb_gadget_ep_match_desc(struct usb_gadget *gadget,
66328 }
66329 EXPORT_SYMBOL_GPL(usb_gadget_ep_match_desc);
66330 
66331+/**
+ * usb_gadget_check_config - checks if the UDC can support the bound
66333+ *	configuration
66334+ * @gadget: controller to check the USB configuration
66335+ *
66336+ * Ensure that a UDC is able to support the requested resources by a
66337+ * configuration, and that there are no resource limitations, such as
66338+ * internal memory allocated to all requested endpoints.
66339+ *
66340+ * Returns zero on success, else a negative errno.
66341+ */
66342+int usb_gadget_check_config(struct usb_gadget *gadget)
66343+{
66344+	if (gadget->ops->check_config)
66345+		return gadget->ops->check_config(gadget);
66346+	return 0;
66347+}
66348+EXPORT_SYMBOL_GPL(usb_gadget_check_config);
66349+
66350 /* ------------------------------------------------------------------------- */
66351 
66352 static void usb_gadget_state_work(struct work_struct *work)
66353@@ -1085,7 +1106,18 @@ EXPORT_SYMBOL_GPL(usb_gadget_udc_reset);
66354  */
66355 static inline int usb_gadget_udc_start(struct usb_udc *udc)
66356 {
66357-	return udc->gadget->ops->udc_start(udc->gadget, udc->driver);
66358+	int ret;
66359+
66360+	if (udc->started) {
66361+		dev_err(&udc->dev, "UDC had already started\n");
66362+		return -EBUSY;
66363+	}
66364+
66365+	ret = udc->gadget->ops->udc_start(udc->gadget, udc->driver);
66366+	if (!ret)
66367+		udc->started = true;
66368+
66369+	return ret;
66370 }
66371 
66372 /**
66373@@ -1101,7 +1133,13 @@ static inline int usb_gadget_udc_start(struct usb_udc *udc)
66374  */
66375 static inline void usb_gadget_udc_stop(struct usb_udc *udc)
66376 {
66377+	if (!udc->started) {
66378+		dev_err(&udc->dev, "UDC had already stopped\n");
66379+		return;
66380+	}
66381+
66382 	udc->gadget->ops->udc_stop(udc->gadget);
66383+	udc->started = false;
66384 }
66385 
66386 /**
66387@@ -1117,12 +1155,65 @@ static inline void usb_gadget_udc_stop(struct usb_udc *udc)
66388 static inline void usb_gadget_udc_set_speed(struct usb_udc *udc,
66389 					    enum usb_device_speed speed)
66390 {
66391-	if (udc->gadget->ops->udc_set_speed) {
66392-		enum usb_device_speed s;
66393+	struct usb_gadget *gadget = udc->gadget;
66394+	enum usb_device_speed s;
66395 
66396-		s = min(speed, udc->gadget->max_speed);
66397-		udc->gadget->ops->udc_set_speed(udc->gadget, s);
66398-	}
66399+	if (speed == USB_SPEED_UNKNOWN)
66400+		s = gadget->max_speed;
66401+	else
66402+		s = min(speed, gadget->max_speed);
66403+
66404+	if (s == USB_SPEED_SUPER_PLUS && gadget->ops->udc_set_ssp_rate)
66405+		gadget->ops->udc_set_ssp_rate(gadget, gadget->max_ssp_rate);
66406+	else if (gadget->ops->udc_set_speed)
66407+		gadget->ops->udc_set_speed(gadget, s);
66408+}
66409+
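/*
 * Illustrative sketch (not part of this patch): the ->udc_set_ssp_rate()
 * hook that the SuperSpeed Plus branch above calls into. A controller driver
 * typically just records the requested signaling rate and applies it the
 * next time the controller is (re)started. The example_udc type and its
 * fields are invented names used only for illustration.
 */
static void example_udc_set_ssp_rate(struct usb_gadget *gadget,
				     enum usb_ssp_rate rate)
{
	struct example_udc *udc = gadget_to_example_udc(gadget);
	unsigned long flags;

	spin_lock_irqsave(&udc->lock, flags);
	udc->gadget_ssp_rate = rate;	/* consumed on the next controller start */
	spin_unlock_irqrestore(&udc->lock, flags);
}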
66410+/**
66411+ * usb_gadget_enable_async_callbacks - tell usb device controller to enable asynchronous callbacks
66412+ * @udc: The UDC which should enable async callbacks
66413+ *
66414+ * This routine is used when binding gadget drivers.  It undoes the effect
66415+ * of usb_gadget_disable_async_callbacks(); the UDC driver should enable IRQs
66416+ * (if necessary) and resume issuing callbacks.
66417+ *
66418+ * This routine will always be called in process context.
66419+ */
66420+static inline void usb_gadget_enable_async_callbacks(struct usb_udc *udc)
66421+{
66422+	struct usb_gadget *gadget = udc->gadget;
66423+
66424+	if (gadget->ops->udc_async_callbacks)
66425+		gadget->ops->udc_async_callbacks(gadget, true);
66426+}
66427+
66428+/**
66429+ * usb_gadget_disable_async_callbacks - tell usb device controller to disable asynchronous callbacks
66430+ * @udc: The UDC which should disable async callbacks
66431+ *
66432+ * This routine is used when unbinding gadget drivers.  It prevents a race:
66433+ * The UDC driver doesn't know when the gadget driver's ->unbind callback
66434+ * runs, so unless it is told to disable asynchronous callbacks, it might
66435+ * issue a callback (such as ->disconnect) after the unbind has completed.
66436+ *
66437+ * After this function runs, the UDC driver must suppress all ->suspend,
66438+ * ->resume, ->disconnect, ->reset, and ->setup callbacks to the gadget driver
66439+ * until async callbacks are again enabled.  A simple-minded but effective
66440+ * way to accomplish this is to tell the UDC hardware not to generate any
66441+ * more IRQs.
66442+ *
66443+ * Request completion callbacks must still be issued.  However, it's okay
66444+ * to defer them until the request is cancelled, since the pull-up will be
66445+ * turned off during the time period when async callbacks are disabled.
66446+ *
66447+ * This routine will always be called in process context.
66448+ */
66449+static inline void usb_gadget_disable_async_callbacks(struct usb_udc *udc)
66450+{
66451+	struct usb_gadget *gadget = udc->gadget;
66452+
66453+	if (gadget->ops->udc_async_callbacks)
66454+		gadget->ops->udc_async_callbacks(gadget, false);
66455 }
66456 
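/*
 * Illustrative sketch (not part of this patch): one way a UDC driver can
 * honour the ->udc_async_callbacks() contract described above: latch the
 * enable state under its own lock and check it before issuing ->disconnect,
 * ->suspend, ->resume, ->reset or ->setup. The example_udc type, its lock
 * and the async_callbacks flag are invented names used only for illustration.
 */
static void example_udc_async_callbacks(struct usb_gadget *gadget, bool enable)
{
	struct example_udc *udc = gadget_to_example_udc(gadget);
	unsigned long flags;

	spin_lock_irqsave(&udc->lock, flags);
	udc->async_callbacks = enable;	/* checked before each gadget callback */
	spin_unlock_irqrestore(&udc->lock, flags);
}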
66457 /**
66458@@ -1225,6 +1316,8 @@ int usb_add_gadget(struct usb_gadget *gadget)
66459 	udc->gadget = gadget;
66460 	gadget->udc = udc;
66461 
66462+	udc->started = false;
66463+
66464 	mutex_lock(&udc_lock);
66465 	list_add_tail(&udc->list, &udc_list);
66466 
66467@@ -1337,6 +1430,7 @@ static void usb_gadget_remove_driver(struct usb_udc *udc)
66468 	kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE);
66469 
66470 	usb_gadget_disconnect(udc->gadget);
66471+	usb_gadget_disable_async_callbacks(udc);
66472 	if (udc->gadget->irq)
66473 		synchronize_irq(udc->gadget->irq);
66474 	udc->driver->unbind(udc->gadget);
66475@@ -1416,6 +1510,7 @@ static int udc_bind_to_driver(struct usb_udc *udc, struct usb_gadget_driver *dri
66476 		driver->unbind(udc->gadget);
66477 		goto err1;
66478 	}
66479+	usb_gadget_enable_async_callbacks(udc);
66480 	usb_udc_connect_control(udc);
66481 
66482 	kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE);
66483diff --git a/drivers/usb/gadget/udc/trace.c b/drivers/usb/gadget/udc/trace.c
66484index 7430624c0..e0e617280 100644
66485--- a/drivers/usb/gadget/udc/trace.c
66486+++ b/drivers/usb/gadget/udc/trace.c
66487@@ -8,3 +8,6 @@
66488 
66489 #define CREATE_TRACE_POINTS
66490 #include "trace.h"
66491+
66492+EXPORT_TRACEPOINT_SYMBOL_GPL(usb_gadget_connect);
66493+EXPORT_TRACEPOINT_SYMBOL_GPL(usb_gadget_disconnect);
66494diff --git a/drivers/usb/host/Makefile b/drivers/usb/host/Makefile
66495index bc731332f..2c7155fae 100644
66496--- a/drivers/usb/host/Makefile
66497+++ b/drivers/usb/host/Makefile
66498@@ -19,9 +19,7 @@ ifneq ($(CONFIG_USB_XHCI_DBGCAP), )
66499 	xhci-hcd-y += xhci-dbgcap.o xhci-dbgtty.o
66500 endif
66501 
66502-ifneq ($(CONFIG_USB_XHCI_MTK), )
66503-	xhci-hcd-y += xhci-mtk-sch.o
66504-endif
66505+xhci-mtk-hcd-y := xhci-mtk.o xhci-mtk-sch.o
66506 
66507 xhci-plat-hcd-y := xhci-plat.o
66508 ifneq ($(CONFIG_USB_XHCI_MVEBU), )
66509@@ -75,7 +73,7 @@ obj-$(CONFIG_USB_XHCI_PCI)	+= xhci-pci.o
66510 obj-$(CONFIG_USB_XHCI_PCI_RENESAS)	+= xhci-pci-renesas.o
66511 obj-$(CONFIG_USB_XHCI_PLATFORM) += xhci-plat-hcd.o
66512 obj-$(CONFIG_USB_XHCI_HISTB)	+= xhci-histb.o
66513-obj-$(CONFIG_USB_XHCI_MTK)	+= xhci-mtk.o
66514+obj-$(CONFIG_USB_XHCI_MTK)	+= xhci-mtk-hcd.o
66515 obj-$(CONFIG_USB_XHCI_TEGRA)	+= xhci-tegra.o
66516 obj-$(CONFIG_USB_SL811_HCD)	+= sl811-hcd.o
66517 obj-$(CONFIG_USB_SL811_CS)	+= sl811_cs.o
66518diff --git a/drivers/usb/host/ehci-platform.c b/drivers/usb/host/ehci-platform.c
66519index 2dcfc67f2..3e48737f8 100644
66520--- a/drivers/usb/host/ehci-platform.c
66521+++ b/drivers/usb/host/ehci-platform.c
66522@@ -28,6 +28,7 @@
66523 #include <linux/module.h>
66524 #include <linux/of.h>
66525 #include <linux/platform_device.h>
66526+#include <linux/pm_runtime.h>
66527 #include <linux/reset.h>
66528 #include <linux/sys_soc.h>
66529 #include <linux/timer.h>
66530@@ -56,6 +57,37 @@ struct ehci_platform_priv {
66531 
66532 static const char hcd_name[] = "ehci-platform";
66533 
66534+static void ehci_rockchip_relinquish_port(struct usb_hcd *hcd, int portnum)
66535+{
66536+	struct ehci_hcd *ehci = hcd_to_ehci(hcd);
66537+	u32 __iomem *status_reg = &ehci->regs->port_status[--portnum];
66538+	u32 portsc;
66539+
66540+	portsc = ehci_readl(ehci, status_reg);
66541+	portsc &= ~(PORT_OWNER | PORT_RWC_BITS);
66542+
66543+	ehci_writel(ehci, portsc, status_reg);
66544+}
66545+
66546+#define USIC_MICROFRAME_OFFSET	0x90
66547+#define USIC_SCALE_DOWN_OFFSET	0xa0
66548+#define USIC_ENABLE_OFFSET	0xb0
66549+#define USIC_ENABLE		BIT(0)
66550+#define USIC_SCALE_DOWN		BIT(2)
66551+#define USIC_MICROFRAME_COUNT	0x1d4d
66552+
66553+static void ehci_usic_init(struct usb_hcd *hcd)
66554+{
66555+	struct ehci_hcd *ehci = hcd_to_ehci(hcd);
66556+
66557+	ehci_writel(ehci, USIC_ENABLE,
66558+		    hcd->regs + USIC_ENABLE_OFFSET);
66559+	ehci_writel(ehci, USIC_MICROFRAME_COUNT,
66560+		    hcd->regs + USIC_MICROFRAME_OFFSET);
66561+	ehci_writel(ehci, USIC_SCALE_DOWN,
66562+		    hcd->regs + USIC_SCALE_DOWN_OFFSET);
66563+}
66564+
66565 static int ehci_platform_reset(struct usb_hcd *hcd)
66566 {
66567 	struct platform_device *pdev = to_platform_device(hcd->self.controller);
66568@@ -303,6 +335,12 @@ static int ehci_platform_probe(struct platform_device *dev)
66569 		if (soc_device_match(quirk_poll_match))
66570 			priv->quirk_poll = true;
66571 
66572+		if (of_machine_is_compatible("rockchip,rk3288") &&
66573+		    of_property_read_bool(dev->dev.of_node,
66574+					  "rockchip-relinquish-port"))
66575+			ehci_platform_hc_driver.relinquish_port =
66576+					  ehci_rockchip_relinquish_port;
66577+
66578 		for (clk = 0; clk < EHCI_MAX_CLKS; clk++) {
66579 			priv->clks[clk] = of_clk_get(dev->dev.of_node, clk);
66580 			if (IS_ERR(priv->clks[clk])) {
66581@@ -351,6 +389,9 @@ static int ehci_platform_probe(struct platform_device *dev)
66582 	}
66583 #endif
66584 
66585+	pm_runtime_set_active(&dev->dev);
66586+	pm_runtime_enable(&dev->dev);
66587+	pm_runtime_get_sync(&dev->dev);
66588 	if (pdata->power_on) {
66589 		err = pdata->power_on(dev);
66590 		if (err < 0)
66591@@ -370,6 +411,9 @@ static int ehci_platform_probe(struct platform_device *dev)
66592 	if (err)
66593 		goto err_power;
66594 
66595+	if (of_usb_get_phy_mode(dev->dev.of_node) == USBPHY_INTERFACE_MODE_HSIC)
66596+		ehci_usic_init(hcd);
66597+
66598 	device_wakeup_enable(hcd->self.controller);
66599 	device_enable_async_suspend(hcd->self.controller);
66600 	platform_set_drvdata(dev, hcd);
66601@@ -383,6 +427,8 @@ static int ehci_platform_probe(struct platform_device *dev)
66602 	if (pdata->power_off)
66603 		pdata->power_off(dev);
66604 err_reset:
66605+	pm_runtime_put_sync(&dev->dev);
66606+	pm_runtime_disable(&dev->dev);
66607 	reset_control_assert(priv->rsts);
66608 err_put_clks:
66609 	while (--clk >= 0)
66610@@ -418,6 +464,9 @@ static int ehci_platform_remove(struct platform_device *dev)
66611 
66612 	usb_put_hcd(hcd);
66613 
66614+	pm_runtime_put_sync(&dev->dev);
66615+	pm_runtime_disable(&dev->dev);
66616+
66617 	if (pdata == &ehci_platform_defaults)
66618 		dev->dev.platform_data = NULL;
66619 
66620diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
66621index 59fd523c5..55e517248 100644
66622--- a/drivers/usb/host/ehci.h
66623+++ b/drivers/usb/host/ehci.h
66624@@ -218,7 +218,13 @@ struct ehci_hcd {			/* one per controller */
66625 	unsigned		frame_index_bug:1; /* MosChip (AKA NetMos) */
66626 	unsigned		need_oc_pp_cycle:1; /* MPC834X port power */
66627 	unsigned		imx28_write_fix:1; /* For Freescale i.MX28 */
66628+	/*
66629+	 * __GENKSYMS__ test is an ABI workaround for commit
66630+	 * 7f2d73788d90 ("usb: ehci: handshake CMD_RUN instead of STS_HALT")
66631+	 */
66632+#ifndef __GENKSYMS__
66633 	unsigned		is_aspeed:1;
66634+#endif
66635 
66636 	/* required for usb32 quirk */
66637 	#define OHCI_CTRL_HCFS          (3 << 6)
66638diff --git a/drivers/usb/host/fotg210.h b/drivers/usb/host/fotg210.h
66639index 67f59517e..6cee40ec6 100644
66640--- a/drivers/usb/host/fotg210.h
66641+++ b/drivers/usb/host/fotg210.h
66642@@ -686,6 +686,11 @@ static inline unsigned fotg210_read_frame_index(struct fotg210_hcd *fotg210)
66643 	return fotg210_readl(fotg210, &fotg210->regs->frame_index);
66644 }
66645 
66646+#define fotg210_itdlen(urb, desc, t) ({			\
66647+	usb_pipein((urb)->pipe) ?				\
66648+	(desc)->length - FOTG210_ITD_LENGTH(t) :			\
66649+	FOTG210_ITD_LENGTH(t);					\
66650+})
66651 /*-------------------------------------------------------------------------*/
66652 
66653 #endif /* __LINUX_FOTG210_H */
66654diff --git a/drivers/usb/host/ohci-platform.c b/drivers/usb/host/ohci-platform.c
66655index 4a8456f12..492353678 100644
66656--- a/drivers/usb/host/ohci-platform.c
66657+++ b/drivers/usb/host/ohci-platform.c
66658@@ -32,7 +32,7 @@
66659 #include "ohci.h"
66660 
66661 #define DRIVER_DESC "OHCI generic platform driver"
66662-#define OHCI_MAX_CLKS 3
66663+#define OHCI_MAX_CLKS 4
66664 #define hcd_to_ohci_priv(h) ((struct ohci_platform_priv *)hcd_to_ohci(h)->priv)
66665 
66666 struct ohci_platform_priv {
66667@@ -96,7 +96,7 @@ static int ohci_platform_probe(struct platform_device *dev)
66668 	struct ohci_hcd *ohci;
66669 	int err, irq, clk = 0;
66670 
66671-	if (usb_disabled())
66672+	if (usb_disabled() || of_machine_is_compatible("rockchip,rk3288"))
66673 		return -ENODEV;
66674 
66675 	/*
66676diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
66677index 7bb306741..9fd15d347 100644
66678--- a/drivers/usb/host/xhci-hub.c
66679+++ b/drivers/usb/host/xhci-hub.c
66680@@ -449,8 +449,13 @@ static int xhci_stop_device(struct xhci_hcd *xhci, int slot_id, int suspend)
66681 	    cmd->status == COMP_COMMAND_RING_STOPPED) {
66682 		xhci_warn(xhci, "Timeout while waiting for stop endpoint command\n");
66683 		ret = -ETIME;
66684+		goto cmd_cleanup;
66685 	}
66686 
66687+	ret = xhci_vendor_sync_dev_ctx(xhci, slot_id);
66688+	if (ret)
66689+		xhci_warn(xhci, "Sync device context failed, ret=%d\n", ret);
66690+
66691 cmd_cleanup:
66692 	xhci_free_command(xhci, cmd);
66693 	return ret;
66694@@ -676,7 +681,7 @@ static int xhci_exit_test_mode(struct xhci_hcd *xhci)
66695 	}
66696 	pm_runtime_allow(xhci_to_hcd(xhci)->self.controller);
66697 	xhci->test_mode = 0;
66698-	return xhci_reset(xhci, XHCI_RESET_SHORT_USEC);
66699+	return xhci_reset(xhci);
66700 }
66701 
66702 void xhci_set_link_state(struct xhci_hcd *xhci, struct xhci_port *port,
66703@@ -1002,9 +1007,6 @@ static void xhci_get_usb2_port_status(struct xhci_port *port, u32 *status,
66704 		if (link_state == XDEV_U2)
66705 			*status |= USB_PORT_STAT_L1;
66706 		if (link_state == XDEV_U0) {
66707-			if (bus_state->resume_done[portnum])
66708-				usb_hcd_end_port_resume(&port->rhub->hcd->self,
66709-							portnum);
66710 			bus_state->resume_done[portnum] = 0;
66711 			clear_bit(portnum, &bus_state->resuming_ports);
66712 			if (bus_state->suspended_ports & (1 << portnum)) {
66713@@ -1348,7 +1350,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
66714 				}
66715 				spin_unlock_irqrestore(&xhci->lock, flags);
66716 				if (!wait_for_completion_timeout(&bus_state->u3exit_done[wIndex],
66717-								 msecs_to_jiffies(500)))
66718+								 msecs_to_jiffies(100)))
66719 					xhci_dbg(xhci, "missing U0 port change event for port %d-%d\n",
66720 						 hcd->self.busnum, wIndex + 1);
66721 				spin_lock_irqsave(&xhci->lock, flags);
66722@@ -1561,17 +1563,6 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf)
66723 
66724 	status = bus_state->resuming_ports;
66725 
66726-	/*
66727-	 * SS devices are only visible to roothub after link training completes.
66728-	 * Keep polling roothubs for a grace period after xHC start
66729-	 */
66730-	if (xhci->run_graceperiod) {
66731-		if (time_before(jiffies, xhci->run_graceperiod))
66732-			status = 1;
66733-		else
66734-			xhci->run_graceperiod = 0;
66735-	}
66736-
66737 	mask = PORT_CSC | PORT_PEC | PORT_OCC | PORT_PLC | PORT_WRC | PORT_CEC;
66738 
66739 	/* For each port, did anything change?  If so, set that bit in buf. */
66740@@ -1597,7 +1588,8 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf)
66741 			status = 1;
66742 	}
66743 	if (!status && !reset_change) {
66744-		xhci_dbg(xhci, "%s: stopping port polling.\n", __func__);
66745+		xhci_dbg(xhci, "%s: stopping usb%d port polling\n",
66746+			 __func__, hcd->self.busnum);
66747 		clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
66748 	}
66749 	spin_unlock_irqrestore(&xhci->lock, flags);
66750@@ -1629,7 +1621,8 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
66751 		if (bus_state->resuming_ports ||	/* USB2 */
66752 		    bus_state->port_remote_wakeup) {	/* USB3 */
66753 			spin_unlock_irqrestore(&xhci->lock, flags);
66754-			xhci_dbg(xhci, "suspend failed because a port is resuming\n");
66755+			xhci_dbg(xhci, "usb%d bus suspend to fail because a port is resuming\n",
66756+				 hcd->self.busnum);
66757 			return -EBUSY;
66758 		}
66759 	}
66760@@ -1680,7 +1673,21 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
66761 			t2 &= ~PORT_PLS_MASK;
66762 			t2 |= PORT_LINK_STROBE | XDEV_U3;
66763 			set_bit(port_index, &bus_state->bus_suspended);
66764+		} else if ((xhci->quirks & XHCI_U2_BROKEN_SUSPEND) &&
66765+			   (hcd->speed < HCD_USB3) &&
66766+			   (t1 & PORT_PLS_MASK) == XDEV_U3) {
66767+			/*
66768+			 * The Rockchip Synopsys xHC 3.0 puts the USB 2.0 PHY
66769+			 * into suspend mode from the DWC3 core if the suspend
66770+			 * conditions are met. In this case the bus_suspended
66771+			 * bit needs to be set for USB 2.0 as well, so that
66772+			 * xhci_bus_resume() can set the xHC link state to
66773+			 * XDEV_RESUME and send USB resume signalling to the
66774+			 * USB 2.0 device.
66775+			 */
66776+			set_bit(port_index, &bus_state->bus_suspended);
66777 		}
66778+
66779 		/* USB core sets remote wake mask for USB 3.0 hubs,
66780 		 * including the USB 3.0 roothub, but only if CONFIG_PM
66781 		 * is enabled, so also enable remote wake here.
66782@@ -1735,6 +1742,7 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
66783 
66784 	return 0;
66785 }
66786+EXPORT_SYMBOL_GPL(xhci_bus_suspend);
66787 
66788 /*
66789  * Workaround for missing Cold Attach Status (CAS) if device re-plugged in S3.
66790@@ -1879,6 +1887,7 @@ int xhci_bus_resume(struct usb_hcd *hcd)
66791 	spin_unlock_irqrestore(&xhci->lock, flags);
66792 	return 0;
66793 }
66794+EXPORT_SYMBOL_GPL(xhci_bus_resume);
66795 
66796 unsigned long xhci_get_resuming_ports(struct usb_hcd *hcd)
66797 {
66798diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
66799index a8a9addb4..4324fd31b 100644
66800--- a/drivers/usb/host/xhci-mem.c
66801+++ b/drivers/usb/host/xhci-mem.c
66802@@ -65,7 +65,7 @@ static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci,
66803 	return seg;
66804 }
66805 
66806-static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg)
66807+void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg)
66808 {
66809 	if (seg->trbs) {
66810 		dma_pool_free(xhci->segment_pool, seg->trbs, seg->dma);
66811@@ -74,8 +74,9 @@ static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg)
66812 	kfree(seg->bounce_buf);
66813 	kfree(seg);
66814 }
66815+EXPORT_SYMBOL_GPL(xhci_segment_free);
66816 
66817-static void xhci_free_segments_for_ring(struct xhci_hcd *xhci,
66818+void xhci_free_segments_for_ring(struct xhci_hcd *xhci,
66819 				struct xhci_segment *first)
66820 {
66821 	struct xhci_segment *seg;
66822@@ -96,9 +97,9 @@ static void xhci_free_segments_for_ring(struct xhci_hcd *xhci,
66823  * DMA address of the next segment.  The caller needs to set any Link TRB
66824  * related flags, such as End TRB, Toggle Cycle, and no snoop.
66825  */
66826-static void xhci_link_segments(struct xhci_segment *prev,
66827-			       struct xhci_segment *next,
66828-			       enum xhci_ring_type type, bool chain_links)
66829+void xhci_link_segments(struct xhci_segment *prev,
66830+			struct xhci_segment *next,
66831+			enum xhci_ring_type type, bool chain_links)
66832 {
66833 	u32 val;
66834 
66835@@ -118,6 +119,7 @@ static void xhci_link_segments(struct xhci_segment *prev,
66836 		prev->trbs[TRBS_PER_SEGMENT-1].link.control = cpu_to_le32(val);
66837 	}
66838 }
66839+EXPORT_SYMBOL_GPL(xhci_link_segments);
66840 
66841 /*
66842  * Link the ring to the new segments.
66843@@ -292,6 +294,7 @@ void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
66844 
66845 	kfree(ring);
66846 }
66847+EXPORT_SYMBOL_GPL(xhci_ring_free);
66848 
66849 void xhci_initialize_ring_info(struct xhci_ring *ring,
66850 			       unsigned int cycle_state)
66851@@ -316,6 +319,7 @@ void xhci_initialize_ring_info(struct xhci_ring *ring,
66852 	 */
66853 	ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
66854 }
66855+EXPORT_SYMBOL_GPL(xhci_initialize_ring_info);
66856 
66857 /* Allocate segments and link them for a ring */
66858 static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci,
66859@@ -361,6 +365,54 @@ static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci,
66860 	return 0;
66861 }
66862 
66863+static void xhci_vendor_free_container_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx)
66864+{
66865+	struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci);
66866+
66867+	if (ops && ops->free_container_ctx)
66868+		ops->free_container_ctx(xhci, ctx);
66869+}
66870+
66871+static void xhci_vendor_alloc_container_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx,
66872+					    int type, gfp_t flags)
66873+{
66874+	struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci);
66875+
66876+	if (ops && ops->alloc_container_ctx)
66877+		ops->alloc_container_ctx(xhci, ctx, type, flags);
66878+}
66879+
66880+static struct xhci_ring *xhci_vendor_alloc_transfer_ring(struct xhci_hcd *xhci,
66881+		u32 endpoint_type, enum xhci_ring_type ring_type,
66882+		unsigned int max_packet, gfp_t mem_flags)
66883+{
66884+	struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci);
66885+
66886+	if (ops && ops->alloc_transfer_ring)
66887+		return ops->alloc_transfer_ring(xhci, endpoint_type, ring_type,
66888+				max_packet, mem_flags);
66889+	return 0;
66890+}
66891+
66892+void xhci_vendor_free_transfer_ring(struct xhci_hcd *xhci,
66893+		struct xhci_virt_device *virt_dev, unsigned int ep_index)
66894+{
66895+	struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci);
66896+
66897+	if (ops && ops->free_transfer_ring)
66898+		ops->free_transfer_ring(xhci, virt_dev, ep_index);
66899+}
66900+
66901+bool xhci_vendor_is_usb_offload_enabled(struct xhci_hcd *xhci,
66902+		struct xhci_virt_device *virt_dev, unsigned int ep_index)
66903+{
66904+	struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci);
66905+
66906+	if (ops && ops->is_usb_offload_enabled)
66907+		return ops->is_usb_offload_enabled(xhci, virt_dev, ep_index);
66908+	return false;
66909+}
66910+
66911 /*
66912  * Create a new ring with zero or more segments.
66913  *
66914@@ -407,12 +459,17 @@ struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
66915 	kfree(ring);
66916 	return NULL;
66917 }
66918+EXPORT_SYMBOL_GPL(xhci_ring_alloc);
66919 
66920 void xhci_free_endpoint_ring(struct xhci_hcd *xhci,
66921 		struct xhci_virt_device *virt_dev,
66922 		unsigned int ep_index)
66923 {
66924-	xhci_ring_free(xhci, virt_dev->eps[ep_index].ring);
66925+	if (xhci_vendor_is_usb_offload_enabled(xhci, virt_dev, ep_index))
66926+		xhci_vendor_free_transfer_ring(xhci, virt_dev, ep_index);
66927+	else
66928+		xhci_ring_free(xhci, virt_dev->eps[ep_index].ring);
66929+
66930 	virt_dev->eps[ep_index].ring = NULL;
66931 }
66932 
66933@@ -471,6 +528,7 @@ struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
66934 {
66935 	struct xhci_container_ctx *ctx;
66936 	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
66937+	struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci);
66938 
66939 	if ((type != XHCI_CTX_TYPE_DEVICE) && (type != XHCI_CTX_TYPE_INPUT))
66940 		return NULL;
66941@@ -484,7 +542,12 @@ struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
66942 	if (type == XHCI_CTX_TYPE_INPUT)
66943 		ctx->size += CTX_SIZE(xhci->hcc_params);
66944 
66945-	ctx->bytes = dma_pool_zalloc(xhci->device_pool, flags, &ctx->dma);
66946+	if (xhci_vendor_is_usb_offload_enabled(xhci, NULL, 0) &&
66947+	    (ops && ops->alloc_container_ctx))
66948+		xhci_vendor_alloc_container_ctx(xhci, ctx, type, flags);
66949+	else
66950+		ctx->bytes = dma_pool_zalloc(xhci->device_pool, flags, &ctx->dma);
66951+
66952 	if (!ctx->bytes) {
66953 		kfree(ctx);
66954 		return NULL;
66955@@ -495,9 +558,16 @@ struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
66956 void xhci_free_container_ctx(struct xhci_hcd *xhci,
66957 			     struct xhci_container_ctx *ctx)
66958 {
66959+	struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci);
66960+
66961 	if (!ctx)
66962 		return;
66963-	dma_pool_free(xhci->device_pool, ctx->bytes, ctx->dma);
66964+	if (xhci_vendor_is_usb_offload_enabled(xhci, NULL, 0) &&
66965+	    (ops && ops->free_container_ctx))
66966+		xhci_vendor_free_container_ctx(xhci, ctx);
66967+	else
66968+		dma_pool_free(xhci->device_pool, ctx->bytes, ctx->dma);
66969+
66970 	kfree(ctx);
66971 }
66972 
66973@@ -519,6 +589,7 @@ struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_hcd *xhci,
66974 	return (struct xhci_slot_ctx *)
66975 		(ctx->bytes + CTX_SIZE(xhci->hcc_params));
66976 }
66977+EXPORT_SYMBOL_GPL(xhci_get_slot_ctx);
66978 
66979 struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci,
66980 				    struct xhci_container_ctx *ctx,
66981@@ -532,6 +603,7 @@ struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci,
66982 	return (struct xhci_ep_ctx *)
66983 		(ctx->bytes + (ep_index * CTX_SIZE(xhci->hcc_params)));
66984 }
66985+EXPORT_SYMBOL_GPL(xhci_get_ep_ctx);
66986 
66987 
66988 /***************** Streams structures manipulation *************************/
66989@@ -592,23 +664,6 @@ struct xhci_ring *xhci_dma_to_transfer_ring(
66990 	return ep->ring;
66991 }
66992 
66993-struct xhci_ring *xhci_stream_id_to_ring(
66994-		struct xhci_virt_device *dev,
66995-		unsigned int ep_index,
66996-		unsigned int stream_id)
66997-{
66998-	struct xhci_virt_ep *ep = &dev->eps[ep_index];
66999-
67000-	if (stream_id == 0)
67001-		return ep->ring;
67002-	if (!ep->stream_info)
67003-		return NULL;
67004-
67005-	if (stream_id >= ep->stream_info->num_streams)
67006-		return NULL;
67007-	return ep->stream_info->stream_rings[stream_id];
67008-}
67009-
67010 /*
67011  * Change an endpoint's internal structure so it supports stream IDs.  The
67012  * number of requested streams includes stream 0, which cannot be used by device
67013@@ -659,7 +714,7 @@ struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
67014 			num_stream_ctxs, &stream_info->ctx_array_dma,
67015 			mem_flags);
67016 	if (!stream_info->stream_ctx_array)
67017-		goto cleanup_ring_array;
67018+		goto cleanup_ctx;
67019 	memset(stream_info->stream_ctx_array, 0,
67020 			sizeof(struct xhci_stream_ctx)*num_stream_ctxs);
67021 
67022@@ -720,11 +775,6 @@ struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
67023 	}
67024 	xhci_free_command(xhci, stream_info->free_streams_command);
67025 cleanup_ctx:
67026-	xhci_free_stream_ctx(xhci,
67027-		stream_info->num_stream_ctxs,
67028-		stream_info->stream_ctx_array,
67029-		stream_info->ctx_array_dma);
67030-cleanup_ring_array:
67031 	kfree(stream_info->stream_rings);
67032 cleanup_info:
67033 	kfree(stream_info);
67034@@ -911,23 +961,19 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
67035 
67036 	for (i = 0; i < 31; i++) {
67037 		if (dev->eps[i].ring)
67038-			xhci_ring_free(xhci, dev->eps[i].ring);
67039+			xhci_free_endpoint_ring(xhci, dev, i);
67040 		if (dev->eps[i].stream_info)
67041 			xhci_free_stream_info(xhci,
67042 					dev->eps[i].stream_info);
67043-		/*
67044-		 * Endpoints are normally deleted from the bandwidth list when
67045-		 * endpoints are dropped, before device is freed.
67046-		 * If host is dying or being removed then endpoints aren't
67047-		 * dropped cleanly, so delete the endpoint from list here.
67048-		 * Only applicable for hosts with software bandwidth checking.
67049+		/* Endpoints on the TT/root port lists should have been removed
67050+		 * when usb_disable_device() was called for the device.
67051+		 * We can't drop them anyway, because the udev might have gone
67052+		 * away by this point, and we can't tell what speed it was.
67053 		 */
67054-
67055-		if (!list_empty(&dev->eps[i].bw_endpoint_list)) {
67056-			list_del_init(&dev->eps[i].bw_endpoint_list);
67057-			xhci_dbg(xhci, "Slot %u endpoint %u not removed from BW list!\n",
67058-				 slot_id, i);
67059-		}
67060+		if (!list_empty(&dev->eps[i].bw_endpoint_list))
67061+			xhci_warn(xhci, "Slot %u endpoint %u "
67062+					"not removed from BW list!\n",
67063+					slot_id, i);
67064 	}
67065 	/* If this is a hub, free the TT(s) from the TT list */
67066 	xhci_free_tt_info(xhci, dev, slot_id);
67067@@ -1514,8 +1560,16 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
67068 		mult = 0;
67069 
67070 	/* Set up the endpoint ring */
67071-	virt_dev->eps[ep_index].new_ring =
67072-		xhci_ring_alloc(xhci, 2, 1, ring_type, max_packet, mem_flags);
67073+	if (xhci_vendor_is_usb_offload_enabled(xhci, virt_dev, ep_index) &&
67074+	    usb_endpoint_xfer_isoc(&ep->desc)) {
67075+		virt_dev->eps[ep_index].new_ring =
67076+			xhci_vendor_alloc_transfer_ring(xhci, endpoint_type, ring_type,
67077+							max_packet, mem_flags);
67078+	} else {
67079+		virt_dev->eps[ep_index].new_ring =
67080+			xhci_ring_alloc(xhci, 2, 1, ring_type, max_packet, mem_flags);
67081+	}
67082+
67083 	if (!virt_dev->eps[ep_index].new_ring)
67084 		return -ENOMEM;
67085 
67086@@ -1782,6 +1836,7 @@ struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
67087 	INIT_LIST_HEAD(&command->cmd_list);
67088 	return command;
67089 }
67090+EXPORT_SYMBOL_GPL(xhci_alloc_command);
67091 
67092 struct xhci_command *xhci_alloc_command_with_ctx(struct xhci_hcd *xhci,
67093 		bool allocate_completion, gfp_t mem_flags)
67094@@ -1815,6 +1870,7 @@ void xhci_free_command(struct xhci_hcd *xhci,
67095 	kfree(command->completion);
67096 	kfree(command);
67097 }
67098+EXPORT_SYMBOL_GPL(xhci_free_command);
67099 
67100 int xhci_alloc_erst(struct xhci_hcd *xhci,
67101 		    struct xhci_ring *evt_ring,
67102@@ -1845,6 +1901,7 @@ int xhci_alloc_erst(struct xhci_hcd *xhci,
67103 
67104 	return 0;
67105 }
67106+EXPORT_SYMBOL_GPL(xhci_alloc_erst);
67107 
67108 void xhci_free_erst(struct xhci_hcd *xhci, struct xhci_erst *erst)
67109 {
67110@@ -1858,6 +1915,25 @@ void xhci_free_erst(struct xhci_hcd *xhci, struct xhci_erst *erst)
67111 				erst->erst_dma_addr);
67112 	erst->entries = NULL;
67113 }
67114+EXPORT_SYMBOL_GPL(xhci_free_erst);
67115+
67116+static struct xhci_device_context_array *xhci_vendor_alloc_dcbaa(
67117+		struct xhci_hcd *xhci, gfp_t flags)
67118+{
67119+	struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci);
67120+
67121+	if (ops && ops->alloc_dcbaa)
67122+		return ops->alloc_dcbaa(xhci, flags);
67123+	return 0;
67124+}
67125+
67126+static void xhci_vendor_free_dcbaa(struct xhci_hcd *xhci)
67127+{
67128+	struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci);
67129+
67130+	if (ops && ops->free_dcbaa)
67131+		ops->free_dcbaa(xhci);
67132+}
67133 
67134 void xhci_mem_cleanup(struct xhci_hcd *xhci)
67135 {
67136@@ -1913,9 +1989,13 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
67137 	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
67138 			"Freed medium stream array pool");
67139 
67140-	if (xhci->dcbaa)
67141-		dma_free_coherent(dev, sizeof(*xhci->dcbaa),
67142-				xhci->dcbaa, xhci->dcbaa->dma);
67143+	if (xhci_vendor_is_usb_offload_enabled(xhci, NULL, 0)) {
67144+		xhci_vendor_free_dcbaa(xhci);
67145+	} else {
67146+		if (xhci->dcbaa)
67147+			dma_free_coherent(dev, sizeof(*xhci->dcbaa),
67148+					xhci->dcbaa, xhci->dcbaa->dma);
67149+	}
67150 	xhci->dcbaa = NULL;
67151 
67152 	scratchpad_free(xhci);
67153@@ -1996,7 +2076,7 @@ static int xhci_test_trb_in_td(struct xhci_hcd *xhci,
67154 }
67155 
67156 /* TRB math checks for xhci_trb_in_td(), using the command and event rings. */
67157-static int xhci_check_trb_in_td_math(struct xhci_hcd *xhci)
67158+int xhci_check_trb_in_td_math(struct xhci_hcd *xhci)
67159 {
67160 	struct {
67161 		dma_addr_t		input_dma;
67162@@ -2116,6 +2196,7 @@ static int xhci_check_trb_in_td_math(struct xhci_hcd *xhci)
67163 	xhci_dbg(xhci, "TRB math tests passed.\n");
67164 	return 0;
67165 }
67166+EXPORT_SYMBOL_GPL(xhci_check_trb_in_td_math);
67167 
67168 static void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
67169 {
67170@@ -2455,15 +2536,21 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
67171 	 * xHCI section 5.4.6 - doorbell array must be
67172 	 * "physically contiguous and 64-byte (cache line) aligned".
67173 	 */
67174-	xhci->dcbaa = dma_alloc_coherent(dev, sizeof(*xhci->dcbaa), &dma,
67175-			flags);
67176-	if (!xhci->dcbaa)
67177-		goto fail;
67178-	xhci->dcbaa->dma = dma;
67179+	if (xhci_vendor_is_usb_offload_enabled(xhci, NULL, 0)) {
67180+		xhci->dcbaa = xhci_vendor_alloc_dcbaa(xhci, flags);
67181+		if (!xhci->dcbaa)
67182+			goto fail;
67183+	} else {
67184+		xhci->dcbaa = dma_alloc_coherent(dev, sizeof(*xhci->dcbaa), &dma,
67185+				flags);
67186+		if (!xhci->dcbaa)
67187+			goto fail;
67188+		xhci->dcbaa->dma = dma;
67189+	}
67190 	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
67191 			"// Device context base array address = 0x%llx (DMA), %p (virt)",
67192 			(unsigned long long)xhci->dcbaa->dma, xhci->dcbaa);
67193-	xhci_write_64(xhci, dma, &xhci->op_regs->dcbaa_ptr);
67194+	xhci_write_64(xhci, xhci->dcbaa->dma, &xhci->op_regs->dcbaa_ptr);
67195 
67196 	/*
67197 	 * Initialize the ring segment pool.  The ring must be a contiguous
67198@@ -2608,7 +2695,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
67199 
67200 fail:
67201 	xhci_halt(xhci);
67202-	xhci_reset(xhci, XHCI_RESET_SHORT_USEC);
67203+	xhci_reset(xhci);
67204 	xhci_mem_cleanup(xhci);
67205 	return -ENOMEM;
67206 }
67207diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
67208index aff65cefe..dafb58f05 100644
67209--- a/drivers/usb/host/xhci-pci.c
67210+++ b/drivers/usb/host/xhci-pci.c
67211@@ -58,13 +58,20 @@
67212 #define PCI_DEVICE_ID_INTEL_CML_XHCI			0xa3af
67213 #define PCI_DEVICE_ID_INTEL_TIGER_LAKE_XHCI		0x9a13
67214 #define PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_XHCI		0x1138
67215-#define PCI_DEVICE_ID_INTEL_ALDER_LAKE_PCH_XHCI		0x51ed
67216-#define PCI_DEVICE_ID_INTEL_ALDER_LAKE_N_PCH_XHCI	0x54ed
67217+#define PCI_DEVICE_ID_INTEL_ALDER_LAKE_XHCI		0x461e
67218 
67219 #define PCI_DEVICE_ID_AMD_PROMONTORYA_4			0x43b9
67220 #define PCI_DEVICE_ID_AMD_PROMONTORYA_3			0x43ba
67221 #define PCI_DEVICE_ID_AMD_PROMONTORYA_2			0x43bb
67222 #define PCI_DEVICE_ID_AMD_PROMONTORYA_1			0x43bc
67223+#define PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_1		0x161a
67224+#define PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_2		0x161b
67225+#define PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_3		0x161d
67226+#define PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_4		0x161e
67227+#define PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_5		0x15d6
67228+#define PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_6		0x15d7
67229+#define PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_7		0x161c
67230+#define PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_8		0x161f
67231 
67232 #define PCI_DEVICE_ID_ASMEDIA_1042_XHCI			0x1042
67233 #define PCI_DEVICE_ID_ASMEDIA_1042A_XHCI		0x1142
67234@@ -77,12 +84,9 @@ static const char hcd_name[] = "xhci_hcd";
67235 static struct hc_driver __read_mostly xhci_pci_hc_driver;
67236 
67237 static int xhci_pci_setup(struct usb_hcd *hcd);
67238-static int xhci_pci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
67239-				      struct usb_tt *tt, gfp_t mem_flags);
67240 
67241 static const struct xhci_driver_overrides xhci_pci_overrides __initconst = {
67242 	.reset = xhci_pci_setup,
67243-	.update_hub_device = xhci_pci_update_hub_device,
67244 };
67245 
67246 /* called after powerup, by probe or system-pm "wakeup" */
67247@@ -245,11 +249,6 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
67248 	     pdev->device == PCI_DEVICE_ID_INTEL_DNV_XHCI))
67249 		xhci->quirks |= XHCI_MISSING_CAS;
67250 
67251-	if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
67252-	    (pdev->device == PCI_DEVICE_ID_INTEL_ALDER_LAKE_PCH_XHCI ||
67253-	     pdev->device == PCI_DEVICE_ID_INTEL_ALDER_LAKE_N_PCH_XHCI))
67254-		xhci->quirks |= XHCI_RESET_TO_DEFAULT;
67255-
67256 	if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
67257 	    (pdev->device == PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_XHCI ||
67258 	     pdev->device == PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_XHCI ||
67259@@ -261,7 +260,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
67260 	     pdev->device == PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_XHCI ||
67261 	     pdev->device == PCI_DEVICE_ID_INTEL_ICE_LAKE_XHCI ||
67262 	     pdev->device == PCI_DEVICE_ID_INTEL_TIGER_LAKE_XHCI ||
67263-	     pdev->device == PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_XHCI))
67264+	     pdev->device == PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_XHCI ||
67265+	     pdev->device == PCI_DEVICE_ID_INTEL_ALDER_LAKE_XHCI))
67266 		xhci->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW;
67267 
67268 	if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
67269@@ -294,14 +294,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
67270 	}
67271 
67272 	if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA &&
67273-		pdev->device == PCI_DEVICE_ID_ASMEDIA_1042_XHCI) {
67274-		/*
67275-		 * try to tame the ASMedia 1042 controller which reports 0.96
67276-		 * but appears to behave more like 1.0
67277-		 */
67278-		xhci->quirks |= XHCI_SPURIOUS_SUCCESS;
67279+		pdev->device == PCI_DEVICE_ID_ASMEDIA_1042_XHCI)
67280 		xhci->quirks |= XHCI_BROKEN_STREAMS;
67281-	}
67282 	if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA &&
67283 		pdev->device == PCI_DEVICE_ID_ASMEDIA_1042A_XHCI) {
67284 		xhci->quirks |= XHCI_TRUST_TX_LENGTH;
67285@@ -330,8 +324,15 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
67286 	     pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_4))
67287 		xhci->quirks |= XHCI_NO_SOFT_RETRY;
67288 
67289-	/* xHC spec requires PCI devices to support D3hot and D3cold */
67290-	if (xhci->hci_version >= 0x120)
67291+	if (pdev->vendor == PCI_VENDOR_ID_AMD &&
67292+	    (pdev->device == PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_1 ||
67293+	    pdev->device == PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_2 ||
67294+	    pdev->device == PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_3 ||
67295+	    pdev->device == PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_4 ||
67296+	    pdev->device == PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_5 ||
67297+	    pdev->device == PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_6 ||
67298+	    pdev->device == PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_7 ||
67299+	    pdev->device == PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_8))
67300 		xhci->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW;
67301 
67302 	if (xhci->quirks & XHCI_RESET_ON_RESUME)
67303@@ -351,38 +352,8 @@ static void xhci_pme_acpi_rtd3_enable(struct pci_dev *dev)
67304 				NULL);
67305 	ACPI_FREE(obj);
67306 }
67307-
67308-static void xhci_find_lpm_incapable_ports(struct usb_hcd *hcd, struct usb_device *hdev)
67309-{
67310-	struct xhci_hcd	*xhci = hcd_to_xhci(hcd);
67311-	struct xhci_hub *rhub = &xhci->usb3_rhub;
67312-	int ret;
67313-	int i;
67314-
67315-	/* This is not the usb3 roothub we are looking for */
67316-	if (hcd != rhub->hcd)
67317-		return;
67318-
67319-	if (hdev->maxchild > rhub->num_ports) {
67320-		dev_err(&hdev->dev, "USB3 roothub port number mismatch\n");
67321-		return;
67322-	}
67323-
67324-	for (i = 0; i < hdev->maxchild; i++) {
67325-		ret = usb_acpi_port_lpm_incapable(hdev, i);
67326-
67327-		dev_dbg(&hdev->dev, "port-%d disable U1/U2 _DSM: %d\n", i + 1, ret);
67328-
67329-		if (ret >= 0) {
67330-			rhub->ports[i]->lpm_incapable = ret;
67331-			continue;
67332-		}
67333-	}
67334-}
67335-
67336 #else
67337 static void xhci_pme_acpi_rtd3_enable(struct pci_dev *dev) { }
67338-static void xhci_find_lpm_incapable_ports(struct usb_hcd *hcd, struct usb_device *hdev) { }
67339 #endif /* CONFIG_ACPI */
67340 
67341 /* called during probe() after chip reset completes */
67342@@ -415,16 +386,6 @@ static int xhci_pci_setup(struct usb_hcd *hcd)
67343 	return xhci_pci_reinit(xhci, pdev);
67344 }
67345 
67346-static int xhci_pci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
67347-				      struct usb_tt *tt, gfp_t mem_flags)
67348-{
67349-	/* Check if acpi claims some USB3 roothub ports are lpm incapable */
67350-	if (!hdev->parent)
67351-		xhci_find_lpm_incapable_ports(hcd, hdev);
67352-
67353-	return xhci_update_hub_device(hcd, hdev, tt, mem_flags);
67354-}
67355-
67356 /*
67357  * We need to register our own PCI probe function (instead of the USB core's
67358  * function) in order to create a second roothub under xHCI.
67359@@ -494,8 +455,6 @@ static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
67360 	if (xhci->quirks & XHCI_DEFAULT_PM_RUNTIME_ALLOW)
67361 		pm_runtime_allow(&dev->dev);
67362 
67363-	dma_set_max_seg_size(&dev->dev, UINT_MAX);
67364-
67365 	return 0;
67366 
67367 put_usb3_hcd:
67368diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
67369index 972a44b2a..6384c895d 100644
67370--- a/drivers/usb/host/xhci-plat.c
67371+++ b/drivers/usb/host/xhci-plat.c
67372@@ -134,7 +134,7 @@ static const struct xhci_plat_priv xhci_plat_renesas_rcar_gen3 = {
67373 };
67374 
67375 static const struct xhci_plat_priv xhci_plat_brcm = {
67376-	.quirks = XHCI_RESET_ON_RESUME | XHCI_SUSPEND_RESUME_CLKS,
67377+	.quirks = XHCI_RESET_ON_RESUME,
67378 };
67379 
67380 static const struct of_device_id usb_xhci_of_match[] = {
67381@@ -184,6 +184,41 @@ static const struct of_device_id usb_xhci_of_match[] = {
67382 MODULE_DEVICE_TABLE(of, usb_xhci_of_match);
67383 #endif
67384 
67385+static struct xhci_plat_priv_overwrite xhci_plat_vendor_overwrite;
67386+
67387+int xhci_plat_register_vendor_ops(struct xhci_vendor_ops *vendor_ops)
67388+{
67389+	if (vendor_ops == NULL)
67390+		return -EINVAL;
67391+
67392+	xhci_plat_vendor_overwrite.vendor_ops = vendor_ops;
67393+
67394+	return 0;
67395+}
67396+EXPORT_SYMBOL_GPL(xhci_plat_register_vendor_ops);
67397+
67398+static int xhci_vendor_init(struct xhci_hcd *xhci)
67399+{
67400+	struct xhci_vendor_ops *ops = NULL;
67401+
67402+	if (xhci_plat_vendor_overwrite.vendor_ops)
67403+		ops = xhci->vendor_ops = xhci_plat_vendor_overwrite.vendor_ops;
67404+
67405+	if (ops && ops->vendor_init)
67406+		return ops->vendor_init(xhci);
67407+	return 0;
67408+}
67409+
67410+static void xhci_vendor_cleanup(struct xhci_hcd *xhci)
67411+{
67412+	struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci);
67413+
67414+	if (ops && ops->vendor_cleanup)
67415+		ops->vendor_cleanup(xhci);
67416+
67417+	xhci->vendor_ops = NULL;
67418+}
67419+
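/*
 * Illustrative sketch (not part of this patch): how an offload driver might
 * plug into the vendor-ops plumbing added above. Only hooks that the
 * xhci-mem.c changes in this series actually call are referenced, and every
 * example_offload_* identifier is an invented name used only for illustration.
 */
static int example_offload_init(struct xhci_hcd *xhci)
{
	/* Map mailbox/shared-memory resources for the offload core here */
	return 0;
}

static void example_offload_cleanup(struct xhci_hcd *xhci)
{
	/* Release whatever example_offload_init() acquired */
}

static bool example_offload_is_enabled(struct xhci_hcd *xhci,
				       struct xhci_virt_device *virt_dev,
				       unsigned int ep_index)
{
	/*
	 * A real driver returns true only for endpoints it can service, and
	 * must then also provide the alloc/free dcbaa, container-ctx and
	 * transfer-ring hooks used by the xhci-mem.c changes above.
	 */
	return false;
}

static struct xhci_vendor_ops example_offload_ops = {
	.vendor_init		= example_offload_init,
	.vendor_cleanup		= example_offload_cleanup,
	.is_usb_offload_enabled	= example_offload_is_enabled,
};

static int example_offload_setup(void)
{
	/* Called from the offload driver's own init, before xhci_plat_probe() */
	return xhci_plat_register_vendor_ops(&example_offload_ops);
}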
67420 static int xhci_plat_probe(struct platform_device *pdev)
67421 {
67422 	const struct xhci_plat_priv *priv_match;
67423@@ -323,6 +358,9 @@ static int xhci_plat_probe(struct platform_device *pdev)
67424 		if (device_property_read_bool(tmpdev, "quirk-broken-port-ped"))
67425 			xhci->quirks |= XHCI_BROKEN_PORT_PED;
67426 
67427+		if (device_property_read_bool(tmpdev, "quirk-skip-phy-init"))
67428+			xhci->quirks |= XHCI_SKIP_PHY_INIT;
67429+
67430 		device_property_read_u32(tmpdev, "imod-interval-ns",
67431 					 &xhci->imod_interval);
67432 	}
67433@@ -339,6 +377,10 @@ static int xhci_plat_probe(struct platform_device *pdev)
67434 			goto put_usb3_hcd;
67435 	}
67436 
67437+	ret = xhci_vendor_init(xhci);
67438+	if (ret)
67439+		goto disable_usb_phy;
67440+
67441 	hcd->tpl_support = of_usb_host_tpl_support(sysdev->of_node);
67442 	xhci->shared_hcd->tpl_support = hcd->tpl_support;
67443 
67444@@ -418,8 +460,10 @@ static int xhci_plat_remove(struct platform_device *dev)
67445 	usb_phy_shutdown(hcd->usb_phy);
67446 
67447 	usb_remove_hcd(hcd);
67448-	usb_put_hcd(shared_hcd);
67449 
67450+	xhci_vendor_cleanup(xhci);
67451+
67452+	usb_put_hcd(shared_hcd);
67453 	clk_disable_unprepare(clk);
67454 	clk_disable_unprepare(reg_clk);
67455 	usb_put_hcd(hcd);
67456@@ -437,9 +481,6 @@ static int __maybe_unused xhci_plat_suspend(struct device *dev)
67457 	struct xhci_hcd	*xhci = hcd_to_xhci(hcd);
67458 	int ret;
67459 
67460-	if (pm_runtime_suspended(dev))
67461-		pm_runtime_resume(dev);
67462-
67463 	ret = xhci_priv_suspend_quirk(hcd);
67464 	if (ret)
67465 		return ret;
67466@@ -447,16 +488,7 @@ static int __maybe_unused xhci_plat_suspend(struct device *dev)
67467 	 * xhci_suspend() needs `do_wakeup` to know whether host is allowed
67468 	 * to do wakeup during suspend.
67469 	 */
67470-	ret = xhci_suspend(xhci, device_may_wakeup(dev));
67471-	if (ret)
67472-		return ret;
67473-
67474-	if (!device_may_wakeup(dev) && (xhci->quirks & XHCI_SUSPEND_RESUME_CLKS)) {
67475-		clk_disable_unprepare(xhci->clk);
67476-		clk_disable_unprepare(xhci->reg_clk);
67477-	}
67478-
67479-	return 0;
67480+	return xhci_suspend(xhci, device_may_wakeup(dev));
67481 }
67482 
67483 static int __maybe_unused xhci_plat_resume(struct device *dev)
67484@@ -465,11 +497,6 @@ static int __maybe_unused xhci_plat_resume(struct device *dev)
67485 	struct xhci_hcd	*xhci = hcd_to_xhci(hcd);
67486 	int ret;
67487 
67488-	if (!device_may_wakeup(dev) && (xhci->quirks & XHCI_SUSPEND_RESUME_CLKS)) {
67489-		clk_prepare_enable(xhci->clk);
67490-		clk_prepare_enable(xhci->reg_clk);
67491-	}
67492-
67493 	ret = xhci_priv_resume_quirk(hcd);
67494 	if (ret)
67495 		return ret;
67496diff --git a/drivers/usb/host/xhci-plat.h b/drivers/usb/host/xhci-plat.h
67497index 561d0b7bc..e726a5723 100644
67498--- a/drivers/usb/host/xhci-plat.h
67499+++ b/drivers/usb/host/xhci-plat.h
67500@@ -13,6 +13,7 @@
67501 struct xhci_plat_priv {
67502 	const char *firmware_name;
67503 	unsigned long long quirks;
67504+	struct xhci_vendor_data *vendor_data;
67505 	int (*plat_setup)(struct usb_hcd *);
67506 	void (*plat_start)(struct usb_hcd *);
67507 	int (*init_quirk)(struct usb_hcd *);
67508@@ -22,4 +23,11 @@ struct xhci_plat_priv {
67509 
67510 #define hcd_to_xhci_priv(h) ((struct xhci_plat_priv *)hcd_to_xhci(h)->priv)
67511 #define xhci_to_priv(x) ((struct xhci_plat_priv *)(x)->priv)
67512+
67513+struct xhci_plat_priv_overwrite {
67514+	struct xhci_vendor_ops *vendor_ops;
67515+};
67516+
67517+int xhci_plat_register_vendor_ops(struct xhci_vendor_ops *vendor_ops);
67518+
67519 #endif	/* _XHCI_PLAT_H */
67520diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
67521index b69b8c7e7..eb148a362 100644
67522--- a/drivers/usb/host/xhci-ring.c
67523+++ b/drivers/usb/host/xhci-ring.c
67524@@ -57,7 +57,10 @@
67525 #include <linux/dma-mapping.h>
67526 #include "xhci.h"
67527 #include "xhci-trace.h"
67528-#include "xhci-mtk.h"
67529+
67530+static int queue_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
67531+			 u32 field1, u32 field2,
67532+			 u32 field3, u32 field4, bool command_must_succeed);
67533 
67534 /*
67535  * Returns zero if the TRB isn't in this segment, otherwise it returns the DMA
67536@@ -76,6 +79,7 @@ dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
67537 		return 0;
67538 	return seg->dma + (segment_offset * sizeof(*trb));
67539 }
67540+EXPORT_SYMBOL_GPL(xhci_trb_virt_to_dma);
67541 
67542 static bool trb_is_noop(union xhci_trb *trb)
67543 {
67544@@ -151,10 +155,11 @@ static void next_trb(struct xhci_hcd *xhci,
67545 
67546 /*
67547  * See Cycle bit rules. SW is the consumer for the event ring only.
67548- * Don't make a ring full of link TRBs.  That would be dumb and this would loop.
67549  */
67550 void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring)
67551 {
67552+	unsigned int link_trb_count = 0;
67553+
67554 	/* event ring doesn't have link trbs, check for last trb */
67555 	if (ring->type == TYPE_EVENT) {
67556 		if (!last_trb_on_seg(ring->deq_seg, ring->dequeue)) {
67557@@ -170,14 +175,23 @@ void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring)
67558 
67559 	/* All other rings have link trbs */
67560 	if (!trb_is_link(ring->dequeue)) {
67561-		ring->dequeue++;
67562-		ring->num_trbs_free++;
67563+		if (last_trb_on_seg(ring->deq_seg, ring->dequeue)) {
67564+			xhci_warn(xhci, "Missing link TRB at end of segment\n");
67565+		} else {
67566+			ring->dequeue++;
67567+			ring->num_trbs_free++;
67568+		}
67569 	}
67570+
67571 	while (trb_is_link(ring->dequeue)) {
67572 		ring->deq_seg = ring->deq_seg->next;
67573 		ring->dequeue = ring->deq_seg->trbs;
67574-	}
67575 
67576+		if (link_trb_count++ > ring->num_segs) {
67577+			xhci_warn(xhci, "Ring is an endless link TRB loop\n");
67578+			break;
67579+		}
67580+	}
67581 out:
67582 	trace_xhci_inc_deq(ring);
67583 
67584@@ -186,7 +200,6 @@ void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring)
67585 
67586 /*
67587  * See Cycle bit rules. SW is the consumer for the event ring only.
67588- * Don't make a ring full of link TRBs.  That would be dumb and this would loop.
67589  *
67590  * If we've just enqueued a TRB that is in the middle of a TD (meaning the
67591  * chain bit is set), then set the chain bit in all the following link TRBs.
67592@@ -206,11 +219,18 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
67593 {
67594 	u32 chain;
67595 	union xhci_trb *next;
67596+	unsigned int link_trb_count = 0;
67597 
67598 	chain = le32_to_cpu(ring->enqueue->generic.field[3]) & TRB_CHAIN;
67599 	/* If this is not event ring, there is one less usable TRB */
67600 	if (!trb_is_link(ring->enqueue))
67601 		ring->num_trbs_free--;
67602+
67603+	if (last_trb_on_seg(ring->enq_seg, ring->enqueue)) {
67604+		xhci_err(xhci, "Tried to move enqueue past ring segment\n");
67605+		return;
67606+	}
67607+
67608 	next = ++(ring->enqueue);
67609 
67610 	/* Update the dequeue pointer further if that was a link TRB */
67611@@ -247,6 +267,11 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
67612 		ring->enq_seg = ring->enq_seg->next;
67613 		ring->enqueue = ring->enq_seg->trbs;
67614 		next = ring->enqueue;
67615+
67616+		if (link_trb_count++ > ring->num_segs) {
67617+			xhci_warn(xhci, "%s: Ring link TRB loop\n", __func__);
67618+			break;
67619+		}
67620 	}
67621 
67622 	trace_xhci_inc_enq(ring);
67623@@ -287,6 +312,7 @@ void xhci_ring_cmd_db(struct xhci_hcd *xhci)
67624 	/* Flush PCI posted writes */
67625 	readl(&xhci->dba->doorbell[0]);
67626 }
67627+EXPORT_SYMBOL_GPL(xhci_ring_cmd_db);
67628 
67629 static bool xhci_mod_cmd_timer(struct xhci_hcd *xhci, unsigned long delay)
67630 {
67631@@ -421,9 +447,8 @@ void xhci_ring_ep_doorbell(struct xhci_hcd *xhci,
67632 	trace_xhci_ring_ep_doorbell(slot_id, DB_VALUE(ep_index, stream_id));
67633 
67634 	writel(DB_VALUE(ep_index, stream_id), db_addr);
67635-	/* The CPU has better things to do at this point than wait for a
67636-	 * write-posting flush.  It'll get there soon enough.
67637-	 */
67638+	/* flush the write */
67639+	readl(db_addr);
67640 }
67641 
67642 /* Ring the doorbell for any rings with pending URBs */
67643@@ -479,6 +504,26 @@ static struct xhci_virt_ep *xhci_get_virt_ep(struct xhci_hcd *xhci,
67644 	return &xhci->devs[slot_id]->eps[ep_index];
67645 }
67646 
67647+static struct xhci_ring *xhci_virt_ep_to_ring(struct xhci_hcd *xhci,
67648+					      struct xhci_virt_ep *ep,
67649+					      unsigned int stream_id)
67650+{
67651+	/* common case, no streams */
67652+	if (!(ep->ep_state & EP_HAS_STREAMS))
67653+		return ep->ring;
67654+
67655+	if (!ep->stream_info)
67656+		return NULL;
67657+
67658+	if (stream_id == 0 || stream_id >= ep->stream_info->num_streams) {
67659+		xhci_warn(xhci, "Invalid stream_id %u request for slot_id %u ep_index %u\n",
67660+			  stream_id, ep->vdev->slot_id, ep->ep_index);
67661+		return NULL;
67662+	}
67663+
67664+	return ep->stream_info->stream_rings[stream_id];
67665+}
67666+
67667 /* Get the right ring for the given slot_id, ep_index and stream_id.
67668  * If the endpoint supports streams, boundary check the URB's stream ID.
67669  * If the endpoint doesn't support streams, return the singular endpoint ring.
67670@@ -493,29 +538,7 @@ struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci,
67671 	if (!ep)
67672 		return NULL;
67673 
67674-	/* Common case: no streams */
67675-	if (!(ep->ep_state & EP_HAS_STREAMS))
67676-		return ep->ring;
67677-
67678-	if (stream_id == 0) {
67679-		xhci_warn(xhci,
67680-				"WARN: Slot ID %u, ep index %u has streams, "
67681-				"but URB has no stream ID.\n",
67682-				slot_id, ep_index);
67683-		return NULL;
67684-	}
67685-
67686-	if (stream_id < ep->stream_info->num_streams)
67687-		return ep->stream_info->stream_rings[stream_id];
67688-
67689-	xhci_warn(xhci,
67690-			"WARN: Slot ID %u, ep index %u has "
67691-			"stream IDs 1 to %u allocated, "
67692-			"but stream ID %u is requested.\n",
67693-			slot_id, ep_index,
67694-			ep->stream_info->num_streams - 1,
67695-			stream_id);
67696-	return NULL;
67697+	return xhci_virt_ep_to_ring(xhci, ep, stream_id);
67698 }
67699 
67700 
67701@@ -542,71 +565,54 @@ static u64 xhci_get_hw_deq(struct xhci_hcd *xhci, struct xhci_virt_device *vdev,
67702 	return le64_to_cpu(ep_ctx->deq);
67703 }
67704 
67705-/*
67706- * Move the xHC's endpoint ring dequeue pointer past cur_td.
67707- * Record the new state of the xHC's endpoint ring dequeue segment,
67708- * dequeue pointer, stream id, and new consumer cycle state in state.
67709- * Update our internal representation of the ring's dequeue pointer.
67710- *
67711- * We do this in three jumps:
67712- *  - First we update our new ring state to be the same as when the xHC stopped.
67713- *  - Then we traverse the ring to find the segment that contains
67714- *    the last TRB in the TD.  We toggle the xHC's new cycle state when we pass
67715- *    any link TRBs with the toggle cycle bit set.
67716- *  - Finally we move the dequeue state one TRB further, toggling the cycle bit
67717- *    if we've moved it past a link TRB with the toggle cycle bit set.
67718- *
67719- * Some of the uses of xhci_generic_trb are grotty, but if they're done
67720- * with correct __le32 accesses they should work fine.  Only users of this are
67721- * in here.
67722- */
67723-void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
67724-		unsigned int slot_id, unsigned int ep_index,
67725-		unsigned int stream_id, struct xhci_td *cur_td,
67726-		struct xhci_dequeue_state *state)
67727+static int xhci_move_dequeue_past_td(struct xhci_hcd *xhci,
67728+				unsigned int slot_id, unsigned int ep_index,
67729+				unsigned int stream_id, struct xhci_td *td)
67730 {
67731 	struct xhci_virt_device *dev = xhci->devs[slot_id];
67732 	struct xhci_virt_ep *ep = &dev->eps[ep_index];
67733 	struct xhci_ring *ep_ring;
67734+	struct xhci_command *cmd;
67735 	struct xhci_segment *new_seg;
67736 	struct xhci_segment *halted_seg = NULL;
67737 	union xhci_trb *new_deq;
67738+	int new_cycle;
67739 	union xhci_trb *halted_trb;
67740 	int index = 0;
67741 	dma_addr_t addr;
67742 	u64 hw_dequeue;
67743 	bool cycle_found = false;
67744 	bool td_last_trb_found = false;
67745+	u32 trb_sct = 0;
67746+	int ret;
67747 
67748 	ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id,
67749 			ep_index, stream_id);
67750 	if (!ep_ring) {
67751-		xhci_warn(xhci, "WARN can't find new dequeue state "
67752-				"for invalid stream ID %u.\n",
67753-				stream_id);
67754-		return;
67755+		xhci_warn(xhci, "WARN can't find new dequeue, invalid stream ID %u\n",
67756+			  stream_id);
67757+		return -ENODEV;
67758 	}
67759 	/*
67760 	 * A cancelled TD can complete with a stall if HW cached the trb.
67761-	 * In this case driver can't find cur_td, but if the ring is empty we
67762+	 * In this case driver can't find td, but if the ring is empty we
67763 	 * can move the dequeue pointer to the current enqueue position.
67764+	 * We shouldn't hit this anymore as cached cancelled TRBs are given back
67765+	 * after clearing the cache, but be on the safe side and keep it anyway
67766 	 */
67767-	if (!cur_td) {
67768+	if (!td) {
67769 		if (list_empty(&ep_ring->td_list)) {
67770-			state->new_deq_seg = ep_ring->enq_seg;
67771-			state->new_deq_ptr = ep_ring->enqueue;
67772-			state->new_cycle_state = ep_ring->cycle_state;
67773-			goto done;
67774+			new_seg = ep_ring->enq_seg;
67775+			new_deq = ep_ring->enqueue;
67776+			new_cycle = ep_ring->cycle_state;
67777+			xhci_dbg(xhci, "ep ring empty, Set new dequeue = enqueue");
67778+			goto deq_found;
67779 		} else {
67780-			xhci_warn(xhci, "Can't find new dequeue state, missing cur_td\n");
67781-			return;
67782+			xhci_warn(xhci, "Can't find new dequeue state, missing td\n");
67783+			return -EINVAL;
67784 		}
67785 	}
67786 
67787-	/* Dig out the cycle state saved by the xHC during the stop ep cmd */
67788-	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
67789-			"Finding endpoint context");
67790-
67791 	hw_dequeue = xhci_get_hw_deq(xhci, dev, ep_index, stream_id);
67792 	new_seg = ep_ring->deq_seg;
67793 	new_deq = ep_ring->dequeue;
67794@@ -618,21 +624,19 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
67795 	 */
67796 	if (xhci->quirks & XHCI_EP_CTX_BROKEN_DCS &&
67797 	    !(ep->ep_state & EP_HAS_STREAMS))
67798-		halted_seg = trb_in_td(xhci, cur_td->start_seg,
67799-				       cur_td->first_trb, cur_td->last_trb,
67800+		halted_seg = trb_in_td(xhci, td->start_seg,
67801+				       td->first_trb, td->last_trb,
67802 				       hw_dequeue & ~0xf, false);
67803 	if (halted_seg) {
67804 		index = ((dma_addr_t)(hw_dequeue & ~0xf) - halted_seg->dma) /
67805 			 sizeof(*halted_trb);
67806 		halted_trb = &halted_seg->trbs[index];
67807-		state->new_cycle_state = halted_trb->generic.field[3] & 0x1;
67808+		new_cycle = halted_trb->generic.field[3] & 0x1;
67809 		xhci_dbg(xhci, "Endpoint DCS = %d TRB index = %d cycle = %d\n",
67810-			 (u8)(hw_dequeue & 0x1), index,
67811-			 state->new_cycle_state);
67812+			 (u8)(hw_dequeue & 0x1), index, new_cycle);
67813 	} else {
67814-		state->new_cycle_state = hw_dequeue & 0x1;
67815+		new_cycle = hw_dequeue & 0x1;
67816 	}
67817-	state->stream_id = stream_id;
67818 
67819 	/*
67820 	 * We want to find the pointer, segment and cycle state of the new trb
67821@@ -647,40 +651,71 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
67822 			if (td_last_trb_found)
67823 				break;
67824 		}
67825-		if (new_deq == cur_td->last_trb)
67826+		if (new_deq == td->last_trb)
67827 			td_last_trb_found = true;
67828 
67829 		if (cycle_found && trb_is_link(new_deq) &&
67830 		    link_trb_toggles_cycle(new_deq))
67831-			state->new_cycle_state ^= 0x1;
67832+			new_cycle ^= 0x1;
67833 
67834 		next_trb(xhci, ep_ring, &new_seg, &new_deq);
67835 
67836 		/* Search wrapped around, bail out */
67837 		if (new_deq == ep->ring->dequeue) {
67838 			xhci_err(xhci, "Error: Failed finding new dequeue state\n");
67839-			state->new_deq_seg = NULL;
67840-			state->new_deq_ptr = NULL;
67841-			return;
67842+			return -EINVAL;
67843 		}
67844 
67845 	} while (!cycle_found || !td_last_trb_found);
67846 
67847-	state->new_deq_seg = new_seg;
67848-	state->new_deq_ptr = new_deq;
67849+deq_found:
67850 
67851-done:
67852 	/* Don't update the ring cycle state for the producer (us). */
67853-	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
67854-			"Cycle state = 0x%x", state->new_cycle_state);
67855+	addr = xhci_trb_virt_to_dma(new_seg, new_deq);
67856+	if (addr == 0) {
67857+		xhci_warn(xhci, "Can't find dma of new dequeue ptr\n");
67858+		xhci_warn(xhci, "deq seg = %p, deq ptr = %p\n", new_seg, new_deq);
67859+		return -EINVAL;
67860+	}
67861+
67862+	if ((ep->ep_state & SET_DEQ_PENDING)) {
67863+		xhci_warn(xhci, "Set TR Deq already pending, don't submit for 0x%pad\n",
67864+			  &addr);
67865+		return -EBUSY;
67866+	}
67867+
67868+	/* This function gets called from contexts where it cannot sleep */
67869+	cmd = xhci_alloc_command(xhci, false, GFP_ATOMIC);
67870+	if (!cmd) {
67871+		xhci_warn(xhci, "Can't alloc Set TR Deq cmd 0x%pad\n", &addr);
67872+		return -ENOMEM;
67873+	}
67874+
67875+	if (stream_id)
67876+		trb_sct = SCT_FOR_TRB(SCT_PRI_TR);
67877+	ret = queue_command(xhci, cmd,
67878+		lower_32_bits(addr) | trb_sct | new_cycle,
67879+		upper_32_bits(addr),
67880+		STREAM_ID_FOR_TRB(stream_id), SLOT_ID_FOR_TRB(slot_id) |
67881+		EP_ID_FOR_TRB(ep_index) | TRB_TYPE(TRB_SET_DEQ), false);
67882+	if (ret < 0) {
67883+		xhci_free_command(xhci, cmd);
67884+		return ret;
67885+	}
67886+	ep->queued_deq_seg = new_seg;
67887+	ep->queued_deq_ptr = new_deq;
67888 
67889 	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
67890-			"New dequeue segment = %p (virtual)",
67891-			state->new_deq_seg);
67892-	addr = xhci_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr);
67893-	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
67894-			"New dequeue pointer = 0x%llx (DMA)",
67895-			(unsigned long long) addr);
67896+		       "Set TR Deq ptr 0x%llx, cycle %u\n", addr, new_cycle);
67897+
67898+	/* Stop the TD queueing code from ringing the doorbell until
67899+	 * this command completes.  The HC won't set the dequeue pointer
67900+	 * if the ring is running, and ringing the doorbell starts the
67901+	 * ring running.
67902+	 */
67903+	ep->ep_state |= SET_DEQ_PENDING;
67904+	xhci_ring_cmd_db(xhci);
67905+	return 0;
67906 }
67907 
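For readers following the refactor above: the old find-state/queue-command pair is folded into a single helper that both locates the new dequeue position and queues the Set TR Dequeue Pointer command. The sketch below shows the expected caller pattern, modelled on the call in xhci_invalidate_cancelled_tds() later in this patch; the no-streams assumption and the helper name example_flush_cached_td() are illustrative, not part of the change.

/* Sketch only: a non-streams endpoint whose cancelled TD may still be cached. */
static void example_flush_cached_td(struct xhci_hcd *xhci,
				    struct xhci_virt_ep *ep,
				    struct xhci_td *td)
{
	int err;

	td->cancel_status = TD_CLEARING_CACHE;

	/* Queues Set TR Deq, sets SET_DEQ_PENDING and rings the command
	 * doorbell; the TD is given back from the Set TR Deq completion.
	 */
	err = xhci_move_dequeue_past_td(xhci, ep->vdev->slot_id, ep->ep_index,
					td->urb->stream_id, td);
	if (err) {
		/* Couldn't move the hardware dequeue pointer; turn the TD
		 * into no-op TRBs so the controller skips it instead.
		 */
		td_to_noop(xhci, ep->ring, td, false);
		td->cancel_status = TD_CLEARED;
	}
}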
67908 /* flip_cycle means flip the cycle bit of all but the first and last TRB.
67909@@ -795,8 +830,10 @@ static int xhci_td_cleanup(struct xhci_hcd *xhci, struct xhci_td *td,
67910 		urb->actual_length = 0;
67911 		status = 0;
67912 	}
67913-	list_del_init(&td->td_list);
67914-	/* Was this TD slated to be cancelled but completed anyway? */
67915+	/* TD might be removed from td_list if we are giving back a cancelled URB */
67916+	if (!list_empty(&td->td_list))
67917+		list_del_init(&td->td_list);
67918+	/* Giving back a cancelled URB, or if a slated TD completed anyway */
67919 	if (!list_empty(&td->cancelled_td_list))
67920 		list_del_init(&td->cancelled_td_list);
67921 
67922@@ -819,6 +856,31 @@ static int xhci_td_cleanup(struct xhci_hcd *xhci, struct xhci_td *td,
67923 	return 0;
67924 }
67925 
67926+
67927+/* Complete the cancelled URBs we unlinked from td_list. */
67928+static void xhci_giveback_invalidated_tds(struct xhci_virt_ep *ep)
67929+{
67930+	struct xhci_ring *ring;
67931+	struct xhci_td *td, *tmp_td;
67932+
67933+	list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list,
67934+				 cancelled_td_list) {
67935+
67936+		ring = xhci_urb_to_transfer_ring(ep->xhci, td->urb);
67937+
67938+		if (td->cancel_status == TD_CLEARED) {
67939+			xhci_dbg(ep->xhci, "%s: Giveback cancelled URB %p TD\n",
67940+				 __func__, td->urb);
67941+			xhci_td_cleanup(ep->xhci, td, ring, td->status);
67942+		} else {
67943+			xhci_dbg(ep->xhci, "%s: Keep cancelled URB %p TD as cancel_status is %d\n",
67944+				 __func__, td->urb, td->cancel_status);
67945+		}
67946+		if (ep->xhci->xhc_state & XHCI_STATE_DYING)
67947+			return;
67948+	}
67949+}
67950+
67951 static int xhci_reset_halted_ep(struct xhci_hcd *xhci, unsigned int slot_id,
67952 				unsigned int ep_index, enum xhci_ep_reset_type reset_type)
67953 {
67954@@ -831,6 +893,10 @@ static int xhci_reset_halted_ep(struct xhci_hcd *xhci, unsigned int slot_id,
67955 		goto done;
67956 	}
67957 
67958+	xhci_dbg(xhci, "%s-reset ep %u, slot %u\n",
67959+		 (reset_type == EP_HARD_RESET) ? "Hard" : "Soft",
67960+		 ep_index, slot_id);
67961+
67962 	ret = xhci_queue_reset_ep(xhci, command, slot_id, ep_index, reset_type);
67963 done:
67964 	if (ret)
67965@@ -839,7 +905,7 @@ static int xhci_reset_halted_ep(struct xhci_hcd *xhci, unsigned int slot_id,
67966 	return ret;
67967 }
67968 
67969-static void xhci_handle_halted_endpoint(struct xhci_hcd *xhci,
67970+static int xhci_handle_halted_endpoint(struct xhci_hcd *xhci,
67971 				struct xhci_virt_ep *ep, unsigned int stream_id,
67972 				struct xhci_td *td,
67973 				enum xhci_ep_reset_type reset_type)
67974@@ -852,20 +918,142 @@ static void xhci_handle_halted_endpoint(struct xhci_hcd *xhci,
67975 	 * Device will be reset soon to recover the link so don't do anything
67976 	 */
67977 	if (ep->vdev->flags & VDEV_PORT_ERROR)
67978-		return;
67979+		return -ENODEV;
67980 
67981-	ep->ep_state |= EP_HALTED;
67982+	/* add td to cancelled list and let reset ep handler take care of it */
67983+	if (reset_type == EP_HARD_RESET) {
67984+		ep->ep_state |= EP_HARD_CLEAR_TOGGLE;
67985+		if (td && list_empty(&td->cancelled_td_list)) {
67986+			list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
67987+			td->cancel_status = TD_HALTED;
67988+		}
67989+	}
67990+
67991+	if (ep->ep_state & EP_HALTED) {
67992+		xhci_dbg(xhci, "Reset ep command for ep_index %d already pending\n",
67993+			 ep->ep_index);
67994+		return 0;
67995+	}
67996 
67997 	err = xhci_reset_halted_ep(xhci, slot_id, ep->ep_index, reset_type);
67998 	if (err)
67999-		return;
68000+		return err;
68001+
68002+	ep->ep_state |= EP_HALTED;
68003 
68004-	if (reset_type == EP_HARD_RESET) {
68005-		ep->ep_state |= EP_HARD_CLEAR_TOGGLE;
68006-		xhci_cleanup_stalled_ring(xhci, slot_id, ep->ep_index, stream_id,
68007-					  td);
68008-	}
68009 	xhci_ring_cmd_db(xhci);
68010+
68011+	return 0;
68012+}
68013+
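As a quick illustration of how callers in this patch choose between the two reset types (the mapping is lifted from xhci_handle_cmd_stop_ep() further down; the helper name is hypothetical):

static enum xhci_ep_reset_type example_pick_reset_type(struct xhci_virt_ep *ep)
{
	/* Streams: the halted TD can't be identified, so only a soft reset
	 * (TSP set, transfer state preserved) is requested.
	 */
	if (ep->ep_state & EP_HAS_STREAMS)
		return EP_SOFT_RESET;

	/* Otherwise a hard reset clears the halt, and the TD we halted on is
	 * put on cancelled_td_list so the reset completion cleans it up.
	 */
	return EP_HARD_RESET;
}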
68014+/*
68015+ * Fix up the ep ring first, so HW stops executing cancelled TDs.
68016+ * We have the xHCI lock, so nothing can modify this list until we drop it.
68017+ * We're also in the event handler, so we can't get re-interrupted if another
68018+ * Stop Endpoint command completes.
68019+ *
68020+ * Only call this when the ring is not in a running state.
68021+ */
68022+
68023+static int xhci_invalidate_cancelled_tds(struct xhci_virt_ep *ep)
68024+{
68025+	struct xhci_hcd		*xhci;
68026+	struct xhci_td		*td = NULL;
68027+	struct xhci_td		*tmp_td = NULL;
68028+	struct xhci_td		*cached_td = NULL;
68029+	struct xhci_ring	*ring;
68030+	u64			hw_deq;
68031+	unsigned int		slot_id = ep->vdev->slot_id;
68032+	int			err;
68033+
68034+	xhci = ep->xhci;
68035+
68036+	list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list, cancelled_td_list) {
68037+		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
68038+			       "Removing canceled TD starting at 0x%llx (dma) in stream %u URB %p",
68039+			       (unsigned long long)xhci_trb_virt_to_dma(
68040+				       td->start_seg, td->first_trb),
68041+			       td->urb->stream_id, td->urb);
68042+		list_del_init(&td->td_list);
68043+		ring = xhci_urb_to_transfer_ring(xhci, td->urb);
68044+		if (!ring) {
68045+			xhci_warn(xhci, "WARN Cancelled URB %p has invalid stream ID %u.\n",
68046+				  td->urb, td->urb->stream_id);
68047+			continue;
68048+		}
68049+		/*
68050+		 * If a ring stopped on the TD we need to cancel then we have to
68051+		 * move the xHC endpoint ring dequeue pointer past this TD.
68052+		 * Rings halted due to STALL may show hw_deq is past the stalled
68053+		 * TD, but still require a set TR Deq command to flush xHC cache.
68054+		 */
68055+		hw_deq = xhci_get_hw_deq(xhci, ep->vdev, ep->ep_index,
68056+					 td->urb->stream_id);
68057+		hw_deq &= ~0xf;
68058+
68059+		if (td->cancel_status == TD_HALTED ||
68060+		    trb_in_td(xhci, td->start_seg, td->first_trb, td->last_trb, hw_deq, false)) {
68061+			switch (td->cancel_status) {
68062+			case TD_CLEARED: /* TD is already no-op */
68063+			case TD_CLEARING_CACHE: /* set TR deq command already queued */
68064+				break;
68065+			case TD_DIRTY: /* TD is cached, clear it */
68066+			case TD_HALTED:
68067+				td->cancel_status = TD_CLEARING_CACHE;
68068+				if (cached_td)
68069+					/* FIXME  stream case, several stopped rings */
68070+					xhci_dbg(xhci,
68071+						 "Move dq past stream %u URB %p instead of stream %u URB %p\n",
68072+						 td->urb->stream_id, td->urb,
68073+						 cached_td->urb->stream_id, cached_td->urb);
68074+				cached_td = td;
68075+				break;
68076+			}
68077+		} else {
68078+			td_to_noop(xhci, ring, td, false);
68079+			td->cancel_status = TD_CLEARED;
68080+		}
68081+	}
68082+
68083+	/* If there's no need to move the dequeue pointer then we're done */
68084+	if (!cached_td)
68085+		return 0;
68086+
68087+	err = xhci_move_dequeue_past_td(xhci, slot_id, ep->ep_index,
68088+					cached_td->urb->stream_id,
68089+					cached_td);
68090+	if (err) {
68091+		/* Failed to move past cached td, just set cached TDs to no-op */
68092+		list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list, cancelled_td_list) {
68093+			if (td->cancel_status != TD_CLEARING_CACHE)
68094+				continue;
68095+			xhci_dbg(xhci, "Failed to clear cancelled cached URB %p, mark clear anyway\n",
68096+				 td->urb);
68097+			td_to_noop(xhci, ring, td, false);
68098+			td->cancel_status = TD_CLEARED;
68099+		}
68100+	}
68101+	return 0;
68102+}
68103+
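The cancel_status values used above come from an enum this series adds to xhci.h, which is not visible in this hunk. The sketch below lists them as they are used here; the enum name shown and the numeric values are assumptions.

enum example_td_cancel_status {
	TD_DIRTY = 0,		/* TD queued; controller may have it cached */
	TD_HALTED,		/* endpoint halted on this TD; cache must be cleared */
	TD_CLEARING_CACHE,	/* a Set TR Deq command has been queued for it */
	TD_CLEARED,		/* no-op'd or flushed from cache; safe to give back */
};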
68104+/*
68105+ * Returns the TD the endpoint ring halted on.
68106+ * Only call for non-running rings without streams.
68107+ */
68108+static struct xhci_td *find_halted_td(struct xhci_virt_ep *ep)
68109+{
68110+	struct xhci_td	*td;
68111+	u64		hw_deq;
68112+
68113+	if (!list_empty(&ep->ring->td_list)) { /* Not streams compatible */
68114+		hw_deq = xhci_get_hw_deq(ep->xhci, ep->vdev, ep->ep_index, 0);
68115+		hw_deq &= ~0xf;
68116+		td = list_first_entry(&ep->ring->td_list, struct xhci_td, td_list);
68117+		if (trb_in_td(ep->xhci, td->start_seg, td->first_trb,
68118+				td->last_trb, hw_deq, false))
68119+			return td;
68120+	}
68121+	return NULL;
68122 }
68123 
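One detail worth noting in find_halted_td() and xhci_invalidate_cancelled_tds(): the hardware dequeue value read back from the endpoint/stream context is not a plain address. A small sketch of the masking, assuming the usual xHCI layout (DCS in bit 0, the rest of the low nibble reserved or carrying stream-context bits); the helper name is illustrative only.

static void example_split_hw_deq(struct xhci_hcd *xhci, struct xhci_virt_ep *ep)
{
	u64 hw_deq = xhci_get_hw_deq(xhci, ep->vdev, ep->ep_index, 0);
	unsigned int dcs = hw_deq & 0x1;	/* cycle state the HC expects next */
	dma_addr_t deq_dma = hw_deq & ~0xfULL;	/* TRB address for trb_in_td() */

	xhci_dbg(xhci, "hw deq 0x%llx (DCS %u)\n",
		 (unsigned long long)deq_dma, dcs);
}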
68124 /*
68125@@ -879,142 +1067,90 @@ static void xhci_handle_halted_endpoint(struct xhci_hcd *xhci,
68126  *     bit cleared) so that the HW will skip over them.
68127  */
68128 static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
68129-		union xhci_trb *trb, struct xhci_event_cmd *event)
68130+				    union xhci_trb *trb, u32 comp_code)
68131 {
68132 	unsigned int ep_index;
68133-	struct xhci_ring *ep_ring;
68134 	struct xhci_virt_ep *ep;
68135-	struct xhci_td *cur_td = NULL;
68136-	struct xhci_td *last_unlinked_td;
68137 	struct xhci_ep_ctx *ep_ctx;
68138-	struct xhci_virt_device *vdev;
68139-	u64 hw_deq;
68140-	struct xhci_dequeue_state deq_state;
68141+	struct xhci_td *td = NULL;
68142+	enum xhci_ep_reset_type reset_type;
68143+	struct xhci_command *command;
68144+	int err;
68145 
68146 	if (unlikely(TRB_TO_SUSPEND_PORT(le32_to_cpu(trb->generic.field[3])))) {
68147 		if (!xhci->devs[slot_id])
68148-			xhci_warn(xhci, "Stop endpoint command "
68149-				"completion for disabled slot %u\n",
68150-				slot_id);
68151+			xhci_warn(xhci, "Stop endpoint command completion for disabled slot %u\n",
68152+				  slot_id);
68153 		return;
68154 	}
68155 
68156-	memset(&deq_state, 0, sizeof(deq_state));
68157 	ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
68158-
68159 	ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
68160 	if (!ep)
68161 		return;
68162 
68163-	vdev = xhci->devs[slot_id];
68164-	ep_ctx = xhci_get_ep_ctx(xhci, vdev->out_ctx, ep_index);
68165+	ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep_index);
68166+
68167 	trace_xhci_handle_cmd_stop_ep(ep_ctx);
68168 
68169-	last_unlinked_td = list_last_entry(&ep->cancelled_td_list,
68170-			struct xhci_td, cancelled_td_list);
68171+	if (comp_code == COMP_CONTEXT_STATE_ERROR) {
68172+	/*
68173+	 * If stop endpoint command raced with a halting endpoint we need to
68174+	 * reset the host side endpoint first.
68175+	 * If the TD we halted on isn't cancelled the TD should be given back
68176+	 * with a proper error code, and the ring dequeue moved past the TD.
68177+	 * In the streams case we can't find hw_deq, or the TD we halted on,
68178+	 * so only do a soft reset.
68179+	 *
68180+	 * The proper error code is unknown here; it would be -EPIPE if the
68181+	 * device side of the endpoint halted (aka STALL), and -EPROTO if not
68182+	 * (transaction error). We use -EPROTO: if the device is stalled it will
68183+	 * return a stall error on the next transfer, which then returns -EPIPE,
68184+	 * and the device-side stall is noted and cleared by the class driver.
68185+	 */
68186+		switch (GET_EP_CTX_STATE(ep_ctx)) {
68187+		case EP_STATE_HALTED:
68188+			xhci_dbg(xhci, "Stop ep completion raced with stall, reset ep\n");
68189+			if (ep->ep_state & EP_HAS_STREAMS) {
68190+				reset_type = EP_SOFT_RESET;
68191+			} else {
68192+				reset_type = EP_HARD_RESET;
68193+				td = find_halted_td(ep);
68194+				if (td)
68195+					td->status = -EPROTO;
68196+			}
68197+			/* reset ep, reset handler cleans up cancelled tds */
68198+			err = xhci_handle_halted_endpoint(xhci, ep, 0, td,
68199+							  reset_type);
68200+			if (err)
68201+				break;
68202+			xhci_stop_watchdog_timer_in_irq(xhci, ep);
68203+			return;
68204+		case EP_STATE_RUNNING:
68205+			/* Race, HW handled stop ep cmd before ep was running */
68206+			xhci_dbg(xhci, "Stop ep completion ctx error, ep is running\n");
68207 
68208-	if (list_empty(&ep->cancelled_td_list)) {
68209-		xhci_stop_watchdog_timer_in_irq(xhci, ep);
68210-		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
68211-		return;
68212-	}
68213+			command = xhci_alloc_command(xhci, false, GFP_ATOMIC);
68214+			if (!command)
68215+				xhci_stop_watchdog_timer_in_irq(xhci, ep);
68216 
68217-	/* Fix up the ep ring first, so HW stops executing cancelled TDs.
68218-	 * We have the xHCI lock, so nothing can modify this list until we drop
68219-	 * it.  We're also in the event handler, so we can't get re-interrupted
68220-	 * if another Stop Endpoint command completes
68221-	 */
68222-	list_for_each_entry(cur_td, &ep->cancelled_td_list, cancelled_td_list) {
68223-		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
68224-				"Removing canceled TD starting at 0x%llx (dma).",
68225-				(unsigned long long)xhci_trb_virt_to_dma(
68226-					cur_td->start_seg, cur_td->first_trb));
68227-		ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb);
68228-		if (!ep_ring) {
68229-			/* This shouldn't happen unless a driver is mucking
68230-			 * with the stream ID after submission.  This will
68231-			 * leave the TD on the hardware ring, and the hardware
68232-			 * will try to execute it, and may access a buffer
68233-			 * that has already been freed.  In the best case, the
68234-			 * hardware will execute it, and the event handler will
68235-			 * ignore the completion event for that TD, since it was
68236-			 * removed from the td_list for that endpoint.  In
68237-			 * short, don't muck with the stream ID after
68238-			 * submission.
68239-			 */
68240-			xhci_warn(xhci, "WARN Cancelled URB %p "
68241-					"has invalid stream ID %u.\n",
68242-					cur_td->urb,
68243-					cur_td->urb->stream_id);
68244-			goto remove_finished_td;
68245-		}
68246-		/*
68247-		 * If we stopped on the TD we need to cancel, then we have to
68248-		 * move the xHC endpoint ring dequeue pointer past this TD.
68249-		 */
68250-		hw_deq = xhci_get_hw_deq(xhci, vdev, ep_index,
68251-					 cur_td->urb->stream_id);
68252-		hw_deq &= ~0xf;
68253+			mod_timer(&ep->stop_cmd_timer,
68254+				  jiffies + XHCI_STOP_EP_CMD_TIMEOUT * HZ);
68255+			xhci_queue_stop_endpoint(xhci, command, slot_id, ep_index, 0);
68256+			xhci_ring_cmd_db(xhci);
68257 
68258-		if (trb_in_td(xhci, cur_td->start_seg, cur_td->first_trb,
68259-			      cur_td->last_trb, hw_deq, false)) {
68260-			xhci_find_new_dequeue_state(xhci, slot_id, ep_index,
68261-						    cur_td->urb->stream_id,
68262-						    cur_td, &deq_state);
68263-		} else {
68264-			td_to_noop(xhci, ep_ring, cur_td, false);
68265+			return;
68266+		default:
68267+			break;
68268 		}
68269-
68270-remove_finished_td:
68271-		/*
68272-		 * The event handler won't see a completion for this TD anymore,
68273-		 * so remove it from the endpoint ring's TD list.  Keep it in
68274-		 * the cancelled TD list for URB completion later.
68275-		 */
68276-		list_del_init(&cur_td->td_list);
68277 	}
68278-
68279+	/* will queue a set TR deq if stopped on a cancelled, uncleared TD */
68280+	xhci_invalidate_cancelled_tds(ep);
68281 	xhci_stop_watchdog_timer_in_irq(xhci, ep);
68282 
68283-	/* If necessary, queue a Set Transfer Ring Dequeue Pointer command */
68284-	if (deq_state.new_deq_ptr && deq_state.new_deq_seg) {
68285-		xhci_queue_new_dequeue_state(xhci, slot_id, ep_index,
68286-					     &deq_state);
68287-		xhci_ring_cmd_db(xhci);
68288-	} else {
68289-		/* Otherwise ring the doorbell(s) to restart queued transfers */
68290-		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
68291-	}
68292-
68293-	/*
68294-	 * Drop the lock and complete the URBs in the cancelled TD list.
68295-	 * New TDs to be cancelled might be added to the end of the list before
68296-	 * we can complete all the URBs for the TDs we already unlinked.
68297-	 * So stop when we've completed the URB for the last TD we unlinked.
68298-	 */
68299-	do {
68300-		cur_td = list_first_entry(&ep->cancelled_td_list,
68301-				struct xhci_td, cancelled_td_list);
68302-		list_del_init(&cur_td->cancelled_td_list);
68303-
68304-		/* Clean up the cancelled URB */
68305-		/* Doesn't matter what we pass for status, since the core will
68306-		 * just overwrite it (because the URB has been unlinked).
68307-		 */
68308-		ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb);
68309-		xhci_unmap_td_bounce_buffer(xhci, ep_ring, cur_td);
68310-		inc_td_cnt(cur_td->urb);
68311-		if (last_td_in_urb(cur_td))
68312-			xhci_giveback_urb_in_irq(xhci, cur_td, 0);
68313-
68314-		/* Stop processing the cancelled list if the watchdog timer is
68315-		 * running.
68316-		 */
68317-		if (xhci->xhc_state & XHCI_STATE_DYING)
68318-			return;
68319-	} while (cur_td != last_unlinked_td);
68320-
68321-	/* Return to the event handler with xhci->lock re-acquired */
68322+	/* give back cleared TDs, then ring doorbell(s) to restart queued transfers */
68323+	xhci_giveback_invalidated_tds(ep);
68324+	ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
68325 }
68326 
68327 static void xhci_kill_ring_urbs(struct xhci_hcd *xhci, struct xhci_ring *ring)
68328@@ -1044,10 +1180,7 @@ static void xhci_kill_endpoint_urbs(struct xhci_hcd *xhci,
68329 	struct xhci_virt_ep *ep;
68330 	struct xhci_ring *ring;
68331 
68332-	ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
68333-	if (!ep)
68334-		return;
68335-
68336+	ep = &xhci->devs[slot_id]->eps[ep_index];
68337 	if ((ep->ep_state & EP_HAS_STREAMS) ||
68338 			(ep->ep_state & EP_GETTING_NO_STREAMS)) {
68339 		int stream_id;
68340@@ -1231,10 +1364,10 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
68341 	unsigned int ep_index;
68342 	unsigned int stream_id;
68343 	struct xhci_ring *ep_ring;
68344-	struct xhci_virt_device *dev;
68345 	struct xhci_virt_ep *ep;
68346 	struct xhci_ep_ctx *ep_ctx;
68347 	struct xhci_slot_ctx *slot_ctx;
68348+	struct xhci_td *td, *tmp_td;
68349 
68350 	ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
68351 	stream_id = TRB_TO_STREAM_ID(le32_to_cpu(trb->generic.field[2]));
68352@@ -1242,8 +1375,7 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
68353 	if (!ep)
68354 		return;
68355 
68356-	dev = xhci->devs[slot_id];
68357-	ep_ring = xhci_stream_id_to_ring(dev, ep_index, stream_id);
68358+	ep_ring = xhci_virt_ep_to_ring(xhci, ep, stream_id);
68359 	if (!ep_ring) {
68360 		xhci_warn(xhci, "WARN Set TR deq ptr command for freed stream ID %u\n",
68361 				stream_id);
68362@@ -1251,8 +1383,8 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
68363 		goto cleanup;
68364 	}
68365 
68366-	ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
68367-	slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx);
68368+	ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep_index);
68369+	slot_ctx = xhci_get_slot_ctx(xhci, ep->vdev->out_ctx);
68370 	trace_xhci_handle_cmd_set_deq(slot_ctx);
68371 	trace_xhci_handle_cmd_set_deq_ep(ep_ctx);
68372 
68373@@ -1305,7 +1437,7 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
68374 			/* Update the ring's dequeue segment and dequeue pointer
68375 			 * to reflect the new position.
68376 			 */
68377-			update_ring_for_set_deq_completion(xhci, dev,
68378+			update_ring_for_set_deq_completion(xhci, ep->vdev,
68379 				ep_ring, ep_index);
68380 		} else {
68381 			xhci_warn(xhci, "Mismatch between completed Set TR Deq Ptr command & xHCI internal state.\n");
68382@@ -1313,7 +1445,20 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
68383 				  ep->queued_deq_seg, ep->queued_deq_ptr);
68384 		}
68385 	}
68386-
68387+	/* HW cached TDs cleared from cache, give them back */
68388+	list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list,
68389+				 cancelled_td_list) {
68390+		ep_ring = xhci_urb_to_transfer_ring(ep->xhci, td->urb);
68391+		if (td->cancel_status == TD_CLEARING_CACHE) {
68392+			td->cancel_status = TD_CLEARED;
68393+			xhci_dbg(ep->xhci, "%s: Giveback cancelled URB %p TD\n",
68394+				 __func__, td->urb);
68395+			xhci_td_cleanup(ep->xhci, td, ep_ring, td->status);
68396+		} else {
68397+			xhci_dbg(ep->xhci, "%s: Keep cancelled URB %p TD as cancel_status is %d\n",
68398+				 __func__, td->urb, td->cancel_status);
68399+		}
68400+	}
68401 cleanup:
68402 	ep->ep_state &= ~SET_DEQ_PENDING;
68403 	ep->queued_deq_seg = NULL;
68404@@ -1325,7 +1470,6 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
68405 static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id,
68406 		union xhci_trb *trb, u32 cmd_comp_code)
68407 {
68408-	struct xhci_virt_device *vdev;
68409 	struct xhci_virt_ep *ep;
68410 	struct xhci_ep_ctx *ep_ctx;
68411 	unsigned int ep_index;
68412@@ -1335,8 +1479,7 @@ static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id,
68413 	if (!ep)
68414 		return;
68415 
68416-	vdev = xhci->devs[slot_id];
68417-	ep_ctx = xhci_get_ep_ctx(xhci, vdev->out_ctx, ep_index);
68418+	ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep_index);
68419 	trace_xhci_handle_cmd_reset_ep(ep_ctx);
68420 
68421 	/* This command will only fail if the endpoint wasn't halted,
68422@@ -1345,27 +1488,15 @@ static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id,
68423 	xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
68424 		"Ignoring reset ep completion code of %u", cmd_comp_code);
68425 
68426-	/* HW with the reset endpoint quirk needs to have a configure endpoint
68427-	 * command complete before the endpoint can be used.  Queue that here
68428-	 * because the HW can't handle two commands being queued in a row.
68429-	 */
68430-	if (xhci->quirks & XHCI_RESET_EP_QUIRK) {
68431-		struct xhci_command *command;
68432+	/* Cleanup cancelled TDs as ep is stopped. May queue a Set TR Deq cmd */
68433+	xhci_invalidate_cancelled_tds(ep);
68434 
68435-		command = xhci_alloc_command(xhci, false, GFP_ATOMIC);
68436-		if (!command)
68437-			return;
68438+	if (xhci->quirks & XHCI_RESET_EP_QUIRK)
68439+		xhci_dbg(xhci, "Note: Removed workaround to queue config ep for this hw");
68440+	/* Clear our internal halted state */
68441+	ep->ep_state &= ~EP_HALTED;
68442 
68443-		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
68444-				"Queueing configure endpoint command");
68445-		xhci_queue_configure_endpoint(xhci, command,
68446-				xhci->devs[slot_id]->in_ctx->dma, slot_id,
68447-				false);
68448-		xhci_ring_cmd_db(xhci);
68449-	} else {
68450-		/* Clear our internal halted state */
68451-		ep->ep_state &= ~EP_HALTED;
68452-	}
68453+	xhci_giveback_invalidated_tds(ep);
68454 
68455 	/* if this was a soft reset, then restart */
68456 	if ((le32_to_cpu(trb->generic.field[3])) & TRB_TSP)
68457@@ -1399,7 +1530,7 @@ static void xhci_handle_cmd_disable_slot(struct xhci_hcd *xhci, int slot_id)
68458 }
68459 
68460 static void xhci_handle_cmd_config_ep(struct xhci_hcd *xhci, int slot_id,
68461-		struct xhci_event_cmd *event, u32 cmd_comp_code)
68462+		u32 cmd_comp_code)
68463 {
68464 	struct xhci_virt_device *virt_dev;
68465 	struct xhci_input_control_ctx *ctrl_ctx;
68466@@ -1417,6 +1548,8 @@ static void xhci_handle_cmd_config_ep(struct xhci_hcd *xhci, int slot_id,
68467 	 * is not waiting on the configure endpoint command.
68468 	 */
68469 	virt_dev = xhci->devs[slot_id];
68470+	if (!virt_dev)
68471+		return;
68472 	ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx);
68473 	if (!ctrl_ctx) {
68474 		xhci_warn(xhci, "Could not get input context, bad type.\n");
68475@@ -1461,24 +1594,27 @@ static void xhci_handle_cmd_addr_dev(struct xhci_hcd *xhci, int slot_id)
68476 	struct xhci_slot_ctx *slot_ctx;
68477 
68478 	vdev = xhci->devs[slot_id];
68479+	if (!vdev)
68480+		return;
68481 	slot_ctx = xhci_get_slot_ctx(xhci, vdev->out_ctx);
68482 	trace_xhci_handle_cmd_addr_dev(slot_ctx);
68483 }
68484 
68485-static void xhci_handle_cmd_reset_dev(struct xhci_hcd *xhci, int slot_id,
68486-		struct xhci_event_cmd *event)
68487+static void xhci_handle_cmd_reset_dev(struct xhci_hcd *xhci, int slot_id)
68488 {
68489 	struct xhci_virt_device *vdev;
68490 	struct xhci_slot_ctx *slot_ctx;
68491 
68492 	vdev = xhci->devs[slot_id];
68493+	if (!vdev) {
68494+		xhci_warn(xhci, "Reset device command completion for disabled slot %u\n",
68495+			  slot_id);
68496+		return;
68497+	}
68498 	slot_ctx = xhci_get_slot_ctx(xhci, vdev->out_ctx);
68499 	trace_xhci_handle_cmd_reset_dev(slot_ctx);
68500 
68501 	xhci_dbg(xhci, "Completed reset device command.\n");
68502-	if (!xhci->devs[slot_id])
68503-		xhci_warn(xhci, "Reset device command completion "
68504-				"for disabled slot %u\n", slot_id);
68505 }
68506 
68507 static void xhci_handle_cmd_nec_get_fw(struct xhci_hcd *xhci,
68508@@ -1571,7 +1707,7 @@ void xhci_handle_command_timeout(struct work_struct *work)
68509 static void handle_cmd_completion(struct xhci_hcd *xhci,
68510 		struct xhci_event_cmd *event)
68511 {
68512-	int slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
68513+	unsigned int slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
68514 	u64 cmd_dma;
68515 	dma_addr_t cmd_dequeue_dma;
68516 	u32 cmd_comp_code;
68517@@ -1579,6 +1715,11 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
68518 	struct xhci_command *cmd;
68519 	u32 cmd_type;
68520 
68521+	if (slot_id >= MAX_HC_SLOTS) {
68522+		xhci_warn(xhci, "Invalid slot_id %u\n", slot_id);
68523+		return;
68524+	}
68525+
68526 	cmd_dma = le64_to_cpu(event->cmd_trb);
68527 	cmd_trb = xhci->cmd_ring->dequeue;
68528 
68529@@ -1639,8 +1780,7 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
68530 		break;
68531 	case TRB_CONFIG_EP:
68532 		if (!cmd->completion)
68533-			xhci_handle_cmd_config_ep(xhci, slot_id, event,
68534-						  cmd_comp_code);
68535+			xhci_handle_cmd_config_ep(xhci, slot_id, cmd_comp_code);
68536 		break;
68537 	case TRB_EVAL_CONTEXT:
68538 		break;
68539@@ -1651,7 +1791,8 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
68540 		WARN_ON(slot_id != TRB_TO_SLOT_ID(
68541 				le32_to_cpu(cmd_trb->generic.field[3])));
68542 		if (!cmd->completion)
68543-			xhci_handle_cmd_stop_ep(xhci, slot_id, cmd_trb, event);
68544+			xhci_handle_cmd_stop_ep(xhci, slot_id, cmd_trb,
68545+						cmd_comp_code);
68546 		break;
68547 	case TRB_SET_DEQ:
68548 		WARN_ON(slot_id != TRB_TO_SLOT_ID(
68549@@ -1674,7 +1815,7 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
68550 		 */
68551 		slot_id = TRB_TO_SLOT_ID(
68552 				le32_to_cpu(cmd_trb->generic.field[3]));
68553-		xhci_handle_cmd_reset_dev(xhci, slot_id, event);
68554+		xhci_handle_cmd_reset_dev(xhci, slot_id);
68555 		break;
68556 	case TRB_NEC_GET_FW:
68557 		xhci_handle_cmd_nec_get_fw(xhci, event);
68558@@ -1701,11 +1842,8 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
68559 }
68560 
68561 static void handle_vendor_event(struct xhci_hcd *xhci,
68562-		union xhci_trb *event)
68563+				union xhci_trb *event, u32 trb_type)
68564 {
68565-	u32 trb_type;
68566-
68567-	trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(event->generic.field[3]));
68568 	xhci_dbg(xhci, "Vendor specific event TRB type = %u\n", trb_type);
68569 	if (trb_type == TRB_NEC_CMD_COMP && (xhci->quirks & XHCI_NEC_HOST))
68570 		handle_cmd_completion(xhci, &event->event_cmd);
68571@@ -1930,7 +2068,8 @@ static void handle_port_status(struct xhci_hcd *xhci,
68572 	 * bits are still set.  When an event occurs, switch over to
68573 	 * polling to avoid losing status changes.
68574 	 */
68575-	xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
68576+	xhci_dbg(xhci, "%s: starting usb%d port polling.\n",
68577+		 __func__, hcd->self.busnum);
68578 	set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
68579 	spin_unlock(&xhci->lock);
68580 	/* Pass this up to the core */
68581@@ -2062,29 +2201,60 @@ int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code)
68582 	return 0;
68583 }
68584 
68585-static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
68586-	struct xhci_transfer_event *event, struct xhci_virt_ep *ep)
68587+static int finish_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
68588+		     struct xhci_ring *ep_ring, struct xhci_td *td,
68589+		     u32 trb_comp_code)
68590 {
68591 	struct xhci_ep_ctx *ep_ctx;
68592-	struct xhci_ring *ep_ring;
68593-	u32 trb_comp_code;
68594 
68595-	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
68596 	ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep->ep_index);
68597-	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
68598 
68599-	if (trb_comp_code == COMP_STOPPED_LENGTH_INVALID ||
68600-			trb_comp_code == COMP_STOPPED ||
68601-			trb_comp_code == COMP_STOPPED_SHORT_PACKET) {
68602-		/* The Endpoint Stop Command completion will take care of any
68603-		 * stopped TDs.  A stopped TD may be restarted, so don't update
68604+	switch (trb_comp_code) {
68605+	case COMP_STOPPED_LENGTH_INVALID:
68606+	case COMP_STOPPED_SHORT_PACKET:
68607+	case COMP_STOPPED:
68608+		/*
68609+		 * The "Stop Endpoint" completion will take care of any
68610+		 * stopped TDs. A stopped TD may be restarted, so don't update
68611 		 * the ring dequeue pointer or take this TD off any lists yet.
68612 		 */
68613 		return 0;
68614-	}
68615-	if (trb_comp_code == COMP_STALL_ERROR ||
68616-		xhci_requires_manual_halt_cleanup(xhci, ep_ctx,
68617-						trb_comp_code)) {
68618+	case COMP_USB_TRANSACTION_ERROR:
68619+	case COMP_BABBLE_DETECTED_ERROR:
68620+	case COMP_SPLIT_TRANSACTION_ERROR:
68621+		/*
68622+		 * If endpoint context state is not halted we might be
68623+		 * racing with a reset endpoint command issued by an unsuccessful
68624+		 * stop endpoint completion (context error). In that case the
68625+		 * td should be on the cancelled list, and EP_HALTED flag set.
68626+		 *
68627+		 * Or the endpoint isn't halted because the 0.95 spec states that a
68628+		 * babbling control endpoint should not halt. The 0.96 spec
68629+		 * again says it should.  Some HW claims to be 0.95 compliant,
68630+		 * but it halts the control endpoint anyway.
68631+		 */
68632+		if (GET_EP_CTX_STATE(ep_ctx) != EP_STATE_HALTED) {
68633+			/*
68634+			 * If EP_HALTED is set and TD is on the cancelled list
68635+			 * the TD and dequeue pointer will be handled by reset
68636+			 * ep command completion
68637+			 */
68638+			if ((ep->ep_state & EP_HALTED) &&
68639+			    !list_empty(&td->cancelled_td_list)) {
68640+				xhci_dbg(xhci, "Already resolving halted ep for 0x%llx\n",
68641+					 (unsigned long long)xhci_trb_virt_to_dma(
68642+						 td->start_seg, td->first_trb));
68643+				return 0;
68644+			}
68645+			/* endpoint not halted, don't reset it */
68646+			break;
68647+		}
68648+		/* Almost same procedure as for STALL_ERROR below */
68649+		xhci_clear_hub_tt_buffer(xhci, td, ep);
68650+		xhci_handle_halted_endpoint(xhci, ep, ep_ring->stream_id, td,
68651+					    EP_HARD_RESET);
68652+		return 0;
68653+	case COMP_STALL_ERROR:
68654 		/*
68655 		 * xhci internal endpoint state will go to a "halt" state for
68656 		 * any stall, including default control pipe protocol stall.
68657@@ -2095,18 +2265,23 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
68658 		 * stall later. Hub TT buffer should only be cleared for FS/LS
68659 		 * devices behind HS hubs for functional stalls.
68660 		 */
68661-		if ((ep->ep_index != 0) || (trb_comp_code != COMP_STALL_ERROR))
68662+		if (ep->ep_index != 0)
68663 			xhci_clear_hub_tt_buffer(xhci, td, ep);
68664 
68665 		xhci_handle_halted_endpoint(xhci, ep, ep_ring->stream_id, td,
68666-					     EP_HARD_RESET);
68667-	} else {
68668-		/* Update ring dequeue pointer */
68669-		while (ep_ring->dequeue != td->last_trb)
68670-			inc_deq(xhci, ep_ring);
68671-		inc_deq(xhci, ep_ring);
68672+					    EP_HARD_RESET);
68673+
68674+		return 0; /* xhci_handle_halted_endpoint marked td cancelled */
68675+	default:
68676+		break;
68677 	}
68678 
68679+	/* Update ring dequeue pointer */
68680+	ep_ring->dequeue = td->last_trb;
68681+	ep_ring->deq_seg = td->last_trb_seg;
68682+	ep_ring->num_trbs_free += td->num_trbs - 1;
68683+	inc_deq(xhci, ep_ring);
68684+
68685 	return xhci_td_cleanup(xhci, td, ep_ring, td->status);
68686 }
68687 
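The "Update ring dequeue pointer" block above is one of the behavioural changes in this hunk: instead of walking the ring one TRB at a time until td->last_trb is reached, the ring is advanced in a single step using fields recorded when the TD was queued (last_trb_seg and num_trbs, both added by this patch). A condensed sketch of the new form, with a hypothetical helper name:

static void example_advance_past_td(struct xhci_hcd *xhci,
				    struct xhci_ring *ring, struct xhci_td *td)
{
	ring->dequeue = td->last_trb;		/* jump straight to the last TRB */
	ring->deq_seg = td->last_trb_seg;	/* and its segment */
	ring->num_trbs_free += td->num_trbs - 1;
	inc_deq(xhci, ring);			/* step past last_trb itself */
}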
68688@@ -2128,9 +2303,9 @@ static int sum_trb_lengths(struct xhci_hcd *xhci, struct xhci_ring *ring,
68689 /*
68690  * Process control tds, update urb status and actual_length.
68691  */
68692-static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
68693-	union xhci_trb *ep_trb, struct xhci_transfer_event *event,
68694-	struct xhci_virt_ep *ep)
68695+static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
68696+			   struct xhci_ring *ep_ring, struct xhci_td *td,
68697+			   union xhci_trb *ep_trb, struct xhci_transfer_event *event)
68698 {
68699 	struct xhci_ep_ctx *ep_ctx;
68700 	u32 trb_comp_code;
68701@@ -2218,15 +2393,15 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
68702 		td->urb->actual_length = requested;
68703 
68704 finish_td:
68705-	return finish_td(xhci, td, event, ep);
68706+	return finish_td(xhci, ep, ep_ring, td, trb_comp_code);
68707 }
68708 
68709 /*
68710  * Process isochronous tds, update urb packet status and actual_length.
68711  */
68712-static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
68713-	union xhci_trb *ep_trb, struct xhci_transfer_event *event,
68714-	struct xhci_virt_ep *ep)
68715+static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
68716+		struct xhci_ring *ep_ring, struct xhci_td *td,
68717+		union xhci_trb *ep_trb, struct xhci_transfer_event *event)
68718 {
68719 	struct urb_priv *urb_priv;
68720 	int idx;
68721@@ -2303,7 +2478,7 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
68722 
68723 	td->urb->actual_length += frame->actual_length;
68724 
68725-	return finish_td(xhci, td, event, ep);
68726+	return finish_td(xhci, ep, ep_ring, td, trb_comp_code);
68727 }
68728 
68729 static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
68730@@ -2324,8 +2499,9 @@ static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
68731 	frame->actual_length = 0;
68732 
68733 	/* Update ring dequeue pointer */
68734-	while (ep->ring->dequeue != td->last_trb)
68735-		inc_deq(xhci, ep->ring);
68736+	ep->ring->dequeue = td->last_trb;
68737+	ep->ring->deq_seg = td->last_trb_seg;
68738+	ep->ring->num_trbs_free += td->num_trbs - 1;
68739 	inc_deq(xhci, ep->ring);
68740 
68741 	return xhci_td_cleanup(xhci, td, ep->ring, status);
68742@@ -2334,17 +2510,15 @@ static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
68743 /*
68744  * Process bulk and interrupt tds, update urb status and actual_length.
68745  */
68746-static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
68747-	union xhci_trb *ep_trb, struct xhci_transfer_event *event,
68748-	struct xhci_virt_ep *ep)
68749+static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
68750+		struct xhci_ring *ep_ring, struct xhci_td *td,
68751+		union xhci_trb *ep_trb, struct xhci_transfer_event *event)
68752 {
68753 	struct xhci_slot_ctx *slot_ctx;
68754-	struct xhci_ring *ep_ring;
68755 	u32 trb_comp_code;
68756 	u32 remaining, requested, ep_trb_len;
68757 
68758 	slot_ctx = xhci_get_slot_ctx(xhci, ep->vdev->out_ctx);
68759-	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
68760 	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
68761 	remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
68762 	ep_trb_len = TRB_LEN(le32_to_cpu(ep_trb->generic.field[2]));
68763@@ -2352,7 +2526,7 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
68764 
68765 	switch (trb_comp_code) {
68766 	case COMP_SUCCESS:
68767-		ep->err_count = 0;
68768+		ep_ring->err_count = 0;
68769 		/* handle success with untransferred data as short packet */
68770 		if (ep_trb != td->last_trb || remaining) {
68771 			xhci_warn(xhci, "WARN Successful completion on short TX\n");
68772@@ -2378,7 +2552,7 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
68773 		break;
68774 	case COMP_USB_TRANSACTION_ERROR:
68775 		if (xhci->quirks & XHCI_NO_SOFT_RETRY ||
68776-		    (ep->err_count++ > MAX_SOFT_RETRY) ||
68777+		    (ep_ring->err_count++ > MAX_SOFT_RETRY) ||
68778 		    le32_to_cpu(slot_ctx->tt_info) & TT_SLOT)
68779 			break;
68780 
68781@@ -2404,7 +2578,8 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
68782 			  remaining);
68783 		td->urb->actual_length = 0;
68784 	}
68785-	return finish_td(xhci, td, event, ep);
68786+
68787+	return finish_td(xhci, ep, ep_ring, td, trb_comp_code);
68788 }
68789 
68790 /*
68791@@ -2415,7 +2590,6 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
68792 static int handle_tx_event(struct xhci_hcd *xhci,
68793 		struct xhci_transfer_event *event)
68794 {
68795-	struct xhci_virt_device *xdev;
68796 	struct xhci_virt_ep *ep;
68797 	struct xhci_ring *ep_ring;
68798 	unsigned int slot_id;
68799@@ -2442,9 +2616,8 @@ static int handle_tx_event(struct xhci_hcd *xhci,
68800 		goto err_out;
68801 	}
68802 
68803-	xdev = xhci->devs[slot_id];
68804 	ep_ring = xhci_dma_to_transfer_ring(ep, ep_trb_dma);
68805-	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
68806+	ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep_index);
68807 
68808 	if (GET_EP_CTX_STATE(ep_ctx) == EP_STATE_DISABLED) {
68809 		xhci_err(xhci,
68810@@ -2460,14 +2633,8 @@ static int handle_tx_event(struct xhci_hcd *xhci,
68811 		case COMP_USB_TRANSACTION_ERROR:
68812 		case COMP_INVALID_STREAM_TYPE_ERROR:
68813 		case COMP_INVALID_STREAM_ID_ERROR:
68814-			xhci_dbg(xhci, "Stream transaction error ep %u no id\n",
68815-				 ep_index);
68816-			if (ep->err_count++ > MAX_SOFT_RETRY)
68817-				xhci_handle_halted_endpoint(xhci, ep, 0, NULL,
68818-							    EP_HARD_RESET);
68819-			else
68820-				xhci_handle_halted_endpoint(xhci, ep, 0, NULL,
68821-							    EP_SOFT_RESET);
68822+			xhci_handle_halted_endpoint(xhci, ep, 0, NULL,
68823+						    EP_SOFT_RESET);
68824 			goto cleanup;
68825 		case COMP_RING_UNDERRUN:
68826 		case COMP_RING_OVERRUN:
68827@@ -2522,7 +2689,6 @@ static int handle_tx_event(struct xhci_hcd *xhci,
68828 	case COMP_STALL_ERROR:
68829 		xhci_dbg(xhci, "Stalled endpoint for slot %u ep %u\n", slot_id,
68830 			 ep_index);
68831-		ep->ep_state |= EP_HALTED;
68832 		status = -EPIPE;
68833 		break;
68834 	case COMP_SPLIT_TRANSACTION_ERROR:
68835@@ -2755,11 +2921,11 @@ static int handle_tx_event(struct xhci_hcd *xhci,
68836 
68837 		/* update the urb's actual_length and give back to the core */
68838 		if (usb_endpoint_xfer_control(&td->urb->ep->desc))
68839-			process_ctrl_td(xhci, td, ep_trb, event, ep);
68840+			process_ctrl_td(xhci, ep, ep_ring, td, ep_trb, event);
68841 		else if (usb_endpoint_xfer_isoc(&td->urb->ep->desc))
68842-			process_isoc_td(xhci, td, ep_trb, event, ep);
68843+			process_isoc_td(xhci, ep, ep_ring, td, ep_trb, event);
68844 		else
68845-			process_bulk_intr_td(xhci, td, ep_trb, event, ep);
68846+			process_bulk_intr_td(xhci, ep, ep_ring, td, ep_trb, event);
68847 cleanup:
68848 		handling_skipped_tds = ep->skip &&
68849 			trb_comp_code != COMP_MISSED_SERVICE_ERROR &&
68850@@ -2800,10 +2966,11 @@ static int handle_tx_event(struct xhci_hcd *xhci,
68851  * Returns >0 for "possibly more events to process" (caller should call again),
68852  * otherwise 0 if done.  In future, <0 returns should indicate error code.
68853  */
68854-static int xhci_handle_event(struct xhci_hcd *xhci)
68855+int xhci_handle_event(struct xhci_hcd *xhci)
68856 {
68857 	union xhci_trb *event;
68858 	int update_ptrs = 1;
68859+	u32 trb_type;
68860 	int ret;
68861 
68862 	/* Event ring hasn't been allocated yet. */
68863@@ -2825,31 +2992,30 @@ static int xhci_handle_event(struct xhci_hcd *xhci)
68864 	 * speculative reads of the event's flags/data below.
68865 	 */
68866 	rmb();
68867+	trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(event->event_cmd.flags));
68868 	/* FIXME: Handle more event types. */
68869-	switch (le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK) {
68870-	case TRB_TYPE(TRB_COMPLETION):
68871+
68872+	switch (trb_type) {
68873+	case TRB_COMPLETION:
68874 		handle_cmd_completion(xhci, &event->event_cmd);
68875 		break;
68876-	case TRB_TYPE(TRB_PORT_STATUS):
68877+	case TRB_PORT_STATUS:
68878 		handle_port_status(xhci, event);
68879 		update_ptrs = 0;
68880 		break;
68881-	case TRB_TYPE(TRB_TRANSFER):
68882+	case TRB_TRANSFER:
68883 		ret = handle_tx_event(xhci, &event->trans_event);
68884 		if (ret >= 0)
68885 			update_ptrs = 0;
68886 		break;
68887-	case TRB_TYPE(TRB_DEV_NOTE):
68888+	case TRB_DEV_NOTE:
68889 		handle_device_notification(xhci, event);
68890 		break;
68891 	default:
68892-		if ((le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK) >=
68893-		    TRB_TYPE(48))
68894-			handle_vendor_event(xhci, event);
68895+		if (trb_type >= TRB_VENDOR_DEFINED_LOW)
68896+			handle_vendor_event(xhci, event, trb_type);
68897 		else
68898-			xhci_warn(xhci, "ERROR unknown event type %d\n",
68899-				  TRB_FIELD_TO_TYPE(
68900-				  le32_to_cpu(event->event_cmd.flags)));
68901+			xhci_warn(xhci, "ERROR unknown event type %d\n", trb_type);
68902 	}
68903 	/* Any of the above functions may drop and re-acquire the lock, so check
68904 	 * to make sure a watchdog timer didn't mark the host as non-responsive.
68905@@ -2869,13 +3035,14 @@ static int xhci_handle_event(struct xhci_hcd *xhci)
68906 	 */
68907 	return 1;
68908 }
68909+EXPORT_SYMBOL_GPL(xhci_handle_event);
68910 
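For reference, the switch above is equivalent to the old masked comparison; both forms read the same type field of the event TRB. A short illustrative sketch of the two spellings (the wrapper function is hypothetical):

static void example_dispatch_transfer_event(struct xhci_hcd *xhci,
					    union xhci_trb *event)
{
	u32 flags = le32_to_cpu(event->event_cmd.flags);

	/* new style: extract the type field once, compare plain type numbers */
	if (TRB_FIELD_TO_TYPE(flags) == TRB_TRANSFER)
		handle_tx_event(xhci, &event->trans_event);

	/* old style: mask the flags and compare against a TRB_TYPE() value */
	if ((flags & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_TRANSFER))
		handle_tx_event(xhci, &event->trans_event);
}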
68911 /*
68912  * Update Event Ring Dequeue Pointer:
68913  * - When all events have finished
68914  * - To avoid "Event Ring Full Error" condition
68915  */
68916-static void xhci_update_erst_dequeue(struct xhci_hcd *xhci,
68917+void xhci_update_erst_dequeue(struct xhci_hcd *xhci,
68918 		union xhci_trb *event_ring_deq)
68919 {
68920 	u64 temp_64;
68921@@ -2905,6 +3072,16 @@ static void xhci_update_erst_dequeue(struct xhci_hcd *xhci,
68922 	temp_64 |= ERST_EHB;
68923 	xhci_write_64(xhci, temp_64, &xhci->ir_set->erst_dequeue);
68924 }
68925+EXPORT_SYMBOL_GPL(xhci_update_erst_dequeue);
68926+
68927+static irqreturn_t xhci_vendor_queue_irq_work(struct xhci_hcd *xhci)
68928+{
68929+	struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci);
68930+
68931+	if (ops && ops->queue_irq_work)
68932+		return ops->queue_irq_work(xhci);
68933+	return IRQ_NONE;
68934+}
68935 
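The xhci_vendor_get_ops()/struct xhci_vendor_ops pair used above is defined elsewhere in this vendor tree, so the registration path is not visible here. The sketch below only shows the shape of the hook as this hunk uses it; everything beyond the queue_irq_work member is an assumption.

static irqreturn_t example_queue_irq_work(struct xhci_hcd *xhci)
{
	/*
	 * Hand the event ring off to a vendor-owned context; returning
	 * IRQ_HANDLED makes xhci_irq() skip its own event processing.
	 */
	return IRQ_HANDLED;
}

static struct xhci_vendor_ops example_vendor_ops = {
	.queue_irq_work = example_queue_irq_work,
};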
68936 /*
68937  * xHCI spec says we can get an interrupt, and if the HC has an error condition,
68938@@ -2940,6 +3117,10 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
68939 		goto out;
68940 	}
68941 
68942+	ret = xhci_vendor_queue_irq_work(xhci);
68943+	if (ret == IRQ_HANDLED)
68944+		goto out;
68945+
68946 	/*
68947 	 * Clear the op reg interrupt status first,
68948 	 * so we can receive interrupts from other MSI-X interrupters.
68949@@ -2977,8 +3158,6 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
68950 		if (event_loop++ < TRBS_PER_SEGMENT / 2)
68951 			continue;
68952 		xhci_update_erst_dequeue(xhci, event_ring_deq);
68953-		event_ring_deq = xhci->event_ring->dequeue;
68954-
68955 		event_loop = 0;
68956 	}
68957 
68958@@ -3032,6 +3211,7 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
68959 		u32 ep_state, unsigned int num_trbs, gfp_t mem_flags)
68960 {
68961 	unsigned int num_trbs_needed;
68962+	unsigned int link_trb_count = 0;
68963 
68964 	/* Make sure the endpoint has been added to xHC schedule */
68965 	switch (ep_state) {
68966@@ -3102,7 +3282,19 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
68967 
68968 		ep_ring->enq_seg = ep_ring->enq_seg->next;
68969 		ep_ring->enqueue = ep_ring->enq_seg->trbs;
68970+
68971+		/* prevent infinite loop if all first trbs are link trbs */
68972+		if (link_trb_count++ > ep_ring->num_segs) {
68973+			xhci_warn(xhci, "Ring is an endless link TRB loop\n");
68974+			return -EINVAL;
68975+		}
68976 	}
68977+
68978+	if (last_trb_on_seg(ep_ring->enq_seg, ep_ring->enqueue)) {
68979+		xhci_warn(xhci, "Missing link TRB at end of ring segment\n");
68980+		return -EINVAL;
68981+	}
68982+
68983 	return 0;
68984 }
68985 
68986@@ -3121,7 +3313,8 @@ static int prepare_transfer(struct xhci_hcd *xhci,
68987 	struct xhci_ring *ep_ring;
68988 	struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
68989 
68990-	ep_ring = xhci_stream_id_to_ring(xdev, ep_index, stream_id);
68991+	ep_ring = xhci_triad_to_transfer_ring(xhci, xdev->slot_id, ep_index,
68992+					      stream_id);
68993 	if (!ep_ring) {
68994 		xhci_dbg(xhci, "Can't prepare ring for bad stream ID %u\n",
68995 				stream_id);
68996@@ -3490,7 +3683,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
68997 			field |= TRB_IOC;
68998 			more_trbs_coming = false;
68999 			td->last_trb = ring->enqueue;
69000-
69001+			td->last_trb_seg = ring->enq_seg;
69002 			if (xhci_urb_suitable_for_idt(urb)) {
69003 				memcpy(&send_addr, urb->transfer_buffer,
69004 				       trb_buff_len);
69005@@ -3516,7 +3709,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
69006 				upper_32_bits(send_addr),
69007 				length_field,
69008 				field);
69009-
69010+		td->num_trbs++;
69011 		addr += trb_buff_len;
69012 		sent_len = trb_buff_len;
69013 
69014@@ -3540,8 +3733,10 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
69015 				       ep_index, urb->stream_id,
69016 				       1, urb, 1, mem_flags);
69017 		urb_priv->td[1].last_trb = ring->enqueue;
69018+		urb_priv->td[1].last_trb_seg = ring->enq_seg;
69019 		field = TRB_TYPE(TRB_NORMAL) | ring->cycle_state | TRB_IOC;
69020 		queue_trb(xhci, ring, 0, 0, 0, TRB_INTR_TARGET(0), field);
69021+		urb_priv->td[1].num_trbs++;
69022 	}
69023 
69024 	check_trb_math(urb, enqd_len);
69025@@ -3592,6 +3787,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
69026 
69027 	urb_priv = urb->hcpriv;
69028 	td = &urb_priv->td[0];
69029+	td->num_trbs = num_trbs;
69030 
69031 	/*
69032 	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
69033@@ -3664,6 +3860,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
69034 
69035 	/* Save the DMA address of the last TRB in the TD */
69036 	td->last_trb = ep_ring->enqueue;
69037+	td->last_trb_seg = ep_ring->enq_seg;
69038 
69039 	/* Queue status TRB - see Table 7 and sections 4.11.2.2 and 6.4.1.2.3 */
69040 	/* If the device sent data, the status stage is an OUT transfer */
69041@@ -3908,7 +4105,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
69042 			goto cleanup;
69043 		}
69044 		td = &urb_priv->td[i];
69045-
69046+		td->num_trbs = trbs_per_td;
69047 		/* use SIA as default, if frame id is used overwrite it */
69048 		sia_frame_id = TRB_SIA;
69049 		if (!(urb->transfer_flags & URB_ISO_ASAP) &&
69050@@ -3951,6 +4148,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
69051 			} else {
69052 				more_trbs_coming = false;
69053 				td->last_trb = ep_ring->enqueue;
69054+				td->last_trb_seg = ep_ring->enq_seg;
69055 				field |= TRB_IOC;
69056 				if (trb_block_event_intr(xhci, num_tds, i))
69057 					field |= TRB_BEI;
69058@@ -4233,71 +4431,7 @@ int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, struct xhci_command *cmd,
69059 	return queue_command(xhci, cmd, 0, 0, 0,
69060 			trb_slot_id | trb_ep_index | type | trb_suspend, false);
69061 }
69062-
69063-/* Set Transfer Ring Dequeue Pointer command */
69064-void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
69065-		unsigned int slot_id, unsigned int ep_index,
69066-		struct xhci_dequeue_state *deq_state)
69067-{
69068-	dma_addr_t addr;
69069-	u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
69070-	u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
69071-	u32 trb_stream_id = STREAM_ID_FOR_TRB(deq_state->stream_id);
69072-	u32 trb_sct = 0;
69073-	u32 type = TRB_TYPE(TRB_SET_DEQ);
69074-	struct xhci_virt_ep *ep;
69075-	struct xhci_command *cmd;
69076-	int ret;
69077-
69078-	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
69079-		"Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), new deq ptr = %p (0x%llx dma), new cycle = %u",
69080-		deq_state->new_deq_seg,
69081-		(unsigned long long)deq_state->new_deq_seg->dma,
69082-		deq_state->new_deq_ptr,
69083-		(unsigned long long)xhci_trb_virt_to_dma(
69084-			deq_state->new_deq_seg, deq_state->new_deq_ptr),
69085-		deq_state->new_cycle_state);
69086-
69087-	addr = xhci_trb_virt_to_dma(deq_state->new_deq_seg,
69088-				    deq_state->new_deq_ptr);
69089-	if (addr == 0) {
69090-		xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
69091-		xhci_warn(xhci, "WARN deq seg = %p, deq pt = %p\n",
69092-			  deq_state->new_deq_seg, deq_state->new_deq_ptr);
69093-		return;
69094-	}
69095-	ep = &xhci->devs[slot_id]->eps[ep_index];
69096-	if ((ep->ep_state & SET_DEQ_PENDING)) {
69097-		xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
69098-		xhci_warn(xhci, "A Set TR Deq Ptr command is pending.\n");
69099-		return;
69100-	}
69101-
69102-	/* This function gets called from contexts where it cannot sleep */
69103-	cmd = xhci_alloc_command(xhci, false, GFP_ATOMIC);
69104-	if (!cmd)
69105-		return;
69106-
69107-	ep->queued_deq_seg = deq_state->new_deq_seg;
69108-	ep->queued_deq_ptr = deq_state->new_deq_ptr;
69109-	if (deq_state->stream_id)
69110-		trb_sct = SCT_FOR_TRB(SCT_PRI_TR);
69111-	ret = queue_command(xhci, cmd,
69112-		lower_32_bits(addr) | trb_sct | deq_state->new_cycle_state,
69113-		upper_32_bits(addr), trb_stream_id,
69114-		trb_slot_id | trb_ep_index | type, false);
69115-	if (ret < 0) {
69116-		xhci_free_command(xhci, cmd);
69117-		return;
69118-	}
69119-
69120-	/* Stop the TD queueing code from ringing the doorbell until
69121-	 * this command completes.  The HC won't set the dequeue pointer
69122-	 * if the ring is running, and ringing the doorbell starts the
69123-	 * ring running.
69124-	 */
69125-	ep->ep_state |= SET_DEQ_PENDING;
69126-}
69127+EXPORT_SYMBOL_GPL(xhci_queue_stop_endpoint);
69128 
69129 int xhci_queue_reset_ep(struct xhci_hcd *xhci, struct xhci_command *cmd,
69130 			int slot_id, unsigned int ep_index,
69131diff --git a/drivers/usb/host/xhci-trace.c b/drivers/usb/host/xhci-trace.c
69132index d0070814d..c4178357b 100644
69133--- a/drivers/usb/host/xhci-trace.c
69134+++ b/drivers/usb/host/xhci-trace.c
69135@@ -12,3 +12,6 @@
69136 #include "xhci-trace.h"
69137 
69138 EXPORT_TRACEPOINT_SYMBOL_GPL(xhci_dbg_quirks);
69139+EXPORT_TRACEPOINT_SYMBOL_GPL(xhci_urb_enqueue);
69140+EXPORT_TRACEPOINT_SYMBOL_GPL(xhci_handle_transfer);
69141+EXPORT_TRACEPOINT_SYMBOL_GPL(xhci_urb_giveback);
69142diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
69143index 473b0b64d..611401723 100644
69144--- a/drivers/usb/host/xhci.c
69145+++ b/drivers/usb/host/xhci.c
69146@@ -20,7 +20,6 @@
69147 
69148 #include "xhci.h"
69149 #include "xhci-trace.h"
69150-#include "xhci-mtk.h"
69151 #include "xhci-debugfs.h"
69152 #include "xhci-dbgcap.h"
69153 
69154@@ -66,7 +65,7 @@ static bool td_on_ring(struct xhci_td *td, struct xhci_ring *ring)
69155  * handshake done).  There are two failure modes:  "usec" have passed (major
69156  * hardware flakeout), or the register reads as all-ones (hardware removed).
69157  */
69158-int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, u64 timeout_us)
69159+int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, int usec)
69160 {
69161 	u32	result;
69162 	int	ret;
69163@@ -74,7 +73,7 @@ int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, u64 timeout_us)
69164 	ret = readl_poll_timeout_atomic(ptr, result,
69165 					(result & mask) == done ||
69166 					result == U32_MAX,
69167-					1, timeout_us);
69168+					1, usec);
69169 	if (result == U32_MAX)		/* card removed */
69170 		return -ENODEV;
69171 
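With the signature reverted above, the timeout is a plain microsecond count again. A typical use, mirroring how xhci_halt() waits for the controller to report the halted state; the wrapper function here is a sketch, not part of the patch:

static int example_wait_for_halt(struct xhci_hcd *xhci)
{
	int ret;

	/* Poll USBSTS until STS_HALT is set, or time out / detect removal. */
	ret = xhci_handshake(&xhci->op_regs->status, STS_HALT, STS_HALT,
			     XHCI_MAX_HALT_USEC);
	if (ret == -ENODEV)
		xhci_warn(xhci, "Controller removed while waiting for halt\n");
	return ret;
}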
69172@@ -149,11 +148,9 @@ int xhci_start(struct xhci_hcd *xhci)
69173 		xhci_err(xhci, "Host took too long to start, "
69174 				"waited %u microseconds.\n",
69175 				XHCI_MAX_HALT_USEC);
69176-	if (!ret) {
69177+	if (!ret)
69178 		/* clear state flags. Including dying, halted or removing */
69179 		xhci->xhc_state = 0;
69180-		xhci->run_graceperiod = jiffies + msecs_to_jiffies(500);
69181-	}
69182 
69183 	return ret;
69184 }
69185@@ -165,7 +162,7 @@ int xhci_start(struct xhci_hcd *xhci)
69186  * Transactions will be terminated immediately, and operational registers
69187  * will be set to their defaults.
69188  */
69189-int xhci_reset(struct xhci_hcd *xhci, u64 timeout_us)
69190+int xhci_reset(struct xhci_hcd *xhci)
69191 {
69192 	u32 command;
69193 	u32 state;
69194@@ -198,7 +195,8 @@ int xhci_reset(struct xhci_hcd *xhci, u64 timeout_us)
69195 	if (xhci->quirks & XHCI_INTEL_HOST)
69196 		udelay(1000);
69197 
69198-	ret = xhci_handshake(&xhci->op_regs->command, CMD_RESET, 0, timeout_us);
69199+	ret = xhci_handshake(&xhci->op_regs->command,
69200+			CMD_RESET, 0, 10 * 1000 * 1000);
69201 	if (ret)
69202 		return ret;
69203 
69204@@ -211,7 +209,8 @@ int xhci_reset(struct xhci_hcd *xhci, u64 timeout_us)
69205 	 * xHCI cannot write to any doorbells or operational registers other
69206 	 * than status until the "Controller Not Ready" flag is cleared.
69207 	 */
69208-	ret = xhci_handshake(&xhci->op_regs->status, STS_CNR, 0, timeout_us);
69209+	ret = xhci_handshake(&xhci->op_regs->status,
69210+			STS_CNR, 0, 10 * 1000 * 1000);
69211 
69212 	xhci->usb2_rhub.bus_state.port_c_suspend = 0;
69213 	xhci->usb2_rhub.bus_state.suspended_ports = 0;
69214@@ -696,8 +695,6 @@ int xhci_run(struct usb_hcd *hcd)
69215 	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
69216 			"Finished xhci_run for USB2 roothub");
69217 
69218-	set_bit(HCD_FLAG_DEFER_RH_REGISTER, &hcd->flags);
69219-
69220 	xhci_dbc_init(xhci);
69221 
69222 	xhci_debugfs_init(xhci);
69223@@ -734,7 +731,7 @@ static void xhci_stop(struct usb_hcd *hcd)
69224 	xhci->xhc_state |= XHCI_STATE_HALTED;
69225 	xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
69226 	xhci_halt(xhci);
69227-	xhci_reset(xhci, XHCI_RESET_SHORT_USEC);
69228+	xhci_reset(xhci);
69229 	spin_unlock_irq(&xhci->lock);
69230 
69231 	xhci_cleanup_msix(xhci);
69232@@ -783,28 +780,11 @@ void xhci_shutdown(struct usb_hcd *hcd)
69233 	if (xhci->quirks & XHCI_SPURIOUS_REBOOT)
69234 		usb_disable_xhci_ports(to_pci_dev(hcd->self.sysdev));
69235 
69236-	/* Don't poll the roothubs after shutdown. */
69237-	xhci_dbg(xhci, "%s: stopping usb%d port polling.\n",
69238-			__func__, hcd->self.busnum);
69239-	clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
69240-	del_timer_sync(&hcd->rh_timer);
69241-
69242-	if (xhci->shared_hcd) {
69243-		clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
69244-		del_timer_sync(&xhci->shared_hcd->rh_timer);
69245-	}
69246-
69247 	spin_lock_irq(&xhci->lock);
69248 	xhci_halt(xhci);
69249-
69250-	/*
69251-	 * Workaround for spurious wakeps at shutdown with HSW, and for boot
69252-	 * firmware delay in ADL-P PCH if port are left in U3 at shutdown
69253-	 */
69254-	if (xhci->quirks & XHCI_SPURIOUS_WAKEUP ||
69255-	    xhci->quirks & XHCI_RESET_TO_DEFAULT)
69256-		xhci_reset(xhci, XHCI_RESET_SHORT_USEC);
69257-
69258+	/* Workaround for spurious wakeups at shutdown with HSW */
69259+	if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
69260+		xhci_reset(xhci);
69261 	spin_unlock_irq(&xhci->lock);
69262 
69263 	xhci_cleanup_msix(xhci);
69264@@ -1013,7 +993,8 @@ int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup)
69265 	xhci_dbc_suspend(xhci);
69266 
69267 	/* Don't poll the roothubs on bus suspend. */
69268-	xhci_dbg(xhci, "%s: stopping port polling.\n", __func__);
69269+	xhci_dbg(xhci, "%s: stopping usb%d port polling.\n",
69270+		 __func__, hcd->self.busnum);
69271 	clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
69272 	del_timer_sync(&hcd->rh_timer);
69273 	clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
69274@@ -1110,7 +1091,6 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
69275 	int			retval = 0;
69276 	bool			comp_timer_running = false;
69277 	bool			pending_portevent = false;
69278-	bool			reinit_xhc = false;
69279 
69280 	if (!hcd->state)
69281 		return 0;
69282@@ -1127,11 +1107,10 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
69283 	set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
69284 
69285 	spin_lock_irq(&xhci->lock);
69286+	if ((xhci->quirks & XHCI_RESET_ON_RESUME) || xhci->broken_suspend)
69287+		hibernated = true;
69288 
69289-	if (hibernated || xhci->quirks & XHCI_RESET_ON_RESUME || xhci->broken_suspend)
69290-		reinit_xhc = true;
69291-
69292-	if (!reinit_xhc) {
69293+	if (!hibernated) {
69294 		/*
69295 		 * Some controllers might lose power during suspend, so wait
69296 		 * for controller not ready bit to clear, just as in xHC init.
69297@@ -1164,18 +1143,12 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
69298 			spin_unlock_irq(&xhci->lock);
69299 			return -ETIMEDOUT;
69300 		}
69301+		temp = readl(&xhci->op_regs->status);
69302 	}
69303 
69304-	temp = readl(&xhci->op_regs->status);
69305-
69306-	/* re-initialize the HC on Restore Error, or Host Controller Error */
69307-	if (temp & (STS_SRE | STS_HCE)) {
69308-		reinit_xhc = true;
69309-		if (!xhci->broken_suspend)
69310-			xhci_warn(xhci, "xHC error in resume, USBSTS 0x%x, Reinit\n", temp);
69311-	}
69312+	/* If restore operation fails, re-initialize the HC during resume */
69313+	if ((temp & STS_SRE) || hibernated) {
69314 
69315-	if (reinit_xhc) {
69316 		if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
69317 				!(xhci_all_ports_seen_u0(xhci))) {
69318 			del_timer_sync(&xhci->comp_mode_recovery_timer);
69319@@ -1190,7 +1163,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
69320 		xhci_dbg(xhci, "Stop HCD\n");
69321 		xhci_halt(xhci);
69322 		xhci_zero_64b_regs(xhci);
69323-		retval = xhci_reset(xhci, XHCI_RESET_LONG_USEC);
69324+		retval = xhci_reset(xhci);
69325 		spin_unlock_irq(&xhci->lock);
69326 		if (retval)
69327 			return retval;
69328@@ -1285,7 +1258,8 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
69329 		usb_asmedia_modifyflowcontrol(to_pci_dev(hcd->self.controller));
69330 
69331 	/* Re-enable port polling. */
69332-	xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
69333+	xhci_dbg(xhci, "%s: starting usb%d port polling.\n",
69334+		 __func__, hcd->self.busnum);
69335 	set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
69336 	usb_hcd_poll_rh_status(xhci->shared_hcd);
69337 	set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
69338@@ -1333,6 +1307,7 @@ unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc)
69339 			(usb_endpoint_dir_in(desc) ? 1 : 0) - 1;
69340 	return index;
69341 }
69342+EXPORT_SYMBOL_GPL(xhci_get_endpoint_index);
69343 
69344 /* The reverse operation to xhci_get_endpoint_index. Calculate the USB endpoint
69345  * address from the XHCI endpoint index.
69346@@ -1343,6 +1318,7 @@ unsigned int xhci_get_endpoint_address(unsigned int ep_index)
69347 	unsigned int direction = ep_index % 2 ? USB_DIR_OUT : USB_DIR_IN;
69348 	return direction | number;
69349 }
69350+EXPORT_SYMBOL_GPL(xhci_get_endpoint_address);
69351 
69352 /* Find the flag for this endpoint (for use in the control context).  Use the
69353  * endpoint index to create a bitmask.  The slot context is bit 0, endpoint 0 is
69354@@ -1353,15 +1329,6 @@ static unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc)
69355 	return 1 << (xhci_get_endpoint_index(desc) + 1);
69356 }
69357 
69358-/* Find the flag for this endpoint (for use in the control context).  Use the
69359- * endpoint index to create a bitmask.  The slot context is bit 0, endpoint 0 is
69360- * bit 1, etc.
69361- */
69362-static unsigned int xhci_get_endpoint_flag_from_index(unsigned int ep_index)
69363-{
69364-	return 1 << (ep_index + 1);
69365-}
69366-
69367 /* Compute the last valid endpoint context index.  Basically, this is the
69368  * endpoint index plus one.  For slot contexts with more than valid endpoint,
69369  * we find the most significant bit set in the added contexts flags.
69370@@ -1507,12 +1474,9 @@ static int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flag
69371 	struct urb_priv	*urb_priv;
69372 	int num_tds;
69373 
69374-	if (!urb)
69375+	if (!urb || xhci_check_args(hcd, urb->dev, urb->ep,
69376+					true, true, __func__) <= 0)
69377 		return -EINVAL;
69378-	ret = xhci_check_args(hcd, urb->dev, urb->ep,
69379-					true, true, __func__);
69380-	if (ret <= 0)
69381-		return ret ? ret : -EINVAL;
69382 
69383 	slot_id = urb->dev->slot_id;
69384 	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
69385@@ -1528,6 +1492,11 @@ static int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flag
69386 		return -ENODEV;
69387 	}
69388 
69389+	if (xhci_vendor_usb_offload_skip_urb(xhci, urb)) {
69390+		xhci_dbg(xhci, "skip urb for usb offload\n");
69391+		return -EOPNOTSUPP;
69392+	}
69393+
69394 	if (usb_endpoint_xfer_isoc(&urb->ep->desc))
69395 		num_tds = urb->number_of_packets;
69396 	else if (usb_endpoint_is_bulk_out(&urb->ep->desc) &&
69397@@ -1728,7 +1697,12 @@ static int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
69398 
69399 	for (; i < urb_priv->num_tds; i++) {
69400 		td = &urb_priv->td[i];
69401-		list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
69402+		/* TD can already be on cancelled list if ep halted on it */
69403+		if (list_empty(&td->cancelled_td_list)) {
69404+			td->cancel_status = TD_DIRTY;
69405+			list_add_tail(&td->cancelled_td_list,
69406+				      &ep->cancelled_td_list);
69407+		}
69408 	}
69409 
69410 	/* Queue a stop endpoint command, but only if this is
69411@@ -1774,8 +1748,8 @@ static int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
69412  * disabled, so there's no need for mutual exclusion to protect
69413  * the xhci->devs[slot_id] structure.
69414  */
69415-static int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
69416-		struct usb_host_endpoint *ep)
69417+int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
69418+		       struct usb_host_endpoint *ep)
69419 {
69420 	struct xhci_hcd *xhci;
69421 	struct xhci_container_ctx *in_ctx, *out_ctx;
69422@@ -1835,9 +1809,6 @@ static int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
69423 
69424 	xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep);
69425 
69426-	if (xhci->quirks & XHCI_MTK_HOST)
69427-		xhci_mtk_drop_ep_quirk(hcd, udev, ep);
69428-
69429 	xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n",
69430 			(unsigned int) ep->desc.bEndpointAddress,
69431 			udev->slot_id,
69432@@ -1845,6 +1816,7 @@ static int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
69433 			(unsigned int) new_add_flags);
69434 	return 0;
69435 }
69436+EXPORT_SYMBOL_GPL(xhci_drop_endpoint);
69437 
69438 /* Add an endpoint to a new possible bandwidth configuration for this device.
69439  * Only one call to this function is allowed per endpoint before
69440@@ -1859,8 +1831,8 @@ static int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
69441  * configuration or alt setting is installed in the device, so there's no need
69442  * for mutual exclusion to protect the xhci->devs[slot_id] structure.
69443  */
69444-static int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
69445-		struct usb_host_endpoint *ep)
69446+int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
69447+		      struct usb_host_endpoint *ep)
69448 {
69449 	struct xhci_hcd *xhci;
69450 	struct xhci_container_ctx *in_ctx;
69451@@ -1934,15 +1906,6 @@ static int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
69452 		return -ENOMEM;
69453 	}
69454 
69455-	if (xhci->quirks & XHCI_MTK_HOST) {
69456-		ret = xhci_mtk_add_ep_quirk(hcd, udev, ep);
69457-		if (ret < 0) {
69458-			xhci_ring_free(xhci, virt_dev->eps[ep_index].new_ring);
69459-			virt_dev->eps[ep_index].new_ring = NULL;
69460-			return ret;
69461-		}
69462-	}
69463-
69464 	ctrl_ctx->add_flags |= cpu_to_le32(added_ctxs);
69465 	new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);
69466 
69467@@ -1967,6 +1930,7 @@ static int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
69468 			(unsigned int) new_add_flags);
69469 	return 0;
69470 }
69471+EXPORT_SYMBOL_GPL(xhci_add_endpoint);
69472 
69473 static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev)
69474 {
69475@@ -2876,6 +2840,14 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
69476 			xhci_finish_resource_reservation(xhci, ctrl_ctx);
69477 		spin_unlock_irqrestore(&xhci->lock, flags);
69478 	}
69479+	if (ret)
69480+		goto failed;
69481+
69482+	ret = xhci_vendor_sync_dev_ctx(xhci, udev->slot_id);
69483+	if (ret)
69484+		xhci_warn(xhci, "sync device context failed, ret=%d", ret);
69485+
69486+failed:
69487 	return ret;
69488 }
69489 
69490@@ -3000,6 +2972,7 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
69491 
69492 	return ret;
69493 }
69494+EXPORT_SYMBOL_GPL(xhci_check_bandwidth);
69495 
69496 void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
69497 {
69498@@ -3018,12 +2991,17 @@ void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
69499 	for (i = 0; i < 31; i++) {
69500 		if (virt_dev->eps[i].new_ring) {
69501 			xhci_debugfs_remove_endpoint(xhci, virt_dev, i);
69502-			xhci_ring_free(xhci, virt_dev->eps[i].new_ring);
69503+			if (xhci_vendor_is_usb_offload_enabled(xhci, virt_dev, i))
69504+				xhci_vendor_free_transfer_ring(xhci, virt_dev, i);
69505+			else
69506+				xhci_ring_free(xhci, virt_dev->eps[i].new_ring);
69507+
69508 			virt_dev->eps[i].new_ring = NULL;
69509 		}
69510 	}
69511 	xhci_zero_in_ctx(xhci, virt_dev);
69512 }
69513+EXPORT_SYMBOL_GPL(xhci_reset_bandwidth);
69514 
69515 static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci,
69516 		struct xhci_container_ctx *in_ctx,
69517@@ -3037,84 +3015,6 @@ static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci,
69518 	ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
69519 }
69520 
69521-static void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
69522-		unsigned int slot_id, unsigned int ep_index,
69523-		struct xhci_dequeue_state *deq_state)
69524-{
69525-	struct xhci_input_control_ctx *ctrl_ctx;
69526-	struct xhci_container_ctx *in_ctx;
69527-	struct xhci_ep_ctx *ep_ctx;
69528-	u32 added_ctxs;
69529-	dma_addr_t addr;
69530-
69531-	in_ctx = xhci->devs[slot_id]->in_ctx;
69532-	ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
69533-	if (!ctrl_ctx) {
69534-		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
69535-				__func__);
69536-		return;
69537-	}
69538-
69539-	xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
69540-			xhci->devs[slot_id]->out_ctx, ep_index);
69541-	ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
69542-	addr = xhci_trb_virt_to_dma(deq_state->new_deq_seg,
69543-			deq_state->new_deq_ptr);
69544-	if (addr == 0) {
69545-		xhci_warn(xhci, "WARN Cannot submit config ep after "
69546-				"reset ep command\n");
69547-		xhci_warn(xhci, "WARN deq seg = %p, deq ptr = %p\n",
69548-				deq_state->new_deq_seg,
69549-				deq_state->new_deq_ptr);
69550-		return;
69551-	}
69552-	ep_ctx->deq = cpu_to_le64(addr | deq_state->new_cycle_state);
69553-
69554-	added_ctxs = xhci_get_endpoint_flag_from_index(ep_index);
69555-	xhci_setup_input_ctx_for_config_ep(xhci, xhci->devs[slot_id]->in_ctx,
69556-			xhci->devs[slot_id]->out_ctx, ctrl_ctx,
69557-			added_ctxs, added_ctxs);
69558-}
69559-
69560-void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, unsigned int slot_id,
69561-			       unsigned int ep_index, unsigned int stream_id,
69562-			       struct xhci_td *td)
69563-{
69564-	struct xhci_dequeue_state deq_state;
69565-
69566-	xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
69567-			"Cleaning up stalled endpoint ring");
69568-	/* We need to move the HW's dequeue pointer past this TD,
69569-	 * or it will attempt to resend it on the next doorbell ring.
69570-	 */
69571-	xhci_find_new_dequeue_state(xhci, slot_id, ep_index, stream_id, td,
69572-				    &deq_state);
69573-
69574-	if (!deq_state.new_deq_ptr || !deq_state.new_deq_seg)
69575-		return;
69576-
69577-	/* HW with the reset endpoint quirk will use the saved dequeue state to
69578-	 * issue a configure endpoint command later.
69579-	 */
69580-	if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) {
69581-		xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
69582-				"Queueing new dequeue state");
69583-		xhci_queue_new_dequeue_state(xhci, slot_id,
69584-				ep_index, &deq_state);
69585-	} else {
69586-		/* Better hope no one uses the input context between now and the
69587-		 * reset endpoint completion!
69588-		 * XXX: No idea how this hardware will react when stream rings
69589-		 * are enabled.
69590-		 */
69591-		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
69592-				"Setting up input context for "
69593-				"configure endpoint command");
69594-		xhci_setup_input_ctx_for_quirk(xhci, slot_id,
69595-				ep_index, &deq_state);
69596-	}
69597-}
69598-
69599 static void xhci_endpoint_disable(struct usb_hcd *hcd,
69600 				  struct usb_host_endpoint *host_ep)
69601 {
69602@@ -3260,6 +3160,13 @@ static void xhci_endpoint_reset(struct usb_hcd *hcd,
69603 
69604 	wait_for_completion(stop_cmd->completion);
69605 
69606+	err = xhci_vendor_sync_dev_ctx(xhci, udev->slot_id);
69607+	if (err) {
69608+		xhci_warn(xhci, "%s: Failed to sync device context, err=%d",
69609+			  __func__, err);
69610+		goto cleanup;
69611+	}
69612+
69613 	spin_lock_irqsave(&xhci->lock, flags);
69614 
69615 	/* config ep command clears toggle if add and drop ep flags are set */
69616@@ -3291,6 +3198,11 @@ static void xhci_endpoint_reset(struct usb_hcd *hcd,
69617 
69618 	wait_for_completion(cfg_cmd->completion);
69619 
69620+	err = xhci_vendor_sync_dev_ctx(xhci, udev->slot_id);
69621+	if (err)
69622+		xhci_warn(xhci, "%s: Failed to sync device context, err=%d",
69623+			  __func__, err);
69624+
69625 	xhci_free_command(xhci, cfg_cmd);
69626 cleanup:
69627 	xhci_free_command(xhci, stop_cmd);
69628@@ -3312,7 +3224,7 @@ static int xhci_check_streams_endpoint(struct xhci_hcd *xhci,
69629 		return -EINVAL;
69630 	ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, true, __func__);
69631 	if (ret <= 0)
69632-		return ret ? ret : -EINVAL;
69633+		return -EINVAL;
69634 	if (usb_ss_max_streams(&ep->ss_ep_comp) == 0) {
69635 		xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion"
69636 				" descriptor for ep 0x%x does not support streams\n",
69637@@ -3836,6 +3748,13 @@ static int xhci_discover_or_reset_device(struct usb_hcd *hcd,
69638 	/* Wait for the Reset Device command to finish */
69639 	wait_for_completion(reset_device_cmd->completion);
69640 
69641+	ret = xhci_vendor_sync_dev_ctx(xhci, slot_id);
69642+	if (ret) {
69643+		xhci_warn(xhci, "%s: Failed to sync device context, err=%d",
69644+			  __func__, ret);
69645+		goto command_cleanup;
69646+	}
69647+
69648 	/* The Reset Device command can't fail, according to the 0.95/0.96 spec,
69649 	 * unless we tried to reset a slot ID that wasn't enabled,
69650 	 * or the device wasn't in the addressed or configured state.
69651@@ -3921,7 +3840,6 @@ static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
69652 	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
69653 	struct xhci_virt_device *virt_dev;
69654 	struct xhci_slot_ctx *slot_ctx;
69655-	unsigned long flags;
69656 	int i, ret;
69657 
69658 	/*
69659@@ -3950,11 +3868,7 @@ static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
69660 	}
69661 	virt_dev->udev = NULL;
69662 	xhci_disable_slot(xhci, udev->slot_id);
69663-
69664-	spin_lock_irqsave(&xhci->lock, flags);
69665 	xhci_free_virt_device(xhci, udev->slot_id);
69666-	spin_unlock_irqrestore(&xhci->lock, flags);
69667-
69668 }
69669 
69670 int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id)
69671@@ -4086,6 +4000,14 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
69672 		xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
69673 		goto disable_slot;
69674 	}
69675+
69676+	ret = xhci_vendor_sync_dev_ctx(xhci, slot_id);
69677+	if (ret) {
69678+		xhci_warn(xhci, "%s: Failed to sync device context, err=%d",
69679+			  __func__, ret);
69680+		goto disable_slot;
69681+	}
69682+
69683 	vdev = xhci->devs[slot_id];
69684 	slot_ctx = xhci_get_slot_ctx(xhci, vdev->out_ctx);
69685 	trace_xhci_alloc_dev(slot_ctx);
69686@@ -4216,6 +4138,13 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
69687 	/* ctrl tx can take up to 5 sec; XXX: need more time for xHC? */
69688 	wait_for_completion(command->completion);
69689 
69690+	ret = xhci_vendor_sync_dev_ctx(xhci, udev->slot_id);
69691+	if (ret) {
69692+		xhci_warn(xhci, "%s: Failed to sync device context, err=%d",
69693+			  __func__, ret);
69694+		goto out;
69695+	}
69696+
69697 	/* FIXME: From section 4.3.4: "Software shall be responsible for timing
69698 	 * the SetAddress() "recovery interval" required by USB and aborting the
69699 	 * command on a timeout.
69700@@ -4300,10 +4229,11 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
69701 	return ret;
69702 }
69703 
69704-static int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
69705+int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
69706 {
69707 	return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ADDRESS);
69708 }
69709+EXPORT_SYMBOL_GPL(xhci_address_device);
69710 
69711 static int xhci_enable_device(struct usb_hcd *hcd, struct usb_device *udev)
69712 {
69713@@ -4363,6 +4293,14 @@ static int __maybe_unused xhci_change_max_exit_latency(struct xhci_hcd *xhci,
69714 		return -ENOMEM;
69715 	}
69716 
69717+	ret = xhci_vendor_sync_dev_ctx(xhci, udev->slot_id);
69718+	if (ret) {
69719+		spin_unlock_irqrestore(&xhci->lock, flags);
69720+		xhci_warn(xhci, "%s: Failed to sync device context, err=%d",
69721+			  __func__, ret);
69722+		return ret;
69723+	}
69724+
69725 	xhci_slot_copy(xhci, command->in_ctx, virt_dev->out_ctx);
69726 	spin_unlock_irqrestore(&xhci->lock, flags);
69727 
69728@@ -4387,6 +4325,30 @@ static int __maybe_unused xhci_change_max_exit_latency(struct xhci_hcd *xhci,
69729 	return ret;
69730 }
69731 
69732+struct xhci_vendor_ops *xhci_vendor_get_ops(struct xhci_hcd *xhci)
69733+{
69734+	return xhci->vendor_ops;
69735+}
69736+EXPORT_SYMBOL_GPL(xhci_vendor_get_ops);
69737+
69738+int xhci_vendor_sync_dev_ctx(struct xhci_hcd *xhci, unsigned int slot_id)
69739+{
69740+	struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci);
69741+
69742+	if (ops && ops->sync_dev_ctx)
69743+		return ops->sync_dev_ctx(xhci, slot_id);
69744+	return 0;
69745+}
69746+
69747+bool xhci_vendor_usb_offload_skip_urb(struct xhci_hcd *xhci, struct urb *urb)
69748+{
69749+	struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci);
69750+
69751+	if (ops && ops->usb_offload_skip_urb)
69752+		return ops->usb_offload_skip_urb(xhci, urb);
69753+	return false;
69754+}
69755+
69756 #ifdef CONFIG_PM
69757 
69758 /* BESL to HIRD Encoding array for USB2 LPM */
69759@@ -5011,7 +4973,6 @@ static int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd,
69760 			struct usb_device *udev, enum usb3_link_state state)
69761 {
69762 	struct xhci_hcd	*xhci;
69763-	struct xhci_port *port;
69764 	u16 hub_encoded_timeout;
69765 	int mel;
69766 	int ret;
69767@@ -5025,13 +4986,6 @@ static int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd,
69768 			!xhci->devs[udev->slot_id])
69769 		return USB3_LPM_DISABLED;
69770 
69771-	/* If connected to root port then check port can handle lpm */
69772-	if (udev->parent && !udev->parent->parent) {
69773-		port = xhci->usb3_rhub.ports[udev->portnum - 1];
69774-		if (port->lpm_incapable)
69775-			return USB3_LPM_DISABLED;
69776-	}
69777-
69778 	hub_encoded_timeout = xhci_calculate_lpm_timeout(hcd, udev, state);
69779 	mel = calculate_max_exit_latency(udev, state, hub_encoded_timeout);
69780 	if (mel < 0) {
69781@@ -5091,7 +5045,7 @@ static int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd,
69782 /* Once a hub descriptor is fetched for a device, we need to update the xHC's
69783  * internal data structures for the device.
69784  */
69785-int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
69786+static int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
69787 			struct usb_tt *tt, gfp_t mem_flags)
69788 {
69789 	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
69790@@ -5134,6 +5088,15 @@ int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
69791 		return -ENOMEM;
69792 	}
69793 
69794+	ret = xhci_vendor_sync_dev_ctx(xhci, hdev->slot_id);
69795+	if (ret) {
69796+		xhci_warn(xhci, "%s: Failed to sync device context, err=%d",
69797+			  __func__, ret);
69798+		xhci_free_command(xhci, config_cmd);
69799+		spin_unlock_irqrestore(&xhci->lock, flags);
69800+		return ret;
69801+	}
69802+
69803 	xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx);
69804 	ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
69805 	slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx);
69806@@ -5191,7 +5154,6 @@ int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
69807 	xhci_free_command(xhci, config_cmd);
69808 	return ret;
69809 }
69810-EXPORT_SYMBOL_GPL(xhci_update_hub_device);
69811 
69812 static int xhci_get_frame(struct usb_hcd *hcd)
69813 {
69814@@ -5310,7 +5272,7 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
69815 
69816 	xhci_dbg(xhci, "Resetting HCD\n");
69817 	/* Reset the internal HC memory state and registers. */
69818-	retval = xhci_reset(xhci, XHCI_RESET_LONG_USEC);
69819+	retval = xhci_reset(xhci);
69820 	if (retval)
69821 		return retval;
69822 	xhci_dbg(xhci, "Reset complete\n");
69823@@ -5458,12 +5420,20 @@ void xhci_init_driver(struct hc_driver *drv,
69824 			drv->reset = over->reset;
69825 		if (over->start)
69826 			drv->start = over->start;
69827+		if (over->add_endpoint)
69828+			drv->add_endpoint = over->add_endpoint;
69829+		if (over->drop_endpoint)
69830+			drv->drop_endpoint = over->drop_endpoint;
69831 		if (over->check_bandwidth)
69832 			drv->check_bandwidth = over->check_bandwidth;
69833 		if (over->reset_bandwidth)
69834 			drv->reset_bandwidth = over->reset_bandwidth;
69835-		if (over->update_hub_device)
69836-			drv->update_hub_device = over->update_hub_device;
69837+		if (over->address_device)
69838+			drv->address_device = over->address_device;
69839+		if (over->bus_suspend)
69840+			drv->bus_suspend = over->bus_suspend;
69841+		if (over->bus_resume)
69842+			drv->bus_resume = over->bus_resume;
69843 	}
69844 }
69845 EXPORT_SYMBOL_GPL(xhci_init_driver);
69846diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
69847index c7749f6e3..b4462aafb 100644
69848--- a/drivers/usb/host/xhci.h
69849+++ b/drivers/usb/host/xhci.h
69850@@ -229,9 +229,6 @@ struct xhci_op_regs {
69851 #define CMD_ETE		(1 << 14)
69852 /* bits 15:31 are reserved (and should be preserved on writes). */
69853 
69854-#define XHCI_RESET_LONG_USEC		(10 * 1000 * 1000)
69855-#define XHCI_RESET_SHORT_USEC		(250 * 1000)
69856-
69857 /* IMAN - Interrupt Management Register */
69858 #define IMAN_IE		(1 << 1)
69859 #define IMAN_IP		(1 << 0)
69860@@ -933,7 +930,6 @@ struct xhci_virt_ep {
69861 	 * have to restore the device state to the previous state
69862 	 */
69863 	struct xhci_ring		*new_ring;
69864-	unsigned int			err_count;
69865 	unsigned int			ep_state;
69866 #define SET_DEQ_PENDING		(1 << 0)
69867 #define EP_HALTED		(1 << 1)	/* For stall handling */
69868@@ -1422,7 +1418,7 @@ union xhci_trb {
69869 /* MFINDEX Wrap Event - microframe counter wrapped */
69870 #define TRB_MFINDEX_WRAP	39
69871 /* TRB IDs 40-47 reserved, 48-63 is vendor-defined */
69872-
69873+#define TRB_VENDOR_DEFINED_LOW	48
69874 /* Nec vendor-specific command completion event. */
69875 #define	TRB_NEC_CMD_COMP	48
69876 /* Get NEC firmware revision. */
69877@@ -1542,17 +1538,27 @@ struct xhci_segment {
69878 	unsigned int		bounce_len;
69879 };
69880 
69881+enum xhci_cancelled_td_status {
69882+	TD_DIRTY = 0,
69883+	TD_HALTED,
69884+	TD_CLEARING_CACHE,
69885+	TD_CLEARED,
69886+};
69887+
69888 struct xhci_td {
69889 	struct list_head	td_list;
69890 	struct list_head	cancelled_td_list;
69891 	int			status;
69892+	enum xhci_cancelled_td_status	cancel_status;
69893 	struct urb		*urb;
69894 	struct xhci_segment	*start_seg;
69895 	union xhci_trb		*first_trb;
69896 	union xhci_trb		*last_trb;
69897+	struct xhci_segment	*last_trb_seg;
69898 	struct xhci_segment	*bounce_seg;
69899 	/* actual_length of the URB has already been set */
69900 	bool			urb_length_set;
69901+	unsigned int		num_trbs;
69902 };
69903 
69904 /* xHCI command default timeout value */
69905@@ -1564,13 +1570,6 @@ struct xhci_cd {
69906 	union xhci_trb		*cmd_trb;
69907 };
69908 
69909-struct xhci_dequeue_state {
69910-	struct xhci_segment *new_deq_seg;
69911-	union xhci_trb *new_deq_ptr;
69912-	int new_cycle_state;
69913-	unsigned int stream_id;
69914-};
69915-
69916 enum xhci_ring_type {
69917 	TYPE_CTRL = 0,
69918 	TYPE_ISOC,
69919@@ -1617,6 +1616,7 @@ struct xhci_ring {
69920 	 * if we own the TRB (if we are the consumer).  See section 4.9.1.
69921 	 */
69922 	u32			cycle_state;
69923+	unsigned int            err_count;
69924 	unsigned int		stream_id;
69925 	unsigned int		num_segs;
69926 	unsigned int		num_trbs_free;
69927@@ -1728,7 +1728,6 @@ struct xhci_port {
69928 	int			hcd_portnum;
69929 	struct xhci_hub		*rhub;
69930 	struct xhci_port_cap	*port_cap;
69931-	unsigned int		lpm_incapable:1;
69932 };
69933 
69934 struct xhci_hub {
69935@@ -1821,7 +1820,7 @@ struct xhci_hcd {
69936 
69937 	/* Host controller watchdog timer structures */
69938 	unsigned int		xhc_state;
69939-	unsigned long		run_graceperiod;
69940+
69941 	u32			command;
69942 	struct s3_save		s3;
69943 /* Host controller is dying - not responding to commands. "I'm not dead yet!"
69944@@ -1893,8 +1892,7 @@ struct xhci_hcd {
69945 #define XHCI_SG_TRB_CACHE_SIZE_QUIRK	BIT_ULL(39)
69946 #define XHCI_NO_SOFT_RETRY	BIT_ULL(40)
69947 #define XHCI_EP_CTX_BROKEN_DCS	BIT_ULL(42)
69948-#define XHCI_SUSPEND_RESUME_CLKS	BIT_ULL(43)
69949-#define XHCI_RESET_TO_DEFAULT	BIT_ULL(44)
69950+#define XHCI_U2_BROKEN_SUSPEND	BIT_ULL(43)
69951 
69952 	unsigned int		num_active_eps;
69953 	unsigned int		limit_active_eps;
69954@@ -1923,6 +1921,9 @@ struct xhci_hcd {
69955 	struct list_head	regset_list;
69956 
69957 	void			*dbc;
69958+
69959+	struct xhci_vendor_ops *vendor_ops;
69960+
69961 	/* platform-specific data -- must come last */
69962 	unsigned long		priv[] __aligned(sizeof(s64));
69963 };
69964@@ -1932,10 +1933,15 @@ struct xhci_driver_overrides {
69965 	size_t extra_priv_size;
69966 	int (*reset)(struct usb_hcd *hcd);
69967 	int (*start)(struct usb_hcd *hcd);
69968+	int (*add_endpoint)(struct usb_hcd *hcd, struct usb_device *udev,
69969+			    struct usb_host_endpoint *ep);
69970+	int (*drop_endpoint)(struct usb_hcd *hcd, struct usb_device *udev,
69971+			     struct usb_host_endpoint *ep);
69972 	int (*check_bandwidth)(struct usb_hcd *, struct usb_device *);
69973 	void (*reset_bandwidth)(struct usb_hcd *, struct usb_device *);
69974-	int (*update_hub_device)(struct usb_hcd *hcd, struct usb_device *hdev,
69975-			    struct usb_tt *tt, gfp_t mem_flags);
69976+	int (*address_device)(struct usb_hcd *hcd, struct usb_device *udev);
69977+	int (*bus_suspend)(struct usb_hcd *hcd);
69978+	int (*bus_resume)(struct usb_hcd *hcd);
69979 };
69980 
69981 #define	XHCI_CFC_DELAY		10
69982@@ -2062,10 +2068,6 @@ void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci,
69983 struct xhci_ring *xhci_dma_to_transfer_ring(
69984 		struct xhci_virt_ep *ep,
69985 		u64 address);
69986-struct xhci_ring *xhci_stream_id_to_ring(
69987-		struct xhci_virt_device *dev,
69988-		unsigned int ep_index,
69989-		unsigned int stream_id);
69990 struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
69991 		bool allocate_completion, gfp_t mem_flags);
69992 struct xhci_command *xhci_alloc_command_with_ctx(struct xhci_hcd *xhci,
69993@@ -2080,20 +2082,23 @@ void xhci_free_container_ctx(struct xhci_hcd *xhci,
69994 
69995 /* xHCI host controller glue */
69996 typedef void (*xhci_get_quirks_t)(struct device *, struct xhci_hcd *);
69997-int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, u64 timeout_us);
69998+int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, int usec);
69999 void xhci_quiesce(struct xhci_hcd *xhci);
70000 int xhci_halt(struct xhci_hcd *xhci);
70001 int xhci_start(struct xhci_hcd *xhci);
70002-int xhci_reset(struct xhci_hcd *xhci, u64 timeout_us);
70003+int xhci_reset(struct xhci_hcd *xhci);
70004 int xhci_run(struct usb_hcd *hcd);
70005 int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks);
70006 void xhci_shutdown(struct usb_hcd *hcd);
70007 void xhci_init_driver(struct hc_driver *drv,
70008 		      const struct xhci_driver_overrides *over);
70009+int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
70010+		      struct usb_host_endpoint *ep);
70011+int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
70012+		       struct usb_host_endpoint *ep);
70013 int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
70014 void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
70015-int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
70016-			   struct usb_tt *tt, gfp_t mem_flags);
70017+int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev);
70018 int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id);
70019 int xhci_ext_cap_init(struct xhci_hcd *xhci);
70020 
70021@@ -2141,13 +2146,6 @@ int xhci_queue_reset_ep(struct xhci_hcd *xhci, struct xhci_command *cmd,
70022 		enum xhci_ep_reset_type reset_type);
70023 int xhci_queue_reset_device(struct xhci_hcd *xhci, struct xhci_command *cmd,
70024 		u32 slot_id);
70025-void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
70026-		unsigned int slot_id, unsigned int ep_index,
70027-		unsigned int stream_id, struct xhci_td *cur_td,
70028-		struct xhci_dequeue_state *state);
70029-void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
70030-		unsigned int slot_id, unsigned int ep_index,
70031-		struct xhci_dequeue_state *deq_state);
70032 void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, unsigned int slot_id,
70033 			       unsigned int ep_index, unsigned int stream_id,
70034 			       struct xhci_td *td);
70035@@ -2208,6 +2206,53 @@ static inline struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci,
70036 					urb->stream_id);
70037 }
70038 
70039+/**
70040+ * struct xhci_vendor_ops - function callbacks for vendor specific operations
70041+ * @vendor_init: called for vendor init process
70042+ * @vendor_cleanup: called for vendor cleanup process
70043+ * @is_usb_offload_enabled: called to check if usb offload is enabled
70044+ * @queue_irq_work: called to queue vendor specific irq work
70045+ * @alloc_dcbaa: called when allocating vendor specific dcbaa
70046+ * @free_dcbaa: called to free vendor specific dcbaa
70047+ * @alloc_transfer_ring: called when remote transfer ring allocation is required
70048+ * @free_transfer_ring: called to free vendor specific transfer ring
70049+ * @sync_dev_ctx: called when synchronization for device context is required
70050+ * @alloc_container_ctx: called when allocating vendor specific container context
70051+ * @free_container_ctx: called to free vendor specific container context
70052+ */
70053+struct xhci_vendor_ops {
70054+	int (*vendor_init)(struct xhci_hcd *xhci);
70055+	void (*vendor_cleanup)(struct xhci_hcd *xhci);
70056+	bool (*is_usb_offload_enabled)(struct xhci_hcd *xhci,
70057+				       struct xhci_virt_device *vdev,
70058+				       unsigned int ep_index);
70059+	irqreturn_t (*queue_irq_work)(struct xhci_hcd *xhci);
70060+
70061+	struct xhci_device_context_array *(*alloc_dcbaa)(struct xhci_hcd *xhci,
70062+							 gfp_t flags);
70063+	void (*free_dcbaa)(struct xhci_hcd *xhci);
70064+
70065+	struct xhci_ring *(*alloc_transfer_ring)(struct xhci_hcd *xhci,
70066+			u32 endpoint_type, enum xhci_ring_type ring_type,
70067+			unsigned int max_packet, gfp_t mem_flags);
70068+	void (*free_transfer_ring)(struct xhci_hcd *xhci,
70069+			struct xhci_virt_device *virt_dev, unsigned int ep_index);
70070+	int (*sync_dev_ctx)(struct xhci_hcd *xhci, unsigned int slot_id);
70071+	bool (*usb_offload_skip_urb)(struct xhci_hcd *xhci, struct urb *urb);
70072+	void (*alloc_container_ctx)(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx,
70073+				    int type, gfp_t flags);
70074+	void (*free_container_ctx)(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx);
70075+};
70076+
70077+struct xhci_vendor_ops *xhci_vendor_get_ops(struct xhci_hcd *xhci);
70078+
70079+int xhci_vendor_sync_dev_ctx(struct xhci_hcd *xhci, unsigned int slot_id);
70080+bool xhci_vendor_usb_offload_skip_urb(struct xhci_hcd *xhci, struct urb *urb);
70081+void xhci_vendor_free_transfer_ring(struct xhci_hcd *xhci,
70082+		struct xhci_virt_device *virt_dev, unsigned int ep_index);
70083+bool xhci_vendor_is_usb_offload_enabled(struct xhci_hcd *xhci,
70084+		struct xhci_virt_device *virt_dev, unsigned int ep_index);
70085+
70086 /*
70087  * TODO: As per spec Isochronous IDT transmissions are supported. We bypass
70088  * them anyways as we where unable to find a device that matches the
70089@@ -2392,7 +2437,7 @@ static inline const char *xhci_decode_trb(char *str, size_t size,
70090 			field3 & TRB_CYCLE ? 'C' : 'c');
70091 		break;
70092 	case TRB_STOP_RING:
70093-		snprintf(str, size,
70094+		sprintf(str,
70095 			"%s: slot %d sp %d ep %d flags %c",
70096 			xhci_trb_type_string(type),
70097 			TRB_TO_SLOT_ID(field3),
70098@@ -2469,8 +2514,6 @@ static inline const char *xhci_decode_ctrl_ctx(char *str,
70099 	unsigned int	bit;
70100 	int		ret = 0;
70101 
70102-	str[0] = '\0';
70103-
70104 	if (drop) {
70105 		ret = sprintf(str, "Drop:");
70106 		for_each_set_bit(bit, &drop, 32)
70107@@ -2628,11 +2671,8 @@ static inline const char *xhci_decode_usbsts(char *str, u32 usbsts)
70108 {
70109 	int ret = 0;
70110 
70111-	ret = sprintf(str, " 0x%08x", usbsts);
70112-
70113 	if (usbsts == ~(u32)0)
70114-		return str;
70115-
70116+		return " 0xffffffff";
70117 	if (usbsts & STS_HALT)
70118 		ret += sprintf(str + ret, " HCHalted");
70119 	if (usbsts & STS_FATAL)
70120diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
70121index 9ee0fa775..57e54064f 100644
70122--- a/drivers/usb/serial/cp210x.c
70123+++ b/drivers/usb/serial/cp210x.c
70124@@ -245,7 +245,6 @@ static const struct usb_device_id id_table[] = {
70125 	{ USB_DEVICE(0x1FB9, 0x0602) }, /* Lake Shore Model 648 Magnet Power Supply */
70126 	{ USB_DEVICE(0x1FB9, 0x0700) }, /* Lake Shore Model 737 VSM Controller */
70127 	{ USB_DEVICE(0x1FB9, 0x0701) }, /* Lake Shore Model 776 Hall Matrix */
70128-	{ USB_DEVICE(0x2184, 0x0030) }, /* GW Instek GDM-834x Digital Multimeter */
70129 	{ USB_DEVICE(0x2626, 0xEA60) }, /* Aruba Networks 7xxx USB Serial Console */
70130 	{ USB_DEVICE(0x3195, 0xF190) }, /* Link Instruments MSO-19 */
70131 	{ USB_DEVICE(0x3195, 0xF280) }, /* Link Instruments MSO-28 */
70132@@ -269,7 +268,6 @@ struct cp210x_serial_private {
70133 	speed_t			min_speed;
70134 	speed_t			max_speed;
70135 	bool			use_actual_rate;
70136-	bool			no_event_mode;
70137 };
70138 
70139 enum cp210x_event_state {
70140@@ -1341,16 +1339,12 @@ static void cp210x_change_speed(struct tty_struct *tty,
70141 
70142 static void cp210x_enable_event_mode(struct usb_serial_port *port)
70143 {
70144-	struct cp210x_serial_private *priv = usb_get_serial_data(port->serial);
70145 	struct cp210x_port_private *port_priv = usb_get_serial_port_data(port);
70146 	int ret;
70147 
70148 	if (port_priv->event_mode)
70149 		return;
70150 
70151-	if (priv->no_event_mode)
70152-		return;
70153-
70154 	port_priv->event_state = ES_DATA;
70155 	port_priv->event_mode = true;
70156 
70157@@ -1758,8 +1752,6 @@ static int cp2105_gpioconf_init(struct usb_serial *serial)
70158 
70159 	/*  2 banks of GPIO - One for the pins taken from each serial port */
70160 	if (intf_num == 0) {
70161-		priv->gc.ngpio = 2;
70162-
70163 		if (mode.eci == CP210X_PIN_MODE_MODEM) {
70164 			/* mark all GPIOs of this interface as reserved */
70165 			priv->gpio_altfunc = 0xff;
70166@@ -1770,9 +1762,8 @@ static int cp2105_gpioconf_init(struct usb_serial *serial)
70167 		priv->gpio_pushpull = (u8)((le16_to_cpu(config.gpio_mode) &
70168 						CP210X_ECI_GPIO_MODE_MASK) >>
70169 						CP210X_ECI_GPIO_MODE_OFFSET);
70170+		priv->gc.ngpio = 2;
70171 	} else if (intf_num == 1) {
70172-		priv->gc.ngpio = 3;
70173-
70174 		if (mode.sci == CP210X_PIN_MODE_MODEM) {
70175 			/* mark all GPIOs of this interface as reserved */
70176 			priv->gpio_altfunc = 0xff;
70177@@ -1783,6 +1774,7 @@ static int cp2105_gpioconf_init(struct usb_serial *serial)
70178 		priv->gpio_pushpull = (u8)((le16_to_cpu(config.gpio_mode) &
70179 						CP210X_SCI_GPIO_MODE_MASK) >>
70180 						CP210X_SCI_GPIO_MODE_OFFSET);
70181+		priv->gc.ngpio = 3;
70182 	} else {
70183 		return -ENODEV;
70184 	}
70185@@ -2102,46 +2094,6 @@ static void cp210x_init_max_speed(struct usb_serial *serial)
70186 	priv->use_actual_rate = use_actual_rate;
70187 }
70188 
70189-static void cp2102_determine_quirks(struct usb_serial *serial)
70190-{
70191-	struct cp210x_serial_private *priv = usb_get_serial_data(serial);
70192-	u8 *buf;
70193-	int ret;
70194-
70195-	buf = kmalloc(2, GFP_KERNEL);
70196-	if (!buf)
70197-		return;
70198-	/*
70199-	 * Some (possibly counterfeit) CP2102 do not support event-insertion
70200-	 * mode and respond differently to malformed vendor requests.
70201-	 * Specifically, they return one instead of two bytes when sent a
70202-	 * two-byte part-number request.
70203-	 */
70204-	ret = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
70205-			CP210X_VENDOR_SPECIFIC, REQTYPE_DEVICE_TO_HOST,
70206-			CP210X_GET_PARTNUM, 0, buf, 2, USB_CTRL_GET_TIMEOUT);
70207-	if (ret == 1) {
70208-		dev_dbg(&serial->interface->dev,
70209-				"device does not support event-insertion mode\n");
70210-		priv->no_event_mode = true;
70211-	}
70212-
70213-	kfree(buf);
70214-}
70215-
70216-static void cp210x_determine_quirks(struct usb_serial *serial)
70217-{
70218-	struct cp210x_serial_private *priv = usb_get_serial_data(serial);
70219-
70220-	switch (priv->partnum) {
70221-	case CP210X_PARTNUM_CP2102:
70222-		cp2102_determine_quirks(serial);
70223-		break;
70224-	default:
70225-		break;
70226-	}
70227-}
70228-
70229 static int cp210x_attach(struct usb_serial *serial)
70230 {
70231 	int result;
70232@@ -2162,7 +2114,6 @@ static int cp210x_attach(struct usb_serial *serial)
70233 
70234 	usb_set_serial_data(serial, priv);
70235 
70236-	cp210x_determine_quirks(serial);
70237 	cp210x_init_max_speed(serial);
70238 
70239 	result = cp210x_gpio_init(serial);
70240diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
70241index 14a7af7f3..21b1488fe 100644
70242--- a/drivers/usb/serial/option.c
70243+++ b/drivers/usb/serial/option.c
70244@@ -162,8 +162,6 @@ static void option_instat_callback(struct urb *urb);
70245 #define NOVATELWIRELESS_PRODUCT_G2		0xA010
70246 #define NOVATELWIRELESS_PRODUCT_MC551		0xB001
70247 
70248-#define UBLOX_VENDOR_ID				0x1546
70249-
70250 /* AMOI PRODUCTS */
70251 #define AMOI_VENDOR_ID				0x1614
70252 #define AMOI_PRODUCT_H01			0x0800
70253@@ -200,8 +198,6 @@ static void option_instat_callback(struct urb *urb);
70254 
70255 #define DELL_PRODUCT_5821E			0x81d7
70256 #define DELL_PRODUCT_5821E_ESIM			0x81e0
70257-#define DELL_PRODUCT_5829E_ESIM			0x81e4
70258-#define DELL_PRODUCT_5829E			0x81e6
70259 
70260 #define KYOCERA_VENDOR_ID			0x0c88
70261 #define KYOCERA_PRODUCT_KPC650			0x17da
70262@@ -242,6 +238,7 @@ static void option_instat_callback(struct urb *urb);
70263 #define QUECTEL_PRODUCT_UC15			0x9090
70264 /* These u-blox products use Qualcomm's vendor ID */
70265 #define UBLOX_PRODUCT_R410M			0x90b2
70266+#define UBLOX_PRODUCT_R6XX			0x90fa
70267 /* These Yuga products use Qualcomm's vendor ID */
70268 #define YUGA_PRODUCT_CLM920_NC5			0x9625
70269 
70270@@ -253,21 +250,10 @@ static void option_instat_callback(struct urb *urb);
70271 #define QUECTEL_PRODUCT_EG95			0x0195
70272 #define QUECTEL_PRODUCT_BG96			0x0296
70273 #define QUECTEL_PRODUCT_EP06			0x0306
70274-#define QUECTEL_PRODUCT_EM05G			0x030a
70275-#define QUECTEL_PRODUCT_EM060K			0x030b
70276-#define QUECTEL_PRODUCT_EM05G_CS		0x030c
70277-#define QUECTEL_PRODUCT_EM05CN_SG		0x0310
70278-#define QUECTEL_PRODUCT_EM05G_SG		0x0311
70279-#define QUECTEL_PRODUCT_EM05CN			0x0312
70280-#define QUECTEL_PRODUCT_EM05G_GR		0x0313
70281-#define QUECTEL_PRODUCT_EM05G_RS		0x0314
70282 #define QUECTEL_PRODUCT_EM12			0x0512
70283 #define QUECTEL_PRODUCT_RM500Q			0x0800
70284-#define QUECTEL_PRODUCT_RM520N			0x0801
70285-#define QUECTEL_PRODUCT_EC200U			0x0901
70286 #define QUECTEL_PRODUCT_EC200S_CN		0x6002
70287 #define QUECTEL_PRODUCT_EC200T			0x6026
70288-#define QUECTEL_PRODUCT_RM500K			0x7001
70289 
70290 #define CMOTECH_VENDOR_ID			0x16d8
70291 #define CMOTECH_PRODUCT_6001			0x6001
70292@@ -402,8 +388,6 @@ static void option_instat_callback(struct urb *urb);
70293 #define LONGCHEER_VENDOR_ID			0x1c9e
70294 
70295 /* 4G Systems products */
70296-/* This one was sold as the VW and Skoda "Carstick LTE" */
70297-#define FOUR_G_SYSTEMS_PRODUCT_CARSTICK_LTE	0x7605
70298 /* This is the 4G XS Stick W14 a.k.a. Mobilcom Debitel Surf-Stick *
70299  * It seems to contain a Qualcomm QSC6240/6290 chipset            */
70300 #define FOUR_G_SYSTEMS_PRODUCT_W14		0x9603
70301@@ -446,12 +430,6 @@ static void option_instat_callback(struct urb *urb);
70302 #define CINTERION_PRODUCT_CLS8			0x00b0
70303 #define CINTERION_PRODUCT_MV31_MBIM		0x00b3
70304 #define CINTERION_PRODUCT_MV31_RMNET		0x00b7
70305-#define CINTERION_PRODUCT_MV31_2_MBIM		0x00b8
70306-#define CINTERION_PRODUCT_MV31_2_RMNET		0x00b9
70307-#define CINTERION_PRODUCT_MV32_WA		0x00f1
70308-#define CINTERION_PRODUCT_MV32_WB		0x00f2
70309-#define CINTERION_PRODUCT_MV32_WA_RMNET		0x00f3
70310-#define CINTERION_PRODUCT_MV32_WB_RMNET		0x00f4
70311 
70312 /* Olivetti products */
70313 #define OLIVETTI_VENDOR_ID			0x0b3c
70314@@ -587,13 +565,6 @@ static void option_instat_callback(struct urb *urb);
70315 #define WETELECOM_PRODUCT_6802			0x6802
70316 #define WETELECOM_PRODUCT_WMD300		0x6803
70317 
70318-/* OPPO products */
70319-#define OPPO_VENDOR_ID				0x22d9
70320-#define OPPO_PRODUCT_R11			0x276c
70321-
70322-/* Sierra Wireless products */
70323-#define SIERRA_VENDOR_ID			0x1199
70324-#define SIERRA_PRODUCT_EM9191			0x90d3
70325 
70326 /* Device flags */
70327 
70328@@ -1092,10 +1063,6 @@ static const struct usb_device_id option_ids[] = {
70329 	  .driver_info = RSVD(0) | RSVD(1) | RSVD(6) },
70330 	{ USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5821E_ESIM),
70331 	  .driver_info = RSVD(0) | RSVD(1) | RSVD(6) },
70332-	{ USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5829E),
70333-	  .driver_info = RSVD(0) | RSVD(6) },
70334-	{ USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5829E_ESIM),
70335-	  .driver_info = RSVD(0) | RSVD(6) },
70336 	{ USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_E100A) },	/* ADU-E100, ADU-310 */
70337 	{ USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) },
70338 	{ USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_620UW) },
70339@@ -1137,16 +1104,8 @@ static const struct usb_device_id option_ids[] = {
70340 	/* u-blox products using Qualcomm vendor ID */
70341 	{ USB_DEVICE(QUALCOMM_VENDOR_ID, UBLOX_PRODUCT_R410M),
70342 	  .driver_info = RSVD(1) | RSVD(3) },
70343-	{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x908b),	/* u-blox LARA-R6 00B */
70344-	  .driver_info = RSVD(4) },
70345-	{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x90fa),
70346+	{ USB_DEVICE(QUALCOMM_VENDOR_ID, UBLOX_PRODUCT_R6XX),
70347 	  .driver_info = RSVD(3) },
70348-	/* u-blox products */
70349-	{ USB_DEVICE(UBLOX_VENDOR_ID, 0x1341) },	/* u-blox LARA-L6 */
70350-	{ USB_DEVICE(UBLOX_VENDOR_ID, 0x1342),		/* u-blox LARA-L6 (RMNET) */
70351-	  .driver_info = RSVD(4) },
70352-	{ USB_DEVICE(UBLOX_VENDOR_ID, 0x1343),		/* u-blox LARA-L6 (ECM) */
70353-	  .driver_info = RSVD(4) },
70354 	/* Quectel products using Quectel vendor ID */
70355 	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC21, 0xff, 0xff, 0xff),
70356 	  .driver_info = NUMEP2 },
70357@@ -1160,48 +1119,22 @@ static const struct usb_device_id option_ids[] = {
70358 	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG95, 0xff, 0xff, 0xff),
70359 	  .driver_info = NUMEP2 },
70360 	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG95, 0xff, 0, 0) },
70361-	{ USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, 0x0203, 0xff), /* BG95-M3 */
70362-	  .driver_info = ZLP },
70363 	{ USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96),
70364 	  .driver_info = RSVD(4) },
70365 	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0xff, 0xff),
70366 	  .driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 },
70367 	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0, 0) },
70368-	{ USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM05CN, 0xff),
70369-	  .driver_info = RSVD(6) | ZLP },
70370-	{ USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM05CN_SG, 0xff),
70371-	  .driver_info = RSVD(6) | ZLP },
70372-	{ USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM05G, 0xff),
70373-	  .driver_info = RSVD(6) | ZLP },
70374-	{ USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM05G_GR, 0xff),
70375-	  .driver_info = RSVD(6) | ZLP },
70376-	{ USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM05G_CS, 0xff),
70377-	  .driver_info = RSVD(6) | ZLP },
70378-	{ USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM05G_RS, 0xff),
70379-	  .driver_info = RSVD(6) | ZLP },
70380-	{ USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM05G_SG, 0xff),
70381-	  .driver_info = RSVD(6) | ZLP },
70382-	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K, 0xff, 0x00, 0x40) },
70383-	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K, 0xff, 0xff, 0x30) },
70384-	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K, 0xff, 0xff, 0x40) },
70385 	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0xff, 0xff),
70386 	  .driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 },
70387 	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0, 0) },
70388 	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, 0x0620, 0xff, 0xff, 0x30) },	/* EM160R-GL */
70389 	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, 0x0620, 0xff, 0, 0) },
70390-	{ USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, 0x0700, 0xff), /* BG95 */
70391-	  .driver_info = RSVD(3) | ZLP },
70392 	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0xff, 0x30) },
70393 	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0, 0) },
70394 	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0xff, 0x10),
70395 	  .driver_info = ZLP },
70396-	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM520N, 0xff, 0xff, 0x30) },
70397-	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM520N, 0xff, 0, 0x40) },
70398-	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM520N, 0xff, 0, 0) },
70399-	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200U, 0xff, 0, 0) },
70400 	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200S_CN, 0xff, 0, 0) },
70401 	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200T, 0xff, 0, 0) },
70402-	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500K, 0xff, 0x00, 0x00) },
70403 
70404 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
70405 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
70406@@ -1278,10 +1211,6 @@ static const struct usb_device_id option_ids[] = {
70407 	  .driver_info = NCTRL(0) | RSVD(1) },
70408 	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1056, 0xff),	/* Telit FD980 */
70409 	  .driver_info = NCTRL(2) | RSVD(3) },
70410-	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1057, 0xff),	/* Telit FN980 */
70411-	  .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
70412-	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1058, 0xff),	/* Telit FN980 (PCIe) */
70413-	  .driver_info = NCTRL(0) | RSVD(1) },
70414 	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1060, 0xff),	/* Telit LN920 (rmnet) */
70415 	  .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
70416 	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1061, 0xff),	/* Telit LN920 (MBIM) */
70417@@ -1298,8 +1227,6 @@ static const struct usb_device_id option_ids[] = {
70418 	  .driver_info = NCTRL(2) | RSVD(3) },
70419 	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1073, 0xff),	/* Telit FN990 (ECM) */
70420 	  .driver_info = NCTRL(0) | RSVD(1) },
70421-	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1075, 0xff),	/* Telit FN990 (PCIe) */
70422-	  .driver_info = RSVD(0) },
70423 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910),
70424 	  .driver_info = NCTRL(0) | RSVD(1) | RSVD(3) },
70425 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM),
70426@@ -1334,7 +1261,6 @@ static const struct usb_device_id option_ids[] = {
70427 	  .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
70428 	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1231, 0xff),	/* Telit LE910Cx (RNDIS) */
70429 	  .driver_info = NCTRL(2) | RSVD(3) },
70430-	{ USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x1250, 0xff, 0x00, 0x00) },	/* Telit LE910Cx (rmnet) */
70431 	{ USB_DEVICE(TELIT_VENDOR_ID, 0x1260),
70432 	  .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
70433 	{ USB_DEVICE(TELIT_VENDOR_ID, 0x1261),
70434@@ -1347,16 +1273,10 @@ static const struct usb_device_id option_ids[] = {
70435 	  .driver_info = NCTRL(2) },
70436 	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x7011, 0xff),	/* Telit LE910-S1 (ECM) */
70437 	  .driver_info = NCTRL(2) },
70438-	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x701a, 0xff),	/* Telit LE910R1 (RNDIS) */
70439-	  .driver_info = NCTRL(2) },
70440-	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x701b, 0xff),	/* Telit LE910R1 (ECM) */
70441-	  .driver_info = NCTRL(2) },
70442 	{ USB_DEVICE(TELIT_VENDOR_ID, 0x9010),				/* Telit SBL FN980 flashing device */
70443 	  .driver_info = NCTRL(0) | ZLP },
70444 	{ USB_DEVICE(TELIT_VENDOR_ID, 0x9200),				/* Telit LE910S1 flashing device */
70445 	  .driver_info = NCTRL(0) | ZLP },
70446-	{ USB_DEVICE(TELIT_VENDOR_ID, 0x9201),				/* Telit LE910R1 flashing device */
70447-	  .driver_info = NCTRL(0) | ZLP },
70448 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
70449 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff),
70450 	  .driver_info = RSVD(1) },
70451@@ -1729,8 +1649,6 @@ static const struct usb_device_id option_ids[] = {
70452 	  .driver_info = RSVD(2) },
70453 	{ USB_DEVICE_INTERFACE_CLASS(ZTE_VENDOR_ID, 0x1476, 0xff) },	/* GosunCn ZTE WeLink ME3630 (ECM/NCM mode) */
70454 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1481, 0xff, 0x00, 0x00) }, /* ZTE MF871A */
70455-	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1485, 0xff, 0xff, 0xff),  /* ZTE MF286D */
70456-	  .driver_info = RSVD(5) },
70457 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1533, 0xff, 0xff, 0xff) },
70458 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1534, 0xff, 0xff, 0xff) },
70459 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1535, 0xff, 0xff, 0xff) },
70460@@ -1978,8 +1896,6 @@ static const struct usb_device_id option_ids[] = {
70461 	  .driver_info = RSVD(2) },
70462 	{ USB_DEVICE(AIRPLUS_VENDOR_ID, AIRPLUS_PRODUCT_MCD650) },
70463 	{ USB_DEVICE(TLAYTECH_VENDOR_ID, TLAYTECH_PRODUCT_TEU800) },
70464-	{ USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_CARSTICK_LTE),
70465-	  .driver_info = RSVD(0) },
70466 	{ USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14),
70467 	  .driver_info = NCTRL(0) | NCTRL(1) },
70468 	{ USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W100),
70469@@ -2039,18 +1955,6 @@ static const struct usb_device_id option_ids[] = {
70470 	  .driver_info = RSVD(3)},
70471 	{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV31_RMNET, 0xff),
70472 	  .driver_info = RSVD(0)},
70473-	{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV31_2_MBIM, 0xff),
70474-	  .driver_info = RSVD(3)},
70475-	{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV31_2_RMNET, 0xff),
70476-	  .driver_info = RSVD(0)},
70477-	{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV32_WA, 0xff),
70478-	  .driver_info = RSVD(3)},
70479-	{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV32_WA_RMNET, 0xff),
70480-	  .driver_info = RSVD(0) },
70481-	{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV32_WB, 0xff),
70482-	  .driver_info = RSVD(3)},
70483-	{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV32_WB_RMNET, 0xff),
70484-	  .driver_info = RSVD(0) },
70485 	{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100),
70486 	  .driver_info = RSVD(4) },
70487 	{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD120),
70488@@ -2193,17 +2097,12 @@ static const struct usb_device_id option_ids[] = {
70489 	  .driver_info = RSVD(3) },
70490 	{ USB_DEVICE(0x1508, 0x1001),						/* Fibocom NL668 (IOT version) */
70491 	  .driver_info = RSVD(4) | RSVD(5) | RSVD(6) },
70492-	{ USB_DEVICE(0x1782, 0x4d10) },						/* Fibocom L610 (AT mode) */
70493-	{ USB_DEVICE_INTERFACE_CLASS(0x1782, 0x4d11, 0xff) },			/* Fibocom L610 (ECM/RNDIS mode) */
70494 	{ USB_DEVICE(0x2cb7, 0x0104),						/* Fibocom NL678 series */
70495 	  .driver_info = RSVD(4) | RSVD(5) },
70496 	{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0105, 0xff),			/* Fibocom NL678 series */
70497 	  .driver_info = RSVD(6) },
70498-	{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0106, 0xff) },			/* Fibocom MA510 (ECM mode w/ diag intf.) */
70499-	{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x010a, 0xff) },			/* Fibocom MA510 (ECM mode) */
70500 	{ USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0xff, 0x30) },	/* Fibocom FG150 Diag */
70501 	{ USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0, 0) },		/* Fibocom FG150 AT */
70502-	{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0111, 0xff) },			/* Fibocom FM160 (MBIM mode) */
70503 	{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a0, 0xff) },			/* Fibocom NL668-AM/NL652-EU (laptop MBIM) */
70504 	{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a2, 0xff) },			/* Fibocom FM101-GL (laptop MBIM) */
70505 	{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a4, 0xff),			/* Fibocom FM101-GL (laptop MBIM) */
70506@@ -2212,9 +2111,6 @@ static const struct usb_device_id option_ids[] = {
70507 	{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1404, 0xff) },			/* GosunCn GM500 RNDIS */
70508 	{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1405, 0xff) },			/* GosunCn GM500 MBIM */
70509 	{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1406, 0xff) },			/* GosunCn GM500 ECM/NCM */
70510-	{ USB_DEVICE_AND_INTERFACE_INFO(OPPO_VENDOR_ID, OPPO_PRODUCT_R11, 0xff, 0xff, 0x30) },
70511-	{ USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0xff, 0x30) },
70512-	{ USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0, 0) },
70513 	{ } /* Terminating entry */
70514 };
70515 MODULE_DEVICE_TABLE(usb, option_ids);
70516diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
70517index b1e844bf3..c81f70530 100644
70518--- a/drivers/usb/serial/qcserial.c
70519+++ b/drivers/usb/serial/qcserial.c
70520@@ -165,9 +165,6 @@ static const struct usb_device_id id_table[] = {
70521 	{DEVICE_SWI(0x1199, 0x907b)},	/* Sierra Wireless EM74xx */
70522 	{DEVICE_SWI(0x1199, 0x9090)},	/* Sierra Wireless EM7565 QDL */
70523 	{DEVICE_SWI(0x1199, 0x9091)},	/* Sierra Wireless EM7565 */
70524-	{DEVICE_SWI(0x1199, 0x90d2)},	/* Sierra Wireless EM9191 QDL */
70525-	{DEVICE_SWI(0x1199, 0xc080)},	/* Sierra Wireless EM7590 QDL */
70526-	{DEVICE_SWI(0x1199, 0xc081)},	/* Sierra Wireless EM7590 */
70527 	{DEVICE_SWI(0x413c, 0x81a2)},	/* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */
70528 	{DEVICE_SWI(0x413c, 0x81a3)},	/* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */
70529 	{DEVICE_SWI(0x413c, 0x81a4)},	/* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
70530diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
70531index e5a971b83..a345f2982 100644
70532--- a/drivers/usb/storage/scsiglue.c
70533+++ b/drivers/usb/storage/scsiglue.c
70534@@ -102,6 +102,10 @@ static int slave_configure(struct scsi_device *sdev)
70535 	if (us->fflags & (US_FL_MAX_SECTORS_64 | US_FL_MAX_SECTORS_MIN)) {
70536 		unsigned int max_sectors = 64;
70537 
70538+		if (le16_to_cpu(us->pusb_dev->descriptor.idVendor) == 0x05e3 &&
70539+		    le16_to_cpu(us->pusb_dev->descriptor.idProduct) == 0x0749)
70540+			max_sectors = 128;
70541+
70542 		if (us->fflags & US_FL_MAX_SECTORS_MIN)
70543 			max_sectors = PAGE_SIZE >> 9;
70544 		if (queue_max_hw_sectors(sdev->request_queue) > max_sectors)
70545diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
70546index 20dcbccb2..8d3848322 100644
70547--- a/drivers/usb/storage/unusual_devs.h
70548+++ b/drivers/usb/storage/unusual_devs.h
70549@@ -406,16 +406,6 @@ UNUSUAL_DEV(  0x04b8, 0x0602, 0x0110, 0x0110,
70550 		"785EPX Storage",
70551 		USB_SC_SCSI, USB_PR_BULK, NULL, US_FL_SINGLE_LUN),
70552 
70553-/*
70554- * Reported by James Buren <braewoods+lkml@braewoods.net>
70555- * Virtual ISOs cannot be remounted if ejected while the device is locked
70556- * Disable locking to mimic Windows behavior that bypasses the issue
70557- */
70558-UNUSUAL_DEV(  0x04c5, 0x2028, 0x0001, 0x0001,
70559-		"iODD",
70560-		"2531/2541",
70561-		USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_NOT_LOCKABLE),
70562-
70563 /*
70564  * Not sure who reported this originally but
70565  * Pavel Machek <pavel@ucw.cz> reported that the extra US_FL_SINGLE_LUN
70566@@ -426,16 +416,9 @@ UNUSUAL_DEV(  0x04cb, 0x0100, 0x0000, 0x2210,
70567 		USB_SC_UFI, USB_PR_DEVICE, NULL, US_FL_FIX_INQUIRY | US_FL_SINGLE_LUN),
70568 
70569 /*
70570- * Reported by Ondrej Zary <linux@zary.sk>
70571+ * Reported by Ondrej Zary <linux@rainbow-software.org>
70572  * The device reports one sector more and breaks when that sector is accessed
70573- * Firmwares older than 2.6c (the latest one and the only that claims Linux
70574- * support) have also broken tag handling
70575  */
70576-UNUSUAL_DEV(  0x04ce, 0x0002, 0x0000, 0x026b,
70577-		"ScanLogic",
70578-		"SL11R-IDE",
70579-		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
70580-		US_FL_FIX_CAPACITY | US_FL_BULK_IGNORE_TAG),
70581 UNUSUAL_DEV(  0x04ce, 0x0002, 0x026c, 0x026c,
70582 		"ScanLogic",
70583 		"SL11R-IDE",
70584@@ -927,6 +910,12 @@ UNUSUAL_DEV(  0x05e3, 0x0723, 0x9451, 0x9451,
70585 		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
70586 		US_FL_SANE_SENSE ),
70587 
70588+UNUSUAL_DEV(  0x05e3, 0x0749, 0x0000, 0xffff,
70589+		"Genesys Logic",
70590+		"USB Storage",
70591+		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
70592+		US_FL_GO_SLOW | US_FL_MAX_SECTORS_64 | US_FL_IGNORE_RESIDUE ),
70593+
70594 /*
70595  * Reported by Hanno Boeck <hanno@gmx.de>
70596  * Taken from the Lycoris Kernel
70597@@ -2302,16 +2291,6 @@ UNUSUAL_DEV(  0x2027, 0xa001, 0x0000, 0x9999,
70598 		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_euscsi_init,
70599 		US_FL_SCM_MULT_TARG ),
70600 
70601-/*
70602- * Reported by DocMAX <mail@vacharakis.de>
70603- * and Thomas Weißschuh <linux@weissschuh.net>
70604- */
70605-UNUSUAL_DEV( 0x2109, 0x0715, 0x9999, 0x9999,
70606-		"VIA Labs, Inc.",
70607-		"VL817 SATA Bridge",
70608-		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
70609-		US_FL_IGNORE_UAS),
70610-
70611 UNUSUAL_DEV( 0x2116, 0x0320, 0x0001, 0x0001,
70612 		"ST",
70613 		"2A",
70614diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
70615index c7b763d6d..34c6c33ae 100644
70616--- a/drivers/usb/storage/unusual_uas.h
70617+++ b/drivers/usb/storage/unusual_uas.h
70618@@ -50,7 +50,7 @@ UNUSUAL_DEV(0x059f, 0x1061, 0x0000, 0x9999,
70619 		"LaCie",
70620 		"Rugged USB3-FW",
70621 		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
70622-		US_FL_NO_REPORT_OPCODES | US_FL_NO_SAME),
70623+		US_FL_IGNORE_UAS),
70624 
70625 /* Reported-by: Hongling Zeng <zenghongling@kylinos.cn> */
70626 UNUSUAL_DEV(0x090c, 0x2000, 0x0000, 0x9999,
70627@@ -76,6 +76,12 @@ UNUSUAL_DEV(0x0b05, 0x1932, 0x0000, 0x9999,
70628 		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
70629 		US_FL_IGNORE_UAS),
70630 
70631+UNUSUAL_DEV(0x0bc2, 0x2321, 0x0000, 0x9999,
70632+		"Seagate",
70633+		"Expansion HDD",
70634+		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
70635+		US_FL_IGNORE_UAS),
70636+
70637 /* Reported-by: David Webb <djw@noc.ac.uk> */
70638 UNUSUAL_DEV(0x0bc2, 0x331a, 0x0000, 0x9999,
70639 		"Seagate",
70640@@ -111,6 +117,12 @@ UNUSUAL_DEV(0x152d, 0x0578, 0x0000, 0x9999,
70641 		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
70642 		US_FL_BROKEN_FUA),
70643 
70644+UNUSUAL_DEV(0x152d, 0x0583, 0x0000, 0x9999,
70645+		"JMicron",
70646+		"JMS583",
70647+		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
70648+		US_FL_BROKEN_FUA | US_FL_NO_REPORT_OPCODES),
70649+
70650 /* Reported-by: Thinh Nguyen <thinhn@synopsys.com> */
70651 UNUSUAL_DEV(0x154b, 0xf00b, 0x0000, 0x9999,
70652 		"PNY",
70653@@ -132,6 +144,12 @@ UNUSUAL_DEV(0x17ef, 0x3899, 0x0000, 0x9999,
70654 		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
70655 		US_FL_IGNORE_UAS),
70656 
70657+UNUSUAL_DEV(0x174c, 0x55aa, 0x0000, 0x9999,
70658+		"WINTOGO",
70659+		"CHIPFANCIER",
70660+		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
70661+		US_FL_BROKEN_FUA | US_FL_NO_REPORT_OPCODES),
70662+
70663 /* Reported-by: Hans de Goede <hdegoede@redhat.com> */
70664 UNUSUAL_DEV(0x2109, 0x0711, 0x0000, 0x9999,
70665 		"VIA",
70666diff --git a/drivers/usb/typec/altmodes/Kconfig b/drivers/usb/typec/altmodes/Kconfig
70667index 60d375e9c..1a6b5e872 100644
70668--- a/drivers/usb/typec/altmodes/Kconfig
70669+++ b/drivers/usb/typec/altmodes/Kconfig
70670@@ -4,6 +4,7 @@ menu "USB Type-C Alternate Mode drivers"
70671 
70672 config TYPEC_DP_ALTMODE
70673 	tristate "DisplayPort Alternate Mode driver"
70674+	depends on DRM
70675 	help
70676 	  DisplayPort USB Type-C Alternate Mode allows DisplayPort
70677 	  displays and adapters to be attached to the USB Type-C
70678diff --git a/drivers/usb/typec/altmodes/displayport.c b/drivers/usb/typec/altmodes/displayport.c
70679index e8eaca5a8..8769cab04 100644
70680--- a/drivers/usb/typec/altmodes/displayport.c
70681+++ b/drivers/usb/typec/altmodes/displayport.c
70682@@ -11,12 +11,14 @@
70683 #include <linux/delay.h>
70684 #include <linux/mutex.h>
70685 #include <linux/module.h>
70686+#include <linux/property.h>
70687 #include <linux/usb/pd_vdo.h>
70688 #include <linux/usb/typec_dp.h>
70689+#include <drm/drm_connector.h>
70690 #include "displayport.h"
70691 
70692-#define DP_HEADER(_dp, cmd)		(VDO((_dp)->alt->svid, 1, cmd) | \
70693-					 VDO_OPOS(USB_TYPEC_DP_MODE))
70694+#define DP_HEADER(_dp, ver, cmd)	(VDO((_dp)->alt->svid, 1, ver, cmd)	\
70695+					 | VDO_OPOS(USB_TYPEC_DP_MODE))
70696 
70697 enum {
70698 	DP_CONF_USB,
70699@@ -45,6 +47,17 @@ enum {
70700 					 BIT(DP_PIN_ASSIGN_D) | \
70701 					 BIT(DP_PIN_ASSIGN_F))
70702 
70703+/*
70704+ * A UFP_U that uses a USB Type-C plug describes the pin assignments supported
70705+ * for the corresponding receptacle (i.e., a UFP_D will describe the DFP_D pin
70706+ * assignments to which it connects), whereas a UFP_U that uses a USB Type-C
70707+ * receptacle describes its pin assignments directly (i.e., a UFP_D will
70708+ * describe its own UFP_D pin assignments).
70709+ */
70710+#define DP_CAP_PIN_ASSIGN(_cap_)	(((_cap_) & DP_CAP_RECEPTACLE) ? \
70711+					 DP_CAP_UFP_D_PIN_ASSIGN(_cap_) : \
70712+					 DP_CAP_DFP_D_PIN_ASSIGN(_cap_))
70713+
70714 enum dp_state {
70715 	DP_STATE_IDLE,
70716 	DP_STATE_ENTER,
70717@@ -57,11 +70,13 @@ struct dp_altmode {
70718 	struct typec_displayport_data data;
70719 
70720 	enum dp_state state;
70721+	bool hpd;
70722 
70723 	struct mutex lock; /* device lock */
70724 	struct work_struct work;
70725 	struct typec_altmode *alt;
70726 	const struct typec_altmode *port;
70727+	struct fwnode_handle *connector_fwnode;
70728 };
70729 
70730 static int dp_altmode_notify(struct dp_altmode *dp)
70731@@ -82,14 +97,10 @@ static int dp_altmode_configure(struct dp_altmode *dp, u8 con)
70732 		return 0;
70733 	case DP_STATUS_CON_DFP_D:
70734 		conf |= DP_CONF_UFP_U_AS_DFP_D;
70735-		pin_assign = DP_CAP_UFP_D_PIN_ASSIGN(dp->alt->vdo) &
70736-			     DP_CAP_DFP_D_PIN_ASSIGN(dp->port->vdo);
70737 		break;
70738 	case DP_STATUS_CON_UFP_D:
70739 	case DP_STATUS_CON_BOTH: /* NOTE: First acting as DP source */
70740 		conf |= DP_CONF_UFP_U_AS_UFP_D;
70741-		pin_assign = DP_CAP_PIN_ASSIGN_UFP_D(dp->alt->vdo) &
70742-				 DP_CAP_PIN_ASSIGN_DFP_D(dp->port->vdo);
70743 		break;
70744 	default:
70745 		break;
70746@@ -97,6 +108,8 @@ static int dp_altmode_configure(struct dp_altmode *dp, u8 con)
70747 
70748 	/* Determining the initial pin assignment. */
70749 	if (!DP_CONF_GET_PIN_ASSIGN(dp->data.conf)) {
70750+		pin_assign = DP_CAP_PIN_ASSIGN(dp->alt->vdo);
70751+
70752 		/* Is USB together with DP preferred */
70753 		if (dp->data.status & DP_STATUS_PREFER_MULTI_FUNC &&
70754 		    pin_assign & DP_PIN_ASSIGN_MULTI_FUNC_MASK)
70755@@ -104,6 +117,13 @@ static int dp_altmode_configure(struct dp_altmode *dp, u8 con)
70756 		else if (pin_assign & DP_PIN_ASSIGN_DP_ONLY_MASK)
70757 			pin_assign &= DP_PIN_ASSIGN_DP_ONLY_MASK;
70758 
70759+		/*
70760+		 * DFP_U never selects Pin Assignment E when Pin Assignment C
70761+		 * and possibly Pin Assignment D are offered by the UFP_U.
70762+		 */
70763+		if (pin_assign & (BIT(DP_PIN_ASSIGN_C) | BIT(DP_PIN_ASSIGN_D)))
70764+			pin_assign &= ~BIT(DP_PIN_ASSIGN_E);
70765+
70766 		if (!pin_assign)
70767 			return -EINVAL;
70768 
70769@@ -118,6 +138,7 @@ static int dp_altmode_configure(struct dp_altmode *dp, u8 con)
70770 static int dp_altmode_status_update(struct dp_altmode *dp)
70771 {
70772 	bool configured = !!DP_CONF_GET_PIN_ASSIGN(dp->data.conf);
70773+	bool hpd = !!(dp->data.status & DP_STATUS_HPD_STATE);
70774 	u8 con = DP_STATUS_CONNECTION(dp->data.status);
70775 	int ret = 0;
70776 
70777@@ -130,6 +151,11 @@ static int dp_altmode_status_update(struct dp_altmode *dp)
70778 		ret = dp_altmode_configure(dp, con);
70779 		if (!ret)
70780 			dp->state = DP_STATE_CONFIGURE;
70781+	} else {
70782+		if (dp->hpd != hpd) {
70783+			drm_connector_oob_hotplug_event(dp->connector_fwnode);
70784+			dp->hpd = hpd;
70785+		}
70786 	}
70787 
70788 	return ret;
70789@@ -156,9 +182,14 @@ static int dp_altmode_configured(struct dp_altmode *dp)
70790 
70791 static int dp_altmode_configure_vdm(struct dp_altmode *dp, u32 conf)
70792 {
70793-	u32 header = DP_HEADER(dp, DP_CMD_CONFIGURE);
70794+	int svdm_version = typec_altmode_get_svdm_version(dp->alt);
70795+	u32 header;
70796 	int ret;
70797 
70798+	if (svdm_version < 0)
70799+		return svdm_version;
70800+
70801+	header = DP_HEADER(dp, svdm_version, DP_CMD_CONFIGURE);
70802 	ret = typec_altmode_notify(dp->alt, TYPEC_STATE_SAFE, &dp->data);
70803 	if (ret) {
70804 		dev_err(&dp->alt->dev,
70805@@ -181,6 +212,7 @@ static int dp_altmode_configure_vdm(struct dp_altmode *dp, u32 conf)
70806 static void dp_altmode_work(struct work_struct *work)
70807 {
70808 	struct dp_altmode *dp = container_of(work, struct dp_altmode, work);
70809+	int svdm_version;
70810 	u32 header;
70811 	u32 vdo;
70812 	int ret;
70813@@ -194,7 +226,10 @@ static void dp_altmode_work(struct work_struct *work)
70814 			dev_err(&dp->alt->dev, "failed to enter mode\n");
70815 		break;
70816 	case DP_STATE_UPDATE:
70817-		header = DP_HEADER(dp, DP_CMD_STATUS_UPDATE);
70818+		svdm_version = typec_altmode_get_svdm_version(dp->alt);
70819+		if (svdm_version < 0)
70820+			break;
70821+		header = DP_HEADER(dp, svdm_version, DP_CMD_STATUS_UPDATE);
70822 		vdo = 1;
70823 		ret = typec_altmode_vdm(dp->alt, header, &vdo, 2);
70824 		if (ret)
70825@@ -409,18 +444,6 @@ static const char * const pin_assignments[] = {
70826 	[DP_PIN_ASSIGN_F] = "F",
70827 };
70828 
70829-/*
70830- * Helper function to extract a peripheral's currently supported
70831- * Pin Assignments from its DisplayPort alternate mode state.
70832- */
70833-static u8 get_current_pin_assignments(struct dp_altmode *dp)
70834-{
70835-	if (DP_CONF_CURRENTLY(dp->data.conf) == DP_CONF_DFP_D)
70836-		return DP_CAP_PIN_ASSIGN_DFP_D(dp->alt->vdo);
70837-	else
70838-		return DP_CAP_PIN_ASSIGN_UFP_D(dp->alt->vdo);
70839-}
70840-
70841 static ssize_t
70842 pin_assignment_store(struct device *dev, struct device_attribute *attr,
70843 		     const char *buf, size_t size)
70844@@ -447,7 +470,7 @@ pin_assignment_store(struct device *dev, struct device_attribute *attr,
70845 		goto out_unlock;
70846 	}
70847 
70848-	assignments = get_current_pin_assignments(dp);
70849+	assignments = DP_CAP_PIN_ASSIGN(dp->alt->vdo);
70850 
70851 	if (!(DP_CONF_GET_PIN_ASSIGN(conf) & assignments)) {
70852 		ret = -EINVAL;
70853@@ -484,7 +507,7 @@ static ssize_t pin_assignment_show(struct device *dev,
70854 
70855 	cur = get_count_order(DP_CONF_GET_PIN_ASSIGN(dp->data.conf));
70856 
70857-	assignments = get_current_pin_assignments(dp);
70858+	assignments = DP_CAP_PIN_ASSIGN(dp->alt->vdo);
70859 
70860 	for (i = 0; assignments; assignments >>= 1, i++) {
70861 		if (assignments & 1) {
70862@@ -518,16 +541,17 @@ static const struct attribute_group dp_altmode_group = {
70863 int dp_altmode_probe(struct typec_altmode *alt)
70864 {
70865 	const struct typec_altmode *port = typec_altmode_get_partner(alt);
70866+	struct fwnode_handle *fwnode;
70867 	struct dp_altmode *dp;
70868 	int ret;
70869 
70870 	/* FIXME: Port can only be DFP_U. */
70871 
70872 	/* Make sure we have compatible pin configurations */
70873-	if (!(DP_CAP_PIN_ASSIGN_DFP_D(port->vdo) &
70874-	      DP_CAP_PIN_ASSIGN_UFP_D(alt->vdo)) &&
70875-	    !(DP_CAP_PIN_ASSIGN_UFP_D(port->vdo) &
70876-	      DP_CAP_PIN_ASSIGN_DFP_D(alt->vdo)))
70877+	if (!(DP_CAP_DFP_D_PIN_ASSIGN(port->vdo) &
70878+	      DP_CAP_UFP_D_PIN_ASSIGN(alt->vdo)) &&
70879+	    !(DP_CAP_UFP_D_PIN_ASSIGN(port->vdo) &
70880+	      DP_CAP_DFP_D_PIN_ASSIGN(alt->vdo)))
70881 		return -ENODEV;
70882 
70883 	ret = sysfs_create_group(&alt->dev.kobj, &dp_altmode_group);
70884@@ -546,6 +570,11 @@ int dp_altmode_probe(struct typec_altmode *alt)
70885 	alt->desc = "DisplayPort";
70886 	alt->ops = &dp_altmode_ops;
70887 
70888+	fwnode = dev_fwnode(alt->dev.parent->parent); /* typec_port fwnode */
70889+	dp->connector_fwnode = fwnode_find_reference(fwnode, "displayport", 0);
70890+	if (IS_ERR(dp->connector_fwnode))
70891+		dp->connector_fwnode = NULL;
70892+
70893 	typec_altmode_set_drvdata(alt, dp);
70894 
70895 	dp->state = DP_STATE_ENTER;
70896@@ -561,6 +590,13 @@ void dp_altmode_remove(struct typec_altmode *alt)
70897 
70898 	sysfs_remove_group(&alt->dev.kobj, &dp_altmode_group);
70899 	cancel_work_sync(&dp->work);
70900+
70901+	if (dp->connector_fwnode) {
70902+		if (dp->hpd)
70903+			drm_connector_oob_hotplug_event(dp->connector_fwnode);
70904+
70905+		fwnode_handle_put(dp->connector_fwnode);
70906+	}
70907 }
70908 EXPORT_SYMBOL_GPL(dp_altmode_remove);
70909 
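
For context on the DP_HEADER() change above: SVDM headers now embed the negotiated Structured VDM version, so every sender first queries it and bails out while no partner is registered. A minimal sketch of that pattern (the helper name is hypothetical; the constants come from <linux/usb/typec_dp.h> and <linux/usb/pd_vdo.h>, mirroring dp_altmode_work()):

#include <linux/usb/pd_vdo.h>
#include <linux/usb/typec_altmode.h>
#include <linux/usb/typec_dp.h>

static int example_dp_status_update(struct typec_altmode *alt)
{
	int svdm_version = typec_altmode_get_svdm_version(alt);
	u32 header, vdo = 1;

	if (svdm_version < 0)
		return svdm_version;	/* partner gone or not yet registered */

	header = VDO(USB_TYPEC_DP_SID, 1, svdm_version, DP_CMD_STATUS_UPDATE) |
		 VDO_OPOS(USB_TYPEC_DP_MODE);

	/* count of 2 = header plus one data VDO, as in the patched driver */
	return typec_altmode_vdm(alt, header, &vdo, 2);
}
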
70910diff --git a/drivers/usb/typec/class.c b/drivers/usb/typec/class.c
70911index 9d3a35b20..cb32a838e 100644
70912--- a/drivers/usb/typec/class.c
70913+++ b/drivers/usb/typec/class.c
70914@@ -11,6 +11,8 @@
70915 #include <linux/mutex.h>
70916 #include <linux/property.h>
70917 #include <linux/slab.h>
70918+#include <linux/usb/pd_vdo.h>
70919+#include <linux/android_kabi.h>
70920 
70921 #include "bus.h"
70922 
70923@@ -18,6 +20,8 @@ struct typec_plug {
70924 	struct device			dev;
70925 	enum typec_plug_index		index;
70926 	struct ida			mode_ids;
70927+	int				num_altmodes;
70928+	ANDROID_KABI_RESERVE(1);
70929 };
70930 
70931 struct typec_cable {
70932@@ -25,6 +29,8 @@ struct typec_cable {
70933 	enum typec_plug_type		type;
70934 	struct usb_pd_identity		*identity;
70935 	unsigned int			active:1;
70936+	u16				pd_revision; /* 0300H = "3.0" */
70937+	ANDROID_KABI_RESERVE(1);
70938 };
70939 
70940 struct typec_partner {
70941@@ -33,6 +39,10 @@ struct typec_partner {
70942 	struct usb_pd_identity		*identity;
70943 	enum typec_accessory		accessory;
70944 	struct ida			mode_ids;
70945+	int				num_altmodes;
70946+	u16				pd_revision; /* 0300H = "3.0" */
70947+	enum usb_pd_svdm_ver		svdm_version;
70948+	ANDROID_KABI_RESERVE(1);
70949 };
70950 
70951 struct typec_port {
70952@@ -54,6 +64,7 @@ struct typec_port {
70953 
70954 	const struct typec_capability	*cap;
70955 	const struct typec_operations   *ops;
70956+	ANDROID_KABI_RESERVE(1);
70957 };
70958 
70959 #define to_typec_port(_dev_) container_of(_dev_, struct typec_port, dev)
70960@@ -81,6 +92,29 @@ static const char * const typec_accessory_modes[] = {
70961 	[TYPEC_ACCESSORY_DEBUG]		= "debug",
70962 };
70963 
70964+/* Product types defined in USB PD Specification R3.0 V2.0 */
70965+static const char * const product_type_ufp[8] = {
70966+	[IDH_PTYPE_NOT_UFP]		= "not_ufp",
70967+	[IDH_PTYPE_HUB]			= "hub",
70968+	[IDH_PTYPE_PERIPH]		= "peripheral",
70969+	[IDH_PTYPE_PSD]			= "psd",
70970+	[IDH_PTYPE_AMA]			= "ama",
70971+};
70972+
70973+static const char * const product_type_dfp[8] = {
70974+	[IDH_PTYPE_NOT_DFP]		= "not_dfp",
70975+	[IDH_PTYPE_DFP_HUB]		= "hub",
70976+	[IDH_PTYPE_DFP_HOST]		= "host",
70977+	[IDH_PTYPE_DFP_PB]		= "power_brick",
70978+};
70979+
70980+static const char * const product_type_cable[8] = {
70981+	[IDH_PTYPE_NOT_CABLE]		= "not_cable",
70982+	[IDH_PTYPE_PCABLE]		= "passive",
70983+	[IDH_PTYPE_ACABLE]		= "active",
70984+	[IDH_PTYPE_VPD]			= "vpd",
70985+};
70986+
70987 static struct usb_pd_identity *get_pd_identity(struct device *dev)
70988 {
70989 	if (is_typec_partner(dev)) {
70990@@ -95,6 +129,32 @@ static struct usb_pd_identity *get_pd_identity(struct device *dev)
70991 	return NULL;
70992 }
70993 
70994+static const char *get_pd_product_type(struct device *dev)
70995+{
70996+	struct typec_port *port = to_typec_port(dev->parent);
70997+	struct usb_pd_identity *id = get_pd_identity(dev);
70998+	const char *ptype = NULL;
70999+
71000+	if (is_typec_partner(dev)) {
71001+		if (!id)
71002+			return NULL;
71003+
71004+		if (port->data_role == TYPEC_HOST)
71005+			ptype = product_type_ufp[PD_IDH_PTYPE(id->id_header)];
71006+		else
71007+			ptype = product_type_dfp[PD_IDH_DFP_PTYPE(id->id_header)];
71008+	} else if (is_typec_cable(dev)) {
71009+		if (id)
71010+			ptype = product_type_cable[PD_IDH_PTYPE(id->id_header)];
71011+		else
71012+			ptype = to_typec_cable(dev)->active ?
71013+				product_type_cable[IDH_PTYPE_ACABLE] :
71014+				product_type_cable[IDH_PTYPE_PCABLE];
71015+	}
71016+
71017+	return ptype;
71018+}
71019+
71020 static ssize_t id_header_show(struct device *dev, struct device_attribute *attr,
71021 			      char *buf)
71022 {
71023@@ -122,10 +182,40 @@ static ssize_t product_show(struct device *dev, struct device_attribute *attr,
71024 }
71025 static DEVICE_ATTR_RO(product);
71026 
71027+static ssize_t product_type_vdo1_show(struct device *dev, struct device_attribute *attr,
71028+				      char *buf)
71029+{
71030+	struct usb_pd_identity *id = get_pd_identity(dev);
71031+
71032+	return sysfs_emit(buf, "0x%08x\n", id->vdo[0]);
71033+}
71034+static DEVICE_ATTR_RO(product_type_vdo1);
71035+
71036+static ssize_t product_type_vdo2_show(struct device *dev, struct device_attribute *attr,
71037+				      char *buf)
71038+{
71039+	struct usb_pd_identity *id = get_pd_identity(dev);
71040+
71041+	return sysfs_emit(buf, "0x%08x\n", id->vdo[1]);
71042+}
71043+static DEVICE_ATTR_RO(product_type_vdo2);
71044+
71045+static ssize_t product_type_vdo3_show(struct device *dev, struct device_attribute *attr,
71046+				      char *buf)
71047+{
71048+	struct usb_pd_identity *id = get_pd_identity(dev);
71049+
71050+	return sysfs_emit(buf, "0x%08x\n", id->vdo[2]);
71051+}
71052+static DEVICE_ATTR_RO(product_type_vdo3);
71053+
71054 static struct attribute *usb_pd_id_attrs[] = {
71055 	&dev_attr_id_header.attr,
71056 	&dev_attr_cert_stat.attr,
71057 	&dev_attr_product.attr,
71058+	&dev_attr_product_type_vdo1.attr,
71059+	&dev_attr_product_type_vdo2.attr,
71060+	&dev_attr_product_type_vdo3.attr,
71061 	NULL
71062 };
71063 
71064@@ -139,13 +229,54 @@ static const struct attribute_group *usb_pd_id_groups[] = {
71065 	NULL,
71066 };
71067 
71068+static void typec_product_type_notify(struct device *dev)
71069+{
71070+	char *envp[2] = { };
71071+	const char *ptype;
71072+
71073+	ptype = get_pd_product_type(dev);
71074+	if (!ptype)
71075+		return;
71076+
71077+	sysfs_notify(&dev->kobj, NULL, "type");
71078+
71079+	envp[0] = kasprintf(GFP_KERNEL, "PRODUCT_TYPE=%s", ptype);
71080+	if (!envp[0])
71081+		return;
71082+
71083+	kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, envp);
71084+	kfree(envp[0]);
71085+}
71086+
71087 static void typec_report_identity(struct device *dev)
71088 {
71089 	sysfs_notify(&dev->kobj, "identity", "id_header");
71090 	sysfs_notify(&dev->kobj, "identity", "cert_stat");
71091 	sysfs_notify(&dev->kobj, "identity", "product");
71092+	sysfs_notify(&dev->kobj, "identity", "product_type_vdo1");
71093+	sysfs_notify(&dev->kobj, "identity", "product_type_vdo2");
71094+	sysfs_notify(&dev->kobj, "identity", "product_type_vdo3");
71095+	typec_product_type_notify(dev);
71096 }
71097 
71098+static ssize_t
71099+type_show(struct device *dev, struct device_attribute *attr, char *buf)
71100+{
71101+	const char *ptype;
71102+
71103+	ptype = get_pd_product_type(dev);
71104+	if (!ptype)
71105+		return 0;
71106+
71107+	return sysfs_emit(buf, "%s\n", ptype);
71108+}
71109+static DEVICE_ATTR_RO(type);
71110+
71111+static ssize_t usb_power_delivery_revision_show(struct device *dev,
71112+						struct device_attribute *attr,
71113+						char *buf);
71114+static DEVICE_ATTR_RO(usb_power_delivery_revision);
71115+
71116 /* ------------------------------------------------------------------------- */
71117 /* Alternate Modes */
71118 
71119@@ -382,7 +513,7 @@ static umode_t typec_altmode_attr_is_visible(struct kobject *kobj,
71120 	return attr->mode;
71121 }
71122 
71123-static struct attribute_group typec_altmode_group = {
71124+static const struct attribute_group typec_altmode_group = {
71125 	.is_visible = typec_altmode_attr_is_visible,
71126 	.attrs = typec_altmode_attrs,
71127 };
71128@@ -484,6 +615,10 @@ typec_register_altmode(struct device *parent,
71129 	if (is_typec_partner(parent))
71130 		alt->adev.dev.bus = &typec_bus;
71131 
71132+	/* Plug alt modes need a class to generate udev events. */
71133+	if (is_typec_plug(parent))
71134+		alt->adev.dev.class = typec_class;
71135+
71136 	ret = device_register(&alt->adev.dev);
71137 	if (ret) {
71138 		dev_err(parent, "failed to register alternate mode (%d)\n",
71139@@ -534,12 +669,61 @@ static ssize_t supports_usb_power_delivery_show(struct device *dev,
71140 }
71141 static DEVICE_ATTR_RO(supports_usb_power_delivery);
71142 
71143+static ssize_t number_of_alternate_modes_show(struct device *dev, struct device_attribute *attr,
71144+					      char *buf)
71145+{
71146+	struct typec_partner *partner;
71147+	struct typec_plug *plug;
71148+	int num_altmodes;
71149+
71150+	if (is_typec_partner(dev)) {
71151+		partner = to_typec_partner(dev);
71152+		num_altmodes = partner->num_altmodes;
71153+	} else if (is_typec_plug(dev)) {
71154+		plug = to_typec_plug(dev);
71155+		num_altmodes = plug->num_altmodes;
71156+	} else {
71157+		return 0;
71158+	}
71159+
71160+	return sysfs_emit(buf, "%d\n", num_altmodes);
71161+}
71162+static DEVICE_ATTR_RO(number_of_alternate_modes);
71163+
71164 static struct attribute *typec_partner_attrs[] = {
71165 	&dev_attr_accessory_mode.attr,
71166 	&dev_attr_supports_usb_power_delivery.attr,
71167+	&dev_attr_number_of_alternate_modes.attr,
71168+	&dev_attr_type.attr,
71169+	&dev_attr_usb_power_delivery_revision.attr,
71170+	NULL
71171+};
71172+
71173+static umode_t typec_partner_attr_is_visible(struct kobject *kobj, struct attribute *attr, int n)
71174+{
71175+	struct typec_partner *partner = to_typec_partner(kobj_to_dev(kobj));
71176+
71177+	if (attr == &dev_attr_number_of_alternate_modes.attr) {
71178+		if (partner->num_altmodes < 0)
71179+			return 0;
71180+	}
71181+
71182+	if (attr == &dev_attr_type.attr)
71183+		if (!get_pd_product_type(kobj_to_dev(kobj)))
71184+			return 0;
71185+
71186+	return attr->mode;
71187+}
71188+
71189+static const struct attribute_group typec_partner_group = {
71190+	.is_visible = typec_partner_attr_is_visible,
71191+	.attrs = typec_partner_attrs
71192+};
71193+
71194+static const struct attribute_group *typec_partner_groups[] = {
71195+	&typec_partner_group,
71196 	NULL
71197 };
71198-ATTRIBUTE_GROUPS(typec_partner);
71199 
71200 static void typec_partner_release(struct device *dev)
71201 {
71202@@ -572,6 +756,61 @@ int typec_partner_set_identity(struct typec_partner *partner)
71203 }
71204 EXPORT_SYMBOL_GPL(typec_partner_set_identity);
71205 
71206+/**
71207+ * typec_partner_set_pd_revision - Set the PD revision supported by the partner
71208+ * @partner: The partner to be updated.
71209+ * @pd_revision:  USB Power Delivery Specification Revision supported by partner
71210+ *
71211+ * This routine is used to report that the PD revision of the port partner has
71212+ * become available.
71213+ */
71214+void typec_partner_set_pd_revision(struct typec_partner *partner, u16 pd_revision)
71215+{
71216+	if (partner->pd_revision == pd_revision)
71217+		return;
71218+
71219+	partner->pd_revision = pd_revision;
71220+	sysfs_notify(&partner->dev.kobj, NULL, "usb_power_delivery_revision");
71221+	if (pd_revision != 0 && !partner->usb_pd) {
71222+		partner->usb_pd = 1;
71223+		sysfs_notify(&partner->dev.kobj, NULL,
71224+			     "supports_usb_power_delivery");
71225+	}
71226+	kobject_uevent(&partner->dev.kobj, KOBJ_CHANGE);
71227+}
71228+EXPORT_SYMBOL_GPL(typec_partner_set_pd_revision);
71229+
71230+/**
71231+ * typec_partner_set_num_altmodes - Set the number of available partner altmodes
71232+ * @partner: The partner to be updated.
71233+ * @num_altmodes: The number of altmodes we want to specify as available.
71234+ *
71235+ * This routine is used to report the number of alternate modes supported by the
71236+ * partner. This value is *not* enforced in alternate mode registration routines.
71237+ *
71238+ * @partner.num_altmodes is set to -1 on partner registration, denoting that
71239+ * a valid value has not been set for it yet.
71240+ *
71241+ * Returns 0 on success or negative error number on failure.
71242+ */
71243+int typec_partner_set_num_altmodes(struct typec_partner *partner, int num_altmodes)
71244+{
71245+	int ret;
71246+
71247+	if (num_altmodes < 0)
71248+		return -EINVAL;
71249+
71250+	partner->num_altmodes = num_altmodes;
71251+	ret = sysfs_update_group(&partner->dev.kobj, &typec_partner_group);
71252+	if (ret < 0)
71253+		return ret;
71254+
71255+	sysfs_notify(&partner->dev.kobj, NULL, "number_of_alternate_modes");
71256+
71257+	return 0;
71258+}
71259+EXPORT_SYMBOL_GPL(typec_partner_set_num_altmodes);
71260+
71261 /**
71262  * typec_partner_register_altmode - Register USB Type-C Partner Alternate Mode
71263  * @partner: USB Type-C Partner that supports the alternate mode
71264@@ -592,6 +831,20 @@ typec_partner_register_altmode(struct typec_partner *partner,
71265 }
71266 EXPORT_SYMBOL_GPL(typec_partner_register_altmode);
71267 
71268+/**
71269+ * typec_partner_set_svdm_version - Set negotiated Structured VDM (SVDM) Version
71270+ * @partner: USB Type-C Partner that supports SVDM
71271+ * @svdm_version: Negotiated SVDM Version
71272+ *
71273+ * This routine is used to save the negotiated SVDM Version.
71274+ */
71275+void typec_partner_set_svdm_version(struct typec_partner *partner,
71276+				   enum usb_pd_svdm_ver svdm_version)
71277+{
71278+	partner->svdm_version = svdm_version;
71279+}
71280+EXPORT_SYMBOL_GPL(typec_partner_set_svdm_version);
71281+
71282 /**
71283  * typec_register_partner - Register a USB Type-C Partner
71284  * @port: The USB Type-C Port the partner is connected to
71285@@ -614,6 +867,9 @@ struct typec_partner *typec_register_partner(struct typec_port *port,
71286 	ida_init(&partner->mode_ids);
71287 	partner->usb_pd = desc->usb_pd;
71288 	partner->accessory = desc->accessory;
71289+	partner->num_altmodes = -1;
71290+	partner->pd_revision = desc->pd_revision;
71291+	partner->svdm_version = port->cap->svdm_version;
71292 
71293 	if (desc->identity) {
71294 		/*
71295@@ -664,11 +920,70 @@ static void typec_plug_release(struct device *dev)
71296 	kfree(plug);
71297 }
71298 
71299+static struct attribute *typec_plug_attrs[] = {
71300+	&dev_attr_number_of_alternate_modes.attr,
71301+	NULL
71302+};
71303+
71304+static umode_t typec_plug_attr_is_visible(struct kobject *kobj, struct attribute *attr, int n)
71305+{
71306+	struct typec_plug *plug = to_typec_plug(kobj_to_dev(kobj));
71307+
71308+	if (attr == &dev_attr_number_of_alternate_modes.attr) {
71309+		if (plug->num_altmodes < 0)
71310+			return 0;
71311+	}
71312+
71313+	return attr->mode;
71314+}
71315+
71316+static const struct attribute_group typec_plug_group = {
71317+	.is_visible = typec_plug_attr_is_visible,
71318+	.attrs = typec_plug_attrs
71319+};
71320+
71321+static const struct attribute_group *typec_plug_groups[] = {
71322+	&typec_plug_group,
71323+	NULL
71324+};
71325+
71326 static const struct device_type typec_plug_dev_type = {
71327 	.name = "typec_plug",
71328+	.groups = typec_plug_groups,
71329 	.release = typec_plug_release,
71330 };
71331 
71332+/**
71333+ * typec_plug_set_num_altmodes - Set the number of available plug altmodes
71334+ * @plug: The plug to be updated.
71335+ * @num_altmodes: The number of altmodes we want to specify as available.
71336+ *
71337+ * This routine is used to report the number of alternate modes supported by the
71338+ * plug. This value is *not* enforced in alternate mode registration routines.
71339+ *
71340+ * @plug.num_altmodes is set to -1 on plug registration, denoting that
71341+ * a valid value has not been set for it yet.
71342+ *
71343+ * Returns 0 on success or negative error number on failure.
71344+ */
71345+int typec_plug_set_num_altmodes(struct typec_plug *plug, int num_altmodes)
71346+{
71347+	int ret;
71348+
71349+	if (num_altmodes < 0)
71350+		return -EINVAL;
71351+
71352+	plug->num_altmodes = num_altmodes;
71353+	ret = sysfs_update_group(&plug->dev.kobj, &typec_plug_group);
71354+	if (ret < 0)
71355+		return ret;
71356+
71357+	sysfs_notify(&plug->dev.kobj, NULL, "number_of_alternate_modes");
71358+
71359+	return 0;
71360+}
71361+EXPORT_SYMBOL_GPL(typec_plug_set_num_altmodes);
71362+
71363 /**
71364  * typec_plug_register_altmode - Register USB Type-C Cable Plug Alternate Mode
71365  * @plug: USB Type-C Cable Plug that supports the alternate mode
71366@@ -714,6 +1029,7 @@ struct typec_plug *typec_register_plug(struct typec_cable *cable,
71367 	sprintf(name, "plug%d", desc->index);
71368 
71369 	ida_init(&plug->mode_ids);
71370+	plug->num_altmodes = -1;
71371 	plug->index = desc->index;
71372 	plug->dev.class = typec_class;
71373 	plug->dev.parent = &cable->dev;
71374@@ -746,15 +1062,6 @@ EXPORT_SYMBOL_GPL(typec_unregister_plug);
71375 
71376 /* Type-C Cables */
71377 
71378-static ssize_t
71379-type_show(struct device *dev, struct device_attribute *attr, char *buf)
71380-{
71381-	struct typec_cable *cable = to_typec_cable(dev);
71382-
71383-	return sprintf(buf, "%s\n", cable->active ? "active" : "passive");
71384-}
71385-static DEVICE_ATTR_RO(type);
71386-
71387 static const char * const typec_plug_types[] = {
71388 	[USB_PLUG_NONE]		= "unknown",
71389 	[USB_PLUG_TYPE_A]	= "type-a",
71390@@ -775,6 +1082,7 @@ static DEVICE_ATTR_RO(plug_type);
71391 static struct attribute *typec_cable_attrs[] = {
71392 	&dev_attr_type.attr,
71393 	&dev_attr_plug_type.attr,
71394+	&dev_attr_usb_power_delivery_revision.attr,
71395 	NULL
71396 };
71397 ATTRIBUTE_GROUPS(typec_cable);
71398@@ -877,6 +1185,7 @@ struct typec_cable *typec_register_cable(struct typec_port *port,
71399 
71400 	cable->type = desc->type;
71401 	cable->active = desc->active;
71402+	cable->pd_revision = desc->pd_revision;
71403 
71404 	if (desc->identity) {
71405 		/*
71406@@ -1246,11 +1555,23 @@ static ssize_t usb_power_delivery_revision_show(struct device *dev,
71407 						struct device_attribute *attr,
71408 						char *buf)
71409 {
71410-	struct typec_port *p = to_typec_port(dev);
71411+	u16 rev = 0;
71412 
71413-	return sprintf(buf, "%d\n", (p->cap->pd_revision >> 8) & 0xff);
71414+	if (is_typec_partner(dev)) {
71415+		struct typec_partner *partner = to_typec_partner(dev);
71416+
71417+		rev = partner->pd_revision;
71418+	} else if (is_typec_cable(dev)) {
71419+		struct typec_cable *cable = to_typec_cable(dev);
71420+
71421+		rev = cable->pd_revision;
71422+	} else if (is_typec_port(dev)) {
71423+		struct typec_port *p = to_typec_port(dev);
71424+
71425+		rev = p->cap->pd_revision;
71426+	}
71427+	return sysfs_emit(buf, "%d.%d\n", (rev >> 8) & 0xff, (rev >> 4) & 0xf);
71428 }
71429-static DEVICE_ATTR_RO(usb_power_delivery_revision);
71430 
71431 static ssize_t orientation_show(struct device *dev,
71432 				   struct device_attribute *attr,
71433@@ -1311,7 +1632,7 @@ static umode_t typec_attr_is_visible(struct kobject *kobj,
71434 	return attr->mode;
71435 }
71436 
71437-static struct attribute_group typec_group = {
71438+static const struct attribute_group typec_group = {
71439 	.is_visible = typec_attr_is_visible,
71440 	.attrs = typec_attrs,
71441 };
71442@@ -1354,6 +1675,11 @@ const struct device_type typec_port_dev_type = {
71443 /* --------------------------------------- */
71444 /* Driver callbacks to report role updates */
71445 
71446+static int partner_match(struct device *dev, void *data)
71447+{
71448+	return is_typec_partner(dev);
71449+}
71450+
71451 /**
71452  * typec_set_data_role - Report data role change
71453  * @port: The USB Type-C Port where the role was changed
71454@@ -1363,12 +1689,23 @@ const struct device_type typec_port_dev_type = {
71455  */
71456 void typec_set_data_role(struct typec_port *port, enum typec_data_role role)
71457 {
71458+	struct device *partner_dev;
71459+
71460 	if (port->data_role == role)
71461 		return;
71462 
71463 	port->data_role = role;
71464 	sysfs_notify(&port->dev.kobj, NULL, "data_role");
71465 	kobject_uevent(&port->dev.kobj, KOBJ_CHANGE);
71466+
71467+	partner_dev = device_find_child(&port->dev, NULL, partner_match);
71468+	if (!partner_dev)
71469+		return;
71470+
71471+	if (to_typec_partner(partner_dev)->identity)
71472+		typec_product_type_notify(partner_dev);
71473+
71474+	put_device(partner_dev);
71475 }
71476 EXPORT_SYMBOL_GPL(typec_set_data_role);
71477 
71478@@ -1409,11 +1746,6 @@ void typec_set_vconn_role(struct typec_port *port, enum typec_role role)
71479 }
71480 EXPORT_SYMBOL_GPL(typec_set_vconn_role);
71481 
71482-static int partner_match(struct device *dev, void *data)
71483-{
71484-	return is_typec_partner(dev);
71485-}
71486-
71487 /**
71488  * typec_set_pwr_opmode - Report changed power operation mode
71489  * @port: The USB Type-C Port where the mode was changed
71490@@ -1584,6 +1916,33 @@ EXPORT_SYMBOL_GPL(typec_set_mode);
71491 
71492 /* --------------------------------------- */
71493 
71494+/**
71495+ * typec_get_negotiated_svdm_version - Get negotiated SVDM Version
71496+ * @port: USB Type-C Port.
71497+ *
71498+ * Get the negotiated SVDM Version. The Version is set to the port default
71499+ * value stored in typec_capability on partner registration, and updated after
71500+ * a successful Discover Identity if the negotiated value is less than the
71501+ * default value.
71502+ *
71503+ * Returns usb_pd_svdm_ver if the partner has been registered, otherwise -ENODEV.
71504+ */
71505+int typec_get_negotiated_svdm_version(struct typec_port *port)
71506+{
71507+	enum usb_pd_svdm_ver svdm_version;
71508+	struct device *partner_dev;
71509+
71510+	partner_dev = device_find_child(&port->dev, NULL, partner_match);
71511+	if (!partner_dev)
71512+		return -ENODEV;
71513+
71514+	svdm_version = to_typec_partner(partner_dev)->svdm_version;
71515+	put_device(partner_dev);
71516+
71517+	return svdm_version;
71518+}
71519+EXPORT_SYMBOL_GPL(typec_get_negotiated_svdm_version);
71520+
71521 /**
71522  * typec_get_drvdata - Return private driver data pointer
71523  * @port: USB Type-C port
71524@@ -1625,6 +1984,62 @@ typec_port_register_altmode(struct typec_port *port,
71525 }
71526 EXPORT_SYMBOL_GPL(typec_port_register_altmode);
71527 
71528+#ifdef CONFIG_NO_GKI
71529+void typec_port_register_altmodes(struct typec_port *port,
71530+	const struct typec_altmode_ops *ops, void *drvdata,
71531+	struct typec_altmode **altmodes, size_t n)
71532+{
71533+	struct fwnode_handle *altmodes_node, *child;
71534+	struct typec_altmode_desc desc;
71535+	struct typec_altmode *alt;
71536+	size_t index = 0;
71537+	u32 svid, vdo;
71538+	int ret;
71539+
71540+	altmodes_node = device_get_named_child_node(&port->dev, "altmodes");
71541+	if (!altmodes_node)
71542+		return; /* No altmodes specified */
71543+
71544+	fwnode_for_each_child_node(altmodes_node, child) {
71545+		ret = fwnode_property_read_u32(child, "svid", &svid);
71546+		if (ret) {
71547+			dev_err(&port->dev, "Error reading svid for altmode %s\n",
71548+				fwnode_get_name(child));
71549+			continue;
71550+		}
71551+
71552+		ret = fwnode_property_read_u32(child, "vdo", &vdo);
71553+		if (ret) {
71554+			dev_err(&port->dev, "Error reading vdo for altmode %s\n",
71555+				fwnode_get_name(child));
71556+			continue;
71557+		}
71558+
71559+		if (index >= n) {
71560+			dev_err(&port->dev, "Error not enough space for altmode %s\n",
71561+				fwnode_get_name(child));
71562+			continue;
71563+		}
71564+
71565+		desc.svid = svid;
71566+		desc.vdo = vdo;
71567+		desc.mode = index + 1;
71568+		alt = typec_port_register_altmode(port, &desc);
71569+		if (IS_ERR(alt)) {
71570+			dev_err(&port->dev, "Error registering altmode %s\n",
71571+				fwnode_get_name(child));
71572+			continue;
71573+		}
71574+
71575+		alt->ops = ops;
71576+		typec_altmode_set_drvdata(alt, drvdata);
71577+		altmodes[index] = alt;
71578+		index++;
71579+	}
71580+}
71581+EXPORT_SYMBOL_GPL(typec_port_register_altmodes);
71582+#endif /* CONFIG_NO_GKI */
71583+
71584 /**
71585  * typec_register_port - Register a USB Type-C Port
71586  * @parent: Parent device
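
A minimal sketch (not part of the patch) of how a port driver would feed the partner attributes introduced above. The helper name and literal values are hypothetical, and SVDM_VER_2_0 is assumed to be the PD 3.0 enumerator provided by pd_vdo.h in this tree:

#include <linux/printk.h>
#include <linux/usb/pd_vdo.h>
#include <linux/usb/typec.h>

static void example_publish_partner_info(struct typec_port *port,
					 struct typec_partner *partner)
{
	/* Negotiated PD revision; 0x0300 is rendered as "3.0" in sysfs */
	typec_partner_set_pd_revision(partner, 0x0300);

	/*
	 * Count learned from Discover SVIDs/Modes; exposed as
	 * number_of_alternate_modes once set to a non-negative value.
	 */
	if (typec_partner_set_num_altmodes(partner, 2) < 0)
		pr_warn("typec: failed to publish partner altmode count\n");

	/* Version agreed during Discover Identity */
	typec_partner_set_svdm_version(partner, SVDM_VER_2_0);

	/* Alt mode drivers later read this back through the port */
	if (typec_get_negotiated_svdm_version(port) < 0)
		pr_warn("typec: no partner registered\n");
}
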
71587diff --git a/drivers/usb/typec/ucsi/Kconfig b/drivers/usb/typec/ucsi/Kconfig
71588index 2192d7c4f..5e9b37b3f 100644
71589--- a/drivers/usb/typec/ucsi/Kconfig
71590+++ b/drivers/usb/typec/ucsi/Kconfig
71591@@ -3,6 +3,7 @@
71592 config TYPEC_UCSI
71593 	tristate "USB Type-C Connector System Software Interface driver"
71594 	depends on !CPU_BIG_ENDIAN
71595+	depends on USB_ROLE_SWITCH || !USB_ROLE_SWITCH
71596 	help
71597 	  USB Type-C Connector System Software Interface (UCSI) is a
71598 	  specification for an interface that allows the operating system to
71599diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
71600index 1cf924f3a..cd3676b96 100644
71601--- a/drivers/video/backlight/pwm_bl.c
71602+++ b/drivers/video/backlight/pwm_bl.c
71603@@ -603,6 +603,8 @@ static int pwm_backlight_probe(struct platform_device *pdev)
71604 		pb->scale = data->max_brightness;
71605 	}
71606 
71607+	pwm_adjust_config(pb->pwm);
71608+
71609 	pb->lth_brightness = data->lth_brightness * (div_u64(state.period,
71610 				pb->scale));
71611 
71612diff --git a/drivers/video/fbdev/kyro/fbdev.c b/drivers/video/fbdev/kyro/fbdev.c
71613index 25801e8e3..8fbde92ae 100644
71614--- a/drivers/video/fbdev/kyro/fbdev.c
71615+++ b/drivers/video/fbdev/kyro/fbdev.c
71616@@ -372,11 +372,6 @@ static int kyro_dev_overlay_viewport_set(u32 x, u32 y, u32 ulWidth, u32 ulHeight
71617 		/* probably haven't called CreateOverlay yet */
71618 		return -EINVAL;
71619 
71620-	if (ulWidth == 0 || ulWidth == 0xffffffff ||
71621-	    ulHeight == 0 || ulHeight == 0xffffffff ||
71622-	    (x < 2 && ulWidth + 2 == 0))
71623-		return -EINVAL;
71624-
71625 	/* Stop Ramdac Output */
71626 	DisableRamdacOutput(deviceInfo.pSTGReg);
71627 
71628@@ -399,9 +394,6 @@ static int kyrofb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
71629 {
71630 	struct kyrofb_info *par = info->par;
71631 
71632-	if (!var->pixclock)
71633-		return -EINVAL;
71634-
71635 	if (var->bits_per_pixel != 16 && var->bits_per_pixel != 32) {
71636 		printk(KERN_WARNING "kyrofb: depth not supported: %u\n", var->bits_per_pixel);
71637 		return -EINVAL;
71638diff --git a/include/drm/bridge/analogix_dp.h b/include/drm/bridge/analogix_dp.h
71639index b0dcc0733..26b4246cb 100644
71640--- a/include/drm/bridge/analogix_dp.h
71641+++ b/include/drm/bridge/analogix_dp.h
71642@@ -8,6 +8,7 @@
71643 #define _ANALOGIX_DP_H_
71644 
71645 #include <drm/drm_crtc.h>
71646+#include <sound/hdmi-codec.h>
71647 
71648 struct analogix_dp_device;
71649 
71650@@ -15,11 +16,21 @@ enum analogix_dp_devtype {
71651 	EXYNOS_DP,
71652 	RK3288_DP,
71653 	RK3399_EDP,
71654+	RK3568_EDP,
71655+	RK3588_EDP,
71656 };
71657 
71658 static inline bool is_rockchip(enum analogix_dp_devtype type)
71659 {
71660-	return type == RK3288_DP || type == RK3399_EDP;
71661+	switch (type) {
71662+	case RK3288_DP:
71663+	case RK3399_EDP:
71664+	case RK3568_EDP:
71665+	case RK3588_EDP:
71666+		return true;
71667+	default:
71668+		return false;
71669+	}
71670 }
71671 
71672 struct analogix_dp_plat_data {
71673@@ -28,18 +39,26 @@ struct analogix_dp_plat_data {
71674 	struct drm_encoder *encoder;
71675 	struct drm_connector *connector;
71676 	bool skip_connector;
71677+	bool ssc;
71678+
71679+	bool split_mode;
71680+	struct analogix_dp_device *left;
71681+	struct analogix_dp_device *right;
71682 
71683 	int (*power_on_start)(struct analogix_dp_plat_data *);
71684 	int (*power_on_end)(struct analogix_dp_plat_data *);
71685 	int (*power_off)(struct analogix_dp_plat_data *);
71686 	int (*attach)(struct analogix_dp_plat_data *, struct drm_bridge *,
71687 		      struct drm_connector *);
71688+	void (*detach)(struct analogix_dp_plat_data *, struct drm_bridge *);
71689 	int (*get_modes)(struct analogix_dp_plat_data *,
71690 			 struct drm_connector *);
71691+	void (*convert_to_split_mode)(struct drm_display_mode *);
71692+	void (*convert_to_origin_mode)(struct drm_display_mode *);
71693 };
71694 
71695-int analogix_dp_resume(struct analogix_dp_device *dp);
71696-int analogix_dp_suspend(struct analogix_dp_device *dp);
71697+int analogix_dp_runtime_resume(struct analogix_dp_device *dp);
71698+int analogix_dp_runtime_suspend(struct analogix_dp_device *dp);
71699 
71700 struct analogix_dp_device *
71701 analogix_dp_probe(struct device *dev, struct analogix_dp_plat_data *plat_data);
71702@@ -50,4 +69,13 @@ void analogix_dp_remove(struct analogix_dp_device *dp);
71703 int analogix_dp_start_crc(struct drm_connector *connector);
71704 int analogix_dp_stop_crc(struct drm_connector *connector);
71705 
71706+int analogix_dp_audio_hw_params(struct analogix_dp_device *dp,
71707+				struct hdmi_codec_daifmt *daifmt,
71708+				struct hdmi_codec_params *params);
71709+void analogix_dp_audio_shutdown(struct analogix_dp_device *dp);
71710+int analogix_dp_audio_startup(struct analogix_dp_device *dp);
71711+int analogix_dp_audio_get_eld(struct analogix_dp_device *dp,
71712+			      u8 *buf, size_t len);
71713+int analogix_dp_loader_protect(struct analogix_dp_device *dp);
71714+
71715 #endif /* _ANALOGIX_DP_H_ */
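
The audio hooks declared above are shaped to sit behind hdmi-codec callbacks. A hedged sketch of that glue, assuming the codec's private data pointer is the analogix_dp_device (registering the hdmi-codec platform device is left to the platform driver):

#include <drm/bridge/analogix_dp.h>
#include <sound/hdmi-codec.h>

static int example_dp_audio_hw_params(struct device *dev, void *data,
				      struct hdmi_codec_daifmt *daifmt,
				      struct hdmi_codec_params *params)
{
	return analogix_dp_audio_hw_params(data, daifmt, params);
}

static void example_dp_audio_shutdown(struct device *dev, void *data)
{
	analogix_dp_audio_shutdown(data);
}

static int example_dp_audio_startup(struct device *dev, void *data)
{
	return analogix_dp_audio_startup(data);
}

static int example_dp_audio_get_eld(struct device *dev, void *data,
				    u8 *buf, size_t len)
{
	return analogix_dp_audio_get_eld(data, buf, len);
}

static const struct hdmi_codec_ops example_dp_audio_ops = {
	.hw_params	= example_dp_audio_hw_params,
	.audio_shutdown	= example_dp_audio_shutdown,
	.audio_startup	= example_dp_audio_startup,
	.get_eld	= example_dp_audio_get_eld,
};
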
71716diff --git a/include/drm/bridge/dw_hdmi.h b/include/drm/bridge/dw_hdmi.h
71717index ea34ca146..b1057b624 100644
71718--- a/include/drm/bridge/dw_hdmi.h
71719+++ b/include/drm/bridge/dw_hdmi.h
71720@@ -6,12 +6,15 @@
71721 #ifndef __DW_HDMI__
71722 #define __DW_HDMI__
71723 
71724+#include <drm/drm_property.h>
71725 #include <sound/hdmi-codec.h>
71726+#include <media/cec.h>
71727 
71728 struct drm_display_info;
71729 struct drm_display_mode;
71730 struct drm_encoder;
71731 struct dw_hdmi;
71732+struct dw_hdmi_qp;
71733 struct platform_device;
71734 
71735 /**
71736@@ -92,6 +95,13 @@ enum dw_hdmi_phy_type {
71737 	DW_HDMI_PHY_VENDOR_PHY = 0xfe,
71738 };
71739 
71740+struct dw_hdmi_audio_tmds_n {
71741+	unsigned long tmds;
71742+	unsigned int n_32k;
71743+	unsigned int n_44k1;
71744+	unsigned int n_48k;
71745+};
71746+
71747 struct dw_hdmi_mpll_config {
71748 	unsigned long mpixelclock;
71749 	struct {
71750@@ -112,6 +122,15 @@ struct dw_hdmi_phy_config {
71751 	u16 vlev_ctr;   /* voltage level control */
71752 };
71753 
71754+struct dw_hdmi_link_config {
71755+	bool dsc_mode;
71756+	bool frl_mode;
71757+	int frl_lanes;
71758+	int rate_per_lane;
71759+	int hcactive;
71760+	u8 pps_payload[128];
71761+};
71762+
71763 struct dw_hdmi_phy_ops {
71764 	int (*init)(struct dw_hdmi *hdmi, void *data,
71765 		    const struct drm_display_info *display,
71766@@ -123,12 +142,48 @@ struct dw_hdmi_phy_ops {
71767 	void (*setup_hpd)(struct dw_hdmi *hdmi, void *data);
71768 };
71769 
71770+struct dw_hdmi_qp_phy_ops {
71771+	int (*init)(struct dw_hdmi_qp *hdmi, void *data,
71772+		    struct drm_display_mode *mode);
71773+	void (*disable)(struct dw_hdmi_qp *hdmi, void *data);
71774+	enum drm_connector_status (*read_hpd)(struct dw_hdmi_qp *hdmi,
71775+					      void *data);
71776+	void (*update_hpd)(struct dw_hdmi_qp *hdmi, void *data,
71777+			   bool force, bool disabled, bool rxsense);
71778+	void (*setup_hpd)(struct dw_hdmi_qp *hdmi, void *data);
71779+	void (*set_mode)(struct dw_hdmi_qp *dw_hdmi, void *data,
71780+			 u32 mode_mask, bool enable);
71781+};
71782+
71783+struct dw_hdmi_property_ops {
71784+	void (*attach_properties)(struct drm_connector *connector,
71785+				  unsigned int color, int version,
71786+				  void *data);
71787+	void (*destroy_properties)(struct drm_connector *connector,
71788+				   void *data);
71789+	int (*set_property)(struct drm_connector *connector,
71790+			    struct drm_connector_state *state,
71791+			    struct drm_property *property,
71792+			    u64 val,
71793+			    void *data);
71794+	int (*get_property)(struct drm_connector *connector,
71795+			    const struct drm_connector_state *state,
71796+			    struct drm_property *property,
71797+			    u64 *val,
71798+			    void *data);
71799+};
71800+
71801 struct dw_hdmi_plat_data {
71802 	struct regmap *regm;
71803 
71804+	unsigned long input_bus_format;
71805 	unsigned long input_bus_encoding;
71806+	unsigned int max_tmdsclk;
71807 	bool use_drm_infoframe;
71808 	bool ycbcr_420_allowed;
71809+	bool unsupported_yuv_input;
71810+	bool unsupported_deep_color;
71811+	bool is_hdmi_qp;
71812 
71813 	/*
71814 	 * Private data passed to all the .mode_valid() and .configure_phy()
71815@@ -137,22 +192,46 @@ struct dw_hdmi_plat_data {
71816 	void *priv_data;
71817 
71818 	/* Platform-specific mode validation (optional). */
71819-	enum drm_mode_status (*mode_valid)(struct dw_hdmi *hdmi, void *data,
71820+	enum drm_mode_status (*mode_valid)(struct drm_connector *connector,
71821+					   void *data,
71822 					   const struct drm_display_info *info,
71823 					   const struct drm_display_mode *mode);
71824 
71825 	/* Vendor PHY support */
71826 	const struct dw_hdmi_phy_ops *phy_ops;
71827+	const struct dw_hdmi_qp_phy_ops *qp_phy_ops;
71828 	const char *phy_name;
71829 	void *phy_data;
71830 	unsigned int phy_force_vendor;
71831+	const struct dw_hdmi_audio_tmds_n *tmds_n_table;
71832 
71833 	/* Synopsys PHY support */
71834 	const struct dw_hdmi_mpll_config *mpll_cfg;
71835+	const struct dw_hdmi_mpll_config *mpll_cfg_420;
71836 	const struct dw_hdmi_curr_ctrl *cur_ctr;
71837 	const struct dw_hdmi_phy_config *phy_config;
71838 	int (*configure_phy)(struct dw_hdmi *hdmi, void *data,
71839 			     unsigned long mpixelclock);
71840+
71841+	unsigned long (*get_input_bus_format)(void *data);
71842+	unsigned long (*get_output_bus_format)(void *data);
71843+	unsigned long (*get_enc_in_encoding)(void *data);
71844+	unsigned long (*get_enc_out_encoding)(void *data);
71845+	unsigned long (*get_quant_range)(void *data);
71846+	struct drm_property *(*get_hdr_property)(void *data);
71847+	struct drm_property_blob *(*get_hdr_blob)(void *data);
71848+	bool (*get_color_changed)(void *data);
71849+	int (*get_yuv422_format)(struct drm_connector *connector,
71850+				 struct edid *edid);
71851+	int (*get_edid_dsc_info)(void *data, struct edid *edid);
71852+	int (*get_next_hdr_data)(void *data, struct edid *edid,
71853+				 struct drm_connector *connector);
71854+	struct dw_hdmi_link_config *(*get_link_cfg)(void *data);
71855+	void (*set_grf_cfg)(void *data);
71856+
71857+	/* Vendor Property support */
71858+	const struct dw_hdmi_property_ops *property_ops;
71859+	struct drm_connector *connector;
71860 };
71861 
71862 struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev,
71863@@ -161,8 +240,9 @@ void dw_hdmi_remove(struct dw_hdmi *hdmi);
71864 void dw_hdmi_unbind(struct dw_hdmi *hdmi);
71865 struct dw_hdmi *dw_hdmi_bind(struct platform_device *pdev,
71866 			     struct drm_encoder *encoder,
71867-			     const struct dw_hdmi_plat_data *plat_data);
71868+			     struct dw_hdmi_plat_data *plat_data);
71869 
71870+void dw_hdmi_suspend(struct dw_hdmi *hdmi);
71871 void dw_hdmi_resume(struct dw_hdmi *hdmi);
71872 
71873 void dw_hdmi_setup_rx_sense(struct dw_hdmi *hdmi, bool hpd, bool rx_sense);
71874@@ -192,5 +272,28 @@ enum drm_connector_status dw_hdmi_phy_read_hpd(struct dw_hdmi *hdmi,
71875 void dw_hdmi_phy_update_hpd(struct dw_hdmi *hdmi, void *data,
71876 			    bool force, bool disabled, bool rxsense);
71877 void dw_hdmi_phy_setup_hpd(struct dw_hdmi *hdmi, void *data);
71878+void dw_hdmi_set_quant_range(struct dw_hdmi *hdmi);
71879+void dw_hdmi_set_output_type(struct dw_hdmi *hdmi, u64 val);
71880+bool dw_hdmi_get_output_whether_hdmi(struct dw_hdmi *hdmi);
71881+int dw_hdmi_get_output_type_cap(struct dw_hdmi *hdmi);
71882+void dw_hdmi_set_cec_adap(struct dw_hdmi *hdmi, struct cec_adapter *adap);
71883+
71884+void dw_hdmi_qp_unbind(struct dw_hdmi_qp *hdmi);
71885+struct dw_hdmi_qp *dw_hdmi_qp_bind(struct platform_device *pdev,
71886+				struct drm_encoder *encoder,
71887+				struct dw_hdmi_plat_data *plat_data);
71888+void dw_hdmi_qp_suspend(struct device *dev, struct dw_hdmi_qp *hdmi);
71889+void dw_hdmi_qp_resume(struct device *dev, struct dw_hdmi_qp *hdmi);
71890+void dw_hdmi_qp_cec_set_hpd(struct dw_hdmi_qp *hdmi, bool plug_in, bool change);
71891+void dw_hdmi_qp_set_cec_adap(struct dw_hdmi_qp *hdmi, struct cec_adapter *adap);
71892+int dw_hdmi_qp_set_earc(struct dw_hdmi_qp *hdmi);
71893+void dw_hdmi_qp_set_sample_rate(struct dw_hdmi_qp *hdmi, unsigned int rate);
71894+void dw_hdmi_qp_set_channel_count(struct dw_hdmi_qp *hdmi, unsigned int cnt);
71895+void dw_hdmi_qp_set_channel_status(struct dw_hdmi_qp *hdmi, u8 *channel_status);
71896+void dw_hdmi_qp_set_channel_allocation(struct dw_hdmi_qp *hdmi, unsigned int ca);
71897+void dw_hdmi_qp_audio_enable(struct dw_hdmi_qp *hdmi);
71898+void dw_hdmi_qp_audio_disable(struct dw_hdmi_qp *hdmi);
71899+int dw_hdmi_qp_set_plugged_cb(struct dw_hdmi_qp *hdmi, hdmi_codec_plugged_cb fn,
71900+			      struct device *codec_dev);
71901 
71902 #endif /* __IMX_HDMI_H__ */
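
A minimal sketch (not part of the patch) of platform glue adapted to the changed ->mode_valid() prototype, which now receives the drm_connector instead of the dw_hdmi instance; the 600 MHz cut-off is an arbitrary placeholder, not a real platform constraint:

#include <drm/bridge/dw_hdmi.h>
#include <drm/drm_connector.h>
#include <drm/drm_modes.h>

static enum drm_mode_status
example_hdmi_mode_valid(struct drm_connector *connector, void *data,
			const struct drm_display_info *info,
			const struct drm_display_mode *mode)
{
	if (mode->clock > 600000)	/* mode->clock is in kHz */
		return MODE_CLOCK_HIGH;

	return MODE_OK;
}

/* dw_hdmi_bind() now takes a non-const plat_data pointer, per the hunk above */
static struct dw_hdmi_plat_data example_hdmi_plat_data = {
	.mode_valid		= example_hdmi_mode_valid,
	.use_drm_infoframe	= true,
	.is_hdmi_qp		= false,	/* classic DWC HDMI, not the QP core */
};
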
71903diff --git a/include/drm/bridge/dw_mipi_dsi.h b/include/drm/bridge/dw_mipi_dsi.h
71904index bda8aa7c2..f89b0476a 100644
71905--- a/include/drm/bridge/dw_mipi_dsi.h
71906+++ b/include/drm/bridge/dw_mipi_dsi.h
71907@@ -66,5 +66,6 @@ void dw_mipi_dsi_remove(struct dw_mipi_dsi *dsi);
71908 int dw_mipi_dsi_bind(struct dw_mipi_dsi *dsi, struct drm_encoder *encoder);
71909 void dw_mipi_dsi_unbind(struct dw_mipi_dsi *dsi);
71910 void dw_mipi_dsi_set_slave(struct dw_mipi_dsi *dsi, struct dw_mipi_dsi *slave);
71911+struct drm_connector *dw_mipi_dsi_get_connector(struct dw_mipi_dsi *dsi);
71912 
71913 #endif /* __DW_MIPI_DSI__ */
71914diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h
71915index 928136556..f8cd5d8e5 100644
71916--- a/include/drm/drm_connector.h
71917+++ b/include/drm/drm_connector.h
71918@@ -175,6 +175,48 @@ struct drm_scdc {
71919 	struct drm_scrambling scrambling;
71920 };
71921 
71922+#ifdef CONFIG_NO_GKI
71923+/**
71924+ * struct drm_hdmi_dsc_cap - DSC capabilities of HDMI sink
71925+ *
71926+ * Describes the DSC support provided by HDMI 2.1 sink.
71927+ * The information is fetched fom additional HFVSDB blocks defined
71928+ * for HDMI 2.1.
71929+ */
71930+struct drm_hdmi_dsc_cap {
71931+	/** @v_1p2: flag for dsc1.2 version support by sink */
71932+	bool v_1p2;
71933+
71934+	/** @native_420: Does sink support DSC with 4:2:0 compression */
71935+	bool native_420;
71936+
71937+	/**
71938+	 * @all_bpp: Does sink support all bpp with 4:4:4 or 4:2:2
71939+	 * compressed formats
71940+	 */
71941+	bool all_bpp;
71942+
71943+	/**
71944+	 * @bpc_supported: compressed bpc supported by sink: 10, 12 or 16 bpc
71945+	 */
71946+	u8 bpc_supported;
71947+
71948+	/** @max_slices: maximum number of horizontal slices supported by the sink */
71949+	u8 max_slices;
71950+
71951+	/** @clk_per_slice: max pixel clock in MHz supported per slice */
71952+	int clk_per_slice;
71953+
71954+	/** @max_lanes: dsc max lanes supported for Fixed rate Link training */
71955+	u8 max_lanes;
71956+
71957+	/** @max_frl_rate_per_lane: maximum frl rate with DSC per lane */
71958+	u8 max_frl_rate_per_lane;
71959+
71960+	/** @total_chunk_kbytes: max size of chunks in KBs supported per line */
71961+	u8 total_chunk_kbytes;
71962+};
71963+#endif
71964 
71965 /**
71966  * struct drm_hdmi_info - runtime information about the connected HDMI sink
71967@@ -207,6 +249,17 @@ struct drm_hdmi_info {
71968 
71969 	/** @y420_dc_modes: bitmap of deep color support index */
71970 	u8 y420_dc_modes;
71971+
71972+#ifdef CONFIG_NO_GKI
71973+	/** @max_frl_rate_per_lane: support fixed rate link */
71974+	u8 max_frl_rate_per_lane;
71975+
71976+	/** @max_lanes: supported by sink */
71977+	u8 max_lanes;
71978+
71979+	/** @dsc_cap: DSC capabilities of the sink */
71980+	struct drm_hdmi_dsc_cap dsc_cap;
71981+#endif
71982 };
71983 
71984 /**
71985@@ -1596,6 +1649,7 @@ drm_connector_is_unregistered(struct drm_connector *connector)
71986 		DRM_CONNECTOR_UNREGISTERED;
71987 }
71988 
71989+void drm_connector_oob_hotplug_event(struct fwnode_handle *connector_fwnode);
71990 const char *drm_get_connector_type_name(unsigned int connector_type);
71991 const char *drm_get_connector_status_name(enum drm_connector_status status);
71992 const char *drm_get_subpixel_order_name(enum subpixel_order order);
71993diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
71994index 59b51a09c..3a3d9d887 100644
71995--- a/include/drm/drm_crtc.h
71996+++ b/include/drm/drm_crtc.h
71997@@ -287,7 +287,16 @@ struct drm_crtc_state {
71998 	 * NULL) is an array of &struct drm_color_lut.
71999 	 */
72000 	struct drm_property_blob *gamma_lut;
72001-
72002+#if defined(CONFIG_ROCKCHIP_DRM_CUBIC_LUT)
72003+	/**
72004+	 * @cubic_lut:
72005+	 *
72006+	 * Cubic Lookup table for converting pixel data. See
72007+	 * drm_crtc_enable_color_mgmt(). The blob (if not NULL) is a 3D array
72008+	 * of &struct drm_color_lut.
72009+	 */
72010+	struct drm_property_blob *cubic_lut;
72011+#endif
72012 	/**
72013 	 * @target_vblank:
72014 	 *
72015diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h
72016index e57d0440f..e395d1fc1 100644
72017--- a/include/drm/drm_drv.h
72018+++ b/include/drm/drm_drv.h
72019@@ -29,6 +29,7 @@
72020 
72021 #include <linux/list.h>
72022 #include <linux/irqreturn.h>
72023+#include <linux/uuid.h>
72024 
72025 #include <drm/drm_device.h>
72026 
72027@@ -460,6 +461,15 @@ struct drm_driver {
72028 	int (*gem_prime_mmap)(struct drm_gem_object *obj,
72029 				struct vm_area_struct *vma);
72030 
72031+	/**
72032+	 * @gem_prime_get_uuid
72033+	 *
72034+	 * get_uuid hook for GEM drivers. Retrieves the virtio uuid of the
72035+	 * given GEM buffer.
72036+	 */
72037+	int (*gem_prime_get_uuid)(struct drm_gem_object *obj,
72038+				  uuid_t *uuid);
72039+
72040 	/**
72041 	 * @dumb_create:
72042 	 *
72043diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
72044index 4526b6a1e..6f082761e 100644
72045--- a/include/drm/drm_edid.h
72046+++ b/include/drm/drm_edid.h
72047@@ -229,6 +229,38 @@ struct detailed_timing {
72048 				    DRM_EDID_YCBCR420_DC_36 | \
72049 				    DRM_EDID_YCBCR420_DC_30)
72050 
72051+#ifdef CONFIG_NO_GKI
72052+/* HDMI 2.1 additional fields */
72053+#define DRM_EDID_MAX_FRL_RATE_MASK		0xf0
72054+#define DRM_EDID_FAPA_START_LOCATION		(1 << 0)
72055+#define DRM_EDID_ALLM				(1 << 1)
72056+#define DRM_EDID_FVA				(1 << 2)
72057+
72058+/* Deep Color specific */
72059+#define DRM_EDID_DC_30BIT_420			(1 << 0)
72060+#define DRM_EDID_DC_36BIT_420			(1 << 1)
72061+#define DRM_EDID_DC_48BIT_420			(1 << 2)
72062+
72063+/* VRR specific */
72064+#define DRM_EDID_CNMVRR				(1 << 3)
72065+#define DRM_EDID_CINEMA_VRR			(1 << 4)
72066+#define DRM_EDID_MDELTA				(1 << 5)
72067+#define DRM_EDID_VRR_MAX_UPPER_MASK		0xc0
72068+#define DRM_EDID_VRR_MAX_LOWER_MASK		0xff
72069+#define DRM_EDID_VRR_MIN_MASK			0x3f
72070+
72071+/* DSC specific */
72072+#define DRM_EDID_DSC_10BPC			(1 << 0)
72073+#define DRM_EDID_DSC_12BPC			(1 << 1)
72074+#define DRM_EDID_DSC_16BPC			(1 << 2)
72075+#define DRM_EDID_DSC_ALL_BPP			(1 << 3)
72076+#define DRM_EDID_DSC_NATIVE_420			(1 << 6)
72077+#define DRM_EDID_DSC_1P2			(1 << 7)
72078+#define DRM_EDID_DSC_MAX_FRL_RATE_MASK		0xf0
72079+#define DRM_EDID_DSC_MAX_SLICES			0xf
72080+#define DRM_EDID_DSC_TOTAL_CHUNK_KBYTES		0x3f
72081+#endif
72082+
72083 /* ELD Header Block */
72084 #define DRM_ELD_HEADER_BLOCK_SIZE	4
72085 
72086diff --git a/include/drm/drm_file.h b/include/drm/drm_file.h
72087index 42d04607d..726cfe0ff 100644
72088--- a/include/drm/drm_file.h
72089+++ b/include/drm/drm_file.h
72090@@ -411,6 +411,9 @@ void drm_event_cancel_free(struct drm_device *dev,
72091 			   struct drm_pending_event *p);
72092 void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e);
72093 void drm_send_event(struct drm_device *dev, struct drm_pending_event *e);
72094+void drm_send_event_timestamp_locked(struct drm_device *dev,
72095+				     struct drm_pending_event *e,
72096+				     ktime_t timestamp);
72097 
72098 struct file *mock_drm_getfile(struct drm_minor *minor, unsigned int flags);
72099 
72100diff --git a/include/drm/drm_mipi_dsi.h b/include/drm/drm_mipi_dsi.h
72101index 31ba85a41..05592c17d 100644
72102--- a/include/drm/drm_mipi_dsi.h
72103+++ b/include/drm/drm_mipi_dsi.h
72104@@ -19,12 +19,18 @@ struct drm_dsc_picture_parameter_set;
72105 #define MIPI_DSI_MSG_REQ_ACK	BIT(0)
72106 /* use Low Power Mode to transmit message */
72107 #define MIPI_DSI_MSG_USE_LPM	BIT(1)
72108+/* read mipi_dsi_msg.ctrl and unicast only to that ctrl */
72109+#define MIPI_DSI_MSG_UNICAST	BIT(2)
72110+/* stack all commands until the LASTCOMMAND flag is set, then trigger them in one go */
72111+#define MIPI_DSI_MSG_LASTCOMMAND BIT(3)
72112 
72113 /**
72114  * struct mipi_dsi_msg - read/write DSI buffer
72115  * @channel: virtual channel id
72116  * @type: payload data type
72117  * @flags: flags controlling this message transmission
72118+ * @ctrl: ctrl index to transmit on
72119+ * @wait_ms: duration in ms to wait after message transmission
72120  * @tx_len: length of @tx_buf
72121  * @tx_buf: data to be written
72122  * @rx_len: length of @rx_buf
72123@@ -34,6 +40,8 @@ struct mipi_dsi_msg {
72124 	u8 channel;
72125 	u8 type;
72126 	u16 flags;
72127+	u32 ctrl;
72128+	u32 wait_ms;
72129 
72130 	size_t tx_len;
72131 	const void *tx_buf;
72132@@ -132,6 +140,10 @@ struct mipi_dsi_host *of_find_mipi_dsi_host_by_node(struct device_node *node);
72133 #define MIPI_DSI_CLOCK_NON_CONTINUOUS	BIT(10)
72134 /* transmit data in low power */
72135 #define MIPI_DSI_MODE_LPM		BIT(11)
72136+/* disable BLLP area */
72137+#define MIPI_DSI_MODE_VIDEO_BLLP	BIT(12)
72138+/* disable EOF BLLP area */
72139+#define MIPI_DSI_MODE_VIDEO_EOF_BLLP	BIT(13)
72140 
72141 enum mipi_dsi_pixel_format {
72142 	MIPI_DSI_FMT_RGB888,
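To show how the new per-message fields fit together, here is a hedged sketch of a unicast low-power DCS write; the payload, controller index and delay are placeholders, and only the flag and field names come from this header:

#include <drm/drm_mipi_dsi.h>
#include <video/mipi_display.h>

/* Sketch: unicast a DCS exit_sleep_mode to controller 0 and pause afterwards. */
static ssize_t example_dsi_unicast(struct mipi_dsi_host *host)
{
	static const u8 payload[] = { MIPI_DCS_EXIT_SLEEP_MODE };
	struct mipi_dsi_msg msg = {
		.channel = 0,
		.type	 = MIPI_DSI_DCS_SHORT_WRITE,
		.flags	 = MIPI_DSI_MSG_UNICAST | MIPI_DSI_MSG_USE_LPM,
		.ctrl	 = 0,		/* transmit on controller index 0 only */
		.wait_ms = 120,		/* delay after the command completes */
		.tx_buf	 = payload,
		.tx_len	 = sizeof(payload),
	};

	return host->ops->transfer(host, &msg);
}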
72143diff --git a/include/drm/drm_mode_config.h b/include/drm/drm_mode_config.h
72144index a18f73eb3..76d114569 100644
72145--- a/include/drm/drm_mode_config.h
72146+++ b/include/drm/drm_mode_config.h
72147@@ -794,6 +794,19 @@ struct drm_mode_config {
72148 	 */
72149 	struct drm_property *gamma_lut_size_property;
72150 
72151+#if defined(CONFIG_ROCKCHIP_DRM_CUBIC_LUT)
72152+	/**
72153+	 * @cubic_lut_property: Optional CRTC property to set the 3D LUT used to
72154+	 * convert color spaces.
72155+	 */
72156+	struct drm_property *cubic_lut_property;
72157+	/**
72158+	 * @cubic_lut_size_property: Optional CRTC property for the size of the
72159+	 * 3D LUT as supported by the driver (read-only).
72160+	 */
72161+	struct drm_property *cubic_lut_size_property;
72162+#endif
72163+
72164 	/**
72165 	 * @suggested_x_property: Optional connector property with a hint for
72166 	 * the position of the output on the host's screen.
72167diff --git a/include/drm/drm_mode_object.h b/include/drm/drm_mode_object.h
72168index c34a3e803..6292fa663 100644
72169--- a/include/drm/drm_mode_object.h
72170+++ b/include/drm/drm_mode_object.h
72171@@ -60,7 +60,7 @@ struct drm_mode_object {
72172 	void (*free_cb)(struct kref *kref);
72173 };
72174 
72175-#define DRM_OBJECT_MAX_PROPERTY 24
72176+#define DRM_OBJECT_MAX_PROPERTY 64
72177 /**
72178  * struct drm_object_properties - property tracking for &drm_mode_object
72179  */
72180diff --git a/include/drm/drm_prime.h b/include/drm/drm_prime.h
72181index 0f69f9fbf..a9d0c6b08 100644
72182--- a/include/drm/drm_prime.h
72183+++ b/include/drm/drm_prime.h
72184@@ -107,5 +107,6 @@ void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg);
72185 int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
72186 				     dma_addr_t *addrs, int max_pages);
72187 
72188+int drm_gem_dmabuf_get_uuid(struct dma_buf *dma_buf, uuid_t *uuid);
72189 
72190 #endif /* __DRM_PRIME_H__ */
72191diff --git a/include/dt-bindings/soc/rockchip,boot-mode.h b/include/dt-bindings/soc/rockchip,boot-mode.h
72192index 4b0914c09..1436e1d32 100644
72193--- a/include/dt-bindings/soc/rockchip,boot-mode.h
72194+++ b/include/dt-bindings/soc/rockchip,boot-mode.h
72195@@ -10,7 +10,15 @@
72196 #define BOOT_BL_DOWNLOAD	(REBOOT_FLAG + 1)
72197 /* enter recovery */
72198 #define BOOT_RECOVERY		(REBOOT_FLAG + 3)
72199- /* enter fastboot mode */
72200+/* reboot by panic */
72201+#define BOOT_PANIC		(REBOOT_FLAG + 7)
72202+/* reboot by watchdog */
72203+#define BOOT_WATCHDOG		(REBOOT_FLAG + 8)
72204+/* enter fastboot mode */
72205 #define BOOT_FASTBOOT		(REBOOT_FLAG + 9)
72206+/* enter charging mode */
72207+#define BOOT_CHARGING		(REBOOT_FLAG + 11)
72208+/* enter usb mass storage mode */
72209+#define BOOT_UMS		(REBOOT_FLAG + 12)
72210 
72211 #endif
72212diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
72213index 03a5de5f9..e22088f27 100644
72214--- a/include/linux/clk-provider.h
72215+++ b/include/linux/clk-provider.h
72216@@ -32,6 +32,7 @@
72217 #define CLK_OPS_PARENT_ENABLE	BIT(12)
72218 /* duty cycle call may be forwarded to the parent clock */
72219 #define CLK_DUTY_CYCLE_PARENT	BIT(13)
72220+/* don't hold state */
+#define CLK_DONT_HOLD_STATE	BIT(14)
72221 
72222 struct clk;
72223 struct clk_hw;
72224@@ -252,6 +253,12 @@ struct clk_ops {
72225 	int		(*init)(struct clk_hw *hw);
72226 	void		(*terminate)(struct clk_hw *hw);
72227 	void		(*debug_init)(struct clk_hw *hw, struct dentry *dentry);
72228+	int		(*pre_rate_change)(struct clk_hw *hw,
72229+					   unsigned long rate,
72230+					   unsigned long new_rate);
72231+	int		(*post_rate_change)(struct clk_hw *hw,
72232+					    unsigned long old_rate,
72233+					    unsigned long rate);
72234 };
72235 
72236 /**
72237@@ -599,6 +606,7 @@ struct clk_divider {
72238 	u8		shift;
72239 	u8		width;
72240 	u8		flags;
72241+	unsigned long	max_prate;
72242 	const struct clk_div_table	*table;
72243 	spinlock_t	*lock;
72244 };
72245@@ -947,6 +955,7 @@ struct clk_fractional_divider {
72246 	u8		nwidth;
72247 	u32		nmask;
72248 	u8		flags;
72249+	unsigned long	max_prate;
72250 	void		(*approximation)(struct clk_hw *hw,
72251 				unsigned long rate, unsigned long *parent_rate,
72252 				unsigned long *m, unsigned long *n);
72253@@ -957,6 +966,7 @@ struct clk_fractional_divider {
72254 
72255 #define CLK_FRAC_DIVIDER_ZERO_BASED		BIT(0)
72256 #define CLK_FRAC_DIVIDER_BIG_ENDIAN		BIT(1)
72257+#define CLK_FRAC_DIVIDER_NO_LIMIT		BIT(2)
72258 
72259 extern const struct clk_ops clk_fractional_divider_ops;
72260 struct clk *clk_register_fractional_divider(struct device *dev,
72261@@ -1028,6 +1038,7 @@ struct clk_composite {
72262 	struct clk_hw	*mux_hw;
72263 	struct clk_hw	*rate_hw;
72264 	struct clk_hw	*gate_hw;
72265+	struct clk_hw	*brother_hw;
72266 
72267 	const struct clk_ops	*mux_ops;
72268 	const struct clk_ops	*rate_ops;
72269@@ -1076,6 +1087,7 @@ void devm_clk_unregister(struct device *dev, struct clk *clk);
72270 
72271 void clk_hw_unregister(struct clk_hw *hw);
72272 void devm_clk_hw_unregister(struct device *dev, struct clk_hw *hw);
72273+void clk_sync_state(struct device *dev);
72274 
72275 /* helper functions */
72276 const char *__clk_get_name(const struct clk *clk);
72277@@ -1088,6 +1100,9 @@ static inline struct clk_hw *__clk_get_hw(struct clk *clk)
72278 	return (struct clk_hw *)clk;
72279 }
72280 #endif
72281+struct clk *clk_hw_get_clk(struct clk_hw *hw, const char *con_id);
72282+struct clk *devm_clk_hw_get_clk(struct device *dev, struct clk_hw *hw,
72283+				const char *con_id);
72284 unsigned int clk_hw_get_num_parents(const struct clk_hw *hw);
72285 struct clk_hw *clk_hw_get_parent(const struct clk_hw *hw);
72286 struct clk_hw *clk_hw_get_parent_by_index(const struct clk_hw *hw,
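A minimal sketch of how a clock provider could wire up the new rate-change hooks added to struct clk_ops; the rk_* names are hypothetical, only the callback signatures come from the header above, and a real clock would also fill in the usual recalc/set_rate callbacks:

#include <linux/clk-provider.h>

/* Hypothetical callbacks run around a rate change on this clock. */
static int rk_example_pre_rate_change(struct clk_hw *hw, unsigned long rate,
				      unsigned long new_rate)
{
	/* e.g. re-parent to a safe clock before the PLL is reprogrammed */
	return 0;
}

static int rk_example_post_rate_change(struct clk_hw *hw,
				       unsigned long old_rate,
				       unsigned long rate)
{
	/* e.g. switch back to the PLL once the new rate is stable */
	return 0;
}

static const struct clk_ops rk_example_ops = {
	/* ... recalc_rate/round_rate/set_rate as usual ... */
	.pre_rate_change  = rk_example_pre_rate_change,
	.post_rate_change = rk_example_post_rate_change,
};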
72287diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
72288index 0c5706abb..b17991437 100644
72289--- a/include/linux/dma-buf.h
72290+++ b/include/linux/dma-buf.h
72291@@ -209,6 +209,40 @@ struct dma_buf_ops {
72292 	 */
72293 	int (*begin_cpu_access)(struct dma_buf *, enum dma_data_direction);
72294 
72295+	/**
72296+	 * @begin_cpu_access_partial:
72297+	 *
72298+	 * This is called from dma_buf_begin_cpu_access_partial() and allows the
72299+	 * exporter to ensure that the memory specified in the range is
72300+	 * available for cpu access - the exporter might need to allocate or
72301+	 * swap-in and pin the backing storage.
72302+	 * The exporter also needs to ensure that cpu access is
72303+	 * coherent for the access direction. The direction can be used by the
72304+	 * exporter to optimize the cache flushing, i.e. access with a different
72305+	 * direction (read instead of write) might return stale or even bogus
72306+	 * data (e.g. when the exporter needs to copy the data to temporary
72307+	 * storage).
72308+	 *
72309+	 * This callback is optional.
72310+	 *
72311+	 * FIXME: This is both called through the DMA_BUF_IOCTL_SYNC command
72312+	 * from userspace (where storage shouldn't be pinned to avoid handing
72313+	 * de-facto mlock rights to userspace) and for the kernel-internal
72314+	 * users of the various kmap interfaces, where the backing storage must
72315+	 * be pinned to guarantee that the atomic kmap calls can succeed. Since
72316+	 * there are no in-kernel users of the kmap interfaces yet, this isn't a
72317+	 * real problem.
72318+	 *
72319+	 * Returns:
72320+	 *
72321+	 * 0 on success or a negative error code on failure. This can for
72322+	 * example fail when the backing storage can't be allocated. Can also
72323+	 * return -ERESTARTSYS or -EINTR when the call has been interrupted and
72324+	 * needs to be restarted.
72325+	 */
72326+	int (*begin_cpu_access_partial)(struct dma_buf *dmabuf,
72327+					enum dma_data_direction,
72328+					unsigned int offset, unsigned int len);
72329 	/**
72330 	 * @end_cpu_access:
72331 	 *
72332@@ -227,6 +261,9 @@ struct dma_buf_ops {
72333 	 * to be restarted.
72334 	 */
72335 	int (*end_cpu_access)(struct dma_buf *, enum dma_data_direction);
72336+	int (*end_cpu_access_partial)(struct dma_buf *dmabuf,
72337+				      enum dma_data_direction,
72338+				      unsigned int offset, unsigned int len);
72339 
72340 	/**
72341 	 * @mmap:
72342@@ -267,7 +304,12 @@ struct dma_buf_ops {
72343 
72344 	void *(*vmap)(struct dma_buf *);
72345 	void (*vunmap)(struct dma_buf *, void *vaddr);
72346+	int (*get_uuid)(struct dma_buf *dmabuf, uuid_t *uuid);
72347+	int (*get_flags)(struct dma_buf *dmabuf, unsigned long *flags);
72348 };
72349+#ifdef CONFIG_NO_GKI
72350+typedef int (*dma_buf_destructor)(struct dma_buf *dmabuf, void *dtor_data);
72351+#endif
72352 
72353 /**
72354  * struct dma_buf - shared buffer object
72355@@ -343,6 +385,10 @@ struct dma_buf {
72356 		struct dma_buf *dmabuf;
72357 	} *sysfs_entry;
72358 #endif
72359+#ifdef CONFIG_NO_GKI
72360+	dma_buf_destructor dtor;
72361+	void *dtor_data;
72362+#endif
72363 };
72364 
72365 /**
72366@@ -412,6 +458,7 @@ struct dma_buf_attachment {
72367 	const struct dma_buf_attach_ops *importer_ops;
72368 	void *importer_priv;
72369 	void *priv;
72370+	unsigned long dma_map_attrs;
72371 };
72372 
72373 /**
72374@@ -489,6 +536,9 @@ dma_buf_attachment_is_dynamic(struct dma_buf_attachment *attach)
72375 	return !!attach->importer_ops;
72376 }
72377 
72378+int get_each_dmabuf(int (*callback)(const struct dma_buf *dmabuf,
72379+		    void *private), void *private);
72380+int is_dma_buf_file(struct file *file);
72381 struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
72382 					  struct device *dev);
72383 struct dma_buf_attachment *
72384@@ -513,13 +563,22 @@ void dma_buf_unmap_attachment(struct dma_buf_attachment *, struct sg_table *,
72385 void dma_buf_move_notify(struct dma_buf *dma_buf);
72386 int dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
72387 			     enum dma_data_direction dir);
72388+int dma_buf_begin_cpu_access_partial(struct dma_buf *dma_buf,
72389+				     enum dma_data_direction dir,
72390+				     unsigned int offset, unsigned int len);
72391 int dma_buf_end_cpu_access(struct dma_buf *dma_buf,
72392 			   enum dma_data_direction dir);
72393+int dma_buf_end_cpu_access_partial(struct dma_buf *dma_buf,
72394+				     enum dma_data_direction dir,
72395+				     unsigned int offset, unsigned int len);
72396 
72397 int dma_buf_mmap(struct dma_buf *, struct vm_area_struct *,
72398 		 unsigned long);
72399 void *dma_buf_vmap(struct dma_buf *);
72400 void dma_buf_vunmap(struct dma_buf *, void *vaddr);
72401+long dma_buf_set_name(struct dma_buf *dmabuf, const char *name);
72402+int dma_buf_get_flags(struct dma_buf *dmabuf, unsigned long *flags);
72403+int dma_buf_get_uuid(struct dma_buf *dmabuf, uuid_t *uuid);
72404 
72405 #ifdef CONFIG_DMABUF_PROCESS_INFO
72406 /**
72407@@ -532,4 +591,20 @@ void dma_buf_vunmap(struct dma_buf *, void *vaddr);
72408  */
72409 struct dma_buf *get_dma_buf_from_file(struct file *f);
72410 #endif /* CONFIG_DMABUF_PROCESS_INFO */
72411+
72412+#ifdef CONFIG_NO_GKI
72413+/**
72414+ * dma_buf_set_destructor - set the dma-buf's destructor
72415+ * @dmabuf:		[in]	pointer to dma-buf
72416+ * @dtor:		[in]	the destructor function
72417+ * @dtor_data:		[in]	destructor data associated with this buffer
72418+ */
72419+static inline void dma_buf_set_destructor(struct dma_buf *dmabuf,
72420+					  dma_buf_destructor dtor,
72421+					  void *dtor_data)
72422+{
72423+	dmabuf->dtor = dtor;
72424+	dmabuf->dtor_data = dtor_data;
72425+}
72426+#endif
72427 #endif /* __DMA_BUF_H__ */
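A minimal sketch of the CONFIG_NO_GKI destructor hook above, assuming a made-up per-export bookkeeping structure; only dma_buf_set_destructor() and the dma_buf_destructor typedef come from the header:

#include <linux/dma-buf.h>
#include <linux/printk.h>
#include <linux/slab.h>

/* Hypothetical bookkeeping released when the dma-buf is finally freed. */
struct example_export_ctx {
	size_t len;
};

static int example_dmabuf_dtor(struct dma_buf *dmabuf, void *dtor_data)
{
	struct example_export_ctx *ctx = dtor_data;

	pr_debug("releasing bookkeeping for a %zu byte export\n", ctx->len);
	kfree(ctx);
	return 0;
}

/* Call once after exporting; the hook then runs at buffer release time. */
static void example_arm_dtor(struct dma_buf *dmabuf,
			     struct example_export_ctx *ctx)
{
	dma_buf_set_destructor(dmabuf, example_dmabuf_dtor, ctx);
}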
72428diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h
72429index 09e23adb3..9f12efaaa 100644
72430--- a/include/linux/dma-fence.h
72431+++ b/include/linux/dma-fence.h
72432@@ -372,6 +372,9 @@ static inline void __dma_fence_might_wait(void) {}
72433 
72434 int dma_fence_signal(struct dma_fence *fence);
72435 int dma_fence_signal_locked(struct dma_fence *fence);
72436+int dma_fence_signal_timestamp(struct dma_fence *fence, ktime_t timestamp);
72437+int dma_fence_signal_timestamp_locked(struct dma_fence *fence,
72438+				      ktime_t timestamp);
72439 signed long dma_fence_default_wait(struct dma_fence *fence,
72440 				   bool intr, signed long timeout);
72441 int dma_fence_add_callback(struct dma_fence *fence,
72442diff --git a/include/linux/dma-heap.h b/include/linux/dma-heap.h
72443index 83b8cfb2d..b5c6f60ef 100644
72444--- a/include/linux/dma-heap.h
72445+++ b/include/linux/dma-heap.h
72446@@ -16,15 +16,17 @@ struct dma_heap;
72447 
72448 /**
72449  * struct dma_heap_ops - ops to operate on a given heap
72450- * @allocate:		allocate dmabuf and return fd
72451+ * @allocate:		allocate dmabuf and return struct dma_buf ptr
72452+ * @get_pool_size:	if heap maintains memory pools, get pool size in bytes
72453  *
72454- * allocate returns dmabuf fd  on success, -errno on error.
72455+ * allocate returns dmabuf on success, ERR_PTR(-errno) on error.
72456  */
72457 struct dma_heap_ops {
72458-	int (*allocate)(struct dma_heap *heap,
72459+	struct dma_buf *(*allocate)(struct dma_heap *heap,
72460 			unsigned long len,
72461 			unsigned long fd_flags,
72462 			unsigned long heap_flags);
72463+	long (*get_pool_size)(struct dma_heap *heap);
72464 };
72465 
72466 /**
72467@@ -59,10 +61,73 @@ void *dma_heap_get_drvdata(struct dma_heap *heap);
72468  */
72469 const char *dma_heap_get_name(struct dma_heap *heap);
72470 
72471+/**
72472+ * dma_heap_get_dev() - get device struct for the heap
72473+ * @heap: DMA-Heap to retrieve device struct from
72474+ *
72475+ * Returns:
72476+ * The device struct for the heap.
72477+ */
72478+struct device *dma_heap_get_dev(struct dma_heap *heap);
72479+
72489 /**
72490  * dma_heap_add - adds a heap to dmabuf heaps
72491  * @exp_info:		information needed to register this heap
72492  */
72493 struct dma_heap *dma_heap_add(const struct dma_heap_export_info *exp_info);
72494 
72495+/**
72496+ * dma_heap_put - drops a reference to a dmabuf heaps, potentially freeing it
72497+ * @heap:		heap pointer
72498+ */
72499+void dma_heap_put(struct dma_heap *heap);
72500+
72501+/**
72502+ * dma_heap_find - Returns the registered dma_heap with the specified name
72503+ * @name: Name of the heap to find
72504+ *
72505+ * NOTE: dma_heaps returned from this function MUST be released
72506+ * using dma_heap_put() when the user is done.
72507+ */
72508+struct dma_heap *dma_heap_find(const char *name);
72509+
72510+/**
72511+ * dma_heap_buffer_alloc - Allocate dma-buf from a dma_heap
72512+ * @heap:	dma_heap to allocate from
72513+ * @len:	size to allocate
72514+ * @fd_flags:	flags to set on returned dma-buf fd
72515+ * @heap_flags:	flags to pass to the dma heap
72516+ *
72517+ * This is for internal dma-buf allocations only.
72518+ */
72519+struct dma_buf *dma_heap_buffer_alloc(struct dma_heap *heap, size_t len,
72520+				      unsigned int fd_flags,
72521+				      unsigned int heap_flags);
72522+
72523+/**
+ * dma_heap_buffer_free - Free dma_buf allocated by dma_heap_buffer_alloc
72524+ * @dma_buf:	dma_buf to free
72525+ *
72526+ * This is really only a simple wrapper to dma_buf_put()
72527+ */
72528+void dma_heap_buffer_free(struct dma_buf *);
72529+
72530+/**
72531+ * dma_heap_bufferfd_alloc - Allocate dma-buf fd from a dma_heap
72532+ * @heap:	dma_heap to allocate from
72533+ * @len:	size to allocate
72534+ * @fd_flags:	flags to set on returned dma-buf fd
72535+ * @heap_flags:	flags to pass to the dma heap
72536+ */
72537+int dma_heap_bufferfd_alloc(struct dma_heap *heap, size_t len,
72538+			    unsigned int fd_flags,
72539+			    unsigned int heap_flags);
72540 #endif /* _DMA_HEAPS_H */
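Taken together, the declarations above form a small in-kernel heap allocation API. A hedged usage sketch, where the "system" heap name is an assumption and error handling is simplified:

#include <linux/dma-buf.h>
#include <linux/dma-heap.h>
#include <linux/err.h>
#include <linux/fcntl.h>

/* Sketch: allocate a buffer from a named heap and release it again. */
static int example_heap_alloc(size_t len)
{
	struct dma_heap *heap;
	struct dma_buf *buf;

	heap = dma_heap_find("system");
	if (!heap)
		return -ENOENT;

	buf = dma_heap_buffer_alloc(heap, len, O_RDWR, 0);
	if (IS_ERR(buf)) {
		dma_heap_put(heap);
		return PTR_ERR(buf);
	}

	/* ... attach or map the dma-buf here ... */

	dma_heap_buffer_free(buf);	/* wrapper around dma_buf_put() */
	dma_heap_put(heap);
	return 0;
}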
72541diff --git a/include/linux/dma-iommu.h b/include/linux/dma-iommu.h
72542index 2112f21f7..f51561eda 100644
72543--- a/include/linux/dma-iommu.h
72544+++ b/include/linux/dma-iommu.h
72545@@ -37,6 +37,11 @@ void iommu_dma_compose_msi_msg(struct msi_desc *desc,
72546 
72547 void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list);
72548 
72549+int iommu_dma_reserve_iova(struct device *dev, dma_addr_t base,
72550+			   u64 size);
72551+
72552+int iommu_dma_enable_best_fit_algo(struct device *dev);
72553+
72554 #else /* CONFIG_IOMMU_DMA */
72555 
72556 struct iommu_domain;
72557@@ -78,5 +83,16 @@ static inline void iommu_dma_get_resv_regions(struct device *dev, struct list_he
72558 {
72559 }
72560 
72561+static inline int iommu_dma_reserve_iova(struct device *dev, dma_addr_t base,
72562+					 u64 size)
72563+{
72564+	return -ENODEV;
72565+}
72566+
72567+static inline int iommu_dma_enable_best_fit_algo(struct device *dev)
72568+{
72569+	return -ENODEV;
72570+}
72571+
72572 #endif	/* CONFIG_IOMMU_DMA */
72573 #endif	/* __DMA_IOMMU_H */
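A short hedged sketch of the new reservation helper; the base and size are placeholders, and since the !CONFIG_IOMMU_DMA stub above returns -ENODEV, callers should treat failure as non-fatal where possible:

#include <linux/device.h>
#include <linux/dma-iommu.h>
#include <linux/sizes.h>

/* Sketch: keep a fixed window out of this device's IOVA allocator. */
static int example_reserve_iova(struct device *dev)
{
	int ret;

	ret = iommu_dma_reserve_iova(dev, 0x0, SZ_256M);
	if (ret)
		dev_warn(dev, "failed to reserve IOVA range: %d\n", ret);

	return ret;
}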
72574diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
72575index a7d70cdee..7270f12e2 100644
72576--- a/include/linux/dma-mapping.h
72577+++ b/include/linux/dma-mapping.h
72578@@ -61,6 +61,23 @@
72579  */
72580 #define DMA_ATTR_PRIVILEGED		(1UL << 9)
72581 
72582+/*
72583+ * DMA_ATTR_SYS_CACHE_ONLY: used to indicate that the buffer should be mapped
72584+ * with the correct memory attributes so that it can be cached in the system
72585+ * or last level cache. This is useful for buffers that are being mapped for
72586+ * devices that are non-coherent, but can use the system cache.
72587+ */
72588+#define DMA_ATTR_SYS_CACHE_ONLY		(1UL << 10)
72589+
72590+/*
72591+ * DMA_ATTR_SYS_CACHE_ONLY_NWA: used to indicate that the buffer should be
72592+ * mapped with the correct memory attributes so that it can be cached in the
72593+ * system or last level cache, with a no write allocate cache policy. This is
72594+ * useful for buffers that are being mapped for devices that are non-coherent,
72595+ * but can use the system cache.
72596+ */
72597+#define DMA_ATTR_SYS_CACHE_ONLY_NWA	(1UL << 11)
72598+
72599 /*
72600  * A dma_addr_t can hold any valid DMA or bus address for the platform.  It can
72601  * be given to a device to use as a DMA source or target.  It is specific to a
72602diff --git a/include/linux/energy_model.h b/include/linux/energy_model.h
72603index 5f04a2b35..9ca1ea02f 100644
72604--- a/include/linux/energy_model.h
72605+++ b/include/linux/energy_model.h
72606@@ -29,6 +29,8 @@ struct em_perf_state {
72607  * em_perf_domain - Performance domain
72608  * @table:		List of performance states, in ascending order
72609  * @nr_perf_states:	Number of performance states
72610+ * @milliwatts:		Flag indicating the power values are in milli-Watts
72611+ *			or some other scale.
72612  * @cpus:		Cpumask covering the CPUs of the domain. It's here
72613  *			for performance reasons to avoid potential cache
72614  *			misses during energy calculations in the scheduler
72615@@ -43,6 +45,7 @@ struct em_perf_state {
72616 struct em_perf_domain {
72617 	struct em_perf_state *table;
72618 	int nr_perf_states;
72619+	int milliwatts;
72620 	unsigned long cpus[];
72621 };
72622 
72623@@ -95,7 +98,8 @@ struct em_data_callback {
72624 struct em_perf_domain *em_cpu_get(int cpu);
72625 struct em_perf_domain *em_pd_get(struct device *dev);
72626 int em_dev_register_perf_domain(struct device *dev, unsigned int nr_states,
72627-				struct em_data_callback *cb, cpumask_t *span);
72628+				struct em_data_callback *cb, cpumask_t *span,
72629+				bool milliwatts);
72630 void em_dev_unregister_perf_domain(struct device *dev);
72631 
72632 /**
72633@@ -119,6 +123,9 @@ static inline unsigned long em_cpu_energy(struct em_perf_domain *pd,
72634 	struct em_perf_state *ps;
72635 	int i, cpu;
72636 
72637+	if (!sum_util)
72638+		return 0;
72639+
72640 	/*
72641 	 * In order to predict the performance state, map the utilization of
72642 	 * the most utilized CPU of the performance domain to a requested
72643@@ -202,7 +209,8 @@ struct em_data_callback {};
72644 
72645 static inline
72646 int em_dev_register_perf_domain(struct device *dev, unsigned int nr_states,
72647-				struct em_data_callback *cb, cpumask_t *span)
72648+				struct em_data_callback *cb, cpumask_t *span,
72649+				bool milliwatts)
72650 {
72651 	return -EINVAL;
72652 }
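The added bool records whether the callback reports real milliwatts or an abstract scale. A minimal call-site sketch, with the data callback and CPU mask assumed to be prepared by the caller as usual:

#include <linux/cpumask.h>
#include <linux/device.h>
#include <linux/energy_model.h>

/* Sketch: register a perf domain whose power numbers are genuine milliwatts. */
static int example_register_em(struct device *cpu_dev, unsigned int nr_opps,
			       struct em_data_callback *em_cb,
			       cpumask_t *cpus)
{
	/* last argument: true = milliwatts, false = abstract (bogoWatt) scale */
	return em_dev_register_perf_domain(cpu_dev, nr_opps, em_cb, cpus, true);
}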
72653diff --git a/include/linux/extcon.h b/include/linux/extcon.h
72654index fa1e431ee..6f851be8c 100644
72655--- a/include/linux/extcon.h
72656+++ b/include/linux/extcon.h
72657@@ -37,6 +37,7 @@
72658 /* USB external connector */
72659 #define EXTCON_USB		1
72660 #define EXTCON_USB_HOST		2
72661+#define EXTCON_USB_VBUS_EN	3
72662 
72663 /*
72664  * Charging external connector
72665diff --git a/include/linux/freezer.h b/include/linux/freezer.h
72666index 27828145c..cd57fd734 100644
72667--- a/include/linux/freezer.h
72668+++ b/include/linux/freezer.h
72669@@ -27,6 +27,10 @@ static inline bool frozen(struct task_struct *p)
72670 	return p->flags & PF_FROZEN;
72671 }
72672 
72673+static inline bool frozen_or_skipped(struct task_struct *p)
72674+{
72675+	return p->flags & (PF_FROZEN | PF_FREEZER_SKIP);
72676+}
72677 extern bool freezing_slow_path(struct task_struct *p);
72678 
72679 /*
72680@@ -270,6 +274,7 @@ static inline int freezable_schedule_hrtimeout_range(ktime_t *expires,
72681 
72682 #else /* !CONFIG_FREEZER */
72683 static inline bool frozen(struct task_struct *p) { return false; }
72684+static inline bool frozen_or_skipped(struct task_struct *p) { return false; }
72685 static inline bool freezing(struct task_struct *p) { return false; }
72686 static inline void __thaw_task(struct task_struct *t) {}
72687 
72688diff --git a/include/linux/iommu.h b/include/linux/iommu.h
72689index e90c267e7..2476f1a97 100644
72690--- a/include/linux/iommu.h
72691+++ b/include/linux/iommu.h
72692@@ -31,6 +31,18 @@
72693  * if the IOMMU page table format is equivalent.
72694  */
72695 #define IOMMU_PRIV	(1 << 5)
72696+/*
72697+ * Allow caching in a transparent outer level of cache, also known as
72698+ * the last-level or system cache, with a read/write allocation policy.
72699+ * Does not depend on IOMMU_CACHE. Incompatible with IOMMU_SYS_CACHE_NWA.
72700+ */
72701+#define IOMMU_SYS_CACHE	(1 << 6)
72702+/*
72703+ * Allow caching in a transparent outer level of cache, also known as
72704+ * the last-level or system cache, with a read allocation policy.
72705+ * Does not depend on IOMMU_CACHE. Incompatible with IOMMU_SYS_CACHE.
72706+ */
72707+#define IOMMU_SYS_CACHE_NWA (1 << 7)
72708 
72709 struct iommu_ops;
72710 struct iommu_group;
72711@@ -190,7 +202,12 @@ struct iommu_iotlb_gather {
72712  * @attach_dev: attach device to an iommu domain
72713  * @detach_dev: detach device from an iommu domain
72714  * @map: map a physically contiguous memory region to an iommu domain
72715+ * @map_pages: map a physically contiguous set of pages of the same size to
72716+ *             an iommu domain.
72717+ * @map_sg: map a scatter-gather list of physically contiguous chunks to
72718+ *          an iommu domain.
72719  * @unmap: unmap a physically contiguous memory region from an iommu domain
72720+ * @unmap_pages: unmap a number of pages of the same size from an iommu domain
72721  * @flush_iotlb_all: Synchronously flush all hardware TLBs for this domain
72722  * @iotlb_sync_map: Sync mappings created recently using @map to the hardware
72723  * @iotlb_sync: Flush all queued ranges from the hardware TLBs and empty flush
72724@@ -241,10 +258,20 @@ struct iommu_ops {
72725 	void (*detach_dev)(struct iommu_domain *domain, struct device *dev);
72726 	int (*map)(struct iommu_domain *domain, unsigned long iova,
72727 		   phys_addr_t paddr, size_t size, int prot, gfp_t gfp);
72728+	int (*map_pages)(struct iommu_domain *domain, unsigned long iova,
72729+			 phys_addr_t paddr, size_t pgsize, size_t pgcount,
72730+			 int prot, gfp_t gfp, size_t *mapped);
72731+	int (*map_sg)(struct iommu_domain *domain, unsigned long iova,
72732+		      struct scatterlist *sg, unsigned int nents, int prot,
72733+		      gfp_t gfp, size_t *mapped);
72734 	size_t (*unmap)(struct iommu_domain *domain, unsigned long iova,
72735 		     size_t size, struct iommu_iotlb_gather *iotlb_gather);
72736+	size_t (*unmap_pages)(struct iommu_domain *domain, unsigned long iova,
72737+			      size_t pgsize, size_t pgcount,
72738+			      struct iommu_iotlb_gather *iotlb_gather);
72739 	void (*flush_iotlb_all)(struct iommu_domain *domain);
72740-	void (*iotlb_sync_map)(struct iommu_domain *domain);
72741+	void (*iotlb_sync_map)(struct iommu_domain *domain, unsigned long iova,
72742+			       size_t size);
72743 	void (*iotlb_sync)(struct iommu_domain *domain,
72744 			   struct iommu_iotlb_gather *iotlb_gather);
72745 	phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, dma_addr_t iova);
72746@@ -561,6 +588,8 @@ static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain,
72747 extern struct iommu_group *pci_device_group(struct device *dev);
72748 /* Generic device grouping function */
72749 extern struct iommu_group *generic_device_group(struct device *dev);
72750+extern void rk_iommu_mask_irq(struct device *dev);
72751+extern void rk_iommu_unmask_irq(struct device *dev);
72752 /* FSL-MC device grouping function */
72753 struct iommu_group *fsl_mc_device_group(struct device *dev);
72754 
72755@@ -1069,6 +1098,14 @@ static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev)
72756 {
72757 	return NULL;
72758 }
72759+
72760+static inline void rk_iommu_mask_irq(struct device *dev)
72761+{
72762+}
72763+
72764+static inline void rk_iommu_unmask_irq(struct device *dev)
72765+{
72766+}
72767 #endif /* CONFIG_IOMMU_API */
72768 
72769 /**
72770diff --git a/include/linux/iova.h b/include/linux/iova.h
72771index 6c19b09e9..0394d44f7 100644
72772--- a/include/linux/iova.h
72773+++ b/include/linux/iova.h
72774@@ -95,6 +95,7 @@ struct iova_domain {
72775 						   flush-queues */
72776 	atomic_t fq_timer_on;			/* 1 when timer is active, 0
72777 						   when not */
72778+	bool best_fit;
72779 };
72780 
72781 static inline unsigned long iova_size(struct iova *iova)
72782@@ -163,6 +164,7 @@ void put_iova_domain(struct iova_domain *iovad);
72783 struct iova *split_and_remove_iova(struct iova_domain *iovad,
72784 	struct iova *iova, unsigned long pfn_lo, unsigned long pfn_hi);
72785 void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad);
72786+void free_global_cached_iovas(struct iova_domain *iovad);
72787 #else
72788 static inline int iova_cache_get(void)
72789 {
72790@@ -270,6 +272,11 @@ static inline void free_cpu_cached_iovas(unsigned int cpu,
72791 					 struct iova_domain *iovad)
72792 {
72793 }
72794+
72795+static inline void free_global_cached_iovas(struct iova_domain *iovad)
72796+{
72797+}
72798+
72799 #endif
72800 
72801 #endif
72802diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
72803index f6d092fdb..43348c487 100644
72804--- a/include/linux/irqchip/arm-gic-v3.h
72805+++ b/include/linux/irqchip/arm-gic-v3.h
72806@@ -693,6 +693,20 @@ int its_init(struct fwnode_handle *handle, struct rdists *rdists,
72807 	     struct irq_domain *domain);
72808 int mbi_init(struct fwnode_handle *fwnode, struct irq_domain *parent);
72809 
72810+struct gic_chip_data {
72811+	struct fwnode_handle	*fwnode;
72812+	void __iomem		*dist_base;
72813+	struct redist_region	*redist_regions;
72814+	struct rdists		rdists;
72815+	struct irq_domain	*domain;
72816+	u64			redist_stride;
72817+	u32			nr_redist_regions;
72818+	u64			flags;
72819+	bool			has_rss;
72820+	unsigned int		ppi_nr;
72821+	struct partition_desc	**ppi_descs;
72822+};
72823+
72824 static inline bool gic_enable_sre(void)
72825 {
72826 	u32 val;
72827@@ -708,6 +722,8 @@ static inline bool gic_enable_sre(void)
72828 	return !!(val & ICC_SRE_EL1_SRE);
72829 }
72830 
72831+void gic_resume(void);
72832+
72833 #endif
72834 
72835 #endif
72836diff --git a/include/linux/irqchip/arm-gic-v4.h b/include/linux/irqchip/arm-gic-v4.h
72837index 6976b8331..943c3411c 100644
72838--- a/include/linux/irqchip/arm-gic-v4.h
72839+++ b/include/linux/irqchip/arm-gic-v4.h
72840@@ -39,6 +39,8 @@ struct its_vpe {
72841 	irq_hw_number_t		vpe_db_lpi;
72842 	/* VPE resident */
72843 	bool			resident;
72844+	/* VPT parse complete */
72845+	bool			ready;
72846 	union {
72847 		/* GICv4.0 implementations */
72848 		struct {
72849@@ -104,6 +106,7 @@ enum its_vcpu_info_cmd_type {
72850 	PROP_UPDATE_AND_INV_VLPI,
72851 	SCHEDULE_VPE,
72852 	DESCHEDULE_VPE,
72853+	COMMIT_VPE,
72854 	INVALL_VPE,
72855 	PROP_UPDATE_VSGI,
72856 };
72857@@ -129,6 +132,7 @@ int its_alloc_vcpu_irqs(struct its_vm *vm);
72858 void its_free_vcpu_irqs(struct its_vm *vm);
72859 int its_make_vpe_resident(struct its_vpe *vpe, bool g0en, bool g1en);
72860 int its_make_vpe_non_resident(struct its_vpe *vpe, bool db);
72861+int its_commit_vpe(struct its_vpe *vpe);
72862 int its_invall_vpe(struct its_vpe *vpe);
72863 int its_map_vlpi(int irq, struct its_vlpi_map *map);
72864 int its_get_vlpi(int irq, struct its_vlpi_map *map);
72865diff --git a/include/linux/mfd/rk808.h b/include/linux/mfd/rk808.h
72866index e07f6e61c..7cdc5dfa4 100644
72867--- a/include/linux/mfd/rk808.h
72868+++ b/include/linux/mfd/rk808.h
72869@@ -113,6 +113,235 @@ enum rk808_reg {
72870 #define RK808_INT_STS_MSK_REG2	0x4f
72871 #define RK808_IO_POL_REG	0x50
72872 
72873+/* RK816 */
72874+enum rk816_reg {
72875+	RK816_ID_DCDC1,
72876+	RK816_ID_DCDC2,
72877+	RK816_ID_DCDC3,
72878+	RK816_ID_DCDC4,
72879+	RK816_ID_LDO1,
72880+	RK816_ID_LDO2,
72881+	RK816_ID_LDO3,
72882+	RK816_ID_LDO4,
72883+	RK816_ID_LDO5,
72884+	RK816_ID_LDO6,
72885+};
72886+
72887+/* VERSION REGISTER */
72888+#define RK816_CHIP_NAME_REG			0x17
72889+#define RK816_CHIP_VER_REG			0x18
72890+#define RK816_OTP_VER_REG			0x19
72891+#define RK816_NUM_REGULATORS			10
72892+
72893+/* POWER ON/OFF REGISTER */
72894+#define RK816_VB_MON_REG			0x21
72895+#define RK816_THERMAL_REG			0x22
72896+#define RK816_PWRON_LP_INT_TIME_REG		0x47
72897+#define RK816_PWRON_DB_REG			0x48
72898+#define RK816_DEV_CTRL_REG			0x4B
72899+#define RK816_ON_SOURCE_REG			0xAE
72900+#define RK816_OFF_SOURCE_REG			0xAF
72901+
72902+/* POWER CHANNELS ENABLE REGISTER */
72903+#define RK816_DCDC_EN_REG1			0x23
72904+#define RK816_DCDC_EN_REG2			0x24
72905+#define RK816_SLP_DCDC_EN_REG			0x25
72906+#define RK816_SLP_LDO_EN_REG			0x26
72907+#define RK816_LDO_EN_REG1			0x27
72908+#define RK816_LDO_EN_REG2			0x28
72909+
72910+/* BUCK AND LDO CONFIG REGISTER */
72911+#define RK816_BUCK1_CONFIG_REG			0x2E
72912+#define RK816_BUCK1_ON_VSEL_REG			0x2F
72913+#define RK816_BUCK1_SLP_VSEL_REG		0x30
72914+#define RK816_BUCK2_CONFIG_REG			0x32
72915+#define RK816_BUCK2_ON_VSEL_REG			0x33
72916+#define RK816_BUCK2_SLP_VSEL_REG		0x34
72917+#define RK816_BUCK3_CONFIG_REG			0x36
72918+#define RK816_BUCK4_CONFIG_REG			0x37
72919+#define RK816_BUCK4_ON_VSEL_REG			0x38
72920+#define RK816_BUCK4_SLP_VSEL_REG		0x39
72921+#define RK816_LDO1_ON_VSEL_REG			0x3B
72922+#define RK816_LDO1_SLP_VSEL_REG			0x3C
72923+#define RK816_LDO2_ON_VSEL_REG			0x3D
72924+#define RK816_LDO2_SLP_VSEL_REG			0x3E
72925+#define RK816_LDO3_ON_VSEL_REG			0x3F
72926+#define RK816_LDO3_SLP_VSEL_REG			0x40
72927+#define RK816_LDO4_ON_VSEL_REG			0x41
72928+#define RK816_LDO4_SLP_VSEL_REG			0x42
72929+#define RK816_LDO5_ON_VSEL_REG			0x43
72930+#define RK816_LDO5_SLP_VSEL_REG			0x44
72931+#define RK816_LDO6_ON_VSEL_REG			0x45
72932+#define RK816_LDO6_SLP_VSEL_REG			0x46
72933+#define RK816_GPIO_IO_POL_REG			0x50
72934+
72935+/* CHARGER BOOST AND OTG REGISTER */
72936+#define RK816_OTG_BUCK_LDO_CONFIG_REG           0x2A
72937+#define RK816_CHRG_CONFIG_REG                   0x2B
72938+#define RK816_BOOST_ON_VESL_REG                 0x54
72939+#define RK816_BOOST_SLP_VSEL_REG                0x55
72940+#define RK816_CHRG_BOOST_CONFIG_REG             0x9A
72941+#define RK816_SUP_STS_REG                       0xA0
72942+#define RK816_USB_CTRL_REG                      0xA1
72943+#define RK816_CHRG_CTRL_REG1                    0xA3
72944+#define RK816_CHRG_CTRL_REG2                    0xA4
72945+#define RK816_CHRG_CTRL_REG3                    0xA5
72946+#define RK816_BAT_CTRL_REG                      0xA6
72947+#define RK816_BAT_HTS_TS_REG                    0xA8
72948+#define RK816_BAT_LTS_TS_REG                    0xA9
72949+
72950+#define RK816_TS_CTRL_REG			0xAC
72951+#define RK816_ADC_CTRL_REG			0xAD
72952+#define RK816_GGCON_REG				0xB0
72953+#define RK816_GGSTS_REG				0xB1
72954+#define RK816_ZERO_CUR_ADC_REGH			0xB2
72955+#define RK816_ZERO_CUR_ADC_REGL			0xB3
72956+#define RK816_GASCNT_CAL_REG3			0xB4
72957+#define RK816_GASCNT_CAL_REG2			0xB5
72958+#define RK816_GASCNT_CAL_REG1			0xB6
72959+#define RK816_GASCNT_CAL_REG0			0xB7
72960+#define RK816_GASCNT_REG3			0xB8
72961+#define RK816_GASCNT_REG2			0xB9
72962+#define RK816_GASCNT_REG1			0xBA
72963+#define RK816_GASCNT_REG0			0xBB
72964+#define RK816_BAT_CUR_AVG_REGH			0xBC
72965+#define RK816_BAT_CUR_AVG_REGL			0xBD
72966+#define RK816_TS_ADC_REGH			0xBE
72967+#define RK816_TS_ADC_REGL			0xBF
72968+#define RK816_USB_ADC_REGH			0xC0
72969+#define RK816_USB_ADC_REGL			0xC1
72970+#define RK816_BAT_OCV_REGH			0xC2
72971+#define RK816_BAT_OCV_REGL			0xC3
72972+#define RK816_BAT_VOL_REGH			0xC4
72973+#define RK816_BAT_VOL_REGL			0xC5
72974+#define RK816_RELAX_ENTRY_THRES_REGH		0xC6
72975+#define RK816_RELAX_ENTRY_THRES_REGL		0xC7
72976+#define RK816_RELAX_EXIT_THRES_REGH		0xC8
72977+#define RK816_RELAX_EXIT_THRES_REGL		0xC9
72978+#define RK816_RELAX_VOL1_REGH			0xCA
72979+#define RK816_RELAX_VOL1_REGL			0xCB
72980+#define RK816_RELAX_VOL2_REGH			0xCC
72981+#define RK816_RELAX_VOL2_REGL			0xCD
72982+#define RK816_RELAX_CUR1_REGH			0xCE
72983+#define RK816_RELAX_CUR1_REGL			0xCF
72984+#define RK816_RELAX_CUR2_REGH			0xD0
72985+#define RK816_RELAX_CUR2_REGL			0xD1
72986+#define RK816_CAL_OFFSET_REGH			0xD2
72987+#define RK816_CAL_OFFSET_REGL			0xD3
72988+#define RK816_NON_ACT_TIMER_CNT_REG		0xD4
72989+#define RK816_VCALIB0_REGH			0xD5
72990+#define RK816_VCALIB0_REGL			0xD6
72991+#define RK816_VCALIB1_REGH			0xD7
72992+#define RK816_VCALIB1_REGL			0xD8
72993+#define RK816_FCC_GASCNT_REG3			0xD9
72994+#define RK816_FCC_GASCNT_REG2			0xDA
72995+#define RK816_FCC_GASCNT_REG1			0xDB
72996+#define RK816_FCC_GASCNT_REG0			0xDC
72997+#define RK816_IOFFSET_REGH			0xDD
72998+#define RK816_IOFFSET_REGL			0xDE
72999+#define RK816_SLEEP_CON_SAMP_CUR_REG		0xDF
73000+
73001+/* DATA REGISTER */
73002+#define RK816_SOC_REG				0xE0
73003+#define RK816_REMAIN_CAP_REG3			0xE1
73004+#define RK816_REMAIN_CAP_REG2			0xE2
73005+#define RK816_REMAIN_CAP_REG1			0xE3
73006+#define RK816_REMAIN_CAP_REG0			0xE4
73007+#define RK816_UPDATE_LEVE_REG			0xE5
73008+#define RK816_NEW_FCC_REG3			0xE6
73009+#define RK816_NEW_FCC_REG2			0xE7
73010+#define RK816_NEW_FCC_REG1			0xE8
73011+#define RK816_NEW_FCC_REG0			0xE9
73012+#define RK816_NON_ACT_TIMER_CNT_REG_SAVE	0xEA
73013+#define RK816_OCV_VOL_VALID_REG			0xEB
73014+#define RK816_REBOOT_CNT_REG			0xEC
73015+#define RK816_PCB_IOFFSET_REG			0xED
73016+#define RK816_MISC_MARK_REG			0xEE
73017+#define RK816_HALT_CNT_REG			0xEF
73018+#define RK816_CALC_REST_REGH			0xF0
73019+#define RK816_CALC_REST_REGL			0xF1
73020+#define DATA18_REG				0xF2
73021+
73022+/* INTERRUPT REGISTER */
73023+#define RK816_INT_STS_REG1			0x49
73024+#define RK816_INT_STS_MSK_REG1			0x4A
73025+#define RK816_INT_STS_REG2			0x4C
73026+#define RK816_INT_STS_MSK_REG2			0x4D
73027+#define RK816_INT_STS_REG3			0x4E
73028+#define RK816_INT_STS_MSK_REG3			0x4F
73029+#define RK816_GPIO_IO_POL_REG			0x50
73030+
73031+#define RK816_DATA18_REG			0xF2
73032+
73033+/* IRQ Definitions */
73034+#define RK816_IRQ_PWRON_FALL			0
73035+#define RK816_IRQ_PWRON_RISE			1
73036+#define RK816_IRQ_VB_LOW			2
73037+#define RK816_IRQ_PWRON				3
73038+#define RK816_IRQ_PWRON_LP			4
73039+#define RK816_IRQ_HOTDIE			5
73040+#define RK816_IRQ_RTC_ALARM			6
73041+#define RK816_IRQ_RTC_PERIOD			7
73042+#define RK816_IRQ_USB_OV			8
73043+#define RK816_IRQ_PLUG_IN			9
73044+#define RK816_IRQ_PLUG_OUT			10
73045+#define RK816_IRQ_CHG_OK			11
73046+#define RK816_IRQ_CHG_TE			12
73047+#define RK816_IRQ_CHG_TS			13
73048+#define RK816_IRQ_CHG_CVTLIM			14
73049+#define RK816_IRQ_DISCHG_ILIM			15
73050+
73051+#define RK816_IRQ_PWRON_FALL_MSK		BIT(5)
73052+#define RK816_IRQ_PWRON_RISE_MSK		BIT(6)
73053+#define RK816_IRQ_VB_LOW_MSK			BIT(1)
73054+#define RK816_IRQ_PWRON_MSK			BIT(2)
73055+#define RK816_IRQ_PWRON_LP_MSK			BIT(3)
73056+#define RK816_IRQ_HOTDIE_MSK			BIT(4)
73057+#define RK816_IRQ_RTC_ALARM_MSK			BIT(5)
73058+#define RK816_IRQ_RTC_PERIOD_MSK		BIT(6)
73059+#define RK816_IRQ_USB_OV_MSK			BIT(7)
73060+#define RK816_IRQ_PLUG_IN_MSK			BIT(0)
73061+#define RK816_IRQ_PLUG_OUT_MSK			BIT(1)
73062+#define RK816_IRQ_CHG_OK_MSK			BIT(2)
73063+#define RK816_IRQ_CHG_TE_MSK			BIT(3)
73064+#define RK816_IRQ_CHG_TS_MSK			BIT(4)
73065+#define RK816_IRQ_CHG_CVTLIM_MSK		BIT(6)
73066+#define RK816_IRQ_DISCHG_ILIM_MSK		BIT(7)
73067+
73068+#define RK816_VBAT_LOW_2V8			0x00
73069+#define RK816_VBAT_LOW_2V9			0x01
73070+#define RK816_VBAT_LOW_3V0			0x02
73071+#define RK816_VBAT_LOW_3V1			0x03
73072+#define RK816_VBAT_LOW_3V2			0x04
73073+#define RK816_VBAT_LOW_3V3			0x05
73074+#define RK816_VBAT_LOW_3V4			0x06
73075+#define RK816_VBAT_LOW_3V5			0x07
73076+#define RK816_PWR_FALL_INT_STATUS		(0x1 << 5)
73077+#define RK816_PWR_RISE_INT_STATUS		(0x1 << 6)
73078+#define RK816_ALARM_INT_STATUS			(0x1 << 5)
73079+#define EN_VBAT_LOW_IRQ				(0x1 << 4)
73080+#define VBAT_LOW_ACT_MASK			(0x1 << 4)
73081+#define RTC_TIMER_ALARM_INT_MSK			(0x3 << 2)
73082+#define RTC_TIMER_ALARM_INT_DIS			(0x0 << 2)
73083+#define RTC_PERIOD_ALARM_INT_MSK		(0x3 << 5)
73084+#define RTC_PERIOD_ALARM_INT_ST			(0x3 << 5)
73085+#define RTC_PERIOD_ALARM_INT_DIS		(0x3 << 5)
73086+#define RTC_PERIOD_ALARM_INT_EN			(0x9f)
73087+#define REG_WRITE_MSK				0xff
73088+#define BUCK4_MAX_ILIMIT			0x2c
73089+#define BUCK_RATE_MSK				(0x3 << 3)
73090+#define BUCK_RATE_12_5MV_US			(0x2 << 3)
73091+#define ALL_INT_FLAGS_ST			0xff
73092+#define PLUGIN_OUT_INT_EN			0xfc
73093+#define RK816_PWRON_FALL_RISE_INT_EN		0x9f
73094+#define BUCK1_2_IMAX_MAX			(0x3 << 6)
73095+#define BUCK3_4_IMAX_MAX			(0x3 << 3)
73096+#define BOOST_DISABLE				((0x1 << 5) | (0x0 << 1))
73097+#define BUCK4_VRP_3PERCENT			0xc0
73098+#define RK816_BUCK_DVS_CONFIRM			(0x1 << 7)
73099+#define RK816_TYPE_ES2				0x05
73100+#define RK816_CHIP_VERSION_MASK			0x0f
73101+
73102 /* RK818 */
73103 #define RK818_DCDC1			0
73104 #define RK818_LDO1			4
73105@@ -138,6 +367,8 @@ enum rk818_reg {
73106 	RK818_ID_OTG_SWITCH,
73107 };
73108 
73109+#define RK818_VB_MON_REG		0x21
73110+#define RK818_THERMAL_REG		0x22
73111 #define RK818_DCDC_EN_REG		0x23
73112 #define RK818_LDO_EN_REG		0x24
73113 #define RK818_SLEEP_SET_OFF_REG1	0x25
73114@@ -190,7 +421,84 @@ enum rk818_reg {
73115 #define RK818_BOOST_LDO9_SLP_VSEL_REG	0x55
73116 #define RK818_BOOST_CTRL_REG		0x56
73117 #define RK818_DCDC_ILMAX		0x90
73118+#define RK818_CHRG_COMP_REG		0x9a
73119+#define RK818_SUP_STS_REG		0xa0
73120 #define RK818_USB_CTRL_REG		0xa1
73121+#define RK818_CHRG_CTRL_REG1		0xa3
73122+#define RK818_CHRG_CTRL_REG2		0xa4
73123+#define RK818_CHRG_CTRL_REG3		0xa5
73124+#define RK818_BAT_CTRL_REG		0xa6
73125+#define RK818_BAT_HTS_TS1_REG		0xa8
73126+#define RK818_BAT_LTS_TS1_REG		0xa9
73127+#define RK818_BAT_HTS_TS2_REG		0xaa
73128+#define RK818_BAT_LTS_TS2_REG		0xab
73129+#define RK818_TS_CTRL_REG		0xac
73130+#define RK818_ADC_CTRL_REG		0xad
73131+#define RK818_ON_SOURCE_REG		0xae
73132+#define RK818_OFF_SOURCE_REG		0xaf
73133+#define RK818_GGCON_REG			0xb0
73134+#define RK818_GGSTS_REG			0xb1
73135+#define RK818_FRAME_SMP_INTERV_REG	0xb2
73136+#define RK818_AUTO_SLP_CUR_THR_REG	0xb3
73137+#define RK818_GASCNT_CAL_REG3		0xb4
73138+#define RK818_GASCNT_CAL_REG2		0xb5
73139+#define RK818_GASCNT_CAL_REG1		0xb6
73140+#define RK818_GASCNT_CAL_REG0		0xb7
73141+#define RK818_GASCNT3_REG		0xb8
73142+#define RK818_GASCNT2_REG		0xb9
73143+#define RK818_GASCNT1_REG		0xba
73144+#define RK818_GASCNT0_REG		0xbb
73145+#define RK818_BAT_CUR_AVG_REGH		0xbc
73146+#define RK818_BAT_CUR_AVG_REGL		0xbd
73147+#define RK818_TS1_ADC_REGH		0xbe
73148+#define RK818_TS1_ADC_REGL		0xbf
73149+#define RK818_TS2_ADC_REGH		0xc0
73150+#define RK818_TS2_ADC_REGL		0xc1
73151+#define RK818_BAT_OCV_REGH		0xc2
73152+#define RK818_BAT_OCV_REGL		0xc3
73153+#define RK818_BAT_VOL_REGH		0xc4
73154+#define RK818_BAT_VOL_REGL		0xc5
73155+#define RK818_RELAX_ENTRY_THRES_REGH	0xc6
73156+#define RK818_RELAX_ENTRY_THRES_REGL	0xc7
73157+#define RK818_RELAX_EXIT_THRES_REGH	0xc8
73158+#define RK818_RELAX_EXIT_THRES_REGL	0xc9
73159+#define RK818_RELAX_VOL1_REGH		0xca
73160+#define RK818_RELAX_VOL1_REGL		0xcb
73161+#define RK818_RELAX_VOL2_REGH		0xcc
73162+#define RK818_RELAX_VOL2_REGL		0xcd
73163+#define RK818_BAT_CUR_R_CALC_REGH	0xce
73164+#define RK818_BAT_CUR_R_CALC_REGL	0xcf
73165+#define RK818_BAT_VOL_R_CALC_REGH	0xd0
73166+#define RK818_BAT_VOL_R_CALC_REGL	0xd1
73167+#define RK818_CAL_OFFSET_REGH		0xd2
73168+#define RK818_CAL_OFFSET_REGL		0xd3
73169+#define RK818_NON_ACT_TIMER_CNT_REG	0xd4
73170+#define RK818_VCALIB0_REGH		0xd5
73171+#define RK818_VCALIB0_REGL		0xd6
73172+#define RK818_VCALIB1_REGH		0xd7
73173+#define RK818_VCALIB1_REGL		0xd8
73174+#define RK818_IOFFSET_REGH		0xdd
73175+#define RK818_IOFFSET_REGL		0xde
73176+#define RK818_SOC_REG			0xe0
73177+#define RK818_REMAIN_CAP_REG3		0xe1
73178+#define RK818_REMAIN_CAP_REG2		0xe2
73179+#define RK818_REMAIN_CAP_REG1		0xe3
73180+#define RK818_REMAIN_CAP_REG0		0xe4
73181+#define RK818_UPDAT_LEVE_REG		0xe5
73182+#define RK818_NEW_FCC_REG3		0xe6
73183+#define RK818_NEW_FCC_REG2		0xe7
73184+#define RK818_NEW_FCC_REG1		0xe8
73185+#define RK818_NEW_FCC_REG0		0xe9
73186+#define RK818_NON_ACT_TIMER_CNT_SAVE_REG	0xea
73187+#define RK818_OCV_VOL_VALID_REG		0xeb
73188+#define RK818_REBOOT_CNT_REG		0xec
73189+#define RK818_POFFSET_REG		0xed
73190+#define RK818_MISC_MARK_REG		0xee
73191+#define RK818_HALT_CNT_REG		0xef
73192+#define RK818_CALC_REST_REGH		0xf0
73193+#define RK818_CALC_REST_REGL		0xf1
73194+#define RK818_SAVE_DATA19		0xf2
73195+#define RK818_NUM_REGULATOR		17
73196 
73197 #define RK818_H5V_EN			BIT(0)
73198 #define RK818_REF_RDY_CTRL		BIT(1)
73199@@ -255,14 +563,22 @@ enum rk805_reg {
73200 #define RK805_PWRON_FALL_RISE_INT_MSK	0x81
73201 
73202 /* RK805 IRQ Definitions */
73203-#define RK805_IRQ_PWRON_RISE		0
73204 #define RK805_IRQ_VB_LOW		1
73205 #define RK805_IRQ_PWRON			2
73206 #define RK805_IRQ_PWRON_LP		3
73207 #define RK805_IRQ_HOTDIE		4
73208 #define RK805_IRQ_RTC_ALARM		5
73209 #define RK805_IRQ_RTC_PERIOD		6
73210-#define RK805_IRQ_PWRON_FALL		7
73211+
73212+/*
73213+ * When a PMIC irq occurs, regmap-irq.c traverses all PMIC child
73214+ * interrupts from index 0 upwards. The fall interrupt is given the
73215+ * lower index so it is handled before rise and can be overridden by a
73216+ * later rise event. This helps to resolve the key-release glitch that
73217+ * produces a spurious fall event immediately after a rise.
73218+ */
73219+#define RK805_IRQ_PWRON_FALL		0
73220+#define RK805_IRQ_PWRON_RISE		7
73221 
73222 #define RK805_IRQ_PWRON_RISE_MSK	BIT(0)
73223 #define RK805_IRQ_VB_LOW_MSK		BIT(1)
73224@@ -289,6 +605,16 @@ enum rk805_reg {
73225 #define RK805_INT_ALARM_EN		(1 << 3)
73226 #define RK805_INT_TIMER_EN		(1 << 2)
73227 
73228+#define RK805_SLP_LDO_EN_OFFSET		-1
73229+#define RK805_SLP_DCDC_EN_OFFSET	2
73230+
73231+#define RK805_RAMP_RATE_OFFSET		3
73232+#define RK805_RAMP_RATE_MASK		(3 << RK805_RAMP_RATE_OFFSET)
73233+#define RK805_RAMP_RATE_3MV_PER_US	(0 << RK805_RAMP_RATE_OFFSET)
73234+#define RK805_RAMP_RATE_6MV_PER_US	(1 << RK805_RAMP_RATE_OFFSET)
73235+#define RK805_RAMP_RATE_12_5MV_PER_US	(2 << RK805_RAMP_RATE_OFFSET)
73236+#define RK805_RAMP_RATE_25MV_PER_US	(3 << RK805_RAMP_RATE_OFFSET)
73237+
73238 /* RK808 IRQ Definitions */
73239 #define RK808_IRQ_VOUT_LO	0
73240 #define RK808_IRQ_VB_LO		1
73241@@ -348,6 +674,107 @@ enum rk805_reg {
73242 
73243 #define RK818_NUM_IRQ		16
73244 
73245+/* RK818_DCDC_EN_REG */
73246+#define BUCK1_EN_MASK		BIT(0)
73247+#define BUCK2_EN_MASK		BIT(1)
73248+#define BUCK3_EN_MASK		BIT(2)
73249+#define BUCK4_EN_MASK		BIT(3)
73250+#define BOOST_EN_MASK		BIT(4)
73251+#define LDO9_EN_MASK		BIT(5)
73252+#define SWITCH_EN_MASK		BIT(6)
73253+#define OTG_EN_MASK		BIT(7)
73254+
73255+#define BUCK1_EN_ENABLE		BIT(0)
73256+#define BUCK2_EN_ENABLE		BIT(1)
73257+#define BUCK3_EN_ENABLE		BIT(2)
73258+#define BUCK4_EN_ENABLE		BIT(3)
73259+#define BOOST_EN_ENABLE		BIT(4)
73260+#define LDO9_EN_ENABLE		BIT(5)
73261+#define SWITCH_EN_ENABLE	BIT(6)
73262+#define OTG_EN_ENABLE		BIT(7)
73263+
73264+#define BUCK1_SLP_SET_MASK	BIT(0)
73265+#define BUCK2_SLP_SET_MASK	BIT(1)
73266+#define BUCK3_SLP_SET_MASK	BIT(2)
73267+#define BUCK4_SLP_SET_MASK	BIT(3)
73268+#define BOOST_SLP_SET_MASK	BIT(4)
73269+#define LDO9_SLP_SET_MASK	BIT(5)
73270+#define SWITCH_SLP_SET_MASK	BIT(6)
73271+#define OTG_SLP_SET_MASK	BIT(7)
73272+
73273+#define BUCK1_SLP_SET_OFF	BIT(0)
73274+#define BUCK2_SLP_SET_OFF	BIT(1)
73275+#define BUCK3_SLP_SET_OFF	BIT(2)
73276+#define BUCK4_SLP_SET_OFF	BIT(3)
73277+#define BOOST_SLP_SET_OFF	BIT(4)
73278+#define LDO9_SLP_SET_OFF	BIT(5)
73279+#define SWITCH_SLP_SET_OFF	BIT(6)
73280+#define OTG_SLP_SET_OFF		BIT(7)
73281+#define OTG_BOOST_SLP_OFF	(BOOST_SLP_SET_OFF | OTG_SLP_SET_OFF)
73282+
73283+#define BUCK1_SLP_SET_ON	BIT(0)
73284+#define BUCK2_SLP_SET_ON	BIT(1)
73285+#define BUCK3_SLP_SET_ON	BIT(2)
73286+#define BUCK4_SLP_SET_ON	BIT(3)
73287+#define BOOST_SLP_SET_ON	BIT(4)
73288+#define LDO9_SLP_SET_ON		BIT(5)
73289+#define SWITCH_SLP_SET_ON	BIT(6)
73290+#define OTG_SLP_SET_ON		BIT(7)
73291+
73292+#define VOUT_LO_MASK		BIT(0)
73293+#define VB_LO_MASK		BIT(1)
73294+#define PWRON_MASK		BIT(2)
73295+#define PWRON_LP_MASK		BIT(3)
73296+#define HOTDIE_MASK		BIT(4)
73297+#define RTC_ALARM_MASK		BIT(5)
73298+#define RTC_PERIOD_MASK		BIT(6)
73299+#define USB_OV_MASK		BIT(7)
73300+
73301+#define VOUT_LO_DISABLE		BIT(0)
73302+#define VB_LO_DISABLE		BIT(1)
73303+#define PWRON_DISABLE		BIT(2)
73304+#define PWRON_LP_DISABLE	BIT(3)
73305+#define HOTDIE_DISABLE		BIT(4)
73306+#define RTC_ALARM_DISABLE	BIT(5)
73307+#define RTC_PERIOD_DISABLE	BIT(6)
73308+#define USB_OV_INT_DISABLE	BIT(7)
73309+
73310+#define VOUT_LO_ENABLE		(0 << 0)
73311+#define VB_LO_ENABLE		(0 << 1)
73312+#define PWRON_ENABLE		(0 << 2)
73313+#define PWRON_LP_ENABLE		(0 << 3)
73314+#define HOTDIE_ENABLE		(0 << 4)
73315+#define RTC_ALARM_ENABLE	(0 << 5)
73316+#define RTC_PERIOD_ENABLE	(0 << 6)
73317+#define USB_OV_INT_ENABLE	(0 << 7)
73318+
73319+#define PLUG_IN_MASK		BIT(0)
73320+#define PLUG_OUT_MASK		BIT(1)
73321+#define CHGOK_MASK		BIT(2)
73322+#define CHGTE_MASK		BIT(3)
73323+#define CHGTS1_MASK		BIT(4)
73324+#define TS2_MASK		BIT(5)
73325+#define CHG_CVTLIM_MASK		BIT(6)
73326+#define DISCHG_ILIM_MASK	BIT(7)
73327+
73328+#define PLUG_IN_DISABLE		BIT(0)
73329+#define PLUG_OUT_DISABLE	BIT(1)
73330+#define CHGOK_DISABLE		BIT(2)
73331+#define CHGTE_DISABLE		BIT(3)
73332+#define CHGTS1_DISABLE		BIT(4)
73333+#define TS2_DISABLE		BIT(5)
73334+#define CHG_CVTLIM_DISABLE	BIT(6)
73335+#define DISCHG_ILIM_DISABLE	BIT(7)
73336+
73337+#define PLUG_IN_ENABLE		BIT(0)
73338+#define PLUG_OUT_ENABLE		BIT(1)
73339+#define CHGOK_ENABLE		BIT(2)
73340+#define CHGTE_ENABLE		BIT(3)
73341+#define CHGTS1_ENABLE		BIT(4)
73342+#define TS2_ENABLE		BIT(5)
73343+#define CHG_CVTLIM_ENABLE	BIT(6)
73344+#define DISCHG_ILIM_ENABLE	BIT(7)
73345+
73346 #define RK808_VBAT_LOW_2V8	0x00
73347 #define RK808_VBAT_LOW_2V9	0x01
73348 #define RK808_VBAT_LOW_3V0	0x02
73349@@ -381,7 +808,10 @@ enum rk805_reg {
73350 
73351 #define VOUT_LO_INT	BIT(0)
73352 #define CLK32KOUT2_EN	BIT(0)
73353+#define CLK32KOUT2_FUNC		(0 << 1)
73354+#define CLK32KOUT2_FUNC_MASK	BIT(1)
73355 
73356+#define TEMP105C			0x08
73357 #define TEMP115C			0x0c
73358 #define TEMP_HOTDIE_MSK			0x0c
73359 #define SLP_SD_MSK			(0x3 << 2)
73360@@ -391,6 +821,7 @@ enum rk805_reg {
73361 #define PWM_MODE_MSK			BIT(7)
73362 #define FPWM_MODE			BIT(7)
73363 #define AUTO_PWM_MODE			0
73364+#define REGS_WMSK			0xf0
73365 
73366 enum rk817_reg_id {
73367 	RK817_ID_DCDC1 = 0,
73368@@ -436,6 +867,10 @@ enum rk809_reg_id {
73369 #define RK817_RTC_INT_REG		0xf
73370 #define RK817_RTC_COMP_LSB_REG		0x10
73371 #define RK817_RTC_COMP_MSB_REG		0x11
73372+#define RK817_ADC_CONFIG0		0x50
73373+#define RK817_CURE_ADC_K0		0xb0
73374+#define RK817_POWER_EN_SAVE0            0x99
73375+#define RK817_POWER_EN_SAVE1            0xa4
73376 
73377 #define RK817_POWER_EN_REG(i)		(0xb1 + (i))
73378 #define RK817_POWER_SLP_EN_REG(i)	(0xb5 + (i))
73379@@ -462,6 +897,9 @@ enum rk809_reg_id {
73380 #define RK817_LDO_ON_VSEL_REG(idx)	(0xcc + (idx) * 2)
73381 #define RK817_BOOST_OTG_CFG		(0xde)
73382 
73383+#define RK817_CHRG_OUT			0xe4
73384+#define RK817_CHRG_IN			0xe5
73385+#define RK817_CHRG_STS			0xeb
73386 #define RK817_ID_MSB			0xed
73387 #define RK817_ID_LSB			0xee
73388 
73389@@ -609,16 +1047,27 @@ enum {
73390 	RK805_ID = 0x8050,
73391 	RK808_ID = 0x0000,
73392 	RK809_ID = 0x8090,
73393+	RK816_ID = 0x8160,
73394 	RK817_ID = 0x8170,
73395 	RK818_ID = 0x8180,
73396 };
73397 
73398+struct rk808_pin_info {
73399+	struct pinctrl *p;
73400+	struct pinctrl_state *reset;
73401+	struct pinctrl_state *power_off;
73402+	struct pinctrl_state *sleep;
73403+};
73404+
73405 struct rk808 {
73406 	struct i2c_client		*i2c;
73407 	struct regmap_irq_chip_data	*irq_data;
73408+	struct regmap_irq_chip_data	*battery_irq_data;
73409 	struct regmap			*regmap;
73410 	long				variant;
73411 	const struct regmap_config	*regmap_cfg;
73412 	const struct regmap_irq_chip	*regmap_irq_chip;
73413+	void				(*pm_pwroff_prep_fn)(void);
73414+	struct rk808_pin_info *pins;
73415 };
73416 #endif /* __LINUX_REGULATOR_RK808_H */
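As a hedged illustration of how the new RK816 definitions combine with the shared struct rk808 handle, a cell driver could poke the register map through regmap (the standard regmap_update_bits() call); the mask-register polarity, 0 meaning unmasked, is an assumption:

#include <linux/mfd/rk808.h>
#include <linux/regmap.h>

/* Sketch: unmask the low-battery interrupt on an RK816 from a child driver. */
static int example_rk816_unmask_vb_low(struct rk808 *rk808)
{
	/* clearing the mask bit is assumed to unmask the interrupt */
	return regmap_update_bits(rk808->regmap, RK816_INT_STS_MSK_REG1,
				  RK816_IRQ_VB_LOW_MSK, 0);
}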
73417diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h
73418index 29aa50711..7768c644a 100644
73419--- a/include/linux/mmc/core.h
73420+++ b/include/linux/mmc/core.h
73421@@ -162,6 +162,10 @@ struct mmc_request {
73422 	bool			cap_cmd_during_tfr;
73423 
73424 	int			tag;
73425+#ifdef CONFIG_MMC_CRYPTO
73426+	const struct bio_crypt_ctx *crypto_ctx;
73427+	int			crypto_key_slot;
73428+#endif
73429 };
73430 
73431 struct mmc_card;
73432diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
73433index 40d7e98fc..4e8b65492 100644
73434--- a/include/linux/mmc/host.h
73435+++ b/include/linux/mmc/host.h
73436@@ -374,6 +374,11 @@ struct mmc_host {
73437 #define MMC_CAP2_CQE_DCMD	(1 << 24)	/* CQE can issue a direct command */
73438 #define MMC_CAP2_AVOID_3_3V	(1 << 25)	/* Host must negotiate down from 3.3V */
73439 #define MMC_CAP2_MERGE_CAPABLE	(1 << 26)	/* Host can merge a segment over the segment size */
73440+#ifdef CONFIG_MMC_CRYPTO
73441+#define MMC_CAP2_CRYPTO		(1 << 27)	/* Host supports inline encryption */
73442+#else
73443+#define MMC_CAP2_CRYPTO		0
73444+#endif
73445 
73446 	int			fixed_drv_type;	/* fixed driver type for non-removable media */
73447 
73448@@ -468,6 +473,9 @@ struct mmc_host {
73449 	bool			cqe_enabled;
73450 	bool			cqe_on;
73451 
73452+#ifdef CONFIG_MMC_CRYPTO
73453+	struct blk_keyslot_manager ksm;
73454+#endif
73455 	/* Host Software Queue support */
73456 	bool			hsq_enabled;
73457 
73458diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h
73459index 545578fb8..29e09d23e 100644
73460--- a/include/linux/mmc/mmc.h
73461+++ b/include/linux/mmc/mmc.h
73462@@ -449,4 +449,11 @@ static inline bool mmc_ready_for_data(u32 status)
73463 
73464 #define mmc_driver_type_mask(n)		(1 << (n))
73465 
73466+struct mmc_card;
73467+extern int mmc_select_bus_width(struct mmc_card *card);
73468+extern int mmc_select_hs(struct mmc_card *card);
73469+extern int mmc_select_hs_ddr(struct mmc_card *card);
73470+extern int mmc_select_hs400(struct mmc_card *card);
73471+extern int mmc_hs200_tuning(struct mmc_card *card);
73472+extern int mmc_select_timing(struct mmc_card *card);
73473 #endif /* LINUX_MMC_MMC_H */
73474diff --git a/include/linux/mmc/pm.h b/include/linux/mmc/pm.h
73475index 3549f8045..1d554b858 100644
73476--- a/include/linux/mmc/pm.h
73477+++ b/include/linux/mmc/pm.h
73478@@ -23,5 +23,6 @@ typedef unsigned int mmc_pm_flag_t;
73479 
73480 #define MMC_PM_KEEP_POWER	(1 << 0)	/* preserve card power during suspend */
73481 #define MMC_PM_WAKE_SDIO_IRQ	(1 << 1)	/* wake up host system on SDIO IRQ assertion */
73482+#define MMC_PM_IGNORE_PM_NOTIFY	(1 << 2)	/* ignore mmc pm notify */
73483 
73484 #endif /* LINUX_MMC_PM_H */
73485diff --git a/include/linux/mmu_context.h b/include/linux/mmu_context.h
73486index 03dee12d2..bc4ac3c52 100644
73487--- a/include/linux/mmu_context.h
73488+++ b/include/linux/mmu_context.h
73489@@ -14,4 +14,12 @@
73490 static inline void leave_mm(int cpu) { }
73491 #endif
73492 
73493+/*
73494+ * CPUs that are capable of running task @p. By default, we assume a sane,
73495+ * homogeneous system. Must contain at least one active CPU.
73496+ */
73497+#ifndef task_cpu_possible_mask
73498+# define task_cpu_possible_mask(p)	cpu_possible_mask
73499+#endif
73500+
73501 #endif
73502diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
73503index 81a55e974..e51b3d8e6 100644
73504--- a/include/linux/power_supply.h
73505+++ b/include/linux/power_supply.h
73506@@ -49,6 +49,7 @@ enum {
73507 	POWER_SUPPLY_CHARGE_TYPE_ADAPTIVE,	/* dynamically adjusted speed */
73508 	POWER_SUPPLY_CHARGE_TYPE_CUSTOM,	/* use CHARGE_CONTROL_* props */
73509 	POWER_SUPPLY_CHARGE_TYPE_LONGLIFE,	/* slow speed, longer life */
73510+	POWER_SUPPLY_CHARGE_TYPE_TAPER = 50,	/* charging in CV phase */
73511 };
73512 
73513 enum {
73514@@ -386,12 +387,22 @@ extern void power_supply_put(struct power_supply *psy);
73515 #ifdef CONFIG_OF
73516 extern struct power_supply *power_supply_get_by_phandle(struct device_node *np,
73517 							const char *property);
73518+extern int power_supply_get_by_phandle_array(struct device_node *np,
73519+					     const char *property,
73520+					     struct power_supply **psy,
73521+					     ssize_t size);
73522 extern struct power_supply *devm_power_supply_get_by_phandle(
73523 				    struct device *dev, const char *property);
73524 #else /* !CONFIG_OF */
73525 static inline struct power_supply *
73526 power_supply_get_by_phandle(struct device_node *np, const char *property)
73527 { return NULL; }
73528+static inline int
+power_supply_get_by_phandle_array(struct device_node *np,
+				  const char *property,
+				  struct power_supply **psy,
+				  ssize_t size)
73533+{ return 0; }
73534 static inline struct power_supply *
73535 devm_power_supply_get_by_phandle(struct device *dev, const char *property)
73536 { return NULL; }
73537@@ -426,9 +437,16 @@ static inline int power_supply_is_system_supplied(void) { return -ENOSYS; }
73538 extern int power_supply_get_property(struct power_supply *psy,
73539 			    enum power_supply_property psp,
73540 			    union power_supply_propval *val);
73541+#if IS_ENABLED(CONFIG_POWER_SUPPLY)
73542 extern int power_supply_set_property(struct power_supply *psy,
73543 			    enum power_supply_property psp,
73544 			    const union power_supply_propval *val);
73545+#else
73546+static inline int power_supply_set_property(struct power_supply *psy,
73547+			    enum power_supply_property psp,
73548+			    const union power_supply_propval *val)
73549+{ return 0; }
73550+#endif
73551 extern int power_supply_property_is_writeable(struct power_supply *psy,
73552 					enum power_supply_property psp);
73553 extern void power_supply_external_power_changed(struct power_supply *psy);
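
power_supply_get_by_phandle_array() resolves a whole phandle list in one call. A consumer sketch; the property name is a placeholder, and the return convention (number of supplies found, or a negative errno such as -EPROBE_DEFER) is an assumption not guaranteed by this header alone:

/* Sketch only: "rockchip,chargers" is a placeholder property name. */
static int example_get_chargers(struct device_node *np)
{
	struct power_supply *chargers[4];
	int n;

	n = power_supply_get_by_phandle_array(np, "rockchip,chargers",
					      chargers, ARRAY_SIZE(chargers));
	if (n < 0)
		return n;

	/* ... use chargers[0..n-1] ... */

	while (n-- > 0)
		power_supply_put(chargers[n]);
	return 0;
}
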
73554diff --git a/include/linux/pwm.h b/include/linux/pwm.h
73555index a13ff383f..cb4abed98 100644
73556--- a/include/linux/pwm.h
73557+++ b/include/linux/pwm.h
73558@@ -48,6 +48,10 @@ enum {
73559 	PWMF_EXPORTED = 1 << 1,
73560 };
73561 
73562+enum pwm_output_type {
73563+	PWM_OUTPUT_FIXED = 1 << 0,
73564+	PWM_OUTPUT_MODULATED = 1 << 1,
73565+};
73566 /*
73567  * struct pwm_state - state of a PWM channel
73568  * @period: PWM period (in nanoseconds)
73569@@ -59,6 +63,10 @@ struct pwm_state {
73570 	u64 period;
73571 	u64 duty_cycle;
73572 	enum pwm_polarity polarity;
73573+	enum pwm_output_type output_type;
73574+#ifdef CONFIG_PWM_ROCKCHIP_ONESHOT
73575+	u64 oneshot_count;
73576+#endif /* CONFIG_PWM_ROCKCHIP_ONESHOT */
73577 	bool enabled;
73578 };
73579 
73580@@ -145,6 +153,13 @@ static inline enum pwm_polarity pwm_get_polarity(const struct pwm_device *pwm)
73581 
73582 	return state.polarity;
73583 }
73584+static inline enum pwm_output_type pwm_get_output_type(
73585+		const struct pwm_device *pwm)
73586+{
73587+	struct pwm_state state;
73588+	pwm_get_state(pwm, &state);
73589+	return state.output_type;
73590+}
73591 
73592 static inline void pwm_get_args(const struct pwm_device *pwm,
73593 				struct pwm_args *args)
73594@@ -264,6 +279,8 @@ struct pwm_ops {
73595 		     const struct pwm_state *state);
73596 	void (*get_state)(struct pwm_chip *chip, struct pwm_device *pwm,
73597 			  struct pwm_state *state);
73598+	int (*get_output_type_supported)(struct pwm_chip *chip,
73599+			struct pwm_device *pwm);
73600 	struct module *owner;
73601 
73602 	/* Only used by legacy drivers */
73603@@ -317,6 +334,15 @@ struct pwm_device *pwm_request(int pwm_id, const char *label);
73604 void pwm_free(struct pwm_device *pwm);
73605 int pwm_apply_state(struct pwm_device *pwm, const struct pwm_state *state);
73606 int pwm_adjust_config(struct pwm_device *pwm);
73607+static inline int pwm_get_output_type_supported(struct pwm_device *pwm)
73608+{
73609+	if (!pwm)
73610+		return -EINVAL;
73611+	if (pwm->chip->ops->get_output_type_supported)
73612+		return pwm->chip->ops->get_output_type_supported(pwm->chip,
73613+				pwm);
73614+	return PWM_OUTPUT_FIXED;
73615+}
73616 
73617 /**
73618  * pwm_config() - change a PWM device configuration
73619@@ -436,6 +462,10 @@ static inline int pwm_adjust_config(struct pwm_device *pwm)
73620 	return -ENOTSUPP;
73621 }
73622 
73623+static inline int pwm_get_output_type_supported(struct pwm_device *pwm)
73624+{
73625+	return -EINVAL;
73626+}
73627 static inline int pwm_config(struct pwm_device *pwm, int duty_ns,
73628 			     int period_ns)
73629 {
73630diff --git a/include/linux/reboot-mode.h b/include/linux/reboot-mode.h
73631index 4a2abb38d..a7aa69d00 100644
73632--- a/include/linux/reboot-mode.h
73633+++ b/include/linux/reboot-mode.h
73634@@ -6,7 +6,9 @@ struct reboot_mode_driver {
73635 	struct device *dev;
73636 	struct list_head head;
73637 	int (*write)(struct reboot_mode_driver *reboot, unsigned int magic);
73638+	int (*read)(struct reboot_mode_driver *reboot);
73639 	struct notifier_block reboot_notifier;
73640+	struct notifier_block panic_notifier;
73641 };
73642 
73643 int reboot_mode_register(struct reboot_mode_driver *reboot);
73644diff --git a/include/linux/reboot.h b/include/linux/reboot.h
73645index 3734cd8f3..1c8d7856c 100644
73646--- a/include/linux/reboot.h
73647+++ b/include/linux/reboot.h
73648@@ -49,6 +49,26 @@ extern int register_restart_handler(struct notifier_block *);
73649 extern int unregister_restart_handler(struct notifier_block *);
73650 extern void do_kernel_restart(char *cmd);
73651 
73652+#ifdef CONFIG_NO_GKI
73653+extern int register_pre_restart_handler(struct notifier_block *nb);
73654+extern int unregister_pre_restart_handler(struct notifier_block *nb);
73655+extern void do_kernel_pre_restart(char *cmd);
73656+#else
73657+static inline int register_pre_restart_handler(struct notifier_block *nb)
73658+{
73659+	return 0;
73660+}
73661+
73662+static inline int unregister_pre_restart_handler(struct notifier_block *nb)
73663+{
73664+	return 0;
73665+}
73666+
73667+static inline void do_kernel_pre_restart(char *cmd)
73668+{
73669+}
73670+#endif
73671+
73672 /*
73673  * Architecture-specific implementations of sys_reboot commands.
73674  */
73675diff --git a/include/linux/sched.h b/include/linux/sched.h
73676index d590141f0..6e1cafd23 100644
73677--- a/include/linux/sched.h
73678+++ b/include/linux/sched.h
73679@@ -851,6 +851,9 @@ struct task_struct {
73680 	 */
73681 	struct uclamp_se		uclamp[UCLAMP_CNT];
73682 #endif
73683+#ifdef CONFIG_HOTPLUG_CPU
73684+	struct list_head		percpu_kthread_node;
73685+#endif
73686 
73687 #ifdef CONFIG_PREEMPT_NOTIFIERS
73688 	/* List of struct preempt_notifier: */
73689@@ -1034,6 +1037,10 @@ struct task_struct {
73690 	u64				stimescaled;
73691 #endif
73692 	u64				gtime;
73693+#ifdef CONFIG_CPU_FREQ_TIMES
73694+	u64				*time_in_state;
73695+	unsigned int			max_state;
73696+#endif
73697 	struct prev_cputime		prev_cputime;
73698 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
73699 	struct vtime			vtime;
73700@@ -1145,6 +1152,7 @@ struct task_struct {
73701 	raw_spinlock_t			pi_lock;
73702 
73703 	struct wake_q_node		wake_q;
73704+	int				wake_q_count;
73705 
73706 #ifdef CONFIG_RT_MUTEXES
73707 	/* PI waiters blocked on a rt_mutex held by this task: */
73708@@ -1369,7 +1377,7 @@ struct task_struct {
73709 	u64				timer_slack_ns;
73710 	u64				default_timer_slack_ns;
73711 
73712-#ifdef CONFIG_KASAN
73713+#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
73714 	unsigned int			kasan_depth;
73715 #endif
73716 
73717diff --git a/include/linux/soc/rockchip/pvtm.h b/include/linux/soc/rockchip/pvtm.h
73718new file mode 100755
73719index 000000000..3d2495cfd
73720--- /dev/null
73721+++ b/include/linux/soc/rockchip/pvtm.h
73722@@ -0,0 +1,17 @@
73723+/* SPDX-License-Identifier: GPL-2.0 */
73724+#ifndef __SOC_ROCKCHIP_PVTM_H
73725+#define __SOC_ROCKCHIP_PVTM_H
73726+
73727+#if IS_ENABLED(CONFIG_ROCKCHIP_PVTM)
73728+u32 rockchip_get_pvtm_value(unsigned int id, unsigned int ring_sel,
73729+			    unsigned int time_us);
73730+#else
73731+static inline u32 rockchip_get_pvtm_value(unsigned int id,
73732+					  unsigned int ring_sel,
73733+					  unsigned int time_us)
73734+{
73735+	return 0;
73736+}
73737+#endif
73738+
73739+#endif /* __SOC_ROCKCHIP_PVTM_H */
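
rockchip_get_pvtm_value() takes the PVTM instance id, a ring-oscillator selector, and a sampling window in microseconds (argument meanings inferred from the names; the values below are placeholders, and 0 is treated as "no reading", matching the stub):

static void example_read_pvtm(void)
{
	u32 value = rockchip_get_pvtm_value(0, 0, 1000);

	if (!value)
		pr_warn("PVTM sample unavailable\n");
	else
		pr_info("PVTM sample: %u\n", value);
}
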
73740diff --git a/include/linux/soc/rockchip/rk_fiq_debugger.h b/include/linux/soc/rockchip/rk_fiq_debugger.h
73741new file mode 100755
73742index 000000000..f5ec8d143
73743--- /dev/null
73744+++ b/include/linux/soc/rockchip/rk_fiq_debugger.h
73745@@ -0,0 +1,22 @@
73746+/* SPDX-License-Identifier: GPL-2.0 */
73747+#ifndef __PLAT_RK_FIQ_DEBUGGER_H
73748+#define __PLAT_RK_FIQ_DEBUGGER_H
73749+
73750+#ifdef CONFIG_FIQ_DEBUGGER_TRUST_ZONE
73751+void fiq_debugger_fiq(void *regs, u32 cpu);
73752+
73753+#ifdef CONFIG_ARM_SDE_INTERFACE
73754+int sdei_fiq_debugger_is_enabled(void);
73755+int fiq_sdei_event_enable(u32 event_num);
73756+int fiq_sdei_event_routing_set(u32 event_num, unsigned long flags,
73757+			       unsigned long affinity);
73758+int fiq_sdei_event_disable(u32 event_num);
73759+#else
73760+static inline int sdei_fiq_debugger_is_enabled(void)
73761+{
73762+	return 0;
73763+}
73764+#endif
73765+#endif
73766+
73767+#endif
73768diff --git a/include/linux/soc/rockchip/rk_sdmmc.h b/include/linux/soc/rockchip/rk_sdmmc.h
73769new file mode 100755
73770index 000000000..14f70f72e
73771--- /dev/null
73772+++ b/include/linux/soc/rockchip/rk_sdmmc.h
73773@@ -0,0 +1,13 @@
73774+/* SPDX-License-Identifier: GPL-2.0 */
73775+#ifndef __RK_SDMMC_H
73776+#define __RK_SDMMC_H
73777+
73778+#if IS_ENABLED(CONFIG_CPU_RV1106) && IS_REACHABLE(CONFIG_MMC_DW)
73779+void rv1106_sdmmc_get_lock(void);
73780+void rv1106_sdmmc_put_lock(void);
73781+#else
73782+static inline void rv1106_sdmmc_get_lock(void) {}
73783+static inline void rv1106_sdmmc_put_lock(void) {}
73784+#endif
73785+
73786+#endif
73787diff --git a/include/linux/soc/rockchip/rk_vendor_storage.h b/include/linux/soc/rockchip/rk_vendor_storage.h
73788new file mode 100755
73789index 000000000..29cee9bf6
73790--- /dev/null
73791+++ b/include/linux/soc/rockchip/rk_vendor_storage.h
73792@@ -0,0 +1,59 @@
73793+/*
73794+ * Copyright (c) 2016, Fuzhou Rockchip Electronics Co., Ltd
73795+ *
73796+ * This program is free software; you can redistribute it and/or modify
73797+ * it under the terms of the GNU General Public License as published by
73798+ * the Free Software Foundation; either version 2 of the License, or (at
73799+ * your option) any later version.
73800+ */
73801+
73802+#ifndef __PLAT_RK_VENDOR_STORAGE_H
73803+#define __PLAT_RK_VENDOR_STORAGE_H
73804+
73805+#define RSV_ID				0
73806+#define SN_ID				1
73807+#define WIFI_MAC_ID			2
73808+#define LAN_MAC_ID			3
73809+#define BT_MAC_ID			4
73810+#define HDCP_14_HDMI_ID			5
73811+#define HDCP_14_DP_ID			6
73812+#define HDCP_2X_ID			7
73813+#define DRM_KEY_ID			8
73814+#define PLAYREADY_CERT_ID		9
73815+#define ATTENTION_KEY_ID		10
73816+#define PLAYREADY_ROOT_KEY_0_ID		11
73817+#define PLAYREADY_ROOT_KEY_1_ID		12
73818+#define HDCP_14_HDMIRX_ID		13
73819+#define SENSOR_CALIBRATION_ID		14
73820+#define IMEI_ID				15
73821+#define LAN_RGMII_DL_ID			16
73822+#define EINK_VCOM_ID			17
73823+
73824+#if IS_REACHABLE(CONFIG_ROCKCHIP_VENDOR_STORAGE)
73825+int rk_vendor_read(u32 id, void *pbuf, u32 size);
73826+int rk_vendor_write(u32 id, void *pbuf, u32 size);
73827+int rk_vendor_register(void *read, void *write);
73828+bool is_rk_vendor_ready(void);
73829+#else
73830+static inline int rk_vendor_read(u32 id, void *pbuf, u32 size)
73831+{
73832+	return -1;
73833+}
73834+
73835+static inline int rk_vendor_write(u32 id, void *pbuf, u32 size)
73836+{
73837+	return -1;
73838+}
73839+
73840+static inline int rk_vendor_register(void *read, void *write)
73841+{
73842+	return -1;
73843+}
73844+
73845+static inline bool is_rk_vendor_ready(void)
73846+{
73847+	return false;
73848+}
73849+#endif
73850+
73851+#endif
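
Callers are expected to wait for is_rk_vendor_ready() before touching vendor storage, since the flash/eMMC backend registers its accessors at runtime. A sketch that reads the serial number, assuming a negative return from rk_vendor_read() means failure:

static int example_read_serial(char *sn, u32 max_len)
{
	if (!is_rk_vendor_ready())
		return -EPROBE_DEFER;	/* backend not registered yet */

	if (rk_vendor_read(SN_ID, sn, max_len) < 0)
		return -EIO;

	return 0;
}
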
73852diff --git a/include/linux/soc/rockchip/rockchip_decompress.h b/include/linux/soc/rockchip/rockchip_decompress.h
73853new file mode 100755
73854index 000000000..120ae907c
73855--- /dev/null
73856+++ b/include/linux/soc/rockchip/rockchip_decompress.h
73857@@ -0,0 +1,42 @@
73858+/* SPDX-License-Identifier: GPL-2.0+ */
73859+/* Copyright (c) 2020 Rockchip Electronics Co., Ltd */
73860+
73861+#ifndef _ROCKCHIP_DECOMPRESS
73862+#define _ROCKCHIP_DECOMPRESS
73863+
73864+enum decom_mod {
73865+	LZ4_MOD,
73866+	GZIP_MOD,
73867+	ZLIB_MOD,
73868+};
73869+
73870+/* The high 16 bits indicate whether decompression is non-blocking */
73871+#define DECOM_NOBLOCKING		(0x00010000)
73872+
73873+static inline u32 rk_get_decom_mode(u32 mode)
73874+{
73875+	return mode & 0x0000ffff;
73876+}
73877+
73878+static inline bool rk_get_noblocking_flag(u32 mode)
73879+{
73880+	return !!(mode & DECOM_NOBLOCKING);
73881+}
73882+
73883+#ifdef CONFIG_ROCKCHIP_HW_DECOMPRESS
73884+int rk_decom_start(u32 mode, phys_addr_t src, phys_addr_t dst, u32 dst_max_size);
73885+/* timeout in seconds */
73886+int rk_decom_wait_done(u32 timeout, u64 *decom_len);
73887+#else
73888+static inline int rk_decom_start(u32 mode, phys_addr_t src, phys_addr_t dst, u32 dst_max_size)
73889+{
73890+	return -EINVAL;
73891+}
73892+
73893+static inline int rk_decom_wait_done(u32 timeout, u64 *decom_len)
73894+{
73895+	return -EINVAL;
73896+}
73897+#endif
73898+
73899+#endif
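
In the non-blocking case the mode word carries both the algorithm and the DECOM_NOBLOCKING flag, and completion is polled separately; a minimal sketch with placeholder addresses and timeout:

static int example_hw_decompress(phys_addr_t src, phys_addr_t dst, u32 dst_max)
{
	u64 out_len = 0;
	int ret;

	ret = rk_decom_start(LZ4_MOD | DECOM_NOBLOCKING, src, dst, dst_max);
	if (ret)
		return ret;

	ret = rk_decom_wait_done(10, &out_len);	/* timeout in seconds */
	if (ret)
		return ret;

	pr_debug("decompressed %llu bytes\n", out_len);
	return 0;
}
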
73900diff --git a/include/linux/soc/rockchip/rockchip_thunderboot_crypto.h b/include/linux/soc/rockchip/rockchip_thunderboot_crypto.h
73901new file mode 100755
73902index 000000000..2fe176649
73903--- /dev/null
73904+++ b/include/linux/soc/rockchip/rockchip_thunderboot_crypto.h
73905@@ -0,0 +1,9 @@
73906+/* SPDX-License-Identifier: GPL-2.0+ */
73907+/* Copyright (c) 2021 Rockchip Electronics Co., Ltd */
73908+
73909+#ifndef _ROCKCHIP_THUNDERBOOT_CRYPTO_
73910+#define _ROCKCHIP_THUNDERBOOT_CRYPTO_
73911+
73912+int rk_tb_sha256(dma_addr_t data, size_t data_len, void *user_data);
73913+
73914+#endif
73915diff --git a/include/linux/soc/rockchip/rockchip_thunderboot_service.h b/include/linux/soc/rockchip/rockchip_thunderboot_service.h
73916new file mode 100755
73917index 000000000..5ab1cf490
73918--- /dev/null
73919+++ b/include/linux/soc/rockchip/rockchip_thunderboot_service.h
73920@@ -0,0 +1,29 @@
73921+/* SPDX-License-Identifier: GPL-2.0+ */
73922+/* Copyright (c) 2022 Rockchip Electronics Co., Ltd */
73923+
73924+#ifndef _ROCKCHIP_THUNDERBOOT_SERVICE_H
73925+#define _ROCKCHIP_THUNDERBOOT_SERVICE_H
73926+
73927+struct rk_tb_client {
73928+	struct list_head node;
73929+	void *data;
73930+	void (*cb)(void *data);
73931+};
73932+
73933+#ifdef CONFIG_ROCKCHIP_THUNDER_BOOT_SERVICE
73934+bool rk_tb_mcu_is_done(void);
73935+int rk_tb_client_register_cb(struct rk_tb_client *client);
73936+#else
73937+static inline bool rk_tb_mcu_is_done(void)
73938+{
73939+	return true;
73940+}
73941+static inline int rk_tb_client_register_cb(struct rk_tb_client *client)
73942+{
73943+	if (client && client->cb)
73944+		client->cb(client->data);
73945+
73946+	return 0;
73947+}
73948+#endif
73949+#endif
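
Because the !CONFIG_ROCKCHIP_THUNDER_BOOT_SERVICE stub runs the callback immediately, a driver can register unconditionally and handle "MCU already done" and "MCU finishes later" through the same path; a sketch:

static void example_tb_done(void *data)
{
	struct platform_device *pdev = data;

	dev_info(&pdev->dev, "thunderboot MCU handed over\n");
	/* safe to reclaim shared resources and start the normal driver path */
}

static struct rk_tb_client example_tb_client;

static void example_register_tb(struct platform_device *pdev)
{
	example_tb_client.data = pdev;
	example_tb_client.cb = example_tb_done;
	rk_tb_client_register_cb(&example_tb_client);
}
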
73950diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
73951index abf7b8ec1..bf41c178e 100644
73952--- a/include/linux/stmmac.h
73953+++ b/include/linux/stmmac.h
73954@@ -183,6 +183,7 @@ struct plat_stmmacenet_data {
73955 	void (*serdes_powerdown)(struct net_device *ndev, void *priv);
73956 	int (*init)(struct platform_device *pdev, void *priv);
73957 	void (*exit)(struct platform_device *pdev, void *priv);
73958+	void (*get_eth_addr)(void *priv, unsigned char *addr);
73959 	struct mac_device_info *(*setup)(void *priv);
73960 	void *bsp_priv;
73961 	struct clk *stmmac_clk;
73962diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h
73963index a2d229ab6..4d352204e 100644
73964--- a/include/linux/usb/composite.h
73965+++ b/include/linux/usb/composite.h
73966@@ -525,6 +525,8 @@ extern struct usb_string *usb_gstrings_attach(struct usb_composite_dev *cdev,
73967 extern int usb_string_ids_n(struct usb_composite_dev *c, unsigned n);
73968 
73969 extern void composite_disconnect(struct usb_gadget *gadget);
73970+extern void composite_reset(struct usb_gadget *gadget);
73971+
73972 extern int composite_setup(struct usb_gadget *gadget,
73973 		const struct usb_ctrlrequest *ctrl);
73974 extern void composite_suspend(struct usb_gadget *gadget);
73975@@ -590,6 +592,7 @@ struct usb_function_instance {
73976 	struct config_group group;
73977 	struct list_head cfs_list;
73978 	struct usb_function_driver *fd;
73979+	struct usb_function *f;
73980 	int (*set_inst_name)(struct usb_function_instance *inst,
73981 			      const char *name);
73982 	void (*free_func_inst)(struct usb_function_instance *inst);
73983diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h
73984index e7351d64f..e5cdba2db 100644
73985--- a/include/linux/usb/gadget.h
73986+++ b/include/linux/usb/gadget.h
73987@@ -26,6 +26,7 @@
73988 #include <linux/types.h>
73989 #include <linux/workqueue.h>
73990 #include <linux/usb/ch9.h>
73991+#include <linux/android_kabi.h>
73992 
73993 #define UDC_TRACE_STR_MAX	512
73994 
73995@@ -122,6 +123,8 @@ struct usb_request {
73996 
73997 	int			status;
73998 	unsigned		actual;
73999+
74000+	ANDROID_KABI_RESERVE(1);
74001 };
74002 
74003 /*-------------------------------------------------------------------------*/
74004@@ -152,6 +155,8 @@ struct usb_ep_ops {
74005 
74006 	int (*fifo_status) (struct usb_ep *ep);
74007 	void (*fifo_flush) (struct usb_ep *ep);
74008+
74009+	ANDROID_KABI_RESERVE(1);
74010 };
74011 
74012 /**
74013@@ -217,6 +222,7 @@ struct usb_ep_caps {
74014  *	enabled and remains valid until the endpoint is disabled.
74015  * @comp_desc: In case of SuperSpeed support, this is the endpoint companion
74016  *	descriptor that is used to configure the endpoint
+ * @transfer_type: Used to specify the transfer type of the endpoint.
74018  *
74019  * the bus controller driver lists all the general purpose endpoints in
74020  * gadget->ep_list.  the control endpoint (gadget->ep0) is not in that list,
74021@@ -240,6 +246,11 @@ struct usb_ep {
74022 	u8			address;
74023 	const struct usb_endpoint_descriptor	*desc;
74024 	const struct usb_ss_ep_comp_descriptor	*comp_desc;
74025+#if defined(CONFIG_ARCH_ROCKCHIP) && defined(CONFIG_NO_GKI)
74026+	u8			transfer_type;
74027+#endif
74028+
74029+	ANDROID_KABI_RESERVE(1);
74030 };
74031 
74032 /*-------------------------------------------------------------------------*/
74033@@ -323,9 +334,18 @@ struct usb_gadget_ops {
74034 			struct usb_gadget_driver *);
74035 	int	(*udc_stop)(struct usb_gadget *);
74036 	void	(*udc_set_speed)(struct usb_gadget *, enum usb_device_speed);
74037+	void	(*udc_set_ssp_rate)(struct usb_gadget *gadget,
74038+			enum usb_ssp_rate rate);
74039+	void	(*udc_async_callbacks)(struct usb_gadget *gadget, bool enable);
74040 	struct usb_ep *(*match_ep)(struct usb_gadget *,
74041 			struct usb_endpoint_descriptor *,
74042 			struct usb_ss_ep_comp_descriptor *);
74043+	int	(*check_config)(struct usb_gadget *gadget);
74044+
74045+	ANDROID_KABI_RESERVE(1);
74046+	ANDROID_KABI_RESERVE(2);
74047+	ANDROID_KABI_RESERVE(3);
74048+	ANDROID_KABI_RESERVE(4);
74049 };
74050 
74051 /**
74052@@ -339,6 +359,10 @@ struct usb_gadget_ops {
74053  * @speed: Speed of current connection to USB host.
74054  * @max_speed: Maximal speed the UDC can handle.  UDC must support this
74055  *      and all slower speeds.
74056+ * @ssp_rate: Current connected SuperSpeed Plus signaling rate and lane count.
74057+ * @max_ssp_rate: Maximum SuperSpeed Plus signaling rate and lane count the UDC
74058+ *	can handle. The UDC must support this and all slower speeds and lower
74059+ *	number of lanes.
74060  * @state: the state we are now (attached, suspended, configured, etc)
74061  * @name: Identifies the controller hardware type.  Used in diagnostics
74062  *	and sometimes configuration.
74063@@ -406,6 +430,11 @@ struct usb_gadget {
74064 	struct list_head		ep_list;	/* of usb_ep */
74065 	enum usb_device_speed		speed;
74066 	enum usb_device_speed		max_speed;
74067+
74068+	/* USB SuperSpeed Plus only */
74069+	enum usb_ssp_rate		ssp_rate;
74070+	enum usb_ssp_rate		max_ssp_rate;
74071+
74072 	enum usb_device_state		state;
74073 	const char			*name;
74074 	struct device			dev;
74075@@ -433,6 +462,11 @@ struct usb_gadget {
74076 	unsigned			connected:1;
74077 	unsigned			lpm_capable:1;
74078 	int				irq;
74079+
74080+	ANDROID_KABI_RESERVE(1);
74081+	ANDROID_KABI_RESERVE(2);
74082+	ANDROID_KABI_RESERVE(3);
74083+	ANDROID_KABI_RESERVE(4);
74084 };
74085 #define work_to_gadget(w)	(container_of((w), struct usb_gadget, work))
74086 
74087@@ -596,6 +630,7 @@ int usb_gadget_connect(struct usb_gadget *gadget);
74088 int usb_gadget_disconnect(struct usb_gadget *gadget);
74089 int usb_gadget_deactivate(struct usb_gadget *gadget);
74090 int usb_gadget_activate(struct usb_gadget *gadget);
74091+int usb_gadget_check_config(struct usb_gadget *gadget);
74092 #else
74093 static inline int usb_gadget_frame_number(struct usb_gadget *gadget)
74094 { return 0; }
74095@@ -619,6 +654,8 @@ static inline int usb_gadget_deactivate(struct usb_gadget *gadget)
74096 { return 0; }
74097 static inline int usb_gadget_activate(struct usb_gadget *gadget)
74098 { return 0; }
74099+static inline int usb_gadget_check_config(struct usb_gadget *gadget)
74100+{ return 0; }
74101 #endif /* CONFIG_USB_GADGET */
74102 
74103 /*-------------------------------------------------------------------------*/
74104diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
74105index c0cf20b19..3dbb42c63 100644
74106--- a/include/linux/usb/hcd.h
74107+++ b/include/linux/usb/hcd.h
74108@@ -66,7 +66,6 @@
74109 
74110 struct giveback_urb_bh {
74111 	bool running;
74112-	bool high_prio;
74113 	spinlock_t lock;
74114 	struct list_head  head;
74115 	struct tasklet_struct bh;
74116@@ -125,7 +124,6 @@ struct usb_hcd {
74117 #define HCD_FLAG_RH_RUNNING		5	/* root hub is running? */
74118 #define HCD_FLAG_DEAD			6	/* controller has died? */
74119 #define HCD_FLAG_INTF_AUTHORIZED	7	/* authorize interfaces? */
74120-#define HCD_FLAG_DEFER_RH_REGISTER	8	/* Defer roothub registration */
74121 
74122 	/* The flags can be tested using these macros; they are likely to
74123 	 * be slightly faster than test_bit().
74124@@ -136,7 +134,6 @@ struct usb_hcd {
74125 #define HCD_WAKEUP_PENDING(hcd)	((hcd)->flags & (1U << HCD_FLAG_WAKEUP_PENDING))
74126 #define HCD_RH_RUNNING(hcd)	((hcd)->flags & (1U << HCD_FLAG_RH_RUNNING))
74127 #define HCD_DEAD(hcd)		((hcd)->flags & (1U << HCD_FLAG_DEAD))
74128-#define HCD_DEFER_RH_REGISTER(hcd) ((hcd)->flags & (1U << HCD_FLAG_DEFER_RH_REGISTER))
74129 
74130 	/*
74131 	 * Specifies if interfaces are authorized by default
74132diff --git a/include/linux/usb/otg.h b/include/linux/usb/otg.h
74133index 69f1b6328..10cb3d193 100644
74134--- a/include/linux/usb/otg.h
74135+++ b/include/linux/usb/otg.h
74136@@ -12,6 +12,7 @@
74137 
74138 #include <linux/phy/phy.h>
74139 #include <linux/usb/phy.h>
74140+#include <linux/android_kabi.h>
74141 
74142 struct usb_otg {
74143 	u8			default_a;
74144@@ -40,6 +41,7 @@ struct usb_otg {
74145 	/* start or continue HNP role switch */
74146 	int	(*start_hnp)(struct usb_otg *otg);
74147 
74148+	ANDROID_KABI_RESERVE(1);
74149 };
74150 
74151 /**
74152diff --git a/include/linux/usb/pd.h b/include/linux/usb/pd.h
74153index 433040ff8..96b7ff66f 100644
74154--- a/include/linux/usb/pd.h
74155+++ b/include/linux/usb/pd.h
74156@@ -225,6 +225,7 @@ enum pd_pdo_type {
74157 #define PDO_FIXED_EXTPOWER		BIT(27) /* Externally powered */
74158 #define PDO_FIXED_USB_COMM		BIT(26) /* USB communications capable */
74159 #define PDO_FIXED_DATA_SWAP		BIT(25) /* Data role swap supported */
74160+#define PDO_FIXED_UNCHUNK_EXT		BIT(24) /* Unchunked Extended Message supported (Source) */
74161 #define PDO_FIXED_FRS_CURR_MASK		(BIT(24) | BIT(23)) /* FR_Swap Current (Sink) */
74162 #define PDO_FIXED_FRS_CURR_SHIFT	23
74163 #define PDO_FIXED_VOLT_SHIFT		10	/* 50mV units */
74164@@ -466,6 +467,7 @@ static inline unsigned int rdo_max_power(u32 rdo)
74165 #define PD_T_DRP_SRC		30
74166 #define PD_T_PS_SOURCE_OFF	920
74167 #define PD_T_PS_SOURCE_ON	480
74168+#define PD_T_PS_SOURCE_ON_PRS	450	/* 390 - 480ms */
74169 #define PD_T_PS_HARD_RESET	30
74170 #define PD_T_SRC_RECOVER	760
74171 #define PD_T_SRC_RECOVER_MAX	1000
74172@@ -478,14 +480,19 @@ static inline unsigned int rdo_max_power(u32 rdo)
74173 #define PD_T_NEWSRC		250	/* Maximum of 275ms */
74174 #define PD_T_SWAP_SRC_START	20	/* Minimum of 20ms */
74175 #define PD_T_BIST_CONT_MODE	50	/* 30 - 60 ms */
74176+#define PD_T_SINK_TX		16	/* 16 - 20 ms */
74177+#define PD_T_CHUNK_NOT_SUPP	42	/* 40 - 50 ms */
74178 
74179 #define PD_T_DRP_TRY		100	/* 75 - 150 ms */
74180 #define PD_T_DRP_TRYWAIT	600	/* 400 - 800 ms */
74181 
74182 #define PD_T_CC_DEBOUNCE	200	/* 100 - 200 ms */
74183 #define PD_T_PD_DEBOUNCE	20	/* 10 - 20 ms */
74184+#define PD_T_TRY_CC_DEBOUNCE	15	/* 10 - 20 ms */
74185 
74186 #define PD_N_CAPS_COUNT		(PD_T_NO_RESPONSE / PD_T_SEND_SOURCE_CAP)
74187 #define PD_N_HARD_RESET_COUNT	2
74188 
74189+#define PD_P_SNK_STDBY_MW	2500	/* 2500 mW */
74190+
74191 #endif /* __LINUX_USB_PD_H */
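
PDO_FIXED_UNCHUNK_EXT is simply OR-ed into a source's fixed-supply PDO next to the existing capability flags (bit 24 doubles as part of the sink FRS field, hence the "(Source)" note above). A small sketch with arbitrary 5 V / 3 A values:

static const u32 example_src_caps[] = {
	PDO_FIXED(5000, 3000,
		  PDO_FIXED_DUAL_ROLE | PDO_FIXED_DATA_SWAP |
		  PDO_FIXED_USB_COMM | PDO_FIXED_UNCHUNK_EXT),
};
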
74192diff --git a/include/linux/usb/pd_ext_sdb.h b/include/linux/usb/pd_ext_sdb.h
74193index 0eb83ce19..b517ebc8f 100644
74194--- a/include/linux/usb/pd_ext_sdb.h
74195+++ b/include/linux/usb/pd_ext_sdb.h
74196@@ -24,8 +24,4 @@ enum usb_pd_ext_sdb_fields {
74197 #define USB_PD_EXT_SDB_EVENT_OVP		BIT(3)
74198 #define USB_PD_EXT_SDB_EVENT_CF_CV_MODE		BIT(4)
74199 
74200-#define USB_PD_EXT_SDB_PPS_EVENTS	(USB_PD_EXT_SDB_EVENT_OCP |	\
74201-					 USB_PD_EXT_SDB_EVENT_OTP |	\
74202-					 USB_PD_EXT_SDB_EVENT_OVP)
74203-
74204 #endif /* __LINUX_USB_PD_EXT_SDB_H */
74205diff --git a/include/linux/usb/pd_vdo.h b/include/linux/usb/pd_vdo.h
74206index 68bdc4e2f..7f5e330a6 100644
74207--- a/include/linux/usb/pd_vdo.h
74208+++ b/include/linux/usb/pd_vdo.h
74209@@ -21,22 +21,24 @@
74210  * ----------
74211  * <31:16>  :: SVID
74212  * <15>     :: VDM type ( 1b == structured, 0b == unstructured )
74213- * <14:13>  :: Structured VDM version (can only be 00 == 1.0 currently)
74214+ * <14:13>  :: Structured VDM version
74215  * <12:11>  :: reserved
74216  * <10:8>   :: object position (1-7 valid ... used for enter/exit mode only)
74217  * <7:6>    :: command type (SVDM only?)
74218  * <5>      :: reserved (SVDM), command type (UVDM)
74219  * <4:0>    :: command
74220  */
74221-#define VDO(vid, type, custom)				\
74222+#define VDO(vid, type, ver, custom)			\
74223 	(((vid) << 16) |				\
74224 	 ((type) << 15) |				\
74225+	 ((ver) << 13) |				\
74226 	 ((custom) & 0x7FFF))
74227 
74228 #define VDO_SVDM_TYPE		(1 << 15)
74229 #define VDO_SVDM_VERS(x)	((x) << 13)
74230 #define VDO_OPOS(x)		((x) << 8)
74231 #define VDO_CMDT(x)		((x) << 6)
74232+#define VDO_SVDM_VERS_MASK	VDO_SVDM_VERS(0x3)
74233 #define VDO_OPOS_MASK		VDO_OPOS(0x7)
74234 #define VDO_CMDT_MASK		VDO_CMDT(0x3)
74235 
74236@@ -74,6 +76,7 @@
74237 
74238 #define PD_VDO_VID(vdo)		((vdo) >> 16)
74239 #define PD_VDO_SVDM(vdo)	(((vdo) >> 15) & 1)
74240+#define PD_VDO_SVDM_VER(vdo)	(((vdo) >> 13) & 0x3)
74241 #define PD_VDO_OPOS(vdo)	(((vdo) >> 8) & 0x7)
74242 #define PD_VDO_CMD(vdo)		((vdo) & 0x1f)
74243 #define PD_VDO_CMDT(vdo)	(((vdo) >> 6) & 0x3)
74244@@ -103,25 +106,50 @@
74245  * --------------------
74246  * <31>     :: data capable as a USB host
74247  * <30>     :: data capable as a USB device
74248- * <29:27>  :: product type
74249+ * <29:27>  :: product type (UFP / Cable / VPD)
74250  * <26>     :: modal operation supported (1b == yes)
74251- * <25:16>  :: Reserved, Shall be set to zero
74252+ * <25:23>  :: product type (DFP) (SVDM version 2.0+ only; set to zero in version 1.0)
74253+ * <22:21>  :: connector type (SVDM version 2.0+ only; set to zero in version 1.0)
74254+ * <20:16>  :: Reserved, Shall be set to zero
74255  * <15:0>   :: USB-IF assigned VID for this cable vendor
74256  */
74257+
74258+/* PD Rev2.0 definition */
74259 #define IDH_PTYPE_UNDEF		0
74260+
74261+/* SOP Product Type (UFP) */
74262+#define IDH_PTYPE_NOT_UFP	0
74263 #define IDH_PTYPE_HUB		1
74264 #define IDH_PTYPE_PERIPH	2
74265+#define IDH_PTYPE_PSD		3
74266+#define IDH_PTYPE_AMA		5
74267+
74268+/* SOP' Product Type (Cable Plug / VPD) */
74269+#define IDH_PTYPE_NOT_CABLE	0
74270 #define IDH_PTYPE_PCABLE	3
74271 #define IDH_PTYPE_ACABLE	4
74272-#define IDH_PTYPE_AMA		5
74273+#define IDH_PTYPE_VPD		6
74274 
74275-#define VDO_IDH(usbh, usbd, ptype, is_modal, vid)		\
74276-	((usbh) << 31 | (usbd) << 30 | ((ptype) & 0x7) << 27	\
74277-	 | (is_modal) << 26 | ((vid) & 0xffff))
74278+/* SOP Product Type (DFP) */
74279+#define IDH_PTYPE_NOT_DFP	0
74280+#define IDH_PTYPE_DFP_HUB	1
74281+#define IDH_PTYPE_DFP_HOST	2
74282+#define IDH_PTYPE_DFP_PB	3
74283+
74284+/* ID Header Mask */
74285+#define IDH_DFP_MASK		GENMASK(25, 23)
74286+#define IDH_CONN_MASK		GENMASK(22, 21)
74287+
74288+#define VDO_IDH(usbh, usbd, ufp_cable, is_modal, dfp, conn, vid)		\
74289+	((usbh) << 31 | (usbd) << 30 | ((ufp_cable) & 0x7) << 27		\
74290+	 | (is_modal) << 26 | ((dfp) & 0x7) << 23 | ((conn) & 0x3) << 21	\
74291+	 | ((vid) & 0xffff))
74292 
74293 #define PD_IDH_PTYPE(vdo)	(((vdo) >> 27) & 0x7)
74294 #define PD_IDH_VID(vdo)		((vdo) & 0xffff)
74295 #define PD_IDH_MODAL_SUPP(vdo)	((vdo) & (1 << 26))
74296+#define PD_IDH_DFP_PTYPE(vdo)	(((vdo) >> 23) & 0x7)
74297+#define PD_IDH_CONN_TYPE(vdo)	(((vdo) >> 21) & 0x3)
74298 
74299 /*
74300  * Cert Stat VDO
74301@@ -129,6 +157,7 @@
74302  * <31:0>  : USB-IF assigned XID for this cable
74303  */
74304 #define PD_CSTAT_XID(vdo)	(vdo)
74305+#define VDO_CERT(xid)		((xid) & 0xffffffff)
74306 
74307 /*
74308  * Product VDO
74309@@ -140,77 +169,270 @@
74310 #define PD_PRODUCT_PID(vdo)	(((vdo) >> 16) & 0xffff)
74311 
74312 /*
74313- * UFP VDO1
74314+ * UFP VDO (PD Revision 3.0+ only)
74315  * --------
74316  * <31:29> :: UFP VDO version
74317  * <28>    :: Reserved
74318  * <27:24> :: Device capability
74319- * <23:6>  :: Reserved
74320+ * <23:22> :: Connector type (10b == receptacle, 11b == captive plug)
74321+ * <21:11> :: Reserved
74322+ * <10:8>  :: Vconn power (AMA only)
74323+ * <7>     :: Vconn required (AMA only, 0b == no, 1b == yes)
74324+ * <6>     :: Vbus required (AMA only, 0b == yes, 1b == no)
74325  * <5:3>   :: Alternate modes
74326  * <2:0>   :: USB highest speed
74327  */
74328-#define PD_VDO1_UFP_DEVCAP(vdo)	(((vdo) & GENMASK(27, 24)) >> 24)
74329+#define PD_VDO_UFP_DEVCAP(vdo)	(((vdo) & GENMASK(27, 24)) >> 24)
74330 
74331+/* UFP VDO Version */
74332+#define UFP_VDO_VER1_2		2
74333+
74334+/* Device Capability */
74335 #define DEV_USB2_CAPABLE	BIT(0)
74336 #define DEV_USB2_BILLBOARD	BIT(1)
74337 #define DEV_USB3_CAPABLE	BIT(2)
74338 #define DEV_USB4_CAPABLE	BIT(3)
74339 
74340+/* Connector Type */
74341+#define UFP_RECEPTACLE		2
74342+#define UFP_CAPTIVE		3
74343+
74344+/* Vconn Power (AMA only, set to AMA_VCONN_NOT_REQ if Vconn is not required) */
74345+#define AMA_VCONN_PWR_1W	0
74346+#define AMA_VCONN_PWR_1W5	1
74347+#define AMA_VCONN_PWR_2W	2
74348+#define AMA_VCONN_PWR_3W	3
74349+#define AMA_VCONN_PWR_4W	4
74350+#define AMA_VCONN_PWR_5W	5
74351+#define AMA_VCONN_PWR_6W	6
74352+
74353+/* Vconn Required (AMA only) */
74354+#define AMA_VCONN_NOT_REQ	0
74355+#define AMA_VCONN_REQ		1
74356+
74357+/* Vbus Required (AMA only) */
74358+#define AMA_VBUS_REQ		0
74359+#define AMA_VBUS_NOT_REQ	1
74360+
74361+/* Alternate Modes */
74362+#define UFP_ALTMODE_NOT_SUPP	0
74363+#define UFP_ALTMODE_TBT3	BIT(0)
74364+#define UFP_ALTMODE_RECFG	BIT(1)
74365+#define UFP_ALTMODE_NO_RECFG	BIT(2)
74366+
74367+/* USB Highest Speed */
74368+#define UFP_USB2_ONLY		0
74369+#define UFP_USB32_GEN1		1
74370+#define UFP_USB32_4_GEN2	2
74371+#define UFP_USB4_GEN3		3
74372+
74373+#define VDO_UFP(ver, cap, conn, vcpwr, vcr, vbr, alt, spd)			\
74374+	(((ver) & 0x7) << 29 | ((cap) & 0xf) << 24 | ((conn) & 0x3) << 22	\
74375+	 | ((vcpwr) & 0x7) << 8 | (vcr) << 7 | (vbr) << 6 | ((alt) & 0x7) << 3	\
74376+	 | ((spd) & 0x7))
74377+
74378 /*
74379- * DFP VDO
74380+ * DFP VDO (PD Revision 3.0+ only)
74381  * --------
74382  * <31:29> :: DFP VDO version
74383  * <28:27> :: Reserved
74384  * <26:24> :: Host capability
74385- * <23:5>  :: Reserved
74386+ * <23:22> :: Connector type (10b == receptacle, 11b == captive plug)
74387+ * <21:5>  :: Reserved
74388  * <4:0>   :: Port number
74389  */
74390 #define PD_VDO_DFP_HOSTCAP(vdo)	(((vdo) & GENMASK(26, 24)) >> 24)
74391 
74392+#define DFP_VDO_VER1_1		1
74393 #define HOST_USB2_CAPABLE	BIT(0)
74394 #define HOST_USB3_CAPABLE	BIT(1)
74395 #define HOST_USB4_CAPABLE	BIT(2)
74396+#define DFP_RECEPTACLE		2
74397+#define DFP_CAPTIVE		3
74398+
74399+#define VDO_DFP(ver, cap, conn, pnum)						\
74400+	(((ver) & 0x7) << 29 | ((cap) & 0x7) << 24 | ((conn) & 0x3) << 22	\
74401+	 | ((pnum) & 0x1f))
74402 
74403 /*
74404- * Cable VDO
74405+ * Cable VDO (for both Passive and Active Cable VDO in PD Rev2.0)
74406  * ---------
74407  * <31:28> :: Cable HW version
74408  * <27:24> :: Cable FW version
74409  * <23:20> :: Reserved, Shall be set to zero
74410- * <19:18> :: type-C to Type-A/B/C (00b == A, 01 == B, 10 == C)
74411- * <17>    :: Type-C to Plug/Receptacle (0b == plug, 1b == receptacle)
74412+ * <19:18> :: type-C to Type-A/B/C/Captive (00b == A, 01 == B, 10 == C, 11 == Captive)
74413+ * <17>    :: Reserved, Shall be set to zero
74414  * <16:13> :: cable latency (0001 == <10ns(~1m length))
74415  * <12:11> :: cable termination type (11b == both ends active VCONN req)
74416  * <10>    :: SSTX1 Directionality support (0b == fixed, 1b == cfgable)
74417  * <9>     :: SSTX2 Directionality support
74418  * <8>     :: SSRX1 Directionality support
74419  * <7>     :: SSRX2 Directionality support
74420- * <6:5>   :: Vbus current handling capability
74421+ * <6:5>   :: Vbus current handling capability (01b == 3A, 10b == 5A)
74422  * <4>     :: Vbus through cable (0b == no, 1b == yes)
74423  * <3>     :: SOP" controller present? (0b == no, 1b == yes)
74424  * <2:0>   :: USB SS Signaling support
74425+ *
74426+ * Passive Cable VDO (PD Rev3.0+)
74427+ * ---------
74428+ * <31:28> :: Cable HW version
74429+ * <27:24> :: Cable FW version
74430+ * <23:21> :: VDO version
74431+ * <20>    :: Reserved, Shall be set to zero
74432+ * <19:18> :: Type-C to Type-C/Captive (10b == C, 11b == Captive)
74433+ * <17>    :: Reserved, Shall be set to zero
74434+ * <16:13> :: cable latency (0001 == <10ns(~1m length))
74435+ * <12:11> :: cable termination type (10b == Vconn not req, 01b == Vconn req)
74436+ * <10:9>  :: Maximum Vbus voltage (00b == 20V, 01b == 30V, 10b == 40V, 11b == 50V)
74437+ * <8:7>   :: Reserved, Shall be set to zero
74438+ * <6:5>   :: Vbus current handling capability (01b == 3A, 10b == 5A)
74439+ * <4:3>   :: Reserved, Shall be set to zero
74440+ * <2:0>   :: USB highest speed
74441+ *
74442+ * Active Cable VDO 1 (PD Rev3.0+)
74443+ * ---------
74444+ * <31:28> :: Cable HW version
74445+ * <27:24> :: Cable FW version
74446+ * <23:21> :: VDO version
74447+ * <20>    :: Reserved, Shall be set to zero
74448+ * <19:18> :: Connector type (10b == C, 11b == Captive)
74449+ * <17>    :: Reserved, Shall be set to zero
74450+ * <16:13> :: cable latency (0001 == <10ns(~1m length))
74451+ * <12:11> :: cable termination type (10b == one end active, 11b == both ends active VCONN req)
74452+ * <10:9>  :: Maximum Vbus voltage (00b == 20V, 01b == 30V, 10b == 40V, 11b == 50V)
74453+ * <8>     :: SBU supported (0b == supported, 1b == not supported)
74454+ * <7>     :: SBU type (0b == passive, 1b == active)
74455+ * <6:5>   :: Vbus current handling capability (01b == 3A, 10b == 5A)
74456+ * <2:0>   :: USB highest speed
74457  */
74458+/* Cable VDO Version */
74459+#define CABLE_VDO_VER1_0	0
74460+#define CABLE_VDO_VER1_3	3
74461+
74462+/* Connector Type (_ATYPE and _BTYPE are for PD Rev2.0 only) */
74463 #define CABLE_ATYPE		0
74464 #define CABLE_BTYPE		1
74465 #define CABLE_CTYPE		2
74466-#define CABLE_PLUG		0
74467-#define CABLE_RECEPTACLE	1
74468-#define CABLE_CURR_1A5		0
74469+#define CABLE_CAPTIVE		3
74470+
74471+/* Cable Latency */
74472+#define CABLE_LATENCY_1M	1
74473+#define CABLE_LATENCY_2M	2
74474+#define CABLE_LATENCY_3M	3
74475+#define CABLE_LATENCY_4M	4
74476+#define CABLE_LATENCY_5M	5
74477+#define CABLE_LATENCY_6M	6
74478+#define CABLE_LATENCY_7M	7
74479+#define CABLE_LATENCY_7M_PLUS	8
74480+
74481+/* Cable Termination Type */
74482+#define PCABLE_VCONN_NOT_REQ	0
74483+#define PCABLE_VCONN_REQ	1
74484+#define ACABLE_ONE_END		2
74485+#define ACABLE_BOTH_END		3
74486+
74487+/* Maximum Vbus Voltage */
74488+#define CABLE_MAX_VBUS_20V	0
74489+#define CABLE_MAX_VBUS_30V	1
74490+#define CABLE_MAX_VBUS_40V	2
74491+#define CABLE_MAX_VBUS_50V	3
74492+
74493+/* Active Cable SBU Supported/Type */
74494+#define ACABLE_SBU_SUPP		0
74495+#define ACABLE_SBU_NOT_SUPP	1
74496+#define ACABLE_SBU_PASSIVE	0
74497+#define ACABLE_SBU_ACTIVE	1
74498+
74499+/* Vbus Current Handling Capability */
74500+#define CABLE_CURR_DEF		0
74501 #define CABLE_CURR_3A		1
74502 #define CABLE_CURR_5A		2
74503+
74504+/* USB SuperSpeed Signaling Support (PD Rev2.0) */
74505 #define CABLE_USBSS_U2_ONLY	0
74506 #define CABLE_USBSS_U31_GEN1	1
74507 #define CABLE_USBSS_U31_GEN2	2
74508-#define VDO_CABLE(hw, fw, cbl, gdr, lat, term, tx1d, tx2d, rx1d, rx2d, cur,\
74509-		  vps, sopp, usbss) \
74510-	(((hw) & 0x7) << 28 | ((fw) & 0x7) << 24 | ((cbl) & 0x3) << 18	\
74511-	 | (gdr) << 17 | ((lat) & 0x7) << 13 | ((term) & 0x3) << 11	\
74512-	 | (tx1d) << 10 | (tx2d) << 9 | (rx1d) << 8 | (rx2d) << 7	\
74513-	 | ((cur) & 0x3) << 5 | (vps) << 4 | (sopp) << 3		\
74514-	 | ((usbss) & 0x7))
74515+
74516+/* USB Highest Speed */
74517+#define CABLE_USB2_ONLY		0
74518+#define CABLE_USB32_GEN1	1
74519+#define CABLE_USB32_4_GEN2	2
74520+#define CABLE_USB4_GEN3		3
74521+
74522+#define VDO_CABLE(hw, fw, cbl, lat, term, tx1d, tx2d, rx1d, rx2d, cur, vps, sopp, usbss) \
74523+	(((hw) & 0x7) << 28 | ((fw) & 0x7) << 24 | ((cbl) & 0x3) << 18		\
74524+	 | ((lat) & 0x7) << 13 | ((term) & 0x3) << 11 | (tx1d) << 10		\
74525+	 | (tx2d) << 9 | (rx1d) << 8 | (rx2d) << 7 | ((cur) & 0x3) << 5		\
74526+	 | (vps) << 4 | (sopp) << 3 | ((usbss) & 0x7))
74527+#define VDO_PCABLE(hw, fw, ver, conn, lat, term, vbm, cur, spd)			\
74528+	(((hw) & 0xf) << 28 | ((fw) & 0xf) << 24 | ((ver) & 0x7) << 21		\
74529+	 | ((conn) & 0x3) << 18 | ((lat) & 0xf) << 13 | ((term) & 0x3) << 11	\
74530+	 | ((vbm) & 0x3) << 9 | ((cur) & 0x3) << 5 | ((spd) & 0x7))
74531+#define VDO_ACABLE1(hw, fw, ver, conn, lat, term, vbm, sbu, sbut, cur, vbt, sopp, spd) \
74532+	(((hw) & 0xf) << 28 | ((fw) & 0xf) << 24 | ((ver) & 0x7) << 21		\
74533+	 | ((conn) & 0x3) << 18	| ((lat) & 0xf) << 13 | ((term) & 0x3) << 11	\
74534+	 | ((vbm) & 0x3) << 9 | (sbu) << 8 | (sbut) << 7 | ((cur) & 0x3) << 5	\
74535+	 | (vbt) << 4 | (sopp) << 3 | ((spd) & 0x7))
74536+
74537+#define VDO_TYPEC_CABLE_TYPE(vdo)	(((vdo) >> 18) & 0x3)
74538+
74539+/*
74540+ * Active Cable VDO 2
74541+ * ---------
74542+ * <31:24> :: Maximum operating temperature
74543+ * <23:16> :: Shutdown temperature
74544+ * <15>    :: Reserved, Shall be set to zero
74545+ * <14:12> :: U3/CLd power
74546+ * <11>    :: U3 to U0 transition mode (0b == direct, 1b == through U3S)
74547+ * <10>    :: Physical connection (0b == copper, 1b == optical)
74548+ * <9>     :: Active element (0b == redriver, 1b == retimer)
74549+ * <8>     :: USB4 supported (0b == yes, 1b == no)
74550+ * <7:6>   :: USB2 hub hops consumed
74551+ * <5>     :: USB2 supported (0b == yes, 1b == no)
74552+ * <4>     :: USB3.2 supported (0b == yes, 1b == no)
74553+ * <3>     :: USB lanes supported (0b == one lane, 1b == two lanes)
74554+ * <2>     :: Optically isolated active cable (0b == no, 1b == yes)
74555+ * <1>     :: Reserved, Shall be set to zero
74556+ * <0>     :: USB gen (0b == gen1, 1b == gen2+)
74557+ */
74558+
+/* U3/CLd Power */
74560+#define ACAB2_U3_CLD_10MW_PLUS	0
74561+#define ACAB2_U3_CLD_10MW	1
74562+#define ACAB2_U3_CLD_5MW	2
74563+#define ACAB2_U3_CLD_1MW	3
74564+#define ACAB2_U3_CLD_500UW	4
74565+#define ACAB2_U3_CLD_200UW	5
74566+#define ACAB2_U3_CLD_50UW	6
74567+
74568+/* Other Active Cable VDO 2 Fields */
74569+#define ACAB2_U3U0_DIRECT	0
74570+#define ACAB2_U3U0_U3S		1
74571+#define ACAB2_PHY_COPPER	0
74572+#define ACAB2_PHY_OPTICAL	1
74573+#define ACAB2_REDRIVER		0
74574+#define ACAB2_RETIMER		1
74575+#define ACAB2_USB4_SUPP		0
74576+#define ACAB2_USB4_NOT_SUPP	1
74577+#define ACAB2_USB2_SUPP		0
74578+#define ACAB2_USB2_NOT_SUPP	1
74579+#define ACAB2_USB32_SUPP	0
74580+#define ACAB2_USB32_NOT_SUPP	1
74581+#define ACAB2_LANES_ONE		0
74582+#define ACAB2_LANES_TWO		1
74583+#define ACAB2_OPT_ISO_NO	0
74584+#define ACAB2_OPT_ISO_YES	1
74585+#define ACAB2_GEN_1		0
74586+#define ACAB2_GEN_2_PLUS	1
74587+
74588+#define VDO_ACABLE2(mtemp, stemp, u3p, trans, phy, ele, u4, hops, u2, u32, lane, iso, gen)	\
74589+	(((mtemp) & 0xff) << 24 | ((stemp) & 0xff) << 16 | ((u3p) & 0x7) << 12	\
74590+	 | (trans) << 11 | (phy) << 10 | (ele) << 9 | (u4) << 8			\
74591+	 | ((hops) & 0x3) << 6 | (u2) << 5 | (u32) << 4 | (lane) << 3		\
74592+	 | (iso) << 2 | (gen))
74593 
74594 /*
74595- * AMA VDO
74596+ * AMA VDO (PD Rev2.0)
74597  * ---------
74598  * <31:28> :: Cable HW version
74599  * <27:24> :: Cable FW version
74600@@ -233,18 +455,40 @@
74601 #define PD_VDO_AMA_VCONN_REQ(vdo)	(((vdo) >> 4) & 1)
74602 #define PD_VDO_AMA_VBUS_REQ(vdo)	(((vdo) >> 3) & 1)
74603 
74604-#define AMA_VCONN_PWR_1W	0
74605-#define AMA_VCONN_PWR_1W5	1
74606-#define AMA_VCONN_PWR_2W	2
74607-#define AMA_VCONN_PWR_3W	3
74608-#define AMA_VCONN_PWR_4W	4
74609-#define AMA_VCONN_PWR_5W	5
74610-#define AMA_VCONN_PWR_6W	6
74611 #define AMA_USBSS_U2_ONLY	0
74612 #define AMA_USBSS_U31_GEN1	1
74613 #define AMA_USBSS_U31_GEN2	2
74614 #define AMA_USBSS_BBONLY	3
74615 
74616+/*
74617+ * VPD VDO
74618+ * ---------
74619+ * <31:28> :: HW version
74620+ * <27:24> :: FW version
74621+ * <23:21> :: VDO version
74622+ * <20:17> :: Reserved, Shall be set to zero
74623+ * <16:15> :: Maximum Vbus voltage (00b == 20V, 01b == 30V, 10b == 40V, 11b == 50V)
74624+ * <14>    :: Charge through current support (0b == 3A, 1b == 5A)
74625+ * <13>    :: Reserved, Shall be set to zero
74626+ * <12:7>  :: Vbus impedance
74627+ * <6:1>   :: Ground impedance
74628+ * <0>     :: Charge through support (0b == no, 1b == yes)
74629+ */
74630+#define VPD_VDO_VER1_0		0
74631+#define VPD_MAX_VBUS_20V	0
74632+#define VPD_MAX_VBUS_30V	1
74633+#define VPD_MAX_VBUS_40V	2
74634+#define VPD_MAX_VBUS_50V	3
74635+#define VPDCT_CURR_3A		0
74636+#define VPDCT_CURR_5A		1
74637+#define VPDCT_NOT_SUPP		0
74638+#define VPDCT_SUPP		1
74639+
74640+#define VDO_VPD(hw, fw, ver, vbm, curr, vbi, gi, ct)			\
74641+	(((hw) & 0xf) << 28 | ((fw) & 0xf) << 24 | ((ver) & 0x7) << 21	\
74642+	 | ((vbm) & 0x3) << 15 | (curr) << 14 | ((vbi) & 0x3f) << 7	\
74643+	 | ((gi) & 0x3f) << 1 | (ct))
74644+
74645 /*
74646  * SVDM Discover SVIDs request -> response
74647  *
74648diff --git a/include/linux/usb/phy.h b/include/linux/usb/phy.h
74649index e4de6bc1f..888d2fdb6 100644
74650--- a/include/linux/usb/phy.h
74651+++ b/include/linux/usb/phy.h
74652@@ -13,6 +13,7 @@
74653 #include <linux/extcon.h>
74654 #include <linux/notifier.h>
74655 #include <linux/usb.h>
74656+#include <linux/android_kabi.h>
74657 #include <uapi/linux/usb/charger.h>
74658 
74659 enum usb_phy_interface {
74660@@ -155,6 +156,8 @@ struct usb_phy {
74661 	 * manually detect the charger type.
74662 	 */
74663 	enum usb_charger_type (*charger_detect)(struct usb_phy *x);
74664+
74665+	ANDROID_KABI_RESERVE(1);
74666 };
74667 
74668 /* for board-specific init logic */
74669diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h
74670index 5e4c497f5..6567f7445 100644
74671--- a/include/linux/usb/quirks.h
74672+++ b/include/linux/usb/quirks.h
74673@@ -72,4 +72,7 @@
74674 /* device has endpoints that should be ignored */
74675 #define USB_QUIRK_ENDPOINT_IGNORE		BIT(15)
74676 
74677+/* device can't support auto suspend function */
74678+#define USB_QUIRK_AUTO_SUSPEND			BIT(16)
74679+
74680 #endif /* __LINUX_USB_QUIRKS_H */
74681diff --git a/include/linux/usb/tcpm.h b/include/linux/usb/tcpm.h
74682index 09762d26f..cb69546f8 100644
74683--- a/include/linux/usb/tcpm.h
74684+++ b/include/linux/usb/tcpm.h
74685@@ -19,6 +19,10 @@ enum typec_cc_status {
74686 	TYPEC_CC_RP_3_0,
74687 };
74688 
74689+/* Collision Avoidance */
74690+#define SINK_TX_NG	TYPEC_CC_RP_1_5
74691+#define SINK_TX_OK	TYPEC_CC_RP_3_0
74692+
74693 enum typec_cc_polarity {
74694 	TYPEC_POLARITY_CC1,
74695 	TYPEC_POLARITY_CC2,
74696@@ -62,6 +66,8 @@ enum tcpm_transmit_type {
74697  *		For example, some tcpcs may include BC1.2 charger detection
74698  *		and use that in this case.
74699  * @set_cc:	Called to set value of CC pins
74700+ * @apply_rc:	Optional; Needed to move TCPCI based chipset to APPLY_RC state
74701+ *		as stated by the TCPCI specification.
74702  * @get_cc:	Called to read current CC pin values
74703  * @set_polarity:
74704  *		Called to set polarity
74705@@ -83,6 +89,39 @@ enum tcpm_transmit_type {
74706  *		Optional; Called to enable/disable PD 3.0 fast role swap.
74707  *		Enabling frs is accessory dependent as not all PD3.0
74708  *		accessories support fast role swap.
74709+ * @frs_sourcing_vbus:
74710+ *		Optional; Called to notify that vbus is now being sourced.
74711+ *		Low level drivers can perform chip specific operations, if any.
74712+ * @enable_auto_vbus_discharge:
74713+ *		Optional; TCPCI spec based TCPC implementations can optionally
74714+ *		support hardware to autonomously dischrge vbus upon disconnecting
74715+ *		as sink or source. TCPM signals TCPC to enable the mechanism upon
74716+ *		entering connected state and signals disabling upon disconnect.
74717+ * @set_auto_vbus_discharge_threshold:
74718+ *		Mandatory when enable_auto_vbus_discharge is implemented. TCPM
74719+ *		calls this function to allow lower levels drivers to program the
74720+ *		vbus threshold voltage below which the vbus discharge circuit
74721+ *		will be turned on. requested_vbus_voltage is set to 0 when vbus
74722+ *		is going to disappear knowingly i.e. during PR_SWAP and
74723+ *		HARD_RESET etc.
74724+ * @is_vbus_vsafe0v:
74725+ *		Optional; TCPCI spec based TCPC implementations are expected to
74726+ *		detect VSAFE0V voltage level at vbus. When detection of VSAFE0V
74727+ *		is supported by TCPC, set this callback for TCPM to query
74728+ *		whether vbus is at VSAFE0V when needed.
74729+ *		Returns true when vbus is at VSAFE0V, false otherwise.
74730+ * @set_partner_usb_comm_capable:
74731+ *              Optional; The USB Communications Capable bit indicates if port
74732+ *              partner is capable of communication over the USB data lines
74733+ *              (e.g. D+/- or SS Tx/Rx). Called to notify the status of the bit.
74734+ * @check_contaminant:
74735+ *		Optional; The callback is called when CC pins report open status
74736+ *		at the end of the toggling period. Chip level drivers are
74737+ *		expected to check for contaminant and re-enable toggling if
74738+ *		needed. When 0 is not returned, check_contaminant is expected to
74739+ *		restart toggling after checking the connector for contaminant.
74740+ *		This forces the TCPM state machine to tranistion to TOGGLING state
74741+ *		without calling start_toggling callback.
74742  */
74743 struct tcpc_dev {
74744 	struct fwnode_handle *fwnode;
74745@@ -91,6 +130,8 @@ struct tcpc_dev {
74746 	int (*get_vbus)(struct tcpc_dev *dev);
74747 	int (*get_current_limit)(struct tcpc_dev *dev);
74748 	int (*set_cc)(struct tcpc_dev *dev, enum typec_cc_status cc);
74749+	int (*apply_rc)(struct tcpc_dev *dev, enum typec_cc_status cc,
74750+			enum typec_cc_polarity polarity);
74751 	int (*get_cc)(struct tcpc_dev *dev, enum typec_cc_status *cc1,
74752 		      enum typec_cc_status *cc2);
74753 	int (*set_polarity)(struct tcpc_dev *dev,
74754@@ -106,9 +147,16 @@ struct tcpc_dev {
74755 			      enum typec_cc_status cc);
74756 	int (*try_role)(struct tcpc_dev *dev, int role);
74757 	int (*pd_transmit)(struct tcpc_dev *dev, enum tcpm_transmit_type type,
74758-			   const struct pd_message *msg);
74759+			   const struct pd_message *msg, unsigned int negotiated_rev);
74760 	int (*set_bist_data)(struct tcpc_dev *dev, bool on);
74761 	int (*enable_frs)(struct tcpc_dev *dev, bool enable);
74762+	void (*frs_sourcing_vbus)(struct tcpc_dev *dev);
74763+	int (*enable_auto_vbus_discharge)(struct tcpc_dev *dev, bool enable);
74764+	int (*set_auto_vbus_discharge_threshold)(struct tcpc_dev *dev, enum typec_pwr_opmode mode,
74765+						 bool pps_active, u32 requested_vbus_voltage);
74766+	int (*check_contaminant)(struct tcpc_dev *dev);
74767+	bool (*is_vbus_vsafe0v)(struct tcpc_dev *dev);
74768+	void (*set_partner_usb_comm_capable)(struct tcpc_dev *dev, bool enable);
74769 };
74770 
74771 struct tcpm_port;
74772@@ -116,6 +164,10 @@ struct tcpm_port;
74773 struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc);
74774 void tcpm_unregister_port(struct tcpm_port *port);
74775 
74776+int tcpm_update_sink_capabilities(struct tcpm_port *port, const u32 *pdo,
74777+				  unsigned int nr_pdo,
74778+				  unsigned int operating_snk_mw);
74779+
74780 void tcpm_vbus_change(struct tcpm_port *port);
74781 void tcpm_cc_change(struct tcpm_port *port);
74782 void tcpm_sink_frs(struct tcpm_port *port);
74783@@ -126,5 +178,7 @@ void tcpm_pd_transmit_complete(struct tcpm_port *port,
74784 			       enum tcpm_transmit_status status);
74785 void tcpm_pd_hard_reset(struct tcpm_port *port);
74786 void tcpm_tcpc_reset(struct tcpm_port *port);
74787+bool tcpm_is_debouncing(struct tcpm_port *tcpm);
74788+bool tcpm_is_toggling(struct tcpm_port *port);
74789 
74790 #endif /* __LINUX_USB_TCPM_H */
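
A low-level TCPC driver opts into the new callbacks simply by filling them in its tcpc_dev; optional hooks it does not implement stay NULL and TCPM skips them. A stub-body sketch (names and the would-be register accesses are hypothetical):

static void example_frs_sourcing_vbus(struct tcpc_dev *dev)
{
	/* chip-specific bookkeeping once FRS starts sourcing vbus */
}

static bool example_is_vbus_vsafe0v(struct tcpc_dev *dev)
{
	/* would read the chip's VSAFE0V status bit here */
	return true;
}

static struct tcpc_dev example_tcpc = {
	/* .init, .set_cc, .get_cc, .pd_transmit, ... as before */
	.frs_sourcing_vbus	= example_frs_sourcing_vbus,
	.is_vbus_vsafe0v	= example_is_vbus_vsafe0v,
};
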
74791diff --git a/include/linux/usb/typec.h b/include/linux/usb/typec.h
74792index 6be558045..34cc102b3 100644
74793--- a/include/linux/usb/typec.h
74794+++ b/include/linux/usb/typec.h
74795@@ -4,6 +4,7 @@
74796 #define __LINUX_USB_TYPEC_H
74797 
74798 #include <linux/types.h>
74799+#include <linux/android_kabi.h>
74800 
74801 /* USB Type-C Specification releases */
74802 #define USB_TYPEC_REV_1_0	0x100 /* 1.0 */
74803@@ -17,6 +18,7 @@ struct typec_partner;
74804 struct typec_cable;
74805 struct typec_plug;
74806 struct typec_port;
74807+struct typec_altmode_ops;
74808 
74809 struct fwnode_handle;
74810 struct device;
74811@@ -126,15 +128,31 @@ struct typec_altmode_desc {
74812 	enum typec_port_data	roles;
74813 };
74814 
74815+void typec_partner_set_pd_revision(struct typec_partner *partner, u16 pd_revision);
74816+int typec_partner_set_num_altmodes(struct typec_partner *partner, int num_altmodes);
74817 struct typec_altmode
74818 *typec_partner_register_altmode(struct typec_partner *partner,
74819 				const struct typec_altmode_desc *desc);
74820+int typec_plug_set_num_altmodes(struct typec_plug *plug, int num_altmodes);
74821 struct typec_altmode
74822 *typec_plug_register_altmode(struct typec_plug *plug,
74823 			     const struct typec_altmode_desc *desc);
74824 struct typec_altmode
74825 *typec_port_register_altmode(struct typec_port *port,
74826 			     const struct typec_altmode_desc *desc);
74827+
74828+#ifdef CONFIG_NO_GKI
74829+void typec_port_register_altmodes(struct typec_port *port,
74830+	const struct typec_altmode_ops *ops, void *drvdata,
74831+	struct typec_altmode **altmodes, size_t n);
74832+#else
74833+static inline void typec_port_register_altmodes(struct typec_port *port,
74834+	const struct typec_altmode_ops *ops, void *drvdata,
74835+	struct typec_altmode **altmodes, size_t n)
74836+{
74837+}
74838+#endif
74839+
74840 void typec_unregister_altmode(struct typec_altmode *altmode);
74841 
74842 struct typec_port *typec_altmode2port(struct typec_altmode *alt);
74843@@ -162,6 +180,7 @@ struct typec_plug_desc {
74844  * @type: The plug type from USB PD Cable VDO
74845  * @active: Is the cable active or passive
74846  * @identity: Result of Discover Identity command
74847+ * @pd_revision: USB Power Delivery Specification revision if supported
74848  *
74849  * Represents USB Type-C Cable attached to USB Type-C port.
74850  */
74851@@ -169,6 +188,8 @@ struct typec_cable_desc {
74852 	enum typec_plug_type	type;
74853 	unsigned int		active:1;
74854 	struct usb_pd_identity	*identity;
74855+	u16			pd_revision; /* 0300H = "3.0" */
74856+
74857 };
74858 
74859 /*
74860@@ -176,15 +197,22 @@ struct typec_cable_desc {
74861  * @usb_pd: USB Power Delivery support
74862  * @accessory: Audio, Debug or none.
74863  * @identity: Discover Identity command data
74864+ * @pd_revision: USB Power Delivery Specification Revision if supported
74865  *
74866  * Details about a partner that is attached to USB Type-C port. If @identity
74867  * member exists when partner is registered, a directory named "identity" is
74868  * created to sysfs for the partner device.
74869+ *
74870+ * @pd_revision is based on the setting of the "Specification Revision" field
74871+ * in the message header on the initial "Source Capabilities" message received
74872+ * from the partner, or a "Request" message received from the partner, depending
74873+ * on whether our port is a Sink or a Source.
74874  */
74875 struct typec_partner_desc {
74876 	unsigned int		usb_pd:1;
74877 	enum typec_accessory	accessory;
74878 	struct usb_pd_identity	*identity;
74879+	u16			pd_revision; /* 0300H = "3.0" */
74880 };
74881 
74882 /**
74883@@ -202,6 +230,13 @@ struct typec_operations {
74884 	int (*vconn_set)(struct typec_port *port, enum typec_role role);
74885 	int (*port_type_set)(struct typec_port *port,
74886 			     enum typec_port_type type);
74887+	ANDROID_KABI_RESERVE(1);
74888+};
74889+
74890+enum usb_pd_svdm_ver {
74891+	SVDM_VER_1_0 = 0,
74892+	SVDM_VER_2_0 = 1,
74893+	SVDM_VER_MAX = SVDM_VER_2_0,
74894 };
74895 
74896 /*
74897@@ -210,6 +245,7 @@ struct typec_operations {
74898  * @data: Supported data role of the port
74899  * @revision: USB Type-C Specification release. Binary coded decimal
74900  * @pd_revision: USB Power Delivery Specification revision if supported
74901+ * @svdm_version: USB PD Structured VDM version if supported
74902  * @prefer_role: Initial role preference (DRP ports).
74903  * @accessory: Supported Accessory Modes
74904  * @fwnode: Optional fwnode of the port
74905@@ -223,6 +259,7 @@ struct typec_capability {
74906 	enum typec_port_data	data;
74907 	u16			revision; /* 0120H = "1.2" */
74908 	u16			pd_revision; /* 0300H = "3.0" */
74909+	enum usb_pd_svdm_ver	svdm_version;
74910 	int			prefer_role;
74911 	enum typec_accessory	accessory[TYPEC_MAX_ACCESSORY];
74912 	unsigned int		orientation_aware:1;
74913@@ -231,6 +268,7 @@ struct typec_capability {
74914 	void			*driver_data;
74915 
74916 	const struct typec_operations	*ops;
74917+	ANDROID_KABI_RESERVE(1);
74918 };
74919 
 /* Specific to try_role(). Indicates the user wants to clear the preference. */
74921@@ -273,4 +311,8 @@ int typec_find_orientation(const char *name);
74922 int typec_find_port_power_role(const char *name);
74923 int typec_find_power_role(const char *name);
74924 int typec_find_port_data_role(const char *name);
74925+
74926+void typec_partner_set_svdm_version(struct typec_partner *partner,
74927+				    enum usb_pd_svdm_ver svdm_version);
74928+int typec_get_negotiated_svdm_version(struct typec_port *port);
74929 #endif /* __LINUX_USB_TYPEC_H */
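
Port drivers are expected to record the partner's announced Structured VDM version (taken from the VDM header of its Discover Identity reply) so that typec_get_negotiated_svdm_version() and altmode drivers see the right value. A sketch; clamping to SVDM_VER_MAX as the negotiation rule is an assumption here:

static void example_update_svdm(struct typec_partner *partner, u32 vdm_hdr)
{
	/* PD_VDO_SVDM_VER() comes from <linux/usb/pd_vdo.h> in this series */
	enum usb_pd_svdm_ver ver = PD_VDO_SVDM_VER(vdm_hdr);

	if (ver > SVDM_VER_MAX)
		ver = SVDM_VER_MAX;

	typec_partner_set_svdm_version(partner, ver);
}
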
74930diff --git a/include/linux/usb/typec_altmode.h b/include/linux/usb/typec_altmode.h
74931index 5e0a7b764..60531284d 100644
74932--- a/include/linux/usb/typec_altmode.h
74933+++ b/include/linux/usb/typec_altmode.h
74934@@ -30,6 +30,7 @@ struct typec_altmode {
74935 
74936 	char				*desc;
74937 	const struct typec_altmode_ops	*ops;
74938+	ANDROID_KABI_RESERVE(1);
74939 };
74940 
74941 #define to_typec_altmode(d) container_of(d, struct typec_altmode, dev)
74942@@ -63,6 +64,7 @@ struct typec_altmode_ops {
74943 	int (*notify)(struct typec_altmode *altmode, unsigned long conf,
74944 		      void *data);
74945 	int (*activate)(struct typec_altmode *altmode, int activate);
74946+	ANDROID_KABI_RESERVE(1);
74947 };
74948 
74949 int typec_altmode_enter(struct typec_altmode *altmode, u32 *vdo);
74950@@ -132,6 +134,16 @@ typec_altmode_get_orientation(struct typec_altmode *altmode)
74951 	return typec_get_orientation(typec_altmode2port(altmode));
74952 }
74953 
74954+/**
74955+ * typec_altmode_get_svdm_version - Get negotiated SVDM version
74956+ * @altmode: Handle to the alternate mode
74957+ */
74958+static inline int
74959+typec_altmode_get_svdm_version(struct typec_altmode *altmode)
74960+{
74961+	return typec_get_negotiated_svdm_version(typec_altmode2port(altmode));
74962+}
74963+
74964 /**
74965  * struct typec_altmode_driver - USB Type-C alternate mode device driver
74966  * @id_table: Null terminated array of SVIDs
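A hedged sketch of how an alternate-mode driver's ->vdm() handler might use the new inline helper (the driver function itself is illustrative, not part of this patch):

	static int sample_altmode_vdm(struct typec_altmode *alt, const u32 hdr,
				      const u32 *vdo, int cnt)
	{
		int svdm_version = typec_altmode_get_svdm_version(alt);

		if (svdm_version < 0)
			return svdm_version;	/* no partner registered yet */

		if (svdm_version >= SVDM_VER_2_0)
			;	/* partner negotiated structured VDM 2.0 */

		return 0;
	}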
74967diff --git a/include/linux/usb/typec_tbt.h b/include/linux/usb/typec_tbt.h
74968index 47c2d501d..63dd44b72 100644
74969--- a/include/linux/usb/typec_tbt.h
74970+++ b/include/linux/usb/typec_tbt.h
74971@@ -39,12 +39,16 @@ struct typec_thunderbolt_data {
74972 #define   TBT_CABLE_USB3_GEN1		1
74973 #define   TBT_CABLE_USB3_PASSIVE	2
74974 #define   TBT_CABLE_10_AND_20GBPS	3
74975-#define TBT_CABLE_ROUNDED		BIT(19)
74976+#define TBT_CABLE_ROUNDED_SUPPORT(_vdo_) \
74977+					(((_vdo_) & GENMASK(20, 19)) >> 19)
74978+#define   TBT_GEN3_NON_ROUNDED                 0
74979+#define   TBT_GEN3_GEN4_ROUNDED_NON_ROUNDED    1
74980 #define TBT_CABLE_OPTICAL		BIT(21)
74981 #define TBT_CABLE_RETIMER		BIT(22)
74982 #define TBT_CABLE_LINK_TRAINING		BIT(23)
74983 
74984 #define TBT_SET_CABLE_SPEED(_s_)	(((_s_) & GENMASK(2, 0)) << 16)
74985+#define TBT_SET_CABLE_ROUNDED(_g_)	(((_g_) & GENMASK(1, 0)) << 19)
74986 
74987 /* TBT3 Device Enter Mode VDO bits */
74988 #define TBT_ENTER_MODE_CABLE_SPEED(s)	TBT_SET_CABLE_SPEED(s)
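The single ROUNDED bit becomes a two-bit field. A hedged snippet showing how a driver might carry it from the cable's Discover Mode VDO into an Enter Mode VDO (`tbt` is assumed to be a struct typec_thunderbolt_data owned by the caller):

	u8 rounded = TBT_CABLE_ROUNDED_SUPPORT(tbt.cable_mode);
	u32 enter_vdo;

	if (rounded == TBT_GEN3_GEN4_ROUNDED_NON_ROUNDED)
		;	/* cable also supports Gen 4 rounded frequencies */

	enter_vdo = TBT_SET_CABLE_SPEED(TBT_CABLE_10_AND_20GBPS) |
		    TBT_SET_CABLE_ROUNDED(rounded);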
74989diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h
74990index 8110c29fa..195512a3a 100644
74991--- a/include/linux/usb/usbnet.h
74992+++ b/include/linux/usb/usbnet.h
74993@@ -23,6 +23,8 @@
74994 #ifndef	__LINUX_USB_USBNET_H
74995 #define	__LINUX_USB_USBNET_H
74996 
74997+#include <linux/android_kabi.h>
74998+
74999 /* interface from usbnet core to each USB networking link we handle */
75000 struct usbnet {
75001 	/* housekeeping */
75002@@ -83,8 +85,11 @@ struct usbnet {
75003 #		define EVENT_LINK_CHANGE	11
75004 #		define EVENT_SET_RX_MODE	12
75005 #		define EVENT_NO_IP_ALIGN	13
75006-	u32			rx_speed;	/* in bps - NOT Mbps */
75007-	u32			tx_speed;	/* in bps - NOT Mbps */
75008+
75009+	ANDROID_KABI_RESERVE(1);
75010+	ANDROID_KABI_RESERVE(2);
75011+	ANDROID_KABI_RESERVE(3);
75012+	ANDROID_KABI_RESERVE(4);
75013 };
75014 
75015 static inline struct usb_driver *driver_of(struct usb_interface *intf)
75016@@ -174,6 +179,9 @@ struct driver_info {
75017 	int		out;		/* tx endpoint */
75018 
75019 	unsigned long	data;		/* Misc driver specific data */
75020+
75021+	ANDROID_KABI_RESERVE(1);
75022+	ANDROID_KABI_RESERVE(2);
75023 };
75024 
75025 /* Minidrivers are just drivers using the "usbnet" core as a powerful
75026diff --git a/include/media/v4l2-async.h b/include/media/v4l2-async.h
75027index 92cd9f038..03b43b0f7 100644
75028--- a/include/media/v4l2-async.h
75029+++ b/include/media/v4l2-async.h
75030@@ -266,6 +266,21 @@ int v4l2_async_notifier_register(struct v4l2_device *v4l2_dev,
75031 int v4l2_async_subdev_notifier_register(struct v4l2_subdev *sd,
75032 					struct v4l2_async_notifier *notifier);
75033 
75034+/**
75035+ * v4l2_async_notifier_clr_unready_dev - remove unready subdevice
75036+ *
75037+ * @notifier: pointer to &struct v4l2_async_notifier
75038+ */
75039+#if IS_ENABLED(CONFIG_NO_GKI)
75040+int v4l2_async_notifier_clr_unready_dev(struct v4l2_async_notifier *notifier);
75041+#else
75042+static inline int
75043+v4l2_async_notifier_clr_unready_dev(struct v4l2_async_notifier *notifier)
75044+{
75045+	return 0;
75046+}
75047+#endif
75048+
75049 /**
75050  * v4l2_async_notifier_unregister - unregisters a subdevice
75051  *	asynchronous notifier
75052diff --git a/include/soc/rockchip/rockchip_sip.h b/include/soc/rockchip/rockchip_sip.h
75053index c46a9ae2a..4afba01c6 100644
75054--- a/include/soc/rockchip/rockchip_sip.h
75055+++ b/include/soc/rockchip/rockchip_sip.h
75056@@ -15,6 +15,12 @@
75057 #define ROCKCHIP_SIP_CONFIG_DRAM_GET_RATE	0x05
75058 #define ROCKCHIP_SIP_CONFIG_DRAM_CLR_IRQ	0x06
75059 #define ROCKCHIP_SIP_CONFIG_DRAM_SET_PARAM	0x07
75060-#define ROCKCHIP_SIP_CONFIG_DRAM_SET_ODT_PD	0x08
75061+#define ROCKCHIP_SIP_CONFIG_DRAM_GET_VERSION	0x08
75062+#define ROCKCHIP_SIP_CONFIG_DRAM_POST_SET_RATE	0x09
75063+#define ROCKCHIP_SIP_CONFIG_DRAM_SET_MSCH_RL	0x0a
75064+#define ROCKCHIP_SIP_CONFIG_DRAM_DEBUG		0x0b
75065+#define ROCKCHIP_SIP_CONFIG_MCU_START		0x0c
75066+#define ROCKCHIP_SIP_CONFIG_DRAM_GET_FREQ_INFO	0x0e
75067+#define ROCKCHIP_SIP_CONFIG_DRAM_ADDRMAP_GET	0x10
75068 
75069 #endif
75070diff --git a/include/sound/hdmi-codec.h b/include/sound/hdmi-codec.h
75071index b55970859..4fc733c8c 100644
75072--- a/include/sound/hdmi-codec.h
75073+++ b/include/sound/hdmi-codec.h
75074@@ -34,6 +34,11 @@ struct hdmi_codec_daifmt {
75075 	unsigned int frame_clk_inv:1;
75076 	unsigned int bit_clk_master:1;
75077 	unsigned int frame_clk_master:1;
75078+	/* bit_fmt can be a standard PCM format or an
75079+	 * IEC958-encoded format. The ALSA IEC958 plugin passes
75080+	 * the IEC958_SUBFRAME format to the underlying driver.
75081+	 */
75082+	snd_pcm_format_t bit_fmt;
75083 };
75084 
75085 /*
75086@@ -60,12 +65,22 @@ struct hdmi_codec_ops {
75087 
75088 	/*
75089 	 * Configures HDMI-encoder for audio stream.
75090-	 * Mandatory
75091+	 * Having either prepare or hw_params is mandatory.
75092 	 */
75093 	int (*hw_params)(struct device *dev, void *data,
75094 			 struct hdmi_codec_daifmt *fmt,
75095 			 struct hdmi_codec_params *hparms);
75096 
75097+	/*
75098+	 * Configures HDMI-encoder for audio stream. Can be called
75099+	 * multiple times for each setup.
75100+	 *
75101+	 * Having either prepare or hw_params is mandatory.
75102+	 */
75103+	int (*prepare)(struct device *dev, void *data,
75104+		       struct hdmi_codec_daifmt *fmt,
75105+		       struct hdmi_codec_params *hparms);
75106+
75107 	/*
75108 	 * Shuts down the audio stream.
75109 	 * Mandatory
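With the relaxed rule above, a codec can implement ->prepare() instead of ->hw_params(). A minimal hedged sketch (function and symbol names are illustrative, not part of this patch):

	static int sample_hdmi_prepare(struct device *dev, void *data,
				       struct hdmi_codec_daifmt *fmt,
				       struct hdmi_codec_params *hparms)
	{
		if (fmt->bit_fmt == SNDRV_PCM_FORMAT_IEC958_SUBFRAME_LE)
			;	/* stream arrives pre-formatted as IEC958 subframes */

		return 0;
	}

	static const struct hdmi_codec_ops sample_hdmi_ops = {
		.prepare	= sample_hdmi_prepare,
		.audio_shutdown	= sample_hdmi_shutdown,	/* shutdown stays mandatory */
	};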
75110diff --git a/include/sound/hwdep.h b/include/sound/hwdep.h
75111index 8d6cdb254..d99a53025 100644
75112--- a/include/sound/hwdep.h
75113+++ b/include/sound/hwdep.h
75114@@ -9,6 +9,7 @@
75115 
75116 #include <sound/asound.h>
75117 #include <linux/poll.h>
75118+#include <linux/android_kabi.h>
75119 
75120 struct snd_hwdep;
75121 
75122@@ -34,6 +35,8 @@ struct snd_hwdep_ops {
75123 			  struct snd_hwdep_dsp_status *status);
75124 	int (*dsp_load)(struct snd_hwdep *hw,
75125 			struct snd_hwdep_dsp_image *image);
75126+
75127+	ANDROID_KABI_RESERVE(1);
75128 };
75129 
75130 struct snd_hwdep {
75131@@ -59,6 +62,8 @@ struct snd_hwdep {
75132 	int used;			/* reference counter */
75133 	unsigned int dsp_loaded;	/* bit fields of loaded dsp indices */
75134 	unsigned int exclusive:1;	/* exclusive access mode */
75135+
75136+	ANDROID_KABI_RESERVE(1);
75137 };
75138 
75139 extern int snd_hwdep_new(struct snd_card *card, char *id, int device,
75140diff --git a/include/sound/pcm_iec958.h b/include/sound/pcm_iec958.h
75141index 0939aa45e..64e84441c 100644
75142--- a/include/sound/pcm_iec958.h
75143+++ b/include/sound/pcm_iec958.h
75144@@ -4,6 +4,14 @@
75145 
75146 #include <linux/types.h>
75147 
75148+int snd_pcm_create_iec958_consumer_default(u8 *cs, size_t len);
75149+
75150+int snd_pcm_fill_iec958_consumer(struct snd_pcm_runtime *runtime, u8 *cs,
75151+				 size_t len);
75152+
75153+int snd_pcm_fill_iec958_consumer_hw_params(struct snd_pcm_hw_params *params,
75154+					   u8 *cs, size_t len);
75155+
75156 int snd_pcm_create_iec958_consumer(struct snd_pcm_runtime *runtime, u8 *cs,
75157 	size_t len);
75158 
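A hedged sketch of the intended call pattern for the new helpers (the `cs` buffer, `params` and `substream` are assumed to belong to the calling driver):

	u8 cs[4];

	/* Once, e.g. at probe: fill in consumer-mode defaults. */
	snd_pcm_create_iec958_consumer_default(cs, sizeof(cs));

	/* In .hw_params(): refresh the rate/word-length dependent bytes. */
	snd_pcm_fill_iec958_consumer_hw_params(params, cs, sizeof(cs));

	/* Or, once the runtime is known: */
	snd_pcm_fill_iec958_consumer(substream->runtime, cs, sizeof(cs));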
75159diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h
75160index dad9d3b4a..1b26ff448 100644
75161--- a/include/uapi/drm/drm_fourcc.h
75162+++ b/include/uapi/drm/drm_fourcc.h
75163@@ -242,6 +242,8 @@ extern "C" {
75164  * index 1 = Cr:Cb plane, [39:0] Cr1:Cb1:Cr0:Cb0 little endian
75165  */
75166 #define DRM_FORMAT_NV15		fourcc_code('N', 'V', '1', '5') /* 2x2 subsampled Cr:Cb plane */
75167+#define DRM_FORMAT_NV20		fourcc_code('N', 'V', '2', '0') /* 2x1 subsampled Cr:Cb plane */
75168+#define DRM_FORMAT_NV30		fourcc_code('N', 'V', '3', '0') /* non-subsampled Cr:Cb plane */
75169 
75170 /*
75171  * 2 plane YCbCr MSB aligned
75172diff --git a/include/uapi/linux/dma-buf.h b/include/uapi/linux/dma-buf.h
75173index f76d11725..37aeab6d1 100644
75174--- a/include/uapi/linux/dma-buf.h
75175+++ b/include/uapi/linux/dma-buf.h
75176@@ -47,4 +47,12 @@ struct dma_buf_sync {
75177 #define DMA_BUF_SET_NAME_A	_IOW(DMA_BUF_BASE, 1, __u32)
75178 #define DMA_BUF_SET_NAME_B	_IOW(DMA_BUF_BASE, 1, __u64)
75179 
75180+struct dma_buf_sync_partial {
75181+	__u64 flags;
75182+	__u32 offset;
75183+	__u32 len;
75184+};
75185+
75186+#define DMA_BUF_IOCTL_SYNC_PARTIAL	_IOW(DMA_BUF_BASE, 2, struct dma_buf_sync_partial)
75187+
75188 #endif
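A hedged userspace sketch of the new partial-sync ioctl, reusing the existing DMA_BUF_SYNC_* flags; the dma-buf file descriptor and the flushed region are assumptions of the example:

	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/dma-buf.h>

	static int flush_region(int fd, __u32 offset, __u32 len)
	{
		struct dma_buf_sync_partial sync = {
			.flags  = DMA_BUF_SYNC_START | DMA_BUF_SYNC_WRITE,
			.offset = offset,	/* byte offset of the dirty region */
			.len    = len,		/* length of that region */
		};

		if (ioctl(fd, DMA_BUF_IOCTL_SYNC_PARTIAL, &sync) < 0) {
			perror("DMA_BUF_IOCTL_SYNC_PARTIAL");
			return -1;
		}
		return 0;
	}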
75189diff --git a/include/uapi/linux/media-bus-format.h b/include/uapi/linux/media-bus-format.h
75190index 84fa53ffb..096f891d4 100644
75191--- a/include/uapi/linux/media-bus-format.h
75192+++ b/include/uapi/linux/media-bus-format.h
75193@@ -34,7 +34,7 @@
75194 
75195 #define MEDIA_BUS_FMT_FIXED			0x0001
75196 
75197-/* RGB - next is	0x101d */
75198+/* RGB - next is	0x1024 */
75199 #define MEDIA_BUS_FMT_RGB444_1X12		0x1016
75200 #define MEDIA_BUS_FMT_RGB444_2X8_PADHI_BE	0x1001
75201 #define MEDIA_BUS_FMT_RGB444_2X8_PADHI_LE	0x1002
75202@@ -63,6 +63,10 @@
75203 #define MEDIA_BUS_FMT_RGB101010_1X30		0x1018
75204 #define MEDIA_BUS_FMT_RGB121212_1X36		0x1019
75205 #define MEDIA_BUS_FMT_RGB161616_1X48		0x101a
75206+#define MEDIA_BUS_FMT_RGB888_DUMMY_4X8		0x101f
75207+#define MEDIA_BUS_FMT_BGR888_DUMMY_4X8		0x1020
75208+#define MEDIA_BUS_FMT_RGB101010_1X7X5_SPWG	0x1022
75209+#define MEDIA_BUS_FMT_RGB101010_1X7X5_JEIDA	0x1023
75210 
75211 /* YUV (including grey) - next is	0x202e */
75212 #define MEDIA_BUS_FMT_Y8_1X8			0x2001
75213diff --git a/include/uapi/linux/serial_reg.h b/include/uapi/linux/serial_reg.h
75214index f51bc8f36..7e3fe68af 100644
75215--- a/include/uapi/linux/serial_reg.h
75216+++ b/include/uapi/linux/serial_reg.h
75217@@ -30,6 +30,7 @@
75218  * Sleep mode for ST16650 and TI16750.  For the ST16650, EFR[4]=1
75219  */
75220 #define UART_IERX_SLEEP		0x10 /* Enable sleep mode */
75221+#define UART_IER_PTIME		0x80 /* Enable programmable transmit interrupt mode */
75222 
75223 #define UART_IIR	2	/* In:  Interrupt ID Register */
75224 #define UART_IIR_NO_INT		0x01 /* No interrupts pending */
75225diff --git a/include/uapi/linux/usb/g_uvc.h b/include/uapi/linux/usb/g_uvc.h
75226index 652f169a0..428926e35 100644
75227--- a/include/uapi/linux/usb/g_uvc.h
75228+++ b/include/uapi/linux/usb/g_uvc.h
75229@@ -19,7 +19,9 @@
75230 #define UVC_EVENT_STREAMOFF		(V4L2_EVENT_PRIVATE_START + 3)
75231 #define UVC_EVENT_SETUP			(V4L2_EVENT_PRIVATE_START + 4)
75232 #define UVC_EVENT_DATA			(V4L2_EVENT_PRIVATE_START + 5)
75233-#define UVC_EVENT_LAST			(V4L2_EVENT_PRIVATE_START + 5)
75234+#define UVC_EVENT_SUSPEND		(V4L2_EVENT_PRIVATE_START + 6)
75235+#define UVC_EVENT_RESUME		(V4L2_EVENT_PRIVATE_START + 7)
75236+#define UVC_EVENT_LAST			(V4L2_EVENT_PRIVATE_START + 7)
75237 
75238 struct uvc_request_data {
75239 	__s32 length;
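A hedged userspace sketch of how a UVC gadget daemon might subscribe to the new suspend/resume events (`uvc_fd` is the gadget's v4l2 device fd; error handling omitted):

	struct v4l2_event_subscription sub = { .type = UVC_EVENT_SUSPEND };
	struct v4l2_event ev;

	ioctl(uvc_fd, VIDIOC_SUBSCRIBE_EVENT, &sub);
	sub.type = UVC_EVENT_RESUME;
	ioctl(uvc_fd, VIDIOC_SUBSCRIBE_EVENT, &sub);

	ioctl(uvc_fd, VIDIOC_DQEVENT, &ev);
	if (ev.type == UVC_EVENT_SUSPEND)
		;	/* host suspended the bus: pause encoding/streaming */
	else if (ev.type == UVC_EVENT_RESUME)
		;	/* host resumed: restart the pipeline */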
75240diff --git a/include/uapi/linux/usb/video.h b/include/uapi/linux/usb/video.h
75241index bfdae12cd..b82436fd9 100644
75242--- a/include/uapi/linux/usb/video.h
75243+++ b/include/uapi/linux/usb/video.h
75244@@ -567,5 +567,63 @@ struct UVC_FRAME_MJPEG(n) {				\
75245 	__le32 dwFrameInterval[n];			\
75246 } __attribute__ ((packed))
75247 
75248+/* Frame Based Payload - 3.1.1. Frame Based Video Format Descriptor */
75249+struct uvc_format_framebased {
75250+	__u8  bLength;
75251+	__u8  bDescriptorType;
75252+	__u8  bDescriptorSubType;
75253+	__u8  bFormatIndex;
75254+	__u8  bNumFrameDescriptors;
75255+	__u8  guidFormat[16];
75256+	__u8  bBitsPerPixel;
75257+	__u8  bDefaultFrameIndex;
75258+	__u8  bAspectRatioX;
75259+	__u8  bAspectRatioY;
75260+	__u8  bmInterfaceFlags;
75261+	__u8  bCopyProtect;
75262+	__u8  bVariableSize;
75263+} __attribute__((__packed__));
75264+
75265+#define UVC_DT_FORMAT_FRAMEBASED_SIZE			28
75266+
75267+/* Frame Based Payload - 3.1.2. Frame Based Video Frame Descriptor */
75268+struct uvc_frame_framebased {
75269+	__u8  bLength;
75270+	__u8  bDescriptorType;
75271+	__u8  bDescriptorSubType;
75272+	__u8  bFrameIndex;
75273+	__u8  bmCapabilities;
75274+	__u16 wWidth;
75275+	__u16 wHeight;
75276+	__u32 dwMinBitRate;
75277+	__u32 dwMaxBitRate;
75278+	__u32 dwDefaultFrameInterval;
75279+	__u8  bFrameIntervalType;
75280+	__u32 dwBytesPerLine;
75281+	__u32 dwFrameInterval[];
75282+} __attribute__((__packed__));
75283+
75284+#define UVC_DT_FRAME_FRAMEBASED_SIZE(n)		(26+4*(n))
75285+
75286+#define UVC_FRAME_FRAMEBASED(n) \
75287+	uvc_frame_framebased_##n
75288+
75289+#define DECLARE_UVC_FRAME_FRAMEBASED(n)		\
75290+struct UVC_FRAME_FRAMEBASED(n) {		\
75291+	__u8  bLength;				\
75292+	__u8  bDescriptorType;			\
75293+	__u8  bDescriptorSubType;		\
75294+	__u8  bFrameIndex;			\
75295+	__u8  bmCapabilities;			\
75296+	__u16 wWidth;				\
75297+	__u16 wHeight;				\
75298+	__u32 dwMinBitRate;			\
75299+	__u32 dwMaxBitRate;			\
75300+	__u32 dwDefaultFrameInterval;		\
75301+	__u8  bFrameIntervalType;		\
75302+	__u32 dwBytesPerLine;			\
75303+	__u32 dwFrameInterval[n];		\
75304+} __attribute__ ((packed))
75305+
75306 #endif /* __LINUX_USB_VIDEO_H */
75307 
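A hedged example of how a gadget driver might instantiate the new frame-based descriptor macro for three frame intervals (all values are illustrative, not taken from this patch):

	DECLARE_UVC_FRAME_FRAMEBASED(3);

	static const struct UVC_FRAME_FRAMEBASED(3) frame_framebased_720p = {
		.bLength		= UVC_DT_FRAME_FRAMEBASED_SIZE(3),
		.bDescriptorType	= USB_DT_CS_INTERFACE,
		.bDescriptorSubType	= UVC_VS_FRAME_FRAME_BASED,
		.bFrameIndex		= 1,
		.wWidth			= cpu_to_le16(1280),
		.wHeight		= cpu_to_le16(720),
		.dwDefaultFrameInterval	= cpu_to_le32(333333),
		.bFrameIntervalType	= 3,
		.dwFrameInterval	= { cpu_to_le32(333333),
					    cpu_to_le32(666666),
					    cpu_to_le32(1000000) },
	};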
75308diff --git a/include/uapi/linux/v4l2-controls.h b/include/uapi/linux/v4l2-controls.h
75309index a184c4939..b80ac7753 100644
75310--- a/include/uapi/linux/v4l2-controls.h
75311+++ b/include/uapi/linux/v4l2-controls.h
75312@@ -65,6 +65,7 @@
75313 #define V4L2_CTRL_CLASS_FM_RX		0x00a10000	/* FM Receiver controls */
75314 #define V4L2_CTRL_CLASS_RF_TUNER	0x00a20000	/* RF tuner controls */
75315 #define V4L2_CTRL_CLASS_DETECT		0x00a30000	/* Detection controls */
75316+#define V4L2_CTRL_CLASS_COLORIMETRY	0x00a50000	/* Colorimetry controls */
75317 
75318 /* User-class control IDs */
75319 
75320@@ -188,6 +189,12 @@ enum v4l2_colorfx {
75321  */
75322 #define V4L2_CID_USER_MAX217X_BASE		(V4L2_CID_USER_BASE + 0x1090)
75323 
75324+/*
75325+ * The base for the tc35874x driver controls.
75326+ * We reserve 16 controls for this driver.
75327+ */
75328+#define V4L2_CID_USER_TC35874X_BASE		(V4L2_CID_USER_BASE + 0x10a0)
75329+
75330 /* The base for the imx driver controls.
75331  * We reserve 16 controls for this driver. */
75332 #define V4L2_CID_USER_IMX_BASE			(V4L2_CID_USER_BASE + 0x10b0)
75333@@ -415,6 +422,12 @@ enum v4l2_mpeg_video_multi_slice_mode {
75334 #define V4L2_CID_MPEG_VIDEO_MV_H_SEARCH_RANGE		(V4L2_CID_MPEG_BASE+227)
75335 #define V4L2_CID_MPEG_VIDEO_MV_V_SEARCH_RANGE		(V4L2_CID_MPEG_BASE+228)
75336 #define V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME		(V4L2_CID_MPEG_BASE+229)
75337+#define V4L2_CID_MPEG_VIDEO_BASELAYER_PRIORITY_ID	(V4L2_CID_MPEG_BASE+230)
75338+#define V4L2_CID_MPEG_VIDEO_AU_DELIMITER		(V4L2_CID_MPEG_BASE+231)
75339+#define V4L2_CID_MPEG_VIDEO_LTR_COUNT			(V4L2_CID_MPEG_BASE+232)
75340+#define V4L2_CID_MPEG_VIDEO_FRAME_LTR_INDEX		(V4L2_CID_MPEG_BASE+233)
75341+#define V4L2_CID_MPEG_VIDEO_USE_LTR_FRAMES		(V4L2_CID_MPEG_BASE+234)
75342+#define V4L2_CID_MPEG_VIDEO_DEC_CONCEAL_COLOR		(V4L2_CID_MPEG_BASE+235)
75343 
75344 /* CIDs for the MPEG-2 Part 2 (H.262) codec */
75345 #define V4L2_CID_MPEG_VIDEO_MPEG2_LEVEL			(V4L2_CID_MPEG_BASE+270)
75346@@ -578,6 +591,15 @@ enum v4l2_mpeg_video_h264_hierarchical_coding_type {
75347 #define V4L2_CID_MPEG_VIDEO_H264_I_FRAME_MAX_QP	(V4L2_CID_MPEG_BASE+386)
75348 #define V4L2_CID_MPEG_VIDEO_H264_P_FRAME_MIN_QP	(V4L2_CID_MPEG_BASE+387)
75349 #define V4L2_CID_MPEG_VIDEO_H264_P_FRAME_MAX_QP	(V4L2_CID_MPEG_BASE+388)
75350+#define V4L2_CID_MPEG_VIDEO_H264_B_FRAME_MIN_QP	(V4L2_CID_MPEG_BASE+389)
75351+#define V4L2_CID_MPEG_VIDEO_H264_B_FRAME_MAX_QP	(V4L2_CID_MPEG_BASE+390)
75352+#define V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L0_BR	(V4L2_CID_MPEG_BASE+391)
75353+#define V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L1_BR	(V4L2_CID_MPEG_BASE+392)
75354+#define V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L2_BR	(V4L2_CID_MPEG_BASE+393)
75355+#define V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L3_BR	(V4L2_CID_MPEG_BASE+394)
75356+#define V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L4_BR	(V4L2_CID_MPEG_BASE+395)
75357+#define V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L5_BR	(V4L2_CID_MPEG_BASE+396)
75358+#define V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L6_BR	(V4L2_CID_MPEG_BASE+397)
75359 #define V4L2_CID_MPEG_VIDEO_MPEG4_I_FRAME_QP	(V4L2_CID_MPEG_BASE+400)
75360 #define V4L2_CID_MPEG_VIDEO_MPEG4_P_FRAME_QP	(V4L2_CID_MPEG_BASE+401)
75361 #define V4L2_CID_MPEG_VIDEO_MPEG4_B_FRAME_QP	(V4L2_CID_MPEG_BASE+402)
75362@@ -768,6 +790,15 @@ enum v4l2_mpeg_video_frame_skip_mode {
75363 	V4L2_MPEG_VIDEO_FRAME_SKIP_MODE_BUF_LIMIT	= 2,
75364 };
75365 
75366+#define V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_MIN_QP		(V4L2_CID_MPEG_BASE + 647)
75367+#define V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_MAX_QP		(V4L2_CID_MPEG_BASE + 648)
75368+#define V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_MIN_QP		(V4L2_CID_MPEG_BASE + 649)
75369+#define V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_MAX_QP		(V4L2_CID_MPEG_BASE + 650)
75370+#define V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_MIN_QP		(V4L2_CID_MPEG_BASE + 651)
75371+#define V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_MAX_QP		(V4L2_CID_MPEG_BASE + 652)
75372+#define V4L2_CID_MPEG_VIDEO_DEC_DISPLAY_DELAY		(V4L2_CID_MPEG_BASE + 653)
75373+#define V4L2_CID_MPEG_VIDEO_DEC_DISPLAY_DELAY_ENABLE	(V4L2_CID_MPEG_BASE + 654)
75374+
75375 /*  MPEG-class control IDs specific to the CX2341x driver as defined by V4L2 */
75376 #define V4L2_CID_MPEG_CX2341X_BASE				(V4L2_CTRL_CLASS_MPEG | 0x1000)
75377 #define V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE		(V4L2_CID_MPEG_CX2341X_BASE+0)
75378@@ -1171,4 +1202,38 @@ enum v4l2_detect_md_mode {
75379 #define V4L2_CID_DETECT_MD_THRESHOLD_GRID	(V4L2_CID_DETECT_CLASS_BASE + 3)
75380 #define V4L2_CID_DETECT_MD_REGION_GRID		(V4L2_CID_DETECT_CLASS_BASE + 4)
75381 
75382+#define V4L2_CID_COLORIMETRY_CLASS_BASE	(V4L2_CTRL_CLASS_COLORIMETRY | 0x900)
75383+#define V4L2_CID_COLORIMETRY_CLASS	(V4L2_CTRL_CLASS_COLORIMETRY | 1)
75384+
75385+#define V4L2_CID_COLORIMETRY_HDR10_CLL_INFO	(V4L2_CID_COLORIMETRY_CLASS_BASE + 0)
75386+
75387+struct v4l2_ctrl_hdr10_cll_info {
75388+	__u16 max_content_light_level;
75389+	__u16 max_pic_average_light_level;
75390+};
75391+
75392+#define V4L2_CID_COLORIMETRY_HDR10_MASTERING_DISPLAY	(V4L2_CID_COLORIMETRY_CLASS_BASE + 1)
75393+
75394+#define V4L2_HDR10_MASTERING_PRIMARIES_X_LOW	5
75395+#define V4L2_HDR10_MASTERING_PRIMARIES_X_HIGH	37000
75396+#define V4L2_HDR10_MASTERING_PRIMARIES_Y_LOW	5
75397+#define V4L2_HDR10_MASTERING_PRIMARIES_Y_HIGH	42000
75398+#define V4L2_HDR10_MASTERING_WHITE_POINT_X_LOW	5
75399+#define V4L2_HDR10_MASTERING_WHITE_POINT_X_HIGH	37000
75400+#define V4L2_HDR10_MASTERING_WHITE_POINT_Y_LOW	5
75401+#define V4L2_HDR10_MASTERING_WHITE_POINT_Y_HIGH	42000
75402+#define V4L2_HDR10_MASTERING_MAX_LUMA_LOW	50000
75403+#define V4L2_HDR10_MASTERING_MAX_LUMA_HIGH	100000000
75404+#define V4L2_HDR10_MASTERING_MIN_LUMA_LOW	1
75405+#define V4L2_HDR10_MASTERING_MIN_LUMA_HIGH	50000
75406+
75407+struct v4l2_ctrl_hdr10_mastering_display {
75408+	__u16 display_primaries_x[3];
75409+	__u16 display_primaries_y[3];
75410+	__u16 white_point_x;
75411+	__u16 white_point_y;
75412+	__u32 max_display_mastering_luminance;
75413+	__u32 min_display_mastering_luminance;
75414+};
75415+
75416 #endif
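A hedged illustration of the HDR10 mastering-display payload: coordinates are in 0.00002 CIE units and luminance in 0.0001 cd/m2 steps, all within the *_LOW/*_HIGH bounds defined above. The values approximate a BT.2020, 1000-nit display, with primaries in the G/B/R order used by the HEVC SEI:

	struct v4l2_ctrl_hdr10_mastering_display md = {
		.display_primaries_x = {  8500,  6550, 35400 },	/* G, B, R */
		.display_primaries_y = { 39850,  2300, 14600 },
		.white_point_x = 15635,					/* D65 */
		.white_point_y = 16450,
		.max_display_mastering_luminance = 10000000,		/* 1000 cd/m2 */
		.min_display_mastering_luminance = 50,			/* 0.005 cd/m2 */
	};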
75417diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
75418index b28817c59..1e650c284 100644
75419--- a/include/uapi/linux/videodev2.h
75420+++ b/include/uapi/linux/videodev2.h
75421@@ -689,6 +689,7 @@ struct v4l2_pix_format {
75422 #define V4L2_PIX_FMT_JPEG     v4l2_fourcc('J', 'P', 'E', 'G') /* JFIF JPEG     */
75423 #define V4L2_PIX_FMT_DV       v4l2_fourcc('d', 'v', 's', 'd') /* 1394          */
75424 #define V4L2_PIX_FMT_MPEG     v4l2_fourcc('M', 'P', 'E', 'G') /* MPEG-1/2/4 Multiplexed */
75425+#define V4L2_PIX_FMT_H265     v4l2_fourcc('H', '2', '6', '5') /* H265 with start codes */
75426 #define V4L2_PIX_FMT_H264     v4l2_fourcc('H', '2', '6', '4') /* H264 with start codes */
75427 #define V4L2_PIX_FMT_H264_NO_SC v4l2_fourcc('A', 'V', 'C', '1') /* H264 without start codes */
75428 #define V4L2_PIX_FMT_H264_MVC v4l2_fourcc('M', '2', '6', '4') /* H264 MVC */
75429@@ -1778,6 +1779,14 @@ enum v4l2_ctrl_type {
75430 	V4L2_CTRL_TYPE_U16	     = 0x0101,
75431 	V4L2_CTRL_TYPE_U32	     = 0x0102,
75432 	V4L2_CTRL_TYPE_AREA          = 0x0106,
75433+	V4L2_CTRL_TYPE_HDR10_CLL_INFO		= 0x0110,
75434+	V4L2_CTRL_TYPE_HDR10_MASTERING_DISPLAY	= 0x0111,
75435+	V4L2_CTRL_TYPE_H264_SPS             = 0x0200,
75436+	V4L2_CTRL_TYPE_H264_PPS		    = 0x0201,
75437+	V4L2_CTRL_TYPE_H264_SCALING_MATRIX  = 0x0202,
75438+	V4L2_CTRL_TYPE_H264_SLICE_PARAMS    = 0x0203,
75439+	V4L2_CTRL_TYPE_H264_DECODE_PARAMS   = 0x0204,
75440+	V4L2_CTRL_TYPE_H264_PRED_WEIGHTS    = 0x0205,
75441 };
75442 
75443 /*  Used in the VIDIOC_QUERYCTRL ioctl for querying controls */
75444diff --git a/init/Kconfig b/init/Kconfig
75445index 4b5a6fc0f..cf5edde30 100644
75446--- a/init/Kconfig
75447+++ b/init/Kconfig
75448@@ -1352,6 +1352,11 @@ config BLK_DEV_INITRD
75449 if BLK_DEV_INITRD
75450 
75451 source "usr/Kconfig"
75452+config INITRD_ASYNC
75453+	bool "Initrd async"
75454+	depends on NO_GKI
75455+	help
75456+	  Unpack the initial ramdisk asynchronously, which can reduce kernel init time.
75457 
75458 endif
75459 
75460@@ -1711,6 +1716,13 @@ config KALLSYMS
75461 	  symbolic stack backtraces. This increases the size of the kernel
75462 	  somewhat, as all symbols have to be loaded into the kernel image.
75463 
75464+config WGCM
75465+	bool "Enable Workgroup Control Monitor API"
75466+	default n
75467+	help
75468+	  Enable the Workgroup Control Monitor API so that userspace can query
75469+	  the number of blocked threads.
75470+
75471 config KALLSYMS_ALL
75472 	bool "Include all symbols in kallsyms"
75473 	depends on DEBUG_KERNEL && KALLSYMS
75474diff --git a/init/initramfs.c b/init/initramfs.c
75475index 55b74d7e5..f4c4e2404 100644
75476--- a/init/initramfs.c
75477+++ b/init/initramfs.c
75478@@ -12,6 +12,7 @@
75479 #include <linux/file.h>
75480 #include <linux/memblock.h>
75481 #include <linux/namei.h>
75482+#include <linux/initramfs.h>
75483 #include <linux/init_syscalls.h>
75484 
75485 static ssize_t __init xwrite(struct file *file, const char *p, size_t count,
75486@@ -465,6 +466,9 @@ static char * __init unpack_to_rootfs(char *buf, unsigned long len)
75487 	state = Start;
75488 	this_header = 0;
75489 	message = NULL;
75490+#if defined(CONFIG_ROCKCHIP_THUNDER_BOOT) && defined(CONFIG_ROCKCHIP_HW_DECOMPRESS) && defined(CONFIG_INITRD_ASYNC)
75491+	wait_initrd_hw_decom_done();
75492+#endif
75493 	while (!message && len) {
75494 		loff_t saved_offset = this_header;
75495 		if (*buf == '0' && !(this_header & 3)) {
75496@@ -639,4 +643,23 @@ static int __init populate_rootfs(void)
75497 	flush_delayed_fput();
75498 	return 0;
75499 }
75500+
75501+#if IS_BUILTIN(CONFIG_INITRD_ASYNC)
75502+#include <linux/kthread.h>
75503+#include <linux/async.h>
75504+
75505+static void __init unpack_rootfs_async(void *unused, async_cookie_t cookie)
75506+{
75507+	populate_rootfs();
75508+}
75509+
75510+static int __init populate_rootfs_async(void)
75511+{
75512+	async_schedule(unpack_rootfs_async, NULL);
75513+	return 0;
75514+}
75515+
75516+pure_initcall(populate_rootfs_async);
75517+#else
75518 rootfs_initcall(populate_rootfs);
75519+#endif
75520diff --git a/init/main.c b/init/main.c
75521index 30d6afeae..f54324ddb 100644
75522--- a/init/main.c
75523+++ b/init/main.c
75524@@ -113,6 +113,10 @@
75525 
75526 #include <kunit/test.h>
75527 
75528+#ifdef CONFIG_SCHED_RTG_AUTHORITY
75529+extern int init_rtg_authority_control(void);
75530+#endif
75531+
75532 static int kernel_init(void *);
75533 
75534 extern void init_IRQ(void);
75535@@ -926,6 +930,10 @@ asmlinkage __visible void __init __no_sanitize_address start_kernel(void)
75536 		local_irq_disable();
75537 	radix_tree_init();
75538 
75539+#ifdef CONFIG_SCHED_RTG_AUTHORITY
75540+	BUG_ON(init_rtg_authority_control());
75541+#endif
75542+
75543 	/*
75544 	 * Set up housekeeping before setting up workqueues to allow the unbound
75545 	 * workqueue to take non-housekeeping into account.
75546@@ -1524,6 +1532,9 @@ static noinline void __init kernel_init_freeable(void)
75547 	smp_init();
75548 	sched_init_smp();
75549 
75550+#ifdef CONFIG_ROCKCHIP_THUNDER_BOOT
75551+	kthread_run(defer_free_memblock, NULL, "defer_mem");
75552+#endif
75553 	padata_init();
75554 	page_alloc_init_late();
75555 	/* Initialize page ext after all struct pages are initialized. */
75556@@ -1532,6 +1543,9 @@ static noinline void __init kernel_init_freeable(void)
75557 	do_basic_setup();
75558 
75559 	kunit_run_all_tests();
75560+#if IS_BUILTIN(CONFIG_INITRD_ASYNC)
75561+	async_synchronize_full();
75562+#endif
75563 
75564 	console_on_rootfs();
75565 
75566diff --git a/ipc/msg.c b/ipc/msg.c
75567index 8ded6b8f1..6e6c8e0c9 100644
75568--- a/ipc/msg.c
75569+++ b/ipc/msg.c
75570@@ -147,7 +147,7 @@ static int newque(struct ipc_namespace *ns, struct ipc_params *params)
75571 	key_t key = params->key;
75572 	int msgflg = params->flg;
75573 
75574-	msq = kvmalloc(sizeof(*msq), GFP_KERNEL_ACCOUNT);
75575+	msq = kvmalloc(sizeof(*msq), GFP_KERNEL);
75576 	if (unlikely(!msq))
75577 		return -ENOMEM;
75578 
75579diff --git a/ipc/namespace.c b/ipc/namespace.c
75580index c94c05846..24e7b4532 100644
75581--- a/ipc/namespace.c
75582+++ b/ipc/namespace.c
75583@@ -42,7 +42,7 @@ static struct ipc_namespace *create_ipc_ns(struct user_namespace *user_ns,
75584 		goto fail;
75585 
75586 	err = -ENOMEM;
75587-	ns = kzalloc(sizeof(struct ipc_namespace), GFP_KERNEL_ACCOUNT);
75588+	ns = kzalloc(sizeof(struct ipc_namespace), GFP_KERNEL);
75589 	if (ns == NULL)
75590 		goto fail_dec;
75591 
75592diff --git a/ipc/sem.c b/ipc/sem.c
75593index 916f7a90b..d735cecd9 100644
75594--- a/ipc/sem.c
75595+++ b/ipc/sem.c
75596@@ -511,7 +511,7 @@ static struct sem_array *sem_alloc(size_t nsems)
75597 	if (nsems > (INT_MAX - sizeof(*sma)) / sizeof(sma->sems[0]))
75598 		return NULL;
75599 
75600-	sma = kvzalloc(struct_size(sma, sems, nsems), GFP_KERNEL_ACCOUNT);
75601+	sma = kvzalloc(struct_size(sma, sems, nsems), GFP_KERNEL);
75602 	if (unlikely(!sma))
75603 		return NULL;
75604 
75605@@ -1852,7 +1852,7 @@ static inline int get_undo_list(struct sem_undo_list **undo_listp)
75606 
75607 	undo_list = current->sysvsem.undo_list;
75608 	if (!undo_list) {
75609-		undo_list = kzalloc(sizeof(*undo_list), GFP_KERNEL_ACCOUNT);
75610+		undo_list = kzalloc(sizeof(*undo_list), GFP_KERNEL);
75611 		if (undo_list == NULL)
75612 			return -ENOMEM;
75613 		spin_lock_init(&undo_list->lock);
75614@@ -1937,7 +1937,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
75615 	rcu_read_unlock();
75616 
75617 	/* step 2: allocate new undo structure */
75618-	new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL_ACCOUNT);
75619+	new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL);
75620 	if (!new) {
75621 		ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
75622 		return ERR_PTR(-ENOMEM);
75623diff --git a/ipc/shm.c b/ipc/shm.c
75624index b418731d6..471ac3e74 100644
75625--- a/ipc/shm.c
75626+++ b/ipc/shm.c
75627@@ -711,7 +711,7 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
75628 			ns->shm_tot + numpages > ns->shm_ctlall)
75629 		return -ENOSPC;
75630 
75631-	shp = kvmalloc(sizeof(*shp), GFP_KERNEL_ACCOUNT);
75632+	shp = kvmalloc(sizeof(*shp), GFP_KERNEL);
75633 	if (unlikely(!shp))
75634 		return -ENOMEM;
75635 
75636diff --git a/ipc/util.c b/ipc/util.c
75637index 7c3601dad..bbb5190af 100644
75638--- a/ipc/util.c
75639+++ b/ipc/util.c
75640@@ -754,13 +754,21 @@ struct pid_namespace *ipc_seq_pid_ns(struct seq_file *s)
75641 static struct kern_ipc_perm *sysvipc_find_ipc(struct ipc_ids *ids, loff_t pos,
75642 					      loff_t *new_pos)
75643 {
75644-	struct kern_ipc_perm *ipc = NULL;
75645-	int max_idx = ipc_get_maxidx(ids);
75646+	struct kern_ipc_perm *ipc;
75647+	int total, id;
75648+
75649+	total = 0;
75650+	for (id = 0; id < pos && total < ids->in_use; id++) {
75651+		ipc = idr_find(&ids->ipcs_idr, id);
75652+		if (ipc != NULL)
75653+			total++;
75654+	}
75655 
75656-	if (max_idx == -1 || pos > max_idx)
75657+	ipc = NULL;
75658+	if (total >= ids->in_use)
75659 		goto out;
75660 
75661-	for (; pos <= max_idx; pos++) {
75662+	for (; pos < ipc_mni; pos++) {
75663 		ipc = idr_find(&ids->ipcs_idr, pos);
75664 		if (ipc != NULL) {
75665 			rcu_read_lock();
75666diff --git a/kernel/cgroup/legacy_freezer.c b/kernel/cgroup/legacy_freezer.c
75667index 08236798d..081d026f1 100644
75668--- a/kernel/cgroup/legacy_freezer.c
75669+++ b/kernel/cgroup/legacy_freezer.c
75670@@ -479,3 +479,4 @@ struct cgroup_subsys freezer_cgrp_subsys = {
75671 	.fork		= freezer_fork,
75672 	.legacy_cftypes	= files,
75673 };
75674+EXPORT_SYMBOL_GPL(freezer_cgrp_subsys);
75675diff --git a/kernel/cpu.c b/kernel/cpu.c
75676index 2cda6a3ef..7b8933e78 100644
75677--- a/kernel/cpu.c
75678+++ b/kernel/cpu.c
75679@@ -275,11 +275,13 @@ void cpu_maps_update_begin(void)
75680 {
75681 	mutex_lock(&cpu_add_remove_lock);
75682 }
75683+EXPORT_SYMBOL_GPL(cpu_maps_update_begin);
75684 
75685 void cpu_maps_update_done(void)
75686 {
75687 	mutex_unlock(&cpu_add_remove_lock);
75688 }
75689+EXPORT_SYMBOL_GPL(cpu_maps_update_done);
75690 
75691 /*
75692  * If set, cpu_up and cpu_down will return -EBUSY and do nothing.
75693@@ -1054,7 +1056,7 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
75694 	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
75695 	int prev_state, ret = 0;
75696 
75697-	if (num_online_cpus() == 1)
75698+	if (num_active_cpus() == 1 && cpu_active(cpu))
75699 		return -EBUSY;
75700 
75701 	if (!cpu_present(cpu))
75702diff --git a/kernel/irq/generic-chip.c b/kernel/irq/generic-chip.c
75703index e2999a070..79cb6d063 100644
75704--- a/kernel/irq/generic-chip.c
75705+++ b/kernel/irq/generic-chip.c
75706@@ -200,6 +200,7 @@ int irq_gc_set_wake(struct irq_data *d, unsigned int on)
75707 	irq_gc_unlock(gc);
75708 	return 0;
75709 }
75710+EXPORT_SYMBOL_GPL(irq_gc_set_wake);
75711 
75712 static u32 irq_readl_be(void __iomem *addr)
75713 {
75714diff --git a/kernel/power/Makefile b/kernel/power/Makefile
75715index 5899260a8..f5b51d8fe 100644
75716--- a/kernel/power/Makefile
75717+++ b/kernel/power/Makefile
75718@@ -17,4 +17,5 @@ obj-$(CONFIG_PM_WAKELOCKS)	+= wakelock.o
75719 
75720 obj-$(CONFIG_MAGIC_SYSRQ)	+= poweroff.o
75721 
75722+
75723 obj-$(CONFIG_ENERGY_MODEL)	+= energy_model.o
75724diff --git a/kernel/power/energy_model.c b/kernel/power/energy_model.c
75725index 119b929dc..41430128d 100644
75726--- a/kernel/power/energy_model.c
75727+++ b/kernel/power/energy_model.c
75728@@ -52,6 +52,17 @@ static int em_debug_cpus_show(struct seq_file *s, void *unused)
75729 }
75730 DEFINE_SHOW_ATTRIBUTE(em_debug_cpus);
75731 
75732+static int em_debug_units_show(struct seq_file *s, void *unused)
75733+{
75734+	struct em_perf_domain *pd = s->private;
75735+	char *units = pd->milliwatts ? "milliWatts" : "bogoWatts";
75736+
75737+	seq_printf(s, "%s\n", units);
75738+
75739+	return 0;
75740+}
75741+DEFINE_SHOW_ATTRIBUTE(em_debug_units);
75742+
75743 static void em_debug_create_pd(struct device *dev)
75744 {
75745 	struct dentry *d;
75746@@ -64,6 +75,8 @@ static void em_debug_create_pd(struct device *dev)
75747 		debugfs_create_file("cpus", 0444, d, dev->em_pd->cpus,
75748 				    &em_debug_cpus_fops);
75749 
75750+	debugfs_create_file("units", 0444, d, dev->em_pd, &em_debug_units_fops);
75751+
75752 	/* Create a sub-directory for each performance state */
75753 	for (i = 0; i < dev->em_pd->nr_perf_states; i++)
75754 		em_debug_create_ps(&dev->em_pd->table[i], d);
75755@@ -245,17 +258,24 @@ EXPORT_SYMBOL_GPL(em_cpu_get);
75756  * @cpus	: Pointer to cpumask_t, which in case of a CPU device is
75757  *		obligatory. It can be taken from i.e. 'policy->cpus'. For other
75758  *		type of devices this should be set to NULL.
75759+ * @milliwatts	: Flag indicating whether the power values are in milliWatts or
75760+ *		in some other (abstract) scale. It must be set properly.
75761  *
75762  * Create Energy Model tables for a performance domain using the callbacks
75763  * defined in cb.
75764  *
75765+ * It is important to set @milliwatts correctly. Some kernel sub-systems
75766+ * might rely on this flag and check whether all devices in the EM use the
75767+ * same scale.
75768+ *
75769  * If multiple clients register the same performance domain, all but the first
75770  * registration will be ignored.
75771  *
75772  * Return 0 on success
75773  */
75774 int em_dev_register_perf_domain(struct device *dev, unsigned int nr_states,
75775-				struct em_data_callback *cb, cpumask_t *cpus)
75776+				struct em_data_callback *cb, cpumask_t *cpus,
75777+				bool milliwatts)
75778 {
75779 	unsigned long cap, prev_cap = 0;
75780 	int cpu, ret;
75781@@ -308,6 +328,8 @@ int em_dev_register_perf_domain(struct device *dev, unsigned int nr_states,
75782 	if (ret)
75783 		goto unlock;
75784 
75785+	dev->em_pd->milliwatts = milliwatts;
75786+
75787 	em_debug_create_pd(dev);
75788 	dev_info(dev, "EM: created perf domain\n");
75789 
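With the extra argument, an EM client now states the unit of its power numbers at registration time. A hedged sketch of a driver registering a domain with real milliwatt values (`cpu_dev`, `nr_opps` and `policy` are placeholders, and the callback body is illustrative):

	static int sample_active_power(unsigned long *mW, unsigned long *kHz,
				       struct device *dev)
	{
		/* Look up the OPP at or above *kHz and report its power in mW. */
		*kHz = 1800000;
		*mW  = 750;
		return 0;
	}

	static struct em_data_callback em_cb = EM_DATA_CB(sample_active_power);

	/* 'true': the values above really are milliwatts, not bogoWatts. */
	em_dev_register_perf_domain(cpu_dev, nr_opps, &em_cb, policy->cpus, true);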
75790diff --git a/kernel/power/power.h b/kernel/power/power.h
75791index 24f12d534..778bf431e 100644
75792--- a/kernel/power/power.h
75793+++ b/kernel/power/power.h
75794@@ -106,7 +106,7 @@ extern int create_basic_memory_bitmaps(void);
75795 extern void free_basic_memory_bitmaps(void);
75796 extern int hibernate_preallocate_memory(void);
75797 
75798-extern void clear_free_pages(void);
75799+extern void clear_or_poison_free_pages(void);
75800 
75801 /**
75802  *	Auxiliary structure used for reading the snapshot image data and
75803diff --git a/kernel/power/process.c b/kernel/power/process.c
75804index b9faa363c..013dda3f1 100644
75805--- a/kernel/power/process.c
75806+++ b/kernel/power/process.c
75807@@ -85,23 +85,27 @@ static int try_to_freeze_tasks(bool user_only)
75808 	elapsed = ktime_sub(end, start);
75809 	elapsed_msecs = ktime_to_ms(elapsed);
75810 
75811-	if (todo) {
75812+	if (wakeup) {
75813 		pr_cont("\n");
75814-		pr_err("Freezing of tasks %s after %d.%03d seconds "
75815-		       "(%d tasks refusing to freeze, wq_busy=%d):\n",
75816-		       wakeup ? "aborted" : "failed",
75817+		pr_err("Freezing of tasks aborted after %d.%03d seconds\n",
75818+		       elapsed_msecs / 1000, elapsed_msecs % 1000);
75819+	} else if (todo) {
75820+		pr_cont("\n");
75821+		pr_err("Freezing of tasks failed after %d.%03d seconds"
75822+		       " (%d tasks refusing to freeze, wq_busy=%d):\n",
75823 		       elapsed_msecs / 1000, elapsed_msecs % 1000,
75824 		       todo - wq_busy, wq_busy);
75825 
75826 		if (wq_busy)
75827 			show_workqueue_state();
75828 
75829-		if (!wakeup || pm_debug_messages_on) {
75830+		if (pm_debug_messages_on) {
75831 			read_lock(&tasklist_lock);
75832 			for_each_process_thread(g, p) {
75833 				if (p != current && !freezer_should_skip(p)
75834-				    && freezing(p) && !frozen(p))
75835+				    && freezing(p) && !frozen(p)) {
75836 					sched_show_task(p);
75837+				}
75838 			}
75839 			read_unlock(&tasklist_lock);
75840 		}
75841@@ -134,7 +138,7 @@ int freeze_processes(void)
75842 	if (!pm_freezing)
75843 		atomic_inc(&system_freezing_cnt);
75844 
75845-	pm_wakeup_clear(0);
75846+	pm_wakeup_clear(true);
75847 	pr_info("Freezing user space processes ... ");
75848 	pm_freezing = true;
75849 	error = try_to_freeze_tasks(true);
75850diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
75851index 4aa4d5d39..545958377 100644
75852--- a/kernel/power/suspend.c
75853+++ b/kernel/power/suspend.c
75854@@ -30,6 +30,7 @@
75855 #include <trace/events/power.h>
75856 #include <linux/compiler.h>
75857 #include <linux/moduleparam.h>
75858+#include <linux/wakeup_reason.h>
75859 
75860 #include "power.h"
75861 
75862@@ -138,6 +139,9 @@ static void s2idle_loop(void)
75863 			break;
75864 		}
75865 
75866+		pm_wakeup_clear(false);
75867+		clear_wakeup_reasons();
75868+
75869 		s2idle_enter();
75870 	}
75871 
75872@@ -357,6 +361,7 @@ static int suspend_prepare(suspend_state_t state)
75873 	if (!error)
75874 		return 0;
75875 
75876+	log_suspend_abort_reason("One or more tasks refusing to freeze");
75877 	suspend_stats.failed_freeze++;
75878 	dpm_save_failed_step(SUSPEND_FREEZE);
75879 	pm_notifier_call_chain(PM_POST_SUSPEND);
75880@@ -386,7 +391,7 @@ void __weak arch_suspend_enable_irqs(void)
75881  */
75882 static int suspend_enter(suspend_state_t state, bool *wakeup)
75883 {
75884-	int error;
75885+	int error, last_dev;
75886 
75887 	error = platform_suspend_prepare(state);
75888 	if (error)
75889@@ -394,7 +399,11 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
75890 
75891 	error = dpm_suspend_late(PMSG_SUSPEND);
75892 	if (error) {
75893+		last_dev = suspend_stats.last_failed_dev + REC_FAILED_NUM - 1;
75894+		last_dev %= REC_FAILED_NUM;
75895 		pr_err("late suspend of devices failed\n");
75896+		log_suspend_abort_reason("late suspend of %s device failed",
75897+					 suspend_stats.failed_devs[last_dev]);
75898 		goto Platform_finish;
75899 	}
75900 	error = platform_suspend_prepare_late(state);
75901@@ -403,7 +412,11 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
75902 
75903 	error = dpm_suspend_noirq(PMSG_SUSPEND);
75904 	if (error) {
75905+		last_dev = suspend_stats.last_failed_dev + REC_FAILED_NUM - 1;
75906+		last_dev %= REC_FAILED_NUM;
75907 		pr_err("noirq suspend of devices failed\n");
75908+		log_suspend_abort_reason("noirq suspend of %s device failed",
75909+					 suspend_stats.failed_devs[last_dev]);
75910 		goto Platform_early_resume;
75911 	}
75912 	error = platform_suspend_prepare_noirq(state);
75913@@ -419,8 +432,10 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
75914 	}
75915 
75916 	error = suspend_disable_secondary_cpus();
75917-	if (error || suspend_test(TEST_CPUS))
75918+	if (error || suspend_test(TEST_CPUS)) {
75919+		log_suspend_abort_reason("Disabling non-boot cpus failed");
75920 		goto Enable_cpus;
75921+	}
75922 
75923 	arch_suspend_disable_irqs();
75924 	BUG_ON(!irqs_disabled());
75925@@ -491,6 +506,8 @@ int suspend_devices_and_enter(suspend_state_t state)
75926 	error = dpm_suspend_start(PMSG_SUSPEND);
75927 	if (error) {
75928 		pr_err("Some devices failed to suspend, or early wake event detected\n");
75929+		log_suspend_abort_reason(
75930+				"Some devices failed to suspend, or early wake event detected");
75931 		goto Recover_platform;
75932 	}
75933 	suspend_test_finish("suspend devices");
75934diff --git a/kernel/reboot.c b/kernel/reboot.c
75935index af6f23d8b..240805041 100644
75936--- a/kernel/reboot.c
75937+++ b/kernel/reboot.c
75938@@ -32,7 +32,9 @@ EXPORT_SYMBOL(cad_pid);
75939 #define DEFAULT_REBOOT_MODE
75940 #endif
75941 enum reboot_mode reboot_mode DEFAULT_REBOOT_MODE;
75942+EXPORT_SYMBOL_GPL(reboot_mode);
75943 enum reboot_mode panic_reboot_mode = REBOOT_UNDEFINED;
75944+EXPORT_SYMBOL_GPL(panic_reboot_mode);
75945 
75946 /*
75947  * This variable is used privately to keep track of whether or not
75948@@ -215,6 +217,27 @@ void do_kernel_restart(char *cmd)
75949 	atomic_notifier_call_chain(&restart_handler_list, reboot_mode, cmd);
75950 }
75951 
75952+#ifdef CONFIG_NO_GKI
75953+static ATOMIC_NOTIFIER_HEAD(pre_restart_handler_list);
75954+
75955+int register_pre_restart_handler(struct notifier_block *nb)
75956+{
75957+	return atomic_notifier_chain_register(&pre_restart_handler_list, nb);
75958+}
75959+EXPORT_SYMBOL(register_pre_restart_handler);
75960+
75961+int unregister_pre_restart_handler(struct notifier_block *nb)
75962+{
75963+	return atomic_notifier_chain_unregister(&pre_restart_handler_list, nb);
75964+}
75965+EXPORT_SYMBOL(unregister_pre_restart_handler);
75966+
75967+void do_kernel_pre_restart(char *cmd)
75968+{
75969+	atomic_notifier_call_chain(&pre_restart_handler_list, reboot_mode, cmd);
75970+}
75971+#endif
75972+
75973 void migrate_to_reboot_cpu(void)
75974 {
75975 	/* The boot cpu is always logical cpu 0 */
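A hedged sketch of a vendor driver hooking the new pre-restart chain (notifier names are illustrative; the register/unregister prototypes are assumed to be exposed from a vendor header):

	static int sample_pre_restart(struct notifier_block *nb,
				      unsigned long mode, void *cmd)
	{
		/* Flush logs / park hardware before the real restart handlers run. */
		return NOTIFY_DONE;
	}

	static struct notifier_block sample_pre_restart_nb = {
		.notifier_call = sample_pre_restart,
	};

	/* probe:  */ register_pre_restart_handler(&sample_pre_restart_nb);
	/* remove: */ unregister_pre_restart_handler(&sample_pre_restart_nb);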
75976diff --git a/kernel/sched/core.c b/kernel/sched/core.c
75977index d4ebc1d4e..c6242ad9f 100644
75978--- a/kernel/sched/core.c
75979+++ b/kernel/sched/core.c
75980@@ -18,6 +18,7 @@
75981 #include <linux/scs.h>
75982 #include <linux/irq.h>
75983 #include <linux/delay.h>
75984+#include <linux/wgcm.h>
75985 
75986 #ifdef CONFIG_QOS_CTRL
75987 #include <linux/sched/qos_ctrl.h>
75988@@ -34,6 +35,7 @@
75989 #include "smp.h"
75990 #include "walt.h"
75991 #include "rtg/rtg.h"
75992+#include <rtg/rtg_qos.h>
75993 
75994 /*
75995  * Export tracepoints that act as a bare tracehook (ie: have no trace event
75996@@ -50,6 +52,14 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(sched_overutilized_tp);
75997 EXPORT_TRACEPOINT_SYMBOL_GPL(sched_util_est_cfs_tp);
75998 EXPORT_TRACEPOINT_SYMBOL_GPL(sched_util_est_se_tp);
75999 EXPORT_TRACEPOINT_SYMBOL_GPL(sched_update_nr_running_tp);
76000+EXPORT_TRACEPOINT_SYMBOL_GPL(sched_switch);
76001+EXPORT_TRACEPOINT_SYMBOL_GPL(sched_waking);
76002+#ifdef CONFIG_SCHEDSTATS
76003+EXPORT_TRACEPOINT_SYMBOL_GPL(sched_stat_sleep);
76004+EXPORT_TRACEPOINT_SYMBOL_GPL(sched_stat_wait);
76005+EXPORT_TRACEPOINT_SYMBOL_GPL(sched_stat_iowait);
76006+EXPORT_TRACEPOINT_SYMBOL_GPL(sched_stat_blocked);
76007+#endif
76008 
76009 DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
76010 
76011@@ -663,7 +673,7 @@ int get_nohz_timer_target(void)
76012 	int i, cpu = smp_processor_id(), default_cpu = -1;
76013 	struct sched_domain *sd;
76014 
76015-	if (housekeeping_cpu(cpu, HK_FLAG_TIMER)) {
76016+	if (housekeeping_cpu(cpu, HK_FLAG_TIMER) && cpu_active(cpu)) {
76017 		if (!idle_cpu(cpu))
76018 			return cpu;
76019 		default_cpu = cpu;
76020@@ -683,8 +693,20 @@ int get_nohz_timer_target(void)
76021 		}
76022 	}
76023 
76024-	if (default_cpu == -1)
76025-		default_cpu = housekeeping_any_cpu(HK_FLAG_TIMER);
76026+	if (default_cpu == -1) {
76027+		for_each_cpu_and(i, cpu_active_mask,
76028+				 housekeeping_cpumask(HK_FLAG_TIMER)) {
76029+			if (cpu == i)
76030+				continue;
76031+			if (!idle_cpu(i)) {
76032+				cpu = i;
76033+				goto unlock;
76034+			}
76035+		}
76036+		default_cpu = cpumask_any(cpu_active_mask);
76037+		if (unlikely(default_cpu >= nr_cpu_ids))
76038+			goto unlock;
76039+	}
76040 	cpu = default_cpu;
76041 unlock:
76042 	rcu_read_unlock();
76043@@ -1654,11 +1676,15 @@ void activate_task(struct rq *rq, struct task_struct *p, int flags)
76044 {
76045 	enqueue_task(rq, p, flags);
76046 
76047+	wgcm_activate_task(p);
76048+
76049 	p->on_rq = TASK_ON_RQ_QUEUED;
76050 }
76051 
76052 void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
76053 {
76054+	wgcm_deactivate_task(p, flags);
76055+
76056 	p->on_rq = (flags & DEQUEUE_SLEEP) ? 0 : TASK_ON_RQ_MIGRATING;
76057 
76058 	dequeue_task(rq, p, flags);
76059@@ -1770,7 +1796,9 @@ static inline bool is_cpu_allowed(struct task_struct *p, int cpu)
76060 	if (is_per_cpu_kthread(p))
76061 		return cpu_online(cpu);
76062 
76063-	return cpu_active(cpu);
76064+	if (!cpu_active(cpu))
76065+		return false;
76066+	return cpumask_test_cpu(cpu, task_cpu_possible_mask(p));
76067 }
76068 
76069 /*
76070@@ -2433,7 +2461,7 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
76071 			}
76072 			fallthrough;
76073 		case possible:
76074-			do_set_cpus_allowed(p, cpu_possible_mask);
76075+			do_set_cpus_allowed(p, task_cpu_possible_mask(p));
76076 			state = fail;
76077 			break;
76078 
76079@@ -2627,6 +2655,8 @@ ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags,
76080 {
76081 	int en_flags = ENQUEUE_WAKEUP | ENQUEUE_NOCLOCK;
76082 
76083+	if (wake_flags & WF_SYNC)
76084+		en_flags |= ENQUEUE_WAKEUP_SYNC;
76085 	lockdep_assert_held(&rq->lock);
76086 
76087 	if (p->sched_contributes_to_load)
76088@@ -3023,6 +3053,19 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
76089 	if (!(p->state & state))
76090 		goto unlock;
76091 
76092+#ifdef CONFIG_FREEZER
76093+	/*
76094+	 * If we're going to wake up a thread which may be frozen, then
76095+	 * we can only do so if we have an active CPU which is capable of
76096+	 * running it. This may not be the case when resuming from suspend,
76097+	 * as the secondary CPUs may not yet be back online. See __thaw_task()
76098+	 * for the actual wakeup.
76099+	 */
76100+	if (unlikely(frozen_or_skipped(p)) &&
76101+	    !cpumask_intersects(cpu_active_mask, task_cpu_possible_mask(p)))
76102+		goto unlock;
76103+#endif
76104+
76105 	trace_sched_waking(p);
76106 
76107 	/* We're going to change ->state: */
76108@@ -3269,6 +3312,8 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
76109 #ifdef CONFIG_SCHED_RTG
76110 	p->rtg_depth = 0;
76111 #endif
76112+
76113+	wgcm_clear_child(p);
76114 }
76115 
76116 DEFINE_STATIC_KEY_FALSE(sched_numa_balancing);
76117@@ -5719,16 +5764,19 @@ int sched_setscheduler(struct task_struct *p, int policy,
76118 {
76119 	return _sched_setscheduler(p, policy, param, true);
76120 }
76121+EXPORT_SYMBOL_GPL(sched_setscheduler);
76122 
76123 int sched_setattr(struct task_struct *p, const struct sched_attr *attr)
76124 {
76125 	return __sched_setscheduler(p, attr, true, true);
76126 }
76127+EXPORT_SYMBOL_GPL(sched_setattr);
76128 
76129 int sched_setattr_nocheck(struct task_struct *p, const struct sched_attr *attr)
76130 {
76131 	return __sched_setscheduler(p, attr, false, true);
76132 }
76133+EXPORT_SYMBOL_GPL(sched_setattr_nocheck);
76134 
76135 /**
76136  * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
76137@@ -5748,6 +5796,7 @@ int sched_setscheduler_nocheck(struct task_struct *p, int policy,
76138 {
76139 	return _sched_setscheduler(p, policy, param, false);
76140 }
76141+EXPORT_SYMBOL_GPL(sched_setscheduler_nocheck);
76142 
76143 /*
76144  * SCHED_FIFO is a broken scheduler model; that is, it is fundamentally
76145@@ -7055,6 +7104,9 @@ void migrate_tasks(struct rq *dead_rq, struct rq_flags *rf,
76146 	 */
76147 	update_rq_clock(rq);
76148 
76149+#ifdef CONFIG_SCHED_DEBUG
76150+	orf.clock_update_flags |= RQCF_UPDATED;
76151+#endif
76152 	for (;;) {
76153 		/*
76154 		 * There's this thread running, bail when that's the only
76155@@ -9190,6 +9242,10 @@ void sched_exit(struct task_struct *p)
76156 	sched_set_group_id(p, 0);
76157 #endif
76158 
76159+#ifdef CONFIG_SCHED_RTG_QOS
76160+	sched_exit_qos_list(p);
76161+#endif
76162+
76163 	rq = task_rq_lock(p, &rf);
76164 
76165 	/* rq->curr == p */
76166diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
76167index 0a94f01a1..932f44711 100644
76168--- a/kernel/sched/fair.c
76169+++ b/kernel/sched/fair.c
76170@@ -86,6 +86,7 @@ enum sched_tunable_scaling sysctl_sched_tunable_scaling = SCHED_TUNABLESCALING_L
76171  * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
76172  */
76173 unsigned int sysctl_sched_min_granularity			= 750000ULL;
76174+EXPORT_SYMBOL_GPL(sysctl_sched_min_granularity);
76175 static unsigned int normalized_sysctl_sched_min_granularity	= 750000ULL;
76176 
76177 /*
76178@@ -3406,6 +3407,7 @@ void set_task_rq_fair(struct sched_entity *se,
76179 	se->avg.last_update_time = n_last_update_time;
76180 }
76181 
76182+
76183 /*
76184  * When on migration a sched_entity joins/leaves the PELT hierarchy, we need to
76185  * propagate its contribution. The key to this propagation is the invariant
76186@@ -3473,6 +3475,7 @@ void set_task_rq_fair(struct sched_entity *se,
76187  * XXX: only do this for the part of runnable > running ?
76188  *
76189  */
76190+
76191 static inline void
76192 update_tg_cfs_util(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
76193 {
76194@@ -3701,19 +3704,7 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
76195 
76196 		r = removed_util;
76197 		sub_positive(&sa->util_avg, r);
76198-		sub_positive(&sa->util_sum, r * divider);
76199-		/*
76200-		 * Because of rounding, se->util_sum might ends up being +1 more than
76201-		 * cfs->util_sum. Although this is not a problem by itself, detaching
76202-		 * a lot of tasks with the rounding problem between 2 updates of
76203-		 * util_avg (~1ms) can make cfs->util_sum becoming null whereas
76204-		 * cfs_util_avg is not.
76205-		 * Check that util_sum is still above its lower bound for the new
76206-		 * util_avg. Given that period_contrib might have moved since the last
76207-		 * sync, we are only sure that util_sum must be above or equal to
76208-		 *    util_avg * minimum possible divider
76209-		 */
76210-		sa->util_sum = max_t(u32, sa->util_sum, sa->util_avg * PELT_MIN_DIVIDER);
76211+		sa->util_sum = sa->util_avg * divider;
76212 
76213 		r = removed_runnable;
76214 		sub_positive(&sa->runnable_avg, r);
76215@@ -5361,7 +5352,7 @@ static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
76216 /*
76217  * When a group wakes up we want to make sure that its quota is not already
76218  * expired/exceeded, otherwise it may be allowed to steal additional ticks of
76219- * runtime as update_curr() throttling can not trigger until it's on-rq.
76220+ * runtime as update_curr() throttling can not not trigger until it's on-rq.
76221  */
76222 static void check_enqueue_throttle(struct cfs_rq *cfs_rq)
76223 {
76224@@ -10821,8 +10812,20 @@ void nohz_balance_enter_idle(int cpu)
76225 	SCHED_WARN_ON(cpu != smp_processor_id());
76226 
76227 	/* If this CPU is going down, then nothing needs to be done: */
76228-	if (!cpu_active(cpu))
76229+	if (!cpu_active(cpu)) {
76230+		/*
76231+		 * A CPU can be paused while it is idle with its tick
76232+		 * stopped. nohz_balance_exit_idle() should be called
76233+		 * from the local CPU, so it can't be called during
76234+		 * pause. This results in the paused CPU participating in
76235+		 * the nohz idle balance, which should be avoided.
76236+		 *
76237+		 * When the paused CPU exits idle and enters again,
76238+		 * exempt the paused CPU from nohz_balance_exit_idle.
76239+		 */
76240+		nohz_balance_exit_idle(rq);
76241 		return;
76242+	}
76243 
76244 	/* Spare idle load balancing on CPUs that don't want to be disturbed: */
76245 	if (!housekeeping_cpu(cpu, HK_FLAG_SCHED))
76246diff --git a/kernel/sched/loadavg.c b/kernel/sched/loadavg.c
76247index d2a655643..b5837e277 100644
76248--- a/kernel/sched/loadavg.c
76249+++ b/kernel/sched/loadavg.c
76250@@ -75,6 +75,7 @@ void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
76251 	loads[1] = (avenrun[1] + offset) << shift;
76252 	loads[2] = (avenrun[2] + offset) << shift;
76253 }
76254+EXPORT_SYMBOL_GPL(get_avenrun);
76255 
76256 long calc_load_fold_active(struct rq *this_rq, long adjust)
76257 {
76258diff --git a/kernel/sched/pelt.c b/kernel/sched/pelt.c
76259index 2c613e1cf..134aa5ee2 100644
76260--- a/kernel/sched/pelt.c
76261+++ b/kernel/sched/pelt.c
76262@@ -28,6 +28,57 @@
76263 #include "sched.h"
76264 #include "pelt.h"
76265 
76266+int pelt_load_avg_period = PELT32_LOAD_AVG_PERIOD;
76267+int pelt_load_avg_max = PELT32_LOAD_AVG_MAX;
76268+const u32 *pelt_runnable_avg_yN_inv = pelt32_runnable_avg_yN_inv;
76269+int get_pelt_halflife(void)
76270+{
76271+	return pelt_load_avg_period;
76272+}
76273+EXPORT_SYMBOL_GPL(get_pelt_halflife);
76274+static int __set_pelt_halflife(void *data)
76275+{
76276+	int rc = 0;
76277+	int num = *(int *)data;
76278+	switch (num) {
76279+	case PELT8_LOAD_AVG_PERIOD:
76280+		pelt_load_avg_period = PELT8_LOAD_AVG_PERIOD;
76281+		pelt_load_avg_max = PELT8_LOAD_AVG_MAX;
76282+		pelt_runnable_avg_yN_inv = pelt8_runnable_avg_yN_inv;
76283+		pr_info("PELT half life is set to %dms\n", num);
76284+		break;
76285+	case PELT32_LOAD_AVG_PERIOD:
76286+		pelt_load_avg_period = PELT32_LOAD_AVG_PERIOD;
76287+		pelt_load_avg_max = PELT32_LOAD_AVG_MAX;
76288+		pelt_runnable_avg_yN_inv = pelt32_runnable_avg_yN_inv;
76289+		pr_info("PELT half life is set to %dms\n", num);
76290+		break;
76291+	default:
76292+		rc = -EINVAL;
76293+		pr_err("Failed to set PELT half life to %dms, the current value is %dms\n",
76294+			num, pelt_load_avg_period);
76295+	}
76296+	return rc;
76297+}
76298+int set_pelt_halflife(int num)
76299+{
76300+	return stop_machine(__set_pelt_halflife, &num, NULL);
76301+}
76302+EXPORT_SYMBOL_GPL(set_pelt_halflife);
76303+static int __init set_pelt(char *str)
76304+{
76305+	int rc, num;
76306+	rc = kstrtoint(str, 0, &num);
76307+	if (rc) {
76308+		pr_err("%s: kstrtoint failed. rc=%d\n", __func__, rc);
76309+		return 0;
76310+	}
76311+	__set_pelt_halflife(&num);
76312+	return rc;
76313+}
76314+
76315+early_param("pelt", set_pelt);
76316+
76317 /*
76318  * Approximate:
76319  *   val * y^n,    where y^32 ~= 0.5 (~1 scheduling period)
76320@@ -54,7 +105,7 @@ static u64 decay_load(u64 val, u64 n)
76321 		local_n %= LOAD_AVG_PERIOD;
76322 	}
76323 
76324-	val = mul_u64_u32_shr(val, runnable_avg_yN_inv[local_n], 32);
76325+	val = mul_u64_u32_shr(val, pelt_runnable_avg_yN_inv[local_n], 32);
76326 	return val;
76327 }
76328 
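The PELT half-life can now be selected at boot or, with care, at run time; only the 8 ms and 32 ms tables exist, so anything else is rejected. Hedged examples of both paths:

	/* Boot-time selection on the kernel command line:
	 *   pelt=8     -> 8 ms half-life (faster ramp-up/decay)
	 *   pelt=32    -> 32 ms half-life (upstream default)
	 */

	/* Run-time selection from kernel code (goes through stop_machine()): */
	if (set_pelt_halflife(PELT8_LOAD_AVG_PERIOD))
		pr_warn("PELT half-life unchanged, still %dms\n",
			get_pelt_halflife());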
76329diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
76330index 0f296598a..2944bdcc0 100644
76331--- a/kernel/sched/rt.c
76332+++ b/kernel/sched/rt.c
76333@@ -1393,6 +1393,27 @@ static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
76334 	enqueue_top_rt_rq(&rq->rt);
76335 }
76336 
76337+#ifdef CONFIG_SMP
76338+static inline bool should_honor_rt_sync(struct rq *rq, struct task_struct *p,
76339+					bool sync)
76340+{
76341+	/*
76342+	 * If the waker is CFS, then an RT sync wakeup would preempt the waker
76343+	 * and force it to run for a likely small time after the RT wakee is
76344+	 * done. So, only honor RT sync wakeups from RT wakers.
76345+	 */
76346+	return sync && task_has_rt_policy(rq->curr) &&
76347+		p->prio <= rq->rt.highest_prio.next &&
76348+		rq->rt.rt_nr_running <= 2;
76349+}
76350+#else
76351+static inline bool should_honor_rt_sync(struct rq *rq, struct task_struct *p,
76352+					bool sync)
76353+{
76354+	return false;
76355+}
76356+#endif
76357+
76358 /*
76359  * Adding/removing a task to/from a priority array:
76360  */
76361@@ -1400,6 +1421,7 @@ static void
76362 enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
76363 {
76364 	struct sched_rt_entity *rt_se = &p->rt;
76365+	bool sync = !!(flags & ENQUEUE_WAKEUP_SYNC);
76366 
76367 	if (flags & ENQUEUE_WAKEUP)
76368 		rt_se->timeout = 0;
76369@@ -1407,7 +1429,8 @@ enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
76370 	enqueue_rt_entity(rt_se, flags);
76371 	walt_inc_cumulative_runnable_avg(rq, p);
76372 
76373-	if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
76374+	if (!task_current(rq, p) && p->nr_cpus_allowed > 1 &&
76375+	    !should_honor_rt_sync(rq, p, sync))
76376 		enqueue_pushable_task(rq, p);
76377 }
76378 
76379@@ -1464,7 +1487,12 @@ select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
76380 {
76381 	struct task_struct *curr;
76382 	struct rq *rq;
76383+	struct rq *this_cpu_rq;
76384 	bool test;
76385+	int target_cpu = -1;
76386+	bool may_not_preempt;
76387+	bool sync = !!(flags & WF_SYNC);
76388+	int this_cpu;
76389 
76390 	/* For anything but wake ups, just return the task_cpu */
76391 	if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK)
76392@@ -1474,6 +1502,8 @@ select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
76393 
76394 	rcu_read_lock();
76395 	curr = READ_ONCE(rq->curr); /* unlocked access */
76396+	this_cpu = smp_processor_id();
76397+	this_cpu_rq = cpu_rq(this_cpu);
76398 
76399 	/*
76400 	 * If the current task on @p's runqueue is an RT task, then
76401@@ -1508,6 +1538,14 @@ select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
76402 	test |= sysctl_sched_enable_rt_cas;
76403 #endif
76404 
76405+	/*
76406+	 * Respect the sync flag as long as the task can run on this CPU.
76407+	 */
76408+	if (should_honor_rt_sync(this_cpu_rq, p, sync) &&
76409+	    cpumask_test_cpu(this_cpu, p->cpus_ptr)) {
76410+		cpu = this_cpu;
76411+		goto out_unlock;
76412+	}
76413 	if (test || !rt_task_fits_capacity(p, cpu)) {
76414 		int target = find_lowest_rq(p);
76415 
76416diff --git a/kernel/sched/sched-pelt.h b/kernel/sched/sched-pelt.h
76417index c529706be..92a6875bc 100644
76418--- a/kernel/sched/sched-pelt.h
76419+++ b/kernel/sched/sched-pelt.h
76420@@ -1,7 +1,7 @@
76421 /* SPDX-License-Identifier: GPL-2.0 */
76422 /* Generated by Documentation/scheduler/sched-pelt; do not modify. */
76423 
76424-static const u32 runnable_avg_yN_inv[] __maybe_unused = {
76425+static const u32 pelt32_runnable_avg_yN_inv[] __maybe_unused = {
76426 	0xffffffff, 0xfa83b2da, 0xf5257d14, 0xefe4b99a, 0xeac0c6e6, 0xe5b906e6,
76427 	0xe0ccdeeb, 0xdbfbb796, 0xd744fcc9, 0xd2a81d91, 0xce248c14, 0xc9b9bd85,
76428 	0xc5672a10, 0xc12c4cc9, 0xbd08a39e, 0xb8fbaf46, 0xb504f333, 0xb123f581,
76429@@ -10,5 +10,20 @@ static const u32 runnable_avg_yN_inv[] __maybe_unused = {
76430 	0x85aac367, 0x82cd8698,
76431 };
76432 
76433-#define LOAD_AVG_PERIOD 32
76434-#define LOAD_AVG_MAX 47742
76435+#define PELT32_LOAD_AVG_PERIOD 32
76436+#define PELT32_LOAD_AVG_MAX 47742
76437+
76438+static const u32 pelt8_runnable_avg_yN_inv[] __maybe_unused = {
76439+	0xffffffff, 0xeac0c6e6, 0xd744fcc9, 0xc5672a10,
76440+	0xb504f333, 0xa5fed6a9, 0x9837f050, 0x8b95c1e3,
76441+};
76442+
76443+#define PELT8_LOAD_AVG_PERIOD 8
76444+#define PELT8_LOAD_AVG_MAX 12336
76445+
76446+extern const u32 *pelt_runnable_avg_yN_inv;
76447+extern int pelt_load_avg_period;
76448+extern int pelt_load_avg_max;
76449+
76450+#define LOAD_AVG_PERIOD pelt_load_avg_period
76451+#define LOAD_AVG_MAX pelt_load_avg_max
76452diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
76453index 3457a8ac7..5d526e366 100644
76454--- a/kernel/sched/sched.h
76455+++ b/kernel/sched/sched.h
76456@@ -1911,6 +1911,7 @@ extern const int		sched_latency_to_weight[40];
76457 #else
76458 #define ENQUEUE_MIGRATED	0x00
76459 #endif
76460+#define ENQUEUE_WAKEUP_SYNC	0x80
76461 
76462 #define RETRY_TASK		((void *)-1UL)
76463 
76464diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
76465index 9191e5daa..58d840c62 100644
76466--- a/kernel/sched/topology.c
76467+++ b/kernel/sched/topology.c
76468@@ -5,6 +5,9 @@
76469 #include "sched.h"
76470 
76471 DEFINE_MUTEX(sched_domains_mutex);
76472+#ifdef CONFIG_LOCKDEP
76473+EXPORT_SYMBOL_GPL(sched_domains_mutex);
76474+#endif
76475 
76476 /* Protected by sched_domains_mutex: */
76477 static cpumask_var_t sched_domains_tmpmask;
76478diff --git a/kernel/sched/wait.c b/kernel/sched/wait.c
76479index a55642aa3..6911bbca0 100644
76480--- a/kernel/sched/wait.c
76481+++ b/kernel/sched/wait.c
76482@@ -396,7 +396,8 @@ void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_en
76483 }
76484 EXPORT_SYMBOL(finish_wait);
76485 
76486-int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key)
76487+__sched int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned int mode,
76488+				     int sync, void *key)
76489 {
76490 	int ret = default_wake_function(wq_entry, mode, sync, key);
76491 
76492@@ -432,7 +433,7 @@ static inline bool is_kthread_should_stop(void)
76493  * }						smp_mb(); // C
76494  * remove_wait_queue(&wq_head, &wait);		wq_entry->flags |= WQ_FLAG_WOKEN;
76495  */
76496-long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout)
76497+__sched long wait_woken(struct wait_queue_entry *wq_entry, unsigned int mode, long timeout)
76498 {
76499 	/*
76500 	 * The below executes an smp_mb(), which matches with the full barrier
76501@@ -457,7 +458,8 @@ long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout)
76502 }
76503 EXPORT_SYMBOL(wait_woken);
76504 
76505-int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key)
76506+__sched int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned int mode,
76507+				int sync, void *key)
76508 {
76509 	/* Pairs with the smp_store_mb() in wait_woken(). */
76510 	smp_mb(); /* C */
76511diff --git a/mm/memblock.c b/mm/memblock.c
76512index f72d53957..16331c252 100644
76513--- a/mm/memblock.c
76514+++ b/mm/memblock.c
76515@@ -97,6 +97,26 @@ struct pglist_data __refdata contig_page_data;
76516 EXPORT_SYMBOL(contig_page_data);
76517 #endif
76518 
76519+#ifdef CONFIG_ROCKCHIP_THUNDER_BOOT
76520+static unsigned long defer_start __initdata;
76521+static unsigned long defer_end __initdata;
76522+
76523+#define DEFAULT_DEFER_FREE_BLOCK_SIZE SZ_256M
76524+static unsigned long defer_free_block_size __initdata =
76525+	DEFAULT_DEFER_FREE_BLOCK_SIZE;
76526+
76527+static int __init early_defer_free_block_size(char *p)
76528+{
76529+	defer_free_block_size = memparse(p, &p);
76530+
76531+	pr_debug("defer_free_block_size = 0x%lx\n", defer_free_block_size);
76532+
76533+	return 0;
76534+}
76535+
76536+early_param("defer_free_block_size", early_defer_free_block_size);
76537+#endif
76538+
76539 unsigned long max_low_pfn;
76540 unsigned long min_low_pfn;
76541 unsigned long max_pfn;
76542@@ -814,6 +834,9 @@ int __init_memblock memblock_free(phys_addr_t base, phys_addr_t size)
76543 	kmemleak_free_part_phys(base, size);
76544 	return memblock_remove_range(&memblock.reserved, base, size);
76545 }
76546+#ifdef CONFIG_ARCH_KEEP_MEMBLOCK
76547+EXPORT_SYMBOL_GPL(memblock_free);
76548+#endif
76549 
76550 int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
76551 {
76552@@ -1391,6 +1414,9 @@ phys_addr_t __init memblock_phys_alloc_range(phys_addr_t size,
76553 					     phys_addr_t start,
76554 					     phys_addr_t end)
76555 {
76556+	memblock_dbg("%s: %llu bytes align=0x%llx from=%pa max_addr=%pa %pS\n",
76557+		     __func__, (u64)size, (u64)align, &start, &end,
76558+		     (void *)_RET_IP_);
76559 	return memblock_alloc_range_nid(size, align, start, end, NUMA_NO_NODE,
76560 					false);
76561 }
76562@@ -1628,6 +1654,7 @@ phys_addr_t __init_memblock memblock_end_of_DRAM(void)
76563 
76564 	return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
76565 }
76566+EXPORT_SYMBOL_GPL(memblock_end_of_DRAM);
76567 
76568 static phys_addr_t __init_memblock __find_max_addr(phys_addr_t limit)
76569 {
76570@@ -1913,6 +1940,28 @@ static void __init __free_pages_memory(unsigned long start, unsigned long end)
76571 	}
76572 }
76573 
76574+#ifdef CONFIG_ROCKCHIP_THUNDER_BOOT
76575+int __init defer_free_memblock(void *unused)
76576+{
76577+	if (defer_start == 0)
76578+		return 0;
76579+
76580+	pr_debug("start = %lu, end = %lu\n", defer_start, defer_end);
76581+
76582+	__free_pages_memory(defer_start, defer_end);
76583+
76584+	totalram_pages_add(defer_end - defer_start);
76585+
76586+	pr_info("%s: size %luM free %luM [%luM - %luM] total %luM\n", __func__,
76587+		defer_free_block_size >> 20,
76588+		(defer_end - defer_start) >> (20 - PAGE_SHIFT),
76589+		defer_start >> (20 - PAGE_SHIFT),
76590+		defer_end >> (20 - PAGE_SHIFT),
76591+		totalram_pages() >> (20 - PAGE_SHIFT));
76592+	return 0;
76593+}
76594+#endif
76595+
76596 static unsigned long __init __free_memory_core(phys_addr_t start,
76597 				 phys_addr_t end)
76598 {
76599@@ -1923,6 +1972,15 @@ static unsigned long __init __free_memory_core(phys_addr_t start,
76600 	if (start_pfn >= end_pfn)
76601 		return 0;
76602 
76603+#ifdef CONFIG_ROCKCHIP_THUNDER_BOOT
76604+	if ((end - start) > defer_free_block_size) {
76605+		defer_start = start_pfn;
76606+		defer_end = end_pfn;
76607+
76608+		return 0;
76609+	}
76610+#endif
76611+
76612 	__free_pages_memory(start_pfn, end_pfn);
76613 
76614 	return end_pfn - start_pfn;
76615diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
76616index 756523e54..e5036cfe5 100644
76617--- a/net/bluetooth/l2cap_sock.c
76618+++ b/net/bluetooth/l2cap_sock.c
76619@@ -162,11 +162,7 @@ static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
76620 		break;
76621 	}
76622 
76623-	/* Use L2CAP_MODE_LE_FLOWCTL (CoC) in case of LE address and
76624-	 * L2CAP_MODE_EXT_FLOWCTL (ECRED) has not been set.
76625-	 */
76626-	if (chan->psm && bdaddr_type_is_le(chan->src_type) &&
76627-	    chan->mode != L2CAP_MODE_EXT_FLOWCTL)
76628+	if (chan->psm && bdaddr_type_is_le(chan->src_type))
76629 		chan->mode = L2CAP_MODE_LE_FLOWCTL;
76630 
76631 	chan->state = BT_BOUND;
76632@@ -260,11 +256,7 @@ static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr,
76633 			return -EINVAL;
76634 	}
76635 
76636-	/* Use L2CAP_MODE_LE_FLOWCTL (CoC) in case of LE address and
76637-	 * L2CAP_MODE_EXT_FLOWCTL (ECRED) has not been set.
76638-	 */
76639-	if (chan->psm && bdaddr_type_is_le(chan->src_type) &&
76640-	    chan->mode != L2CAP_MODE_EXT_FLOWCTL)
76641+	if (chan->psm && bdaddr_type_is_le(chan->src_type) && !chan->mode)
76642 		chan->mode = L2CAP_MODE_LE_FLOWCTL;
76643 
76644 	l2cap_sock_init_pid(sk);
76645@@ -904,8 +896,6 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
76646 	struct l2cap_conn *conn;
76647 	int len, err = 0;
76648 	u32 opt;
76649-	u16 mtu;
76650-	u8 mode;
76651 
76652 	BT_DBG("sk %p", sk);
76653 
76654@@ -1088,16 +1078,16 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
76655 			break;
76656 		}
76657 
76658-		if (copy_from_sockptr(&mtu, optval, sizeof(u16))) {
76659+		if (copy_from_sockptr(&opt, optval, sizeof(u16))) {
76660 			err = -EFAULT;
76661 			break;
76662 		}
76663 
76664 		if (chan->mode == L2CAP_MODE_EXT_FLOWCTL &&
76665 		    sk->sk_state == BT_CONNECTED)
76666-			err = l2cap_chan_reconfigure(chan, mtu);
76667+			err = l2cap_chan_reconfigure(chan, opt);
76668 		else
76669-			chan->imtu = mtu;
76670+			chan->imtu = opt;
76671 
76672 		break;
76673 
76674@@ -1119,14 +1109,14 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
76675 			break;
76676 		}
76677 
76678-		if (copy_from_sockptr(&mode, optval, sizeof(u8))) {
76679+		if (copy_from_sockptr(&opt, optval, sizeof(u8))) {
76680 			err = -EFAULT;
76681 			break;
76682 		}
76683 
76684-		BT_DBG("mode %u", mode);
76685+		BT_DBG("opt %u", opt);
76686 
76687-		err = l2cap_set_mode(chan, mode);
76688+		err = l2cap_set_mode(chan, opt);
76689 		if (err)
76690 			break;
76691 
76692@@ -1539,9 +1529,6 @@ static void l2cap_sock_close_cb(struct l2cap_chan *chan)
76693 {
76694 	struct sock *sk = chan->data;
76695 
76696-	if (!sk)
76697-		return;
76698-
76699 	l2cap_sock_kill(sk);
76700 }
76701 
76702@@ -1550,9 +1537,6 @@ static void l2cap_sock_teardown_cb(struct l2cap_chan *chan, int err)
76703 	struct sock *sk = chan->data;
76704 	struct sock *parent;
76705 
76706-	if (!sk)
76707-		return;
76708-
76709 	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
76710 
76711 	/* This callback can be called both for server (BT_LISTEN)
76712@@ -1752,10 +1736,8 @@ static void l2cap_sock_destruct(struct sock *sk)
76713 {
76714 	BT_DBG("sk %p", sk);
76715 
76716-	if (l2cap_pi(sk)->chan) {
76717-		l2cap_pi(sk)->chan->data = NULL;
76718+	if (l2cap_pi(sk)->chan)
76719 		l2cap_chan_put(l2cap_pi(sk)->chan);
76720-	}
76721 
76722 	if (l2cap_pi(sk)->rx_busy_skb) {
76723 		kfree_skb(l2cap_pi(sk)->rx_busy_skb);
76724diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
76725index 081d1ee3d..6a8796933 100644
76726--- a/net/bluetooth/sco.c
76727+++ b/net/bluetooth/sco.c
76728@@ -93,10 +93,10 @@ static void sco_sock_timeout(struct work_struct *work)
76729 
76730 	BT_DBG("sock %p state %d", sk, sk->sk_state);
76731 
76732-	lock_sock(sk);
76733+	bh_lock_sock(sk);
76734 	sk->sk_err = ETIMEDOUT;
76735 	sk->sk_state_change(sk);
76736-	release_sock(sk);
76737+	bh_unlock_sock(sk);
76738 
76739 	sock_put(sk);
76740 }
76741@@ -193,10 +193,10 @@ static void sco_conn_del(struct hci_conn *hcon, int err)
76742 
76743 	if (sk) {
76744 		sock_hold(sk);
76745-		lock_sock(sk);
76746+		bh_lock_sock(sk);
76747 		sco_sock_clear_timer(sk);
76748 		sco_chan_del(sk, err);
76749-		release_sock(sk);
76750+		bh_unlock_sock(sk);
76751 		sock_put(sk);
76752 	}
76753 
76754@@ -280,10 +280,11 @@ static int sco_connect(struct hci_dev *hdev, struct sock *sk)
76755 	return err;
76756 }
76757 
76758-static int sco_send_frame(struct sock *sk, struct sk_buff *skb)
76759+static int sco_send_frame(struct sock *sk, struct msghdr *msg, int len)
76760 {
76761 	struct sco_conn *conn = sco_pi(sk)->conn;
76762-	int len = skb->len;
76763+	struct sk_buff *skb;
76764+	int err;
76765 
76766 	/* Check outgoing MTU */
76767 	if (len > conn->mtu)
76768@@ -291,6 +292,14 @@ static int sco_send_frame(struct sock *sk, struct sk_buff *skb)
76769 
76770 	BT_DBG("sk %p len %d", sk, len);
76771 
76772+	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
76773+	if (!skb)
76774+		return err;
76775+
76776+	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
76777+		kfree_skb(skb);
76778+		return -EFAULT;
76779+	}
76780 	hci_send_sco(conn->hcon, skb);
76781 
76782 	return len;
76783@@ -568,24 +577,19 @@ static int sco_sock_connect(struct socket *sock, struct sockaddr *addr, int alen
76784 	    addr->sa_family != AF_BLUETOOTH)
76785 		return -EINVAL;
76786 
76787-	lock_sock(sk);
76788-	if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND) {
76789-		err = -EBADFD;
76790-		goto done;
76791-	}
76792+	if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND)
76793+		return -EBADFD;
76794 
76795-	if (sk->sk_type != SOCK_SEQPACKET) {
76796-		err = -EINVAL;
76797-		goto done;
76798-	}
76799+	if (sk->sk_type != SOCK_SEQPACKET)
76800+		return -EINVAL;
76801 
76802 	hdev = hci_get_route(&sa->sco_bdaddr, &sco_pi(sk)->src, BDADDR_BREDR);
76803-	if (!hdev) {
76804-		err = -EHOSTUNREACH;
76805-		goto done;
76806-	}
76807+	if (!hdev)
76808+		return -EHOSTUNREACH;
76809 	hci_dev_lock(hdev);
76810 
76811+	lock_sock(sk);
76812+
76813 	/* Set destination address and psm */
76814 	bacpy(&sco_pi(sk)->dst, &sa->sco_bdaddr);
76815 
76816@@ -720,7 +724,6 @@ static int sco_sock_sendmsg(struct socket *sock, struct msghdr *msg,
76817 			    size_t len)
76818 {
76819 	struct sock *sk = sock->sk;
76820-	struct sk_buff *skb;
76821 	int err;
76822 
76823 	BT_DBG("sock %p, sk %p", sock, sk);
76824@@ -732,21 +735,16 @@ static int sco_sock_sendmsg(struct socket *sock, struct msghdr *msg,
76825 	if (msg->msg_flags & MSG_OOB)
76826 		return -EOPNOTSUPP;
76827 
76828-	skb = bt_skb_sendmsg(sk, msg, len, len, 0, 0);
76829-	if (IS_ERR(skb))
76830-		return PTR_ERR(skb);
76831+
76832 
76833 	lock_sock(sk);
76834 
76835 	if (sk->sk_state == BT_CONNECTED)
76836-		err = sco_send_frame(sk, skb);
76837+		err = sco_send_frame(sk, msg, len);
76838 	else
76839 		err = -ENOTCONN;
76840 
76841 	release_sock(sk);
76842-
76843-	if (err < 0)
76844-		kfree_skb(skb);
76845 	return err;
76846 }
76847 
76848@@ -1103,10 +1101,10 @@ static void sco_conn_ready(struct sco_conn *conn)
76849 
76850 	if (sk) {
76851 		sco_sock_clear_timer(sk);
76852-		lock_sock(sk);
76853+		bh_lock_sock(sk);
76854 		sk->sk_state = BT_CONNECTED;
76855 		sk->sk_state_change(sk);
76856-		release_sock(sk);
76857+		bh_unlock_sock(sk);
76858 	} else {
76859 		sco_conn_lock(conn);
76860 
76861@@ -1121,12 +1119,12 @@ static void sco_conn_ready(struct sco_conn *conn)
76862 			return;
76863 		}
76864 
76865-		lock_sock(parent);
76866+		bh_lock_sock(parent);
76867 
76868 		sk = sco_sock_alloc(sock_net(parent), NULL,
76869 				    BTPROTO_SCO, GFP_ATOMIC, 0);
76870 		if (!sk) {
76871-			release_sock(parent);
76872+			bh_unlock_sock(parent);
76873 			sco_conn_unlock(conn);
76874 			return;
76875 		}
76876@@ -1147,7 +1145,7 @@ static void sco_conn_ready(struct sco_conn *conn)
76877 		/* Wake up parent */
76878 		parent->sk_data_ready(parent);
76879 
76880-		release_sock(parent);
76881+		bh_unlock_sock(parent);
76882 
76883 		sco_conn_unlock(conn);
76884 	}
76885diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib
76886index 53aa3e18c..13ad74747 100644
76887--- a/scripts/Makefile.lib
76888+++ b/scripts/Makefile.lib
76889@@ -378,7 +378,11 @@ quiet_cmd_lzo = LZO     $@
76890       cmd_lzo = { cat $(real-prereqs) | $(KLZOP) -9; $(size_append); } > $@
76891 
76892 quiet_cmd_lz4 = LZ4     $@
76893-      cmd_lz4 = { cat $(real-prereqs) | $(LZ4) -l -c1 stdin stdout; \
76894+      cmd_lz4 = { cat $(real-prereqs) | $(LZ4) -l --favor-decSpeed stdin stdout; \
76895+                  $(size_append); } > $@
76896+
76897+quiet_cmd_lz4c = LZ4C    $@
76898+      cmd_lz4c = { cat $(real-prereqs) | $(LZ4) -12 --favor-decSpeed stdin stdout; \
76899                   $(size_append); } > $@
76900 
76901 # U-Boot mkimage
76902diff --git a/scripts/kconfig/parser.y b/scripts/kconfig/parser.y
76903index 190f1117f..421bb2fa6 100644
76904--- a/scripts/kconfig/parser.y
76905+++ b/scripts/kconfig/parser.y
76906@@ -10,6 +10,7 @@
76907 #include <stdlib.h>
76908 #include <string.h>
76909 #include <stdbool.h>
76910+#include <unistd.h>
76911 
76912 #include "lkc.h"
76913 
76914@@ -20,11 +21,18 @@
76915 
76916 int cdebug = PRINTD;
76917 
76918+static const char *kconfig_white_list[] = {
76919+	"vendor/Kconfig",
76920+	"net/newip/Kconfig",
76921+	"net/newip/hooks/Kconfig",
76922+};
76923+
76924 static void yyerror(const char *err);
76925 static void zconfprint(const char *err, ...);
76926 static void zconf_error(const char *err, ...);
76927 static bool zconf_endtoken(const char *tokenname,
76928 			   const char *expected_tokenname);
76929+static bool zconf_in_whitelist(const char *path);
76930 
76931 struct symbol *symbol_hash[SYMBOL_HASHSIZE];
76932 
76933@@ -367,7 +375,9 @@ menu_option_list:
76934 source_stmt: T_SOURCE T_WORD_QUOTE T_EOL
76935 {
76936 	printd(DEBUG_PARSE, "%s:%d:source %s\n", zconf_curname(), zconf_lineno(), $2);
76937-	zconf_nextfile($2);
76938+	if (access($2, F_OK) == 0 || !zconf_in_whitelist($2)) {
76939+		zconf_nextfile($2);
76940+	}
76941 	free($2);
76942 };
76943 
76944@@ -484,6 +494,16 @@ assign_val:
76945 
76946 %%
76947 
76948+static bool zconf_in_whitelist(const char *path)
76949+{
76950+	int i;
76951+	for (i = 0; i < sizeof(kconfig_white_list) / sizeof(kconfig_white_list[0]); i++) {
76952+		if (strcmp(kconfig_white_list[i], path) == 0)
76953+			return true;
76954+	}
76955+	return false;
76956+}
76957+
76958 void conf_parse(const char *name)
76959 {
76960 	struct symbol *sym;
76961diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
76962index 662f7b4a9..a4e66d3b1 100644
76963--- a/security/selinux/hooks.c
76964+++ b/security/selinux/hooks.c
76965@@ -101,7 +101,7 @@
76966 #include "ibpkey.h"
76967 #include "xfrm.h"
76968 #include "netlabel.h"
76969-#include "audit.h"
76970+#include "include/audit.h"
76971 #include "avc_ss.h"
76972 
76973 struct selinux_state selinux_state;
76974diff --git a/security/selinux/ss/avtab.h b/security/selinux/ss/avtab.h
76975index f2eeb3626..96f121d33 100644
76976--- a/security/selinux/ss/avtab.h
76977+++ b/security/selinux/ss/avtab.h
76978@@ -21,7 +21,7 @@
76979 #ifndef _SS_AVTAB_H_
76980 #define _SS_AVTAB_H_
76981 
76982-#include "security.h"
76983+#include "../include/security.h"
76984 
76985 struct avtab_key {
76986 	u16 source_type;	/* source type */
76987diff --git a/security/selinux/ss/context.h b/security/selinux/ss/context.h
76988index 62990aa1e..9ec2bb0a4 100644
76989--- a/security/selinux/ss/context.h
76990+++ b/security/selinux/ss/context.h
76991@@ -18,7 +18,7 @@
76992 
76993 #include "ebitmap.h"
76994 #include "mls_types.h"
76995-#include "security.h"
76996+#include "../include/security.h"
76997 
76998 /*
76999  * A security context consists of an authenticated user
77000diff --git a/sound/soc/rockchip/Kconfig b/sound/soc/rockchip/Kconfig
77001index d610b553e..2925971bc 100644
77002--- a/sound/soc/rockchip/Kconfig
77003+++ b/sound/soc/rockchip/Kconfig
77004@@ -16,6 +16,15 @@ config SND_SOC_ROCKCHIP_I2S
77005 	  Rockchip I2S device. The device supports upto maximum of
77006 	  8 channels each for play and record.
77007 
77008+config SND_SOC_ROCKCHIP_I2S_TDM
77009+	tristate "Rockchip I2S/TDM Device Driver"
77010+	depends on CLKDEV_LOOKUP && SND_SOC_ROCKCHIP
77011+	select SND_SOC_GENERIC_DMAENGINE_PCM
77012+	help
77013+	  Say Y or M if you want to add support for I2S/TDM driver for
77014+	  Rockchip I2S/TDM device. The device supports up to a maximum of
77015+	  8 channels each for play and record.
77016+
77017 config SND_SOC_ROCKCHIP_PDM
77018 	tristate "Rockchip PDM Controller Driver"
77019 	depends on CLKDEV_LOOKUP && SND_SOC_ROCKCHIP
77020@@ -34,6 +43,21 @@ config SND_SOC_ROCKCHIP_SPDIF
77021 	  Say Y or M if you want to add support for SPDIF driver for
77022 	  Rockchip SPDIF transceiver device.
77023 
77024+config SND_SOC_ROCKCHIP_SPDIFRX
77025+	tristate "Rockchip SPDIFRX Device Driver"
77026+	depends on CLKDEV_LOOKUP && SND_SOC_ROCKCHIP
77027+	select SND_SOC_GENERIC_DMAENGINE_PCM
77028+	help
77029+	  Say Y or M if you want to add support for SPDIFRX driver for
77030+	  Rockchip SPDIF receiver device.
77031+
77032+config SND_SOC_ROCKCHIP_VAD
77033+	tristate "Rockchip Voice Activity Detection Driver"
77034+	depends on CLKDEV_LOOKUP && SND_SOC_ROCKCHIP
77035+	help
77036+	  Say Y or M if you want to add support for VAD driver for
77037+	  Rockchip VAD device.
77038+
77039 config SND_SOC_ROCKCHIP_MAX98090
77040 	tristate "ASoC support for Rockchip boards using a MAX98090 codec"
77041 	depends on SND_SOC_ROCKCHIP && I2C && GPIOLIB && CLKDEV_LOOKUP
77042@@ -45,6 +69,13 @@ config SND_SOC_ROCKCHIP_MAX98090
77043 	  Say Y or M here if you want to add support for SoC audio on Rockchip
77044 	  boards using the MAX98090 codec and HDMI codec, such as Veyron.
77045 
77046+config SND_SOC_ROCKCHIP_MULTICODECS
77047+	tristate "ASoC support for Rockchip multicodecs"
77048+	depends on SND_SOC_ROCKCHIP && CLKDEV_LOOKUP
77049+	help
77050+	  Say Y or M here if you want to add support for SoC audio on Rockchip
77051+	  boards using multicodecs, such as RK3308 boards.
77052+
77053 config SND_SOC_ROCKCHIP_RT5645
77054 	tristate "ASoC support for Rockchip boards using a RT5645/RT5650 codec"
77055 	depends on SND_SOC_ROCKCHIP && I2C && GPIOLIB && CLKDEV_LOOKUP
77056@@ -54,6 +85,14 @@ config SND_SOC_ROCKCHIP_RT5645
77057 	  Say Y or M here if you want to add support for SoC audio on Rockchip
77058 	  boards using the RT5645/RT5650 codec, such as Veyron.
77059 
77060+config SND_SOC_ROCKCHIP_HDMI
77061+	tristate "ASoC support for Rockchip HDMI audio"
77062+	depends on SND_SOC_ROCKCHIP && CLKDEV_LOOKUP
77063+	select SND_SOC_HDMI_CODEC
77064+	help
77065+	  Say Y or M here if you want to add support for SoC audio on Rockchip
77066+	  boards using built-in HDMI or external HDMI.
77067+
77068 config SND_SOC_RK3288_HDMI_ANALOG
77069 	tristate "ASoC support multiple codecs for Rockchip RK3288 boards"
77070 	depends on SND_SOC_ROCKCHIP && I2C && GPIOLIB && CLKDEV_LOOKUP
77071diff --git a/sound/soc/rockchip/Makefile b/sound/soc/rockchip/Makefile
77072index 65e814d46..ec17d97d0 100644
77073--- a/sound/soc/rockchip/Makefile
77074+++ b/sound/soc/rockchip/Makefile
77075@@ -1,20 +1,35 @@
77076 # SPDX-License-Identifier: GPL-2.0
77077 # ROCKCHIP Platform Support
77078 snd-soc-rockchip-i2s-objs := rockchip_i2s.o
77079-snd-soc-rockchip-pcm-objs := rockchip_pcm.o
77080+snd-soc-rockchip-i2s-tdm-objs := rockchip_i2s_tdm.o
77081 snd-soc-rockchip-pdm-objs := rockchip_pdm.o
77082 snd-soc-rockchip-spdif-objs := rockchip_spdif.o
77083+snd-soc-rockchip-spdifrx-objs := rockchip_spdifrx.o
77084+snd-soc-rockchip-vad-objs := rockchip_vad.o
77085+ifdef CONFIG_THUMB2_KERNEL
77086+snd-soc-rockchip-vad-$(CONFIG_THUMB2_KERNEL) += vad_preprocess_thumb.o
77087+else
77088+snd-soc-rockchip-vad-$(CONFIG_ARM64) += vad_preprocess_arm64.o
77089+snd-soc-rockchip-vad-$(CONFIG_ARM) += vad_preprocess_arm.o
77090+endif
77091 
77092-obj-$(CONFIG_SND_SOC_ROCKCHIP_I2S) += snd-soc-rockchip-i2s.o snd-soc-rockchip-pcm.o
77093+obj-$(CONFIG_SND_SOC_ROCKCHIP_I2S) += snd-soc-rockchip-i2s.o
77094+obj-$(CONFIG_SND_SOC_ROCKCHIP_I2S_TDM) += snd-soc-rockchip-i2s-tdm.o
77095 obj-$(CONFIG_SND_SOC_ROCKCHIP_PDM) += snd-soc-rockchip-pdm.o
77096 obj-$(CONFIG_SND_SOC_ROCKCHIP_SPDIF) += snd-soc-rockchip-spdif.o
77097+obj-$(CONFIG_SND_SOC_ROCKCHIP_SPDIFRX) += snd-soc-rockchip-spdifrx.o
77098+obj-$(CONFIG_SND_SOC_ROCKCHIP_VAD) += snd-soc-rockchip-vad.o
77099 
77100+snd-soc-rockchip-hdmi-objs := rockchip_hdmi.o
77101 snd-soc-rockchip-max98090-objs := rockchip_max98090.o
77102+snd-soc-rockchip-multicodecs-objs := rockchip_multicodecs.o
77103 snd-soc-rockchip-rt5645-objs := rockchip_rt5645.o
77104 snd-soc-rk3288-hdmi-analog-objs := rk3288_hdmi_analog.o
77105 snd-soc-rk3399-gru-sound-objs := rk3399_gru_sound.o
77106 
77107+obj-$(CONFIG_SND_SOC_ROCKCHIP_HDMI) += snd-soc-rockchip-hdmi.o
77108 obj-$(CONFIG_SND_SOC_ROCKCHIP_MAX98090) += snd-soc-rockchip-max98090.o
77109+obj-$(CONFIG_SND_SOC_ROCKCHIP_MULTICODECS) += snd-soc-rockchip-multicodecs.o
77110 obj-$(CONFIG_SND_SOC_ROCKCHIP_RT5645) += snd-soc-rockchip-rt5645.o
77111 obj-$(CONFIG_SND_SOC_RK3288_HDMI_ANALOG) += snd-soc-rk3288-hdmi-analog.o
77112 obj-$(CONFIG_SND_SOC_RK3399_GRU_SOUND) += snd-soc-rk3399-gru-sound.o
77113diff --git a/sound/soc/rockchip/rockchip_i2s.h b/sound/soc/rockchip/rockchip_i2s.h
77114index fcaae24e4..251851bf4 100644
77115--- a/sound/soc/rockchip/rockchip_i2s.h
77116+++ b/sound/soc/rockchip/rockchip_i2s.h
77117@@ -88,15 +88,17 @@
77118 #define I2S_CKR_MSS_SLAVE	(1 << I2S_CKR_MSS_SHIFT)
77119 #define I2S_CKR_MSS_MASK	(1 << I2S_CKR_MSS_SHIFT)
77120 #define I2S_CKR_CKP_SHIFT	26
77121-#define I2S_CKR_CKP_NEG		(0 << I2S_CKR_CKP_SHIFT)
77122-#define I2S_CKR_CKP_POS		(1 << I2S_CKR_CKP_SHIFT)
77123+#define I2S_CKR_CKP_NORMAL	(0 << I2S_CKR_CKP_SHIFT)
77124+#define I2S_CKR_CKP_INVERTED	(1 << I2S_CKR_CKP_SHIFT)
77125 #define I2S_CKR_CKP_MASK	(1 << I2S_CKR_CKP_SHIFT)
77126 #define I2S_CKR_RLP_SHIFT	25
77127 #define I2S_CKR_RLP_NORMAL	(0 << I2S_CKR_RLP_SHIFT)
77128-#define I2S_CKR_RLP_OPPSITE	(1 << I2S_CKR_RLP_SHIFT)
77129+#define I2S_CKR_RLP_INVERTED	(1 << I2S_CKR_RLP_SHIFT)
77130+#define I2S_CKR_RLP_MASK	(1 << I2S_CKR_RLP_SHIFT)
77131 #define I2S_CKR_TLP_SHIFT	24
77132 #define I2S_CKR_TLP_NORMAL	(0 << I2S_CKR_TLP_SHIFT)
77133-#define I2S_CKR_TLP_OPPSITE	(1 << I2S_CKR_TLP_SHIFT)
77134+#define I2S_CKR_TLP_INVERTED	(1 << I2S_CKR_TLP_SHIFT)
77135+#define I2S_CKR_TLP_MASK	(1 << I2S_CKR_TLP_SHIFT)
77136 #define I2S_CKR_MDIV_SHIFT	16
77137 #define I2S_CKR_MDIV(x)		((x - 1) << I2S_CKR_MDIV_SHIFT)
77138 #define I2S_CKR_MDIV_MASK	(0xff << I2S_CKR_MDIV_SHIFT)
77139diff --git a/sound/soc/rockchip/rockchip_pdm.h b/sound/soc/rockchip/rockchip_pdm.h
77140index 8e5bbafef..cab977272 100644
77141--- a/sound/soc/rockchip/rockchip_pdm.h
77142+++ b/sound/soc/rockchip/rockchip_pdm.h
77143@@ -41,6 +41,8 @@
77144 #define PDM_PATH1_EN		BIT(28)
77145 #define PDM_PATH0_EN		BIT(27)
77146 #define PDM_HWT_EN		BIT(26)
77147+#define PDM_SAMPLERATE_MSK	GENMASK(7, 5)
77148+#define PDM_SAMPLERATE(x)	((x) << 5)
77149 #define PDM_VDW_MSK		(0x1f << 0)
77150 #define PDM_VDW(X)		((X - 1) << 0)
77151 
77152@@ -51,6 +53,9 @@
77153 #define PDM_FD_DENOMINATOR_MSK	GENMASK(15, 0)
77154 
77155 /* PDM CLK CTRL */
77156+#define PDM_PATH_SHIFT(x)	(8 + (x) * 2)
77157+#define PDM_PATH_MASK(x)	(0x3 << PDM_PATH_SHIFT(x))
77158+#define PDM_PATH(x, v)		((v) << PDM_PATH_SHIFT(x))
77159 #define PDM_CLK_FD_RATIO_MSK	BIT(6)
77160 #define PDM_CLK_FD_RATIO_40	(0X0 << 6)
77161 #define PDM_CLK_FD_RATIO_35	BIT(6)
77162@@ -66,6 +71,7 @@
77163 #define PDM_CLK_1280FS		(0x2 << 0)
77164 #define PDM_CLK_2560FS		(0x3 << 0)
77165 #define PDM_CLK_5120FS		(0x4 << 0)
77166+#define PDM_CIC_RATIO_MSK	(0x3 << 0)
77167 
77168 /* PDM HPF CTRL */
77169 #define PDM_HPF_LE		BIT(3)
77170