diff --git a/Makefile b/Makefile
index 64db4e99e..937095621 100644
--- a/Makefile
+++ b/Makefile
@@ -493,14 +493,16 @@ LINUXINCLUDE    := \
 		-I$(objtree)/arch/$(SRCARCH)/include/generated \
 		$(if $(building_out_of_srctree),-I$(srctree)/include) \
 		-I$(objtree)/include \
-		$(USERINCLUDE)
+		$(USERINCLUDE) \
+		-I$(srctree)/vendor/include \
+		-I$(srctree)/vendor/include/linux
 
 KBUILD_AFLAGS   := -D__ASSEMBLY__ -fno-PIE
 KBUILD_CFLAGS   := -Wall -Wundef -Werror=strict-prototypes -Wno-trigraphs \
 		   -fno-strict-aliasing -fno-common -fshort-wchar -fno-PIE \
 		   -Werror=implicit-function-declaration -Werror=implicit-int \
 		   -Werror=return-type -Wno-format-security \
-		   -std=gnu89
+		   -std=gnu99
 KBUILD_CPPFLAGS := -D__KERNEL__
 KBUILD_AFLAGS_KERNEL :=
 KBUILD_CFLAGS_KERNEL :=
@@ -956,9 +958,6 @@ endif
 # arch Makefile may override CC so keep this after arch Makefile is included
 NOSTDINC_FLAGS += -nostdinc -isystem $(shell $(CC) -print-file-name=include)
 
-# warn about C99 declaration after statement
-KBUILD_CFLAGS += -Wdeclaration-after-statement
-
 # Variable Length Arrays (VLAs) should not be used anywhere in the kernel
 KBUILD_CFLAGS += -Wvla
 
@@ -1396,7 +1395,7 @@ kselftest-merge:
 # Devicetree files
 ifneq ($(wildcard $(srctree)/arch/$(SRCARCH)/boot/dts/),)
-dtstree := arch/$(SRCARCH)/boot/dts
+dtstree := vendor/arch/$(SRCARCH)/boot/dts
 endif
 
 ifneq ($(dtstree),)
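[Editor's note, not part of the patch: switching from -std=gnu89 to -std=gnu99 and dropping -Wdeclaration-after-statement legalizes C99 mid-block and loop-initial declarations throughout the tree. A minimal sketch of what now compiles without warnings — the function is invented for illustration:]

	/* Illustrative only: C99 constructs permitted once -std=gnu99 is
	 * used and -Wdeclaration-after-statement is no longer passed. */
	static int sum_even(const int *vals, int n)
	{
		int total = 0;

		for (int i = 0; i < n; i++) {	/* C99 loop-initial declaration */
			if (vals[i] % 2)
				continue;
			int v = vals[i];	/* C99 declaration after a statement */
			total += v;
		}
		return total;
	}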
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 485b7dbd4..5e039976c 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -44,8 +44,7 @@ ifeq ($(CONFIG_BROKEN_GAS_INST),y)
 $(warning Detected assembler with broken .inst; disassembly will be unreliable)
 endif
 
-KBUILD_CFLAGS	+= -mgeneral-regs-only \
-		   $(compat_vdso) $(cc_has_k_constraint)
+KBUILD_CFLAGS	+= $(compat_vdso) $(cc_has_k_constraint)
 KBUILD_CFLAGS	+= $(call cc-disable-warning, psabi)
 KBUILD_AFLAGS	+= $(compat_vdso)
@@ -198,3 +197,16 @@ define archhelp
   echo  '  (distribution) /sbin/installkernel or'
   echo  '  install to $$(INSTALL_PATH) and run lilo'
 endef
+
+MAKE_MODULES ?= y
+
+%.img:
+ifeq ("$(CONFIG_MODULES)$(MAKE_MODULES)$(srctree)","yy$(objtree)")
+	$(Q)$(MAKE) rockchip/$*.dtb Image.lz4 modules
+else
+	$(Q)$(MAKE) rockchip/$*.dtb Image.lz4
+endif
+	$(Q)$(srctree)/vendor/scripts/mkimg --dtb $*.dtb
+
+CLEAN_DIRS += out
+CLEAN_FILES += boot.img kernel.img resource.img zboot.img
diff --git a/arch/arm64/boot/Makefile b/arch/arm64/boot/Makefile
index cd3414898..7469148c3 100644
--- a/arch/arm64/boot/Makefile
+++ b/arch/arm64/boot/Makefile
@@ -28,7 +28,7 @@ $(obj)/Image.gz: $(obj)/Image FORCE
 	$(call if_changed,gzip)
 
 $(obj)/Image.lz4: $(obj)/Image FORCE
-	$(call if_changed,lz4)
+	$(call if_changed,lz4c)
 
 $(obj)/Image.lzma: $(obj)/Image FORCE
 	$(call if_changed,lzma)
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
index 4c0e72781..2094dce73 100644
--- a/arch/arm64/kernel/cpuinfo.c
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -25,6 +25,13 @@
 #include
 #include
 
+#ifdef CONFIG_ARCH_ROCKCHIP
+unsigned int system_serial_low;
+EXPORT_SYMBOL(system_serial_low);
+
+unsigned int system_serial_high;
+EXPORT_SYMBOL(system_serial_high);
+#endif
 /*
  * In case the boot CPU is hotpluggable, we record its initial state and
  * current state separately. Certain system registers may contain different
@@ -200,6 +207,10 @@ static int c_show(struct seq_file *m, void *v)
 		seq_printf(m, "CPU revision\t: %d\n\n", MIDR_REVISION(midr));
 	}
 
+#ifdef CONFIG_ARCH_ROCKCHIP
+	seq_printf(m, "Serial\t\t: %08x%08x\n",
+		   system_serial_high, system_serial_low);
+#endif
 	return 0;
 }
diff --git a/drivers/clk/rockchip/Kconfig b/drivers/clk/rockchip/Kconfig
index 47cd6c5de..99e66263a 100644
--- a/drivers/clk/rockchip/Kconfig
+++ b/drivers/clk/rockchip/Kconfig
@@ -2,7 +2,7 @@
 # common clock support for ROCKCHIP SoC family.
 
 config COMMON_CLK_ROCKCHIP
-	bool "Rockchip clock controller common support"
+	tristate "Rockchip clock controller common support"
 	depends on ARCH_ROCKCHIP
 	default ARCH_ROCKCHIP
 	help
@@ -10,69 +10,79 @@ config COMMON_CLK_ROCKCHIP
 if COMMON_CLK_ROCKCHIP
 
 config CLK_PX30
-	bool "Rockchip PX30 clock controller support"
-	default y
+	tristate "Rockchip PX30 clock controller support"
+	depends on ARM64 || COMPILE_TEST
+	default n
 	help
 	  Build the driver for PX30 Clock Driver.
 
 config CLK_RV110X
-	bool "Rockchip RV110x clock controller support"
-	default y
+	tristate "Rockchip RV110x clock controller support"
+	depends on ARM || COMPILE_TEST
+	default n
 	help
 	  Build the driver for RV110x Clock Driver.
 
 config CLK_RK3036
-	bool "Rockchip RK3036 clock controller support"
-	default y
+	tristate "Rockchip RK3036 clock controller support"
+	depends on ARM || COMPILE_TEST
+	default n
 	help
 	  Build the driver for RK3036 Clock Driver.
 
 config CLK_RK312X
-	bool "Rockchip RK312x clock controller support"
-	default y
+	tristate "Rockchip RK312x clock controller support"
+	depends on ARM || COMPILE_TEST
+	default n
 	help
 	  Build the driver for RK312x Clock Driver.
 
 config CLK_RK3188
-	bool "Rockchip RK3188 clock controller support"
-	default y
+	tristate "Rockchip RK3188 clock controller support"
+	depends on ARM || COMPILE_TEST
+	default n
 	help
 	  Build the driver for RK3188 Clock Driver.
 
 config CLK_RK322X
-	bool "Rockchip RK322x clock controller support"
-	default y
+	tristate "Rockchip RK322x clock controller support"
+	depends on ARM || COMPILE_TEST
+	default n
 	help
 	  Build the driver for RK322x Clock Driver.
 
 config CLK_RK3288
-	bool "Rockchip RK3288 clock controller support"
-	depends on ARM
-	default y
+	tristate "Rockchip RK3288 clock controller support"
+	depends on ARM || COMPILE_TEST
+	default n
 	help
 	  Build the driver for RK3288 Clock Driver.
 
 config CLK_RK3308
-	bool "Rockchip RK3308 clock controller support"
-	default y
+	tristate "Rockchip RK3308 clock controller support"
+	depends on ARM64 || COMPILE_TEST
+	default n
 	help
 	  Build the driver for RK3308 Clock Driver.
 
 config CLK_RK3328
-	bool "Rockchip RK3328 clock controller support"
-	default y
+	tristate "Rockchip RK3328 clock controller support"
+	depends on ARM64 || COMPILE_TEST
+	default n
 	help
 	  Build the driver for RK3328 Clock Driver.
 
 config CLK_RK3368
-	bool "Rockchip RK3368 clock controller support"
-	default y
+	tristate "Rockchip RK3368 clock controller support"
+	depends on ARM64 || COMPILE_TEST
+	default n
 	help
 	  Build the driver for RK3368 Clock Driver.
 
 config CLK_RK3399
 	tristate "Rockchip RK3399 clock controller support"
-	default y
+	depends on ARM64 || COMPILE_TEST
+	default n
 	help
 	  Build the driver for RK3399 Clock Driver.
 
 endif
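[Editor's note, not part of the patch: the bool-to-tristate conversion lets each clock controller build as a loadable module (=m). A hedged sketch of the module boilerplate such a driver then needs — the driver name, compatible string, and probe body are placeholders:]

	/* Illustrative sketch of a module-capable clk driver skeleton. */
	#include <linux/module.h>
	#include <linux/of.h>
	#include <linux/platform_device.h>

	static int rk_clk_demo_probe(struct platform_device *pdev)
	{
		/* a real driver would map the CRU and register its clocks here */
		return 0;
	}

	static const struct of_device_id rk_clk_demo_match[] = {
		{ .compatible = "rockchip,demo-cru" },	/* placeholder */
		{ }
	};
	MODULE_DEVICE_TABLE(of, rk_clk_demo_match);

	static struct platform_driver rk_clk_demo_driver = {
		.probe	= rk_clk_demo_probe,
		.driver	= {
			.name = "rk-clk-demo",
			.of_match_table = rk_clk_demo_match,
		},
	};
	module_platform_driver(rk_clk_demo_driver);

	MODULE_LICENSE("GPL");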
diff --git a/drivers/clk/rockchip/clk-cpu.c b/drivers/clk/rockchip/clk-cpu.c
index 0dc478a19..3293174a6 100644
--- a/drivers/clk/rockchip/clk-cpu.c
+++ b/drivers/clk/rockchip/clk-cpu.c
@@ -51,6 +51,7 @@
  */
 struct rockchip_cpuclk {
 	struct clk_hw		hw;
+	struct clk_hw		*pll_hw;
 	struct clk_mux		cpu_mux;
 	const struct clk_ops	*cpu_mux_ops;
 
@@ -88,10 +89,10 @@ static unsigned long rockchip_cpuclk_recalc_rate(struct clk_hw *hw,
 {
 	struct rockchip_cpuclk *cpuclk = to_rockchip_cpuclk_hw(hw);
 	const struct rockchip_cpuclk_reg_data *reg_data = cpuclk->reg_data;
-	u32 clksel0 = readl_relaxed(cpuclk->reg_base + reg_data->core_reg);
+	u32 clksel0 = readl_relaxed(cpuclk->reg_base + reg_data->core_reg[0]);
 
-	clksel0 >>= reg_data->div_core_shift;
-	clksel0 &= reg_data->div_core_mask;
+	clksel0 >>= reg_data->div_core_shift[0];
+	clksel0 &= reg_data->div_core_mask[0];
 	return parent_rate / (clksel0 + 1);
 }
 
@@ -117,6 +118,42 @@ static void rockchip_cpuclk_set_dividers(struct rockchip_cpuclk *cpuclk,
 	}
 }
 
+static void rockchip_cpuclk_set_pre_muxs(struct rockchip_cpuclk *cpuclk,
+					 const struct rockchip_cpuclk_rate_table *rate)
+{
+	int i;
+
+	/* alternate parent is active now. set the pre_muxs */
+	for (i = 0; i < ARRAY_SIZE(rate->pre_muxs); i++) {
+		const struct rockchip_cpuclk_clksel *clksel = &rate->pre_muxs[i];
+
+		if (!clksel->reg)
+			break;
+
+		pr_debug("%s: setting reg 0x%x to 0x%x\n",
+			 __func__, clksel->reg, clksel->val);
+		writel(clksel->val, cpuclk->reg_base + clksel->reg);
+	}
+}
+
+static void rockchip_cpuclk_set_post_muxs(struct rockchip_cpuclk *cpuclk,
+					  const struct rockchip_cpuclk_rate_table *rate)
+{
+	int i;
+
+	/* alternate parent is active now. set the muxs */
+	for (i = 0; i < ARRAY_SIZE(rate->post_muxs); i++) {
+		const struct rockchip_cpuclk_clksel *clksel = &rate->post_muxs[i];
+
+		if (!clksel->reg)
+			break;
+
+		pr_debug("%s: setting reg 0x%x to 0x%x\n",
+			 __func__, clksel->reg, clksel->val);
+		writel(clksel->val, cpuclk->reg_base + clksel->reg);
+	}
+}
+
 static int rockchip_cpuclk_pre_rate_change(struct rockchip_cpuclk *cpuclk,
 					   struct clk_notifier_data *ndata)
 {
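[Editor's note, not part of the patch: a hedged sketch of a rate-table entry using the new pre_muxs/post_muxs hooks. The register offsets and values are invented, and the .prate/.divs fields follow the existing upstream table layout (an assumption); real tables come from the SoC's clock TRM.]

	static const struct rockchip_cpuclk_rate_table demo_cpuclk_rates[] = {
		{
			.prate = 1200000000,
			.divs = {
				/* { clksel register offset, HIWORD_UPDATE value } */
				{ .reg = 0x0104, .val = HIWORD_UPDATE(1, 0x1f, 0) },
			},
			.pre_muxs = {
				/* applied while the core runs from the alt parent */
				{ .reg = 0x0100, .val = HIWORD_UPDATE(1, 0x3, 6) },
			},
			.post_muxs = {
				/* restored once the new PLL rate has locked */
				{ .reg = 0x0100, .val = HIWORD_UPDATE(0, 0x3, 6) },
			},
		},
	};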
@@ -124,6 +161,7 @@ static int rockchip_cpuclk_pre_rate_change(struct rockchip_cpuclk *cpuclk,
 	const struct rockchip_cpuclk_rate_table *rate;
 	unsigned long alt_prate, alt_div;
 	unsigned long flags;
+	int i = 0;
 
 	/* check validity of the new rate */
 	rate = rockchip_get_cpuclk_settings(cpuclk, ndata->new_rate);
@@ -133,6 +171,8 @@ static int rockchip_cpuclk_pre_rate_change(struct rockchip_cpuclk *cpuclk,
 		return -EINVAL;
 	}
 
+	rockchip_boost_enable_recovery_sw_low(cpuclk->pll_hw);
+
 	alt_prate = clk_get_rate(cpuclk->alt_parent);
 
 	spin_lock_irqsave(cpuclk->lock, flags);
@@ -146,10 +186,10 @@ static int rockchip_cpuclk_pre_rate_change(struct rockchip_cpuclk *cpuclk,
 	if (alt_prate > ndata->old_rate) {
 		/* calculate dividers */
 		alt_div = DIV_ROUND_UP(alt_prate, ndata->old_rate) - 1;
-		if (alt_div > reg_data->div_core_mask) {
+		if (alt_div > reg_data->div_core_mask[0]) {
 			pr_warn("%s: limiting alt-divider %lu to %d\n",
-				__func__, alt_div, reg_data->div_core_mask);
-			alt_div = reg_data->div_core_mask;
+				__func__, alt_div, reg_data->div_core_mask[0]);
+			alt_div = reg_data->div_core_mask[0];
 		}
 
 		/*
@@ -162,19 +202,28 @@ static int rockchip_cpuclk_pre_rate_change(struct rockchip_cpuclk *cpuclk,
 		pr_debug("%s: setting div %lu as alt-rate %lu > old-rate %lu\n",
 			 __func__, alt_div, alt_prate, ndata->old_rate);
 
-		writel(HIWORD_UPDATE(alt_div, reg_data->div_core_mask,
-				     reg_data->div_core_shift) |
-		       HIWORD_UPDATE(reg_data->mux_core_alt,
+		for (i = 0; i < reg_data->num_cores; i++) {
+			writel(HIWORD_UPDATE(alt_div, reg_data->div_core_mask[i],
+					     reg_data->div_core_shift[i]),
+			       cpuclk->reg_base + reg_data->core_reg[i]);
+		}
+	}
+
+	rockchip_boost_add_core_div(cpuclk->pll_hw, alt_prate);
+
+	rockchip_cpuclk_set_pre_muxs(cpuclk, rate);
+
+	/* select alternate parent */
+	if (reg_data->mux_core_reg)
+		writel(HIWORD_UPDATE(reg_data->mux_core_alt,
 				     reg_data->mux_core_mask,
 				     reg_data->mux_core_shift),
-		       cpuclk->reg_base + reg_data->core_reg);
-	} else {
-		/* select alternate parent */
+		       cpuclk->reg_base + reg_data->mux_core_reg);
+	else
 		writel(HIWORD_UPDATE(reg_data->mux_core_alt,
 				     reg_data->mux_core_mask,
 				     reg_data->mux_core_shift),
-		       cpuclk->reg_base + reg_data->core_reg);
-	}
+		       cpuclk->reg_base + reg_data->core_reg[0]);
 
 	spin_unlock_irqrestore(cpuclk->lock, flags);
 	return 0;
@@ -186,6 +235,7 @@ static int rockchip_cpuclk_post_rate_change(struct rockchip_cpuclk *cpuclk,
 	const struct rockchip_cpuclk_reg_data *reg_data = cpuclk->reg_data;
 	const struct rockchip_cpuclk_rate_table *rate;
 	unsigned long flags;
+	int i = 0;
 
 	rate = rockchip_get_cpuclk_settings(cpuclk, ndata->new_rate);
 	if (!rate) {
@@ -206,16 +256,31 @@ static int rockchip_cpuclk_post_rate_change(struct rockchip_cpuclk *cpuclk,
 	 * primary parent by the extra dividers that were needed for the alt.
 	 */
 
-	writel(HIWORD_UPDATE(0, reg_data->div_core_mask,
-			     reg_data->div_core_shift) |
-	       HIWORD_UPDATE(reg_data->mux_core_main,
-			     reg_data->mux_core_mask,
-			     reg_data->mux_core_shift),
-	       cpuclk->reg_base + reg_data->core_reg);
+	if (reg_data->mux_core_reg)
+		writel(HIWORD_UPDATE(reg_data->mux_core_main,
+				     reg_data->mux_core_mask,
+				     reg_data->mux_core_shift),
+		       cpuclk->reg_base + reg_data->mux_core_reg);
+	else
+		writel(HIWORD_UPDATE(reg_data->mux_core_main,
+				     reg_data->mux_core_mask,
+				     reg_data->mux_core_shift),
+		       cpuclk->reg_base + reg_data->core_reg[0]);
+
+	rockchip_cpuclk_set_post_muxs(cpuclk, rate);
+
+	/* remove dividers */
+	for (i = 0; i < reg_data->num_cores; i++) {
+		writel(HIWORD_UPDATE(0, reg_data->div_core_mask[i],
+				     reg_data->div_core_shift[i]),
+		       cpuclk->reg_base + reg_data->core_reg[i]);
+	}
 
 	if (ndata->old_rate > ndata->new_rate)
 		rockchip_cpuclk_set_dividers(cpuclk, rate);
 
+	rockchip_boost_disable_recovery_sw(cpuclk->pll_hw);
+
 	spin_unlock_irqrestore(cpuclk->lock, flags);
 	return 0;
 }
@@ -244,14 +309,16 @@ static int rockchip_cpuclk_notifier_cb(struct notifier_block *nb,
 }
 
 struct clk *rockchip_clk_register_cpuclk(const char *name,
-			const char *const *parent_names, u8 num_parents,
+			u8 num_parents,
+			struct clk *parent, struct clk *alt_parent,
 			const struct rockchip_cpuclk_reg_data *reg_data,
 			const struct rockchip_cpuclk_rate_table *rates,
 			int nrates, void __iomem *reg_base, spinlock_t *lock)
 {
 	struct rockchip_cpuclk *cpuclk;
 	struct clk_init_data init;
-	struct clk *clk, *cclk;
+	struct clk *clk, *cclk, *pll_clk;
+	const char *parent_name;
 	int ret;
 
 	if (num_parents < 2) {
@@ -259,12 +326,18 @@ struct clk *rockchip_clk_register_cpuclk(const char *name,
 		return ERR_PTR(-EINVAL);
 	}
 
+	if (IS_ERR(parent) || IS_ERR(alt_parent)) {
+		pr_err("%s: invalid parent clock(s)\n", __func__);
+		return ERR_PTR(-EINVAL);
+	}
+
 	cpuclk = kzalloc(sizeof(*cpuclk), GFP_KERNEL);
 	if (!cpuclk)
 		return ERR_PTR(-ENOMEM);
 
+	parent_name = clk_hw_get_name(__clk_get_hw(parent));
 	init.name = name;
-	init.parent_names = &parent_names[reg_data->mux_core_main];
+	init.parent_names = &parent_name;
 	init.num_parents = 1;
 	init.ops = &rockchip_cpuclk_ops;
@@ -281,8 +354,19 @@ struct clk *rockchip_clk_register_cpuclk(const char *name,
 	cpuclk->reg_data = reg_data;
 	cpuclk->clk_nb.notifier_call = rockchip_cpuclk_notifier_cb;
 	cpuclk->hw.init = &init;
+	if (reg_data->pll_name) {
+		pll_clk = clk_get_parent(parent);
+		if (!pll_clk) {
+			pr_err("%s: could not lookup pll clock: (%s)\n",
+			       __func__, reg_data->pll_name);
+			ret = -EINVAL;
+			goto free_cpuclk;
+		}
+		cpuclk->pll_hw = __clk_get_hw(pll_clk);
+		rockchip_boost_init(cpuclk->pll_hw);
+	}
 
-	cpuclk->alt_parent = __clk_lookup(parent_names[reg_data->mux_core_alt]);
+	cpuclk->alt_parent = alt_parent;
 	if (!cpuclk->alt_parent) {
 		pr_err("%s: could not lookup alternate parent: (%d)\n",
 		       __func__, reg_data->mux_core_alt);
@@ -297,11 +381,11 @@ struct clk *rockchip_clk_register_cpuclk(const char *name,
 		goto free_cpuclk;
 	}
 
-	clk = __clk_lookup(parent_names[reg_data->mux_core_main]);
+	clk = parent;
 	if (!clk) {
 		pr_err("%s: could not lookup parent clock: (%d) %s\n",
 		       __func__, reg_data->mux_core_main,
-		       parent_names[reg_data->mux_core_main]);
+		       parent_name);
 		ret = -EINVAL;
 		goto free_alt_parent;
 	}
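[Editor's note, not part of the patch: callers of rockchip_clk_register_cpuclk() now pass clk pointers for the main and alternate parents instead of a name array. A hedged sketch of the new calling convention — the clock names, offsets, and reg_data contents are invented, and demo_cpuclk_rates refers to the illustrative table sketched earlier:]

	static const struct rockchip_cpuclk_reg_data demo_cpuclk_reg_data = {
		.core_reg[0]		= 0x0000,	/* invented offsets */
		.div_core_shift[0]	= 0,
		.div_core_mask[0]	= 0x1f,
		.num_cores		= 1,
		.mux_core_alt		= 1,
		.mux_core_main		= 0,
		.mux_core_shift		= 6,
		.mux_core_mask		= 0x1,
	};

	static void demo_register_cpuclk(void __iomem *reg_base, spinlock_t *lock)
	{
		struct clk *parent = __clk_lookup("armclk_parent");	/* assumed names */
		struct clk *alt = __clk_lookup("xin24m");

		rockchip_clk_register_cpuclk("armclk", 2, parent, alt,
					     &demo_cpuclk_reg_data,
					     demo_cpuclk_rates,
					     ARRAY_SIZE(demo_cpuclk_rates),
					     reg_base, lock);
	}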
diff --git a/drivers/clk/rockchip/clk-ddr.c b/drivers/clk/rockchip/clk-ddr.c
index 86718c54e..3c8bcbee2 100644
--- a/drivers/clk/rockchip/clk-ddr.c
+++ b/drivers/clk/rockchip/clk-ddr.c
@@ -8,10 +8,20 @@
 #include
 #include
 #include
+#include
+#include
 #include
 #include
+#include
+#include
+#ifdef CONFIG_ARM
+#include
+#endif
+
 #include "clk.h"
 
+#define MHZ	(1000000)
+
 struct rockchip_ddrclk {
 	struct clk_hw	hw;
 	void __iomem	*reg_base;
@@ -21,25 +31,47 @@ struct rockchip_ddrclk {
 	int		div_shift;
 	int		div_width;
 	int		ddr_flag;
-	spinlock_t	*lock;
 };
 
 #define to_rockchip_ddrclk_hw(hw) container_of(hw, struct rockchip_ddrclk, hw)
 
+struct share_params_ddrclk {
+	u32 hz;
+	u32 lcdc_type;
+};
+
+struct rockchip_ddrclk_data {
+	void __iomem *params;
+	int (*dmcfreq_wait_complete)(void);
+};
+
+static struct rockchip_ddrclk_data ddr_data = {NULL, NULL};
+
+void rockchip_set_ddrclk_params(void __iomem *params)
+{
+	ddr_data.params = params;
+}
+EXPORT_SYMBOL(rockchip_set_ddrclk_params);
+
+void rockchip_set_ddrclk_dmcfreq_wait_complete(int (*func)(void))
+{
+	ddr_data.dmcfreq_wait_complete = func;
+}
+EXPORT_SYMBOL(rockchip_set_ddrclk_dmcfreq_wait_complete);
+
 static int rockchip_ddrclk_sip_set_rate(struct clk_hw *hw, unsigned long drate,
 					unsigned long prate)
 {
-	struct rockchip_ddrclk *ddrclk = to_rockchip_ddrclk_hw(hw);
-	unsigned long flags;
 	struct arm_smccc_res res;
 
-	spin_lock_irqsave(ddrclk->lock, flags);
 	arm_smccc_smc(ROCKCHIP_SIP_DRAM_FREQ, drate, 0,
 		      ROCKCHIP_SIP_CONFIG_DRAM_SET_RATE,
 		      0, 0, 0, 0, &res);
-	spin_unlock_irqrestore(ddrclk->lock, flags);
 
-	return res.a0;
+	if (res.a0)
+		return 0;
+	else
+		return -EPERM;
 }
 
 static unsigned long
@@ -87,18 +119,134 @@ static const struct clk_ops rockchip_ddrclk_sip_ops = {
 	.get_parent = rockchip_ddrclk_get_parent,
 };
 
+static u32 ddr_clk_cached;
+
+static int rockchip_ddrclk_scpi_set_rate(struct clk_hw *hw, unsigned long drate,
+					 unsigned long prate)
+{
+	u32 ret;
+	u32 lcdc_type = 0;
+	struct share_params_ddrclk *p;
+
+	p = (struct share_params_ddrclk *)ddr_data.params;
+	if (p)
+		lcdc_type = p->lcdc_type;
+
+	ret = scpi_ddr_set_clk_rate(drate / MHZ, lcdc_type);
+	if (ret) {
+		ddr_clk_cached = ret;
+		ret = 0;
+	} else {
+		ddr_clk_cached = 0;
+		ret = -1;
+	}
+
+	return ret;
+}
+
+static unsigned long rockchip_ddrclk_scpi_recalc_rate(struct clk_hw *hw,
+						      unsigned long parent_rate)
+{
+	if (ddr_clk_cached)
+		return (MHZ * ddr_clk_cached);
+	else
+		return (MHZ * scpi_ddr_get_clk_rate());
+}
+
+static long rockchip_ddrclk_scpi_round_rate(struct clk_hw *hw,
+					    unsigned long rate,
+					    unsigned long *prate)
+{
+	rate = rate / MHZ;
+	rate = (rate / 12) * 12;
+
+	return (rate * MHZ);
+}
+
+static const struct clk_ops rockchip_ddrclk_scpi_ops = {
+	.recalc_rate = rockchip_ddrclk_scpi_recalc_rate,
+	.set_rate = rockchip_ddrclk_scpi_set_rate,
+	.round_rate = rockchip_ddrclk_scpi_round_rate,
+	.get_parent = rockchip_ddrclk_get_parent,
+};
+
+static int rockchip_ddrclk_sip_set_rate_v2(struct clk_hw *hw,
+					   unsigned long drate,
+					   unsigned long prate)
+{
+	struct share_params_ddrclk *p;
+	struct arm_smccc_res res;
+
+	p = (struct share_params_ddrclk *)ddr_data.params;
+	if (p)
+		p->hz = drate;
+
+	res = sip_smc_dram(SHARE_PAGE_TYPE_DDR, 0,
+			   ROCKCHIP_SIP_CONFIG_DRAM_SET_RATE);
+
+	if ((int)res.a1 == SIP_RET_SET_RATE_TIMEOUT) {
+		if (ddr_data.dmcfreq_wait_complete)
+			ddr_data.dmcfreq_wait_complete();
+	}
+
+	return res.a0;
+}
+
+static unsigned long rockchip_ddrclk_sip_recalc_rate_v2
+			(struct clk_hw *hw, unsigned long parent_rate)
+{
+	struct arm_smccc_res res;
+
+	res = sip_smc_dram(SHARE_PAGE_TYPE_DDR, 0,
+			   ROCKCHIP_SIP_CONFIG_DRAM_GET_RATE);
+	if (!res.a0)
+		return res.a1;
+	else
+		return 0;
+}
+
+static long rockchip_ddrclk_sip_round_rate_v2(struct clk_hw *hw,
+					      unsigned long rate,
+					      unsigned long *prate)
+{
+	struct share_params_ddrclk *p;
+	struct arm_smccc_res res;
+
+	p = (struct share_params_ddrclk *)ddr_data.params;
+	if (p)
+		p->hz = rate;
+
+	res = sip_smc_dram(SHARE_PAGE_TYPE_DDR, 0,
+			   ROCKCHIP_SIP_CONFIG_DRAM_ROUND_RATE);
+	if (!res.a0)
+		return res.a1;
+	else
+		return 0;
+}
+
+static const struct clk_ops rockchip_ddrclk_sip_ops_v2 = {
+	.recalc_rate = rockchip_ddrclk_sip_recalc_rate_v2,
+	.set_rate = rockchip_ddrclk_sip_set_rate_v2,
+	.round_rate = rockchip_ddrclk_sip_round_rate_v2,
+	.get_parent = rockchip_ddrclk_get_parent,
+};
+
 struct clk *rockchip_clk_register_ddrclk(const char *name, int flags,
 					 const char *const *parent_names,
 					 u8 num_parents, int mux_offset,
 					 int mux_shift, int mux_width,
 					 int div_shift, int div_width,
-					 int ddr_flag, void __iomem *reg_base,
-					 spinlock_t *lock)
+					 int ddr_flag, void __iomem *reg_base)
 {
 	struct rockchip_ddrclk *ddrclk;
 	struct clk_init_data init;
 	struct clk *clk;
 
+#ifdef CONFIG_ARM
+	if (!psci_smp_available())
+		return NULL;
+#endif
+
 	ddrclk = kzalloc(sizeof(*ddrclk), GFP_KERNEL);
 	if (!ddrclk)
 		return ERR_PTR(-ENOMEM);
@@ -114,6 +262,12 @@ struct clk *rockchip_clk_register_ddrclk(const char *name, int flags,
 	case ROCKCHIP_DDRCLK_SIP:
 		init.ops = &rockchip_ddrclk_sip_ops;
 		break;
+	case ROCKCHIP_DDRCLK_SCPI:
+		init.ops = &rockchip_ddrclk_scpi_ops;
+		break;
+	case ROCKCHIP_DDRCLK_SIP_V2:
+		init.ops = &rockchip_ddrclk_sip_ops_v2;
+		break;
 	default:
 		pr_err("%s: unsupported ddrclk type %d\n", __func__, ddr_flag);
 		kfree(ddrclk);
@@ -121,7 +275,6 @@ struct clk *rockchip_clk_register_ddrclk(const char *name, int flags,
 	}
 
 	ddrclk->reg_base = reg_base;
-	ddrclk->lock = lock;
 	ddrclk->hw.init = &init;
 	ddrclk->mux_offset = mux_offset;
 	ddrclk->mux_shift = mux_shift;
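[Editor's note, not part of the patch: the new rockchip_set_ddrclk_params() / rockchip_set_ddrclk_dmcfreq_wait_complete() hooks let a dmcfreq-style driver hand the clk code a shared-parameter page and a completion callback. A hedged consumer sketch — how the share page is obtained (SIP call, reserved memory, ...) is driver-specific and assumed here:]

	#include <linux/io.h>

	extern void rockchip_set_ddrclk_params(void __iomem *params);
	extern void rockchip_set_ddrclk_dmcfreq_wait_complete(int (*func)(void));

	static int demo_dmcfreq_wait_complete(void)
	{
		/* a real driver would block until the DDR-frequency IRQ fires */
		return 0;
	}

	static void demo_dmcfreq_attach(void __iomem *share_page)
	{
		/* share_page: ioremapped page shared with the firmware DRAM
		 * service (hypothetical; acquisition is out of scope here) */
		rockchip_set_ddrclk_params(share_page);
		rockchip_set_ddrclk_dmcfreq_wait_complete(demo_dmcfreq_wait_complete);
	}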
diff --git a/drivers/clk/rockchip/clk-half-divider.c b/drivers/clk/rockchip/clk-half-divider.c
index ccd5c270c..b978af08d 100644
--- a/drivers/clk/rockchip/clk-half-divider.c
+++ b/drivers/clk/rockchip/clk-half-divider.c
@@ -14,9 +14,9 @@ static bool _is_best_half_div(unsigned long rate, unsigned long now,
 			      unsigned long best, unsigned long flags)
 {
 	if (flags & CLK_DIVIDER_ROUND_CLOSEST)
-		return abs(rate - now) < abs(rate - best);
+		return abs(rate - now) <= abs(rate - best);
 
-	return now <= rate && now > best;
+	return now <= rate && now >= best;
 }
 
 static unsigned long clk_half_divider_recalc_rate(struct clk_hw *hw,
@@ -38,7 +38,7 @@ static int clk_half_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
 {
 	unsigned int i, bestdiv = 0;
 	unsigned long parent_rate, best = 0, now, maxdiv;
-	unsigned long parent_rate_saved = *best_parent_rate;
+	bool is_bestdiv = false;
 
 	if (!rate)
 		rate = 1;
@@ -51,7 +51,7 @@ static int clk_half_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
 		if (bestdiv < 3)
 			bestdiv = 0;
 		else
-			bestdiv = (bestdiv - 3) / 2;
+			bestdiv = DIV_ROUND_UP(bestdiv - 3, 2);
 		bestdiv = bestdiv > maxdiv ? maxdiv : bestdiv;
 		return bestdiv;
 	}
@@ -63,28 +63,20 @@ static int clk_half_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
 	maxdiv = min(ULONG_MAX / rate, maxdiv);
 
 	for (i = 0; i <= maxdiv; i++) {
-		if (((u64)rate * (i * 2 + 3)) == ((u64)parent_rate_saved * 2)) {
-			/*
-			 * It's the most ideal case if the requested rate can be
-			 * divided from parent clock without needing to change
-			 * parent rate, so return the divider immediately.
-			 */
-			*best_parent_rate = parent_rate_saved;
-			return i;
-		}
 		parent_rate = clk_hw_round_rate(clk_hw_get_parent(hw),
 						((u64)rate * (i * 2 + 3)) / 2);
 		now = DIV_ROUND_UP_ULL(((u64)parent_rate * 2),
 				       (i * 2 + 3));
 
 		if (_is_best_half_div(rate, now, best, flags)) {
+			is_bestdiv = true;
 			bestdiv = i;
 			best = now;
 			*best_parent_rate = parent_rate;
 		}
 	}
 
-	if (!bestdiv) {
+	if (!is_bestdiv) {
 		bestdiv = div_mask(width);
 		*best_parent_rate = clk_hw_round_rate(clk_hw_get_parent(hw), 1);
 	}
@@ -114,7 +106,7 @@ static int clk_half_divider_set_rate(struct clk_hw *hw, unsigned long rate,
 	u32 val;
 
 	value = DIV_ROUND_UP_ULL(((u64)parent_rate * 2), rate);
-	value = (value - 3) / 2;
+	value = DIV_ROUND_UP(value - 3, 2);
 	value = min_t(unsigned int, value, div_mask(divider->width));
 
 	if (divider->lock)
@@ -160,10 +152,10 @@ struct clk *rockchip_clk_register_halfdiv(const char *name,
 					  u8 num_parents, void __iomem *base,
 					  int muxdiv_offset, u8 mux_shift,
 					  u8 mux_width, u8 mux_flags,
-					  u8 div_shift, u8 div_width,
-					  u8 div_flags, int gate_offset,
-					  u8 gate_shift, u8 gate_flags,
-					  unsigned long flags,
+					  int div_offset, u8 div_shift,
+					  u8 div_width, u8 div_flags,
+					  int gate_offset, u8 gate_shift,
+					  u8 gate_flags, unsigned long flags,
 					  spinlock_t *lock)
 {
 	struct clk_hw *hw = ERR_PTR(-ENOMEM);
@@ -205,7 +197,10 @@ struct clk *rockchip_clk_register_halfdiv(const char *name,
 			goto err_div;
 
 		div->flags = div_flags;
-		div->reg = base + muxdiv_offset;
+		if (div_offset)
+			div->reg = base + div_offset;
+		else
+			div->reg = base + muxdiv_offset;
 		div->shift = div_shift;
 		div->width = div_width;
 		div->lock = lock;
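[Editor's note, not part of the patch: the half-divider produces divisors of 1.5, 2.5, 3.5, ... — register value i means divide by (i*2+3)/2, so rate = parent*2/(i*2+3). The DIV_ROUND_UP changes matter because rounding the register value up keeps the output at or below the request: with parent = 960 MHz and request = 320 MHz, value = DIV_ROUND_UP(1920, 320) = 6; the old (6-3)/2 = 1 gives divisor 2.5 and 384 MHz (overshoot), while DIV_ROUND_UP(3, 2) = 2 gives divisor 3.5 and ~274 MHz. A small sketch of both formulas:]

	/* Illustrative helpers mirroring the half-divider math above. */
	static unsigned long demo_halfdiv_rate(unsigned long parent_rate,
					       unsigned int i)
	{
		/* 600 MHz, i = 0 -> 400 MHz (div 1.5); i = 1 -> 240 MHz (div 2.5) */
		return DIV_ROUND_UP_ULL((u64)parent_rate * 2, i * 2 + 3);
	}

	static unsigned int demo_halfdiv_regval(unsigned long parent,
						unsigned long rate)
	{
		unsigned long value = DIV_ROUND_UP(parent * 2, rate);

		return DIV_ROUND_UP(value - 3, 2);	/* was (value - 3) / 2 */
	}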
diff --git a/drivers/clk/rockchip/clk-pll.c b/drivers/clk/rockchip/clk-pll.c
index d0bd513ff..5687b5d8f 100644
--- a/drivers/clk/rockchip/clk-pll.c
+++ b/drivers/clk/rockchip/clk-pll.c
@@ -15,6 +15,9 @@
 #include
 #include
 #include
+#include
+#include
+#include
 #include "clk.h"
 
 #define PLL_MODE_MASK		0x3
@@ -38,15 +41,352 @@ struct rockchip_clk_pll {
 	u8			flags;
 	const struct rockchip_pll_rate_table *rate_table;
 	unsigned int		rate_count;
+	int			sel;
+	unsigned long		scaling;
 	spinlock_t		*lock;
 
 	struct rockchip_clk_provider *ctx;
+
+	bool			boost_enabled;
+	u32			boost_backup_pll_usage;
+	unsigned long		boost_backup_pll_rate;
+	unsigned long		boost_low_rate;
+	unsigned long		boost_high_rate;
+	struct regmap		*boost;
+#ifdef CONFIG_DEBUG_FS
+	struct hlist_node	debug_node;
+#endif
 };
 
 #define to_rockchip_clk_pll(_hw) container_of(_hw, struct rockchip_clk_pll, hw)
 #define to_rockchip_clk_pll_nb(nb) \
 			container_of(nb, struct rockchip_clk_pll, clk_nb)
 
+static void rockchip_boost_disable_low(struct rockchip_clk_pll *pll);
+
+#define MHZ			(1000UL * 1000UL)
+#define KHZ			(1000UL)
+
+/* CLK_PLL_TYPE_RK3066_AUTO type ops */
+#define PLL_FREF_MIN		(269 * KHZ)
+#define PLL_FREF_MAX		(2200 * MHZ)
+
+#define PLL_FVCO_MIN		(440 * MHZ)
+#define PLL_FVCO_MAX		(2200 * MHZ)
+
+#define PLL_FOUT_MIN		(27500 * KHZ)
+#define PLL_FOUT_MAX		(2200 * MHZ)
+
+#define PLL_NF_MAX		(4096)
+#define PLL_NR_MAX		(64)
+#define PLL_NO_MAX		(16)
+
+/* CLK_PLL_TYPE_RK3036/3366/3399_AUTO type ops */
+#define MIN_FOUTVCO_FREQ	(800 * MHZ)
+#define MAX_FOUTVCO_FREQ	(2000 * MHZ)
+
+static struct rockchip_pll_rate_table auto_table;
+#ifdef CONFIG_DEBUG_FS
+static HLIST_HEAD(clk_boost_list);
+static DEFINE_MUTEX(clk_boost_lock);
+#endif
+
+int rockchip_pll_clk_adaptive_scaling(struct clk *clk, int sel)
+{
+	struct clk *parent = clk_get_parent(clk);
+	struct rockchip_clk_pll *pll;
+
+	if (IS_ERR_OR_NULL(parent))
+		return -EINVAL;
+
+	pll = to_rockchip_clk_pll(__clk_get_hw(parent));
+	if (!pll)
+		return -EINVAL;
+
+	pll->sel = sel;
+
+	return 0;
+}
+EXPORT_SYMBOL(rockchip_pll_clk_adaptive_scaling);
+
+int rockchip_pll_clk_rate_to_scale(struct clk *clk, unsigned long rate)
+{
+	const struct rockchip_pll_rate_table *rate_table;
+	struct clk *parent = clk_get_parent(clk);
+	struct rockchip_clk_pll *pll;
+	unsigned int i;
+
+	if (IS_ERR_OR_NULL(parent))
+		return -EINVAL;
+
+	pll = to_rockchip_clk_pll(__clk_get_hw(parent));
+	if (!pll)
+		return -EINVAL;
+
+	rate_table = pll->rate_table;
+	for (i = 0; i < pll->rate_count; i++) {
+		if (rate >= rate_table[i].rate)
+			return i;
+	}
+
+	return -EINVAL;
+}
+EXPORT_SYMBOL(rockchip_pll_clk_rate_to_scale);
+
+int rockchip_pll_clk_scale_to_rate(struct clk *clk, unsigned int scale)
+{
+	const struct rockchip_pll_rate_table *rate_table;
+	struct clk *parent = clk_get_parent(clk);
+	struct rockchip_clk_pll *pll;
+	unsigned int i;
+
+	if (IS_ERR_OR_NULL(parent))
+		return -EINVAL;
+
+	pll = to_rockchip_clk_pll(__clk_get_hw(parent));
+	if (!pll)
+		return -EINVAL;
+
+	rate_table = pll->rate_table;
+	for (i = 0; i < pll->rate_count; i++) {
+		if (i == scale)
+			return rate_table[i].rate;
+	}
+
+	return -EINVAL;
+}
+EXPORT_SYMBOL(rockchip_pll_clk_scale_to_rate);
+
+static struct rockchip_pll_rate_table *rk_pll_rate_table_get(void)
+{
+	return &auto_table;
+}
+
+static int rockchip_pll_clk_set_postdiv(unsigned long fout_hz,
+					u32 *postdiv1,
+					u32 *postdiv2,
+					u32 *foutvco)
+{
+	unsigned long freq;
+
+	if (fout_hz < MIN_FOUTVCO_FREQ) {
+		for (*postdiv1 = 1; *postdiv1 <= 7; (*postdiv1)++) {
+			for (*postdiv2 = 1; *postdiv2 <= 7; (*postdiv2)++) {
+				freq = fout_hz * (*postdiv1) * (*postdiv2);
+				if (freq >= MIN_FOUTVCO_FREQ &&
+				    freq <= MAX_FOUTVCO_FREQ) {
+					*foutvco = freq;
+					return 0;
+				}
+			}
+		}
+		pr_err("CANNOT FIND postdiv1/2 to make fout in range from 800M to 2000M,fout = %lu\n",
+		       fout_hz);
+	} else {
+		*postdiv1 = 1;
+		*postdiv2 = 1;
+	}
+	return 0;
+}
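[Editor's note, not part of the patch: a worked instance of the postdiv search above. For fout = 297 MHz (below MIN_FOUTVCO_FREQ) the nested loops accept the first pair whose product lands the VCO in [800 MHz, 2000 MHz]: postdiv1 = 1, postdiv2 = 3, since 297 MHz * 3 = 891 MHz. A minimal sketch mirroring the loop:]

	/* Illustrative only: returns the VCO frequency the search would pick. */
	static unsigned long demo_foutvco(unsigned long fout_hz)
	{
		u32 p1, p2;

		for (p1 = 1; p1 <= 7; p1++)
			for (p2 = 1; p2 <= 7; p2++)
				if (fout_hz * p1 * p2 >= 800000000UL &&
				    fout_hz * p1 * p2 <= 2000000000UL)
					return fout_hz * p1 * p2; /* 297 MHz -> 891 MHz */
		return 0;
	}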
+
+static struct rockchip_pll_rate_table *
+rockchip_pll_clk_set_by_auto(struct rockchip_clk_pll *pll,
+			     unsigned long fin_hz,
+			     unsigned long fout_hz)
+{
+	struct rockchip_pll_rate_table *rate_table = rk_pll_rate_table_get();
+	/* FIXME set postdiv1/2 always 1*/
+	u32 foutvco = fout_hz;
+	u64 fin_64, frac_64;
+	u32 f_frac, postdiv1, postdiv2;
+	unsigned long clk_gcd = 0;
+
+	if (fin_hz == 0 || fout_hz == 0 || fout_hz == fin_hz)
+		return NULL;
+
+	rockchip_pll_clk_set_postdiv(fout_hz, &postdiv1, &postdiv2, &foutvco);
+	rate_table->postdiv1 = postdiv1;
+	rate_table->postdiv2 = postdiv2;
+	rate_table->dsmpd = 1;
+
+	if (fin_hz / MHZ * MHZ == fin_hz && fout_hz / MHZ * MHZ == fout_hz) {
+		fin_hz /= MHZ;
+		foutvco /= MHZ;
+		clk_gcd = gcd(fin_hz, foutvco);
+		rate_table->refdiv = fin_hz / clk_gcd;
+		rate_table->fbdiv = foutvco / clk_gcd;
+
+		rate_table->frac = 0;
+
+		pr_debug("fin = %lu, fout = %lu, clk_gcd = %lu, refdiv = %u, fbdiv = %u, postdiv1 = %u, postdiv2 = %u, frac = %u\n",
+			 fin_hz, fout_hz, clk_gcd, rate_table->refdiv,
+			 rate_table->fbdiv, rate_table->postdiv1,
+			 rate_table->postdiv2, rate_table->frac);
+	} else {
+		pr_debug("frac div running, fin_hz = %lu, fout_hz = %lu, fin_INT_mhz = %lu, fout_INT_mhz = %lu\n",
+			 fin_hz, fout_hz,
+			 fin_hz / MHZ * MHZ,
+			 fout_hz / MHZ * MHZ);
+		pr_debug("frac get postdiv1 = %u, postdiv2 = %u, foutvco = %u\n",
+			 rate_table->postdiv1, rate_table->postdiv2, foutvco);
+		clk_gcd = gcd(fin_hz / MHZ, foutvco / MHZ);
+		rate_table->refdiv = fin_hz / MHZ / clk_gcd;
+		rate_table->fbdiv = foutvco / MHZ / clk_gcd;
+		pr_debug("frac get refdiv = %u, fbdiv = %u\n",
+			 rate_table->refdiv, rate_table->fbdiv);
+
+		rate_table->frac = 0;
+
+		f_frac = (foutvco % MHZ);
+		fin_64 = fin_hz;
+		do_div(fin_64, (u64)rate_table->refdiv);
+		frac_64 = (u64)f_frac << 24;
+		do_div(frac_64, fin_64);
+		rate_table->frac = (u32)frac_64;
+		if (rate_table->frac > 0)
+			rate_table->dsmpd = 0;
+		pr_debug("frac = %x\n", rate_table->frac);
+	}
+	return rate_table;
+}
+
+static struct rockchip_pll_rate_table *
+rockchip_rk3066_pll_clk_set_by_auto(struct rockchip_clk_pll *pll,
+				    unsigned long fin_hz,
+				    unsigned long fout_hz)
+{
+	struct rockchip_pll_rate_table *rate_table = rk_pll_rate_table_get();
+	u32 nr, nf, no, nonr;
+	u32 nr_out, nf_out, no_out;
+	u32 n;
+	u32 numerator, denominator;
+	u64 fref, fvco, fout;
+	unsigned long clk_gcd = 0;
+
+	nr_out = PLL_NR_MAX + 1;
+	no_out = 0;
+	nf_out = 0;
+
+	if (fin_hz == 0 || fout_hz == 0 || fout_hz == fin_hz)
+		return NULL;
+
+	clk_gcd = gcd(fin_hz, fout_hz);
+
+	numerator = fout_hz / clk_gcd;
+	denominator = fin_hz / clk_gcd;
+
+	for (n = 1;; n++) {
+		nf = numerator * n;
+		nonr = denominator * n;
+		if (nf > PLL_NF_MAX || nonr > (PLL_NO_MAX * PLL_NR_MAX))
+			break;
+
+		for (no = 1; no <= PLL_NO_MAX; no++) {
+			if (!(no == 1 || !(no % 2)))
+				continue;
+
+			if (nonr % no)
+				continue;
+			nr = nonr / no;
+
+			if (nr > PLL_NR_MAX)
+				continue;
+
+			fref = fin_hz / nr;
+			if (fref < PLL_FREF_MIN || fref > PLL_FREF_MAX)
+				continue;
+
+			fvco = fref * nf;
+			if (fvco < PLL_FVCO_MIN || fvco > PLL_FVCO_MAX)
+				continue;
+
+			fout = fvco / no;
+			if (fout < PLL_FOUT_MIN || fout > PLL_FOUT_MAX)
+				continue;
+
+			/* select the best from all available PLL settings */
+			if ((no > no_out) ||
+			    ((no == no_out) && (nr < nr_out))) {
+				nr_out = nr;
+				nf_out = nf;
+				no_out = no;
+			}
+		}
+	}
+
+	/* output the best PLL setting */
+	if ((nr_out <= PLL_NR_MAX) && (no_out > 0)) {
+		rate_table->nr = nr_out;
+		rate_table->nf = nf_out;
+		rate_table->no = no_out;
+	} else {
+		return NULL;
+	}
+
+	return rate_table;
+}
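[Editor's note, not part of the patch: a worked instance of the integer path in rockchip_pll_clk_set_by_auto(), assuming fin = 24 MHz and fout = fvco = 1200 MHz (both whole MHz): gcd(24, 1200) = 24, so refdiv = 1 and fbdiv = 50; with postdiv1 = postdiv2 = 1 the output is 24 MHz / 1 * 50 / (1 * 1) = 1200 MHz. Sketch:]

	#include <linux/gcd.h>

	/* Illustrative only: mirrors the gcd-based parameter derivation. */
	static void demo_int_params(void)
	{
		unsigned long fin = 24, fvco = 1200;	/* in MHz */
		unsigned long g = gcd(fin, fvco);	/* = 24 */

		pr_debug("refdiv = %lu, fbdiv = %lu\n",
			 fin / g, fvco / g);		/* 1 and 50 */
	}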
+
+static struct rockchip_pll_rate_table *
+rockchip_rk3588_pll_clk_set_by_auto(struct rockchip_clk_pll *pll,
+				    unsigned long fin_hz,
+				    unsigned long fout_hz)
+{
+	struct rockchip_pll_rate_table *rate_table = rk_pll_rate_table_get();
+	u64 fvco_min = 2250 * MHZ, fvco_max = 4500 * MHZ;
+	u64 fout_min = 37 * MHZ, fout_max = 4500 * MHZ;
+	u32 p, m, s;
+	u64 fvco, fref, fout, ffrac;
+
+	if (fin_hz == 0 || fout_hz == 0 || fout_hz == fin_hz)
+		return NULL;
+
+	if (fout_hz > fout_max || fout_hz < fout_min)
+		return NULL;
+
+	if (fin_hz / MHZ * MHZ == fin_hz && fout_hz / MHZ * MHZ == fout_hz) {
+		for (s = 0; s <= 6; s++) {
+			fvco = fout_hz << s;
+			if (fvco < fvco_min || fvco > fvco_max)
+				continue;
+			for (p = 2; p <= 4; p++) {
+				for (m = 64; m <= 1023; m++) {
+					if (fvco == m * fin_hz / p) {
+						rate_table->p = p;
+						rate_table->m = m;
+						rate_table->s = s;
+						rate_table->k = 0;
+						return rate_table;
+					}
+				}
+			}
+		}
+		pr_err("CANNOT FIND Fout by auto,fout = %lu\n", fout_hz);
+	} else {
+		fout = (fout_hz / MHZ) * MHZ;
+		ffrac = (fout_hz % MHZ);
+		for (s = 0; s <= 6; s++) {
+			fvco = fout << s;
+			if (fvco < fvco_min || fvco > fvco_max)
+				continue;
+			for (p = 1; p <= 4; p++) {
+				for (m = 64; m <= 1023; m++) {
+					if (fvco == m * fin_hz / p) {
+						rate_table->p = p;
+						rate_table->m = m;
+						rate_table->s = s;
+						fref = fin_hz / p;
+						fout = (ffrac << s) * 65535;
+						rate_table->k = fout / fref;
+						return rate_table;
+					}
+				}
+			}
+		}
+		pr_err("CANNOT FIND Fout by auto,fout = %lu\n", fout_hz);
+	}
+	return NULL;
+}
+
 static const struct rockchip_pll_rate_table *rockchip_get_pll_settings(
 				    struct rockchip_clk_pll *pll, unsigned long rate)
 {
@@ -54,28 +394,29 @@ static const struct rockchip_pll_rate_table *rockchip_get_pll_settings(
 	int i;
 
 	for (i = 0; i < pll->rate_count; i++) {
-		if (rate == rate_table[i].rate)
+		if (rate == rate_table[i].rate) {
+			if (i < pll->sel) {
+				pll->scaling = rate;
+				return &rate_table[pll->sel];
+			}
+			pll->scaling = 0;
 			return &rate_table[i];
+		}
 	}
+	pll->scaling = 0;
 
-	return NULL;
+	if (pll->type == pll_rk3066)
+		return rockchip_rk3066_pll_clk_set_by_auto(pll, 24 * MHZ, rate);
+	else if (pll->type == pll_rk3588 || pll->type == pll_rk3588_core)
+		return rockchip_rk3588_pll_clk_set_by_auto(pll, 24 * MHZ, rate);
+	else
		return rockchip_pll_clk_set_by_auto(pll, 24 * MHZ, rate);
 }
 
 static long rockchip_pll_round_rate(struct clk_hw *hw,
 			    unsigned long drate, unsigned long *prate)
 {
-	struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);
-	const struct rockchip_pll_rate_table *rate_table = pll->rate_table;
-	int i;
-
-	/* Assumming rate_table is in descending order */
-	for (i = 0; i < pll->rate_count; i++) {
-		if (drate >= rate_table[i].rate)
-			return rate_table[i].rate;
-	}
-
-	/* return minimum supported value */
-	return rate_table[i - 1].rate;
+	return drate;
 }
 
 /*
@@ -136,6 +477,30 @@ static int rockchip_rk3036_pll_wait_lock(struct rockchip_clk_pll *pll)
 	return ret;
 }
 
+static unsigned long
+rockchip_rk3036_pll_con_to_rate(struct rockchip_clk_pll *pll,
+				u32 con0, u32 con1)
+{
+	unsigned int fbdiv, postdiv1, refdiv, postdiv2;
+	u64 rate64 = 24000000;
+
+	fbdiv = ((con0 >> RK3036_PLLCON0_FBDIV_SHIFT) &
+		  RK3036_PLLCON0_FBDIV_MASK);
+	postdiv1 = ((con0 >> RK3036_PLLCON0_POSTDIV1_SHIFT) &
+		     RK3036_PLLCON0_POSTDIV1_MASK);
+	refdiv = ((con1 >> RK3036_PLLCON1_REFDIV_SHIFT) &
+		   RK3036_PLLCON1_REFDIV_MASK);
+	postdiv2 = ((con1 >> RK3036_PLLCON1_POSTDIV2_SHIFT) &
+		     RK3036_PLLCON1_POSTDIV2_MASK);
+
+	rate64 *= fbdiv;
+	do_div(rate64, refdiv);
+	do_div(rate64, postdiv1);
+	do_div(rate64, postdiv2);
+
+	return (unsigned long)rate64;
+}
+
 static void rockchip_rk3036_pll_get_params(struct rockchip_clk_pll *pll,
 					struct rockchip_pll_rate_table *rate)
 {
@@ -165,7 +530,10 @@ static unsigned long rockchip_rk3036_pll_recalc_rate(struct clk_hw *hw,
 {
 	struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);
 	struct rockchip_pll_rate_table cur;
-	u64 rate64 = prate;
+	u64 rate64 = prate, frac_rate64 = prate;
+
+	if (pll->sel && pll->scaling)
+		return pll->scaling;
 
 	rockchip_rk3036_pll_get_params(pll, &cur);
 
@@ -174,7 +542,7 @@ static unsigned long rockchip_rk3036_pll_recalc_rate(struct clk_hw *hw,
 	if (cur.dsmpd == 0) {
 		/* fractional mode */
-		u64 frac_rate64 = prate * cur.frac;
+		frac_rate64 *= cur.frac;
 
 		do_div(frac_rate64, cur.refdiv);
 		rate64 += frac_rate64 >> 24;
@@ -231,6 +599,8 @@ static int rockchip_rk3036_pll_set_params(struct rockchip_clk_pll *pll,
 	pllcon |= rate->frac << RK3036_PLLCON2_FRAC_SHIFT;
 	writel_relaxed(pllcon, pll->reg_base + RK3036_PLLCON(2));
 
+	rockchip_boost_disable_low(pll);
+
 	/* wait for the pll to lock */
 	ret = rockchip_rk3036_pll_wait_lock(pll);
 	if (ret) {
@@ -412,6 +782,9 @@ static unsigned long rockchip_rk3066_pll_recalc_rate(struct clk_hw *hw,
 		return prate;
 	}
 
+	if (pll->sel && pll->scaling)
+		return pll->scaling;
+
 	rockchip_rk3066_pll_get_params(pll, &cur);
 
 	rate64 *= cur.nf;
@@ -485,9 +858,18 @@ static int rockchip_rk3066_pll_set_rate(struct clk_hw *hw, unsigned long drate,
 {
 	struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);
 	const struct rockchip_pll_rate_table *rate;
+	unsigned long old_rate = rockchip_rk3066_pll_recalc_rate(hw, prate);
+	struct regmap *grf = pll->ctx->grf;
+	int ret;
 
-	pr_debug("%s: changing %s to %lu with a parent rate of %lu\n",
-		 __func__, clk_hw_get_name(hw), drate, prate);
+	if (IS_ERR(grf)) {
+		pr_debug("%s: grf regmap not available, aborting rate change\n",
+			 __func__);
+		return PTR_ERR(grf);
+	}
+
+	pr_debug("%s: changing %s from %lu to %lu with a parent rate of %lu\n",
+		 __func__, clk_hw_get_name(hw), old_rate, drate, prate);
 
 	/* Get required rate settings from table */
 	rate = rockchip_get_pll_settings(pll, drate);
@@ -497,7 +879,11 @@ static int rockchip_rk3066_pll_set_rate(struct clk_hw *hw, unsigned long drate,
 		return -EINVAL;
 	}
 
-	return rockchip_rk3066_pll_set_params(pll, rate);
+	ret = rockchip_rk3066_pll_set_params(pll, rate);
+	if (ret)
+		pll->scaling = 0;
+
+	return ret;
 }
 
 static int rockchip_rk3066_pll_enable(struct clk_hw *hw)
@@ -649,6 +1035,9 @@ static unsigned long rockchip_rk3399_pll_recalc_rate(struct clk_hw *hw,
 	struct rockchip_pll_rate_table cur;
 	u64 rate64 = prate;
 
+	if (pll->sel && pll->scaling)
+		return pll->scaling;
+
 	rockchip_rk3399_pll_get_params(pll, &cur);
 
 	rate64 *= cur.fbdiv;
@@ -692,6 +1081,11 @@ static int rockchip_rk3399_pll_set_params(struct rockchip_clk_pll *pll,
 		rate_change_remuxed = 1;
 	}
 
+	/* set pll power down */
+	writel(HIWORD_UPDATE(RK3399_PLLCON3_PWRDOWN,
+			     RK3399_PLLCON3_PWRDOWN, 0),
+	       pll->reg_base + RK3399_PLLCON(3));
+
 	/* update pll values */
 	writel_relaxed(HIWORD_UPDATE(rate->fbdiv, RK3399_PLLCON0_FBDIV_MASK,
 						  RK3399_PLLCON0_FBDIV_SHIFT),
@@ -715,6 +1109,11 @@ static int rockchip_rk3399_pll_set_params(struct rockchip_clk_pll *pll,
 					    RK3399_PLLCON3_DSMPD_SHIFT),
 		       pll->reg_base + RK3399_PLLCON(3));
 
+	/* set pll power up */
+	writel(HIWORD_UPDATE(0,
+			     RK3399_PLLCON3_PWRDOWN, 0),
+	       pll->reg_base + RK3399_PLLCON(3));
+
 	/* wait for the pll to lock */
 	ret = rockchip_rk3399_pll_wait_lock(pll);
 	if (ret) {
@@ -734,9 +1133,11 @@ static int rockchip_rk3399_pll_set_rate(struct clk_hw *hw, unsigned long drate,
 {
 	struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);
 	const struct rockchip_pll_rate_table *rate;
+	unsigned long old_rate = rockchip_rk3399_pll_recalc_rate(hw, prate);
+	int ret;
 
-	pr_debug("%s: changing %s to %lu with a parent rate of %lu\n",
-		 __func__, __clk_get_name(hw->clk), drate, prate);
+	pr_debug("%s: changing %s from %lu to %lu with a parent rate of %lu\n",
+		 __func__, __clk_get_name(hw->clk), old_rate, drate, prate);
 
 	/* Get required rate settings from table */
 	rate = rockchip_get_pll_settings(pll, drate);
@@ -746,7 +1147,11 @@ static int rockchip_rk3399_pll_set_rate(struct clk_hw *hw, unsigned long drate,
 		return -EINVAL;
 	}
 
-	return rockchip_rk3399_pll_set_params(pll, rate);
+	ret = rockchip_rk3399_pll_set_params(pll, rate);
+	if (ret)
+		pll->scaling = 0;
+
+	return ret;
 }
 
 static int rockchip_rk3399_pll_enable(struct clk_hw *hw)
@@ -842,6 +1247,307 @@ static const struct clk_ops rockchip_rk3399_pll_clk_ops = {
 	.init = rockchip_rk3399_pll_init,
 };
 
+/**
+ * PLL used in RK3588
+ */
+
+#define RK3588_PLLCON(i)		(i * 0x4)
+#define RK3588_PLLCON0_M_MASK		0x3ff
+#define RK3588_PLLCON0_M_SHIFT		0
+#define RK3588_PLLCON1_P_MASK		0x3f
+#define RK3588_PLLCON1_P_SHIFT		0
+#define RK3588_PLLCON1_S_MASK		0x7
+#define RK3588_PLLCON1_S_SHIFT		6
+#define RK3588_PLLCON2_K_MASK		0xffff
+#define RK3588_PLLCON2_K_SHIFT		0
+#define RK3588_PLLCON1_PWRDOWN		BIT(13)
+#define RK3588_PLLCON6_LOCK_STATUS	BIT(15)
+
+static int rockchip_rk3588_pll_wait_lock(struct rockchip_clk_pll *pll)
+{
+	u32 pllcon;
+	int ret;
+
+	/*
+	 * Lock time typical 250, max 500 input clock cycles @24MHz
+	 * So define a very safe maximum of 1000us, meaning 24000 cycles.
+	 */
+	ret = readl_relaxed_poll_timeout(pll->reg_base + RK3588_PLLCON(6),
+					 pllcon,
+					 pllcon & RK3588_PLLCON6_LOCK_STATUS,
+					 0, 1000);
+	if (ret)
+		pr_err("%s: timeout waiting for pll to lock\n", __func__);
+
+	return ret;
+}
+
+static void rockchip_rk3588_pll_get_params(struct rockchip_clk_pll *pll,
+					   struct rockchip_pll_rate_table *rate)
+{
+	u32 pllcon;
+
+	pllcon = readl_relaxed(pll->reg_base + RK3588_PLLCON(0));
+	rate->m = ((pllcon >> RK3588_PLLCON0_M_SHIFT)
+				& RK3588_PLLCON0_M_MASK);
+
+	pllcon = readl_relaxed(pll->reg_base + RK3588_PLLCON(1));
+	rate->p = ((pllcon >> RK3588_PLLCON1_P_SHIFT)
+				& RK3588_PLLCON1_P_MASK);
+	rate->s = ((pllcon >> RK3588_PLLCON1_S_SHIFT)
+				& RK3588_PLLCON1_S_MASK);
+
+	pllcon = readl_relaxed(pll->reg_base + RK3588_PLLCON(2));
+	rate->k = ((pllcon >> RK3588_PLLCON2_K_SHIFT)
+				& RK3588_PLLCON2_K_MASK);
+}
+
+static unsigned long rockchip_rk3588_pll_recalc_rate(struct clk_hw *hw,
+						     unsigned long prate)
+{
+	struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);
+	struct rockchip_pll_rate_table cur;
+	u64 rate64 = prate, postdiv;
+
+	if (pll->sel && pll->scaling)
+		return pll->scaling;
+
+	rockchip_rk3588_pll_get_params(pll, &cur);
+
+	rate64 *= cur.m;
+	do_div(rate64, cur.p);
+
+	if (cur.k) {
+		/* fractional mode */
+		u64 frac_rate64 = prate * cur.k;
+
+		postdiv = cur.p * 65535;
+		do_div(frac_rate64, postdiv);
+		rate64 += frac_rate64;
+	}
+	rate64 = rate64 >> cur.s;
+
+	return (unsigned long)rate64;
+}
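[Editor's note, not part of the patch: worked numbers for the RK3588 recalc formula above, assuming a 24 MHz parent. With m = 250, p = 2, s = 1, k = 0: fvco = 24 MHz * 250 / 2 = 3000 MHz (inside the 2250-4500 MHz window used by the auto search), and rate = fvco >> 1 = 1500 MHz. A non-zero k adds prate * k / (p * 65535) before the shift. Sketch:]

	/* Illustrative only: mirrors rockchip_rk3588_pll_recalc_rate(). */
	static unsigned long demo_rk3588_rate(unsigned long prate, u32 m, u32 p,
					      u32 s, u32 k)
	{
		u64 rate64 = (u64)prate * m;
		u32 postdiv = p * 65535;

		do_div(rate64, p);
		if (k) {
			u64 frac = (u64)prate * k;

			do_div(frac, postdiv);
			rate64 += frac;
		}
		return (unsigned long)(rate64 >> s);	/* 24M,250,2,1,0 -> 1.5 GHz */
	}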
+static int rockchip_rk3588_pll_set_params(struct rockchip_clk_pll *pll,
+					  const struct rockchip_pll_rate_table *rate)
+{
+	const struct clk_ops *pll_mux_ops = pll->pll_mux_ops;
+	struct clk_mux *pll_mux = &pll->pll_mux;
+	struct rockchip_pll_rate_table cur;
+	int rate_change_remuxed = 0;
+	int cur_parent;
+	int ret;
+
+	pr_debug("%s: rate settings for %lu p: %d, m: %d, s: %d, k: %d\n",
+		 __func__, rate->rate, rate->p, rate->m, rate->s, rate->k);
+
+	rockchip_rk3588_pll_get_params(pll, &cur);
+	cur.rate = 0;
+
+	if (pll->type == pll_rk3588) {
+		cur_parent = pll_mux_ops->get_parent(&pll_mux->hw);
+		if (cur_parent == PLL_MODE_NORM) {
+			pll_mux_ops->set_parent(&pll_mux->hw, PLL_MODE_SLOW);
+			rate_change_remuxed = 1;
+		}
+	}
+
+	/* set pll power down */
+	writel(HIWORD_UPDATE(RK3588_PLLCON1_PWRDOWN,
+			     RK3588_PLLCON1_PWRDOWN, 0),
+	       pll->reg_base + RK3588_PLLCON(1));
+
+	/* update pll values */
+	writel_relaxed(HIWORD_UPDATE(rate->m, RK3588_PLLCON0_M_MASK,
+				     RK3588_PLLCON0_M_SHIFT),
+		       pll->reg_base + RK3588_PLLCON(0));
+
+	writel_relaxed(HIWORD_UPDATE(rate->p, RK3588_PLLCON1_P_MASK,
+				     RK3588_PLLCON1_P_SHIFT) |
+		       HIWORD_UPDATE(rate->s, RK3588_PLLCON1_S_MASK,
+				     RK3588_PLLCON1_S_SHIFT),
+		       pll->reg_base + RK3588_PLLCON(1));
+
+	writel_relaxed(HIWORD_UPDATE(rate->k, RK3588_PLLCON2_K_MASK,
+				     RK3588_PLLCON2_K_SHIFT),
+		       pll->reg_base + RK3588_PLLCON(2));
+
+	/* set pll power up */
+	writel(HIWORD_UPDATE(0,
+			     RK3588_PLLCON1_PWRDOWN, 0),
+	       pll->reg_base + RK3588_PLLCON(1));
+
+	/* wait for the pll to lock */
+	ret = rockchip_rk3588_pll_wait_lock(pll);
+	if (ret) {
+		pr_warn("%s: pll update unsuccessful, trying to restore old params\n",
+			__func__);
+		rockchip_rk3588_pll_set_params(pll, &cur);
+	}
+
+	if ((pll->type == pll_rk3588) && rate_change_remuxed)
+		pll_mux_ops->set_parent(&pll_mux->hw, PLL_MODE_NORM);
+
+	return ret;
+}
+
+static int rockchip_rk3588_pll_set_rate(struct clk_hw *hw, unsigned long drate,
+					unsigned long prate)
+{
+	struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);
+	const struct rockchip_pll_rate_table *rate;
+	unsigned long old_rate = rockchip_rk3588_pll_recalc_rate(hw, prate);
+	int ret;
+
+	pr_debug("%s: changing %s from %lu to %lu with a parent rate of %lu\n",
		 __func__, __clk_get_name(hw->clk), old_rate, drate, prate);
+
+	/* Get required rate settings from table */
+	rate = rockchip_get_pll_settings(pll, drate);
+	if (!rate) {
+		pr_err("%s: Invalid rate : %lu for pll clk %s\n", __func__,
+		       drate, __clk_get_name(hw->clk));
+		return -EINVAL;
+	}
+
+	ret = rockchip_rk3588_pll_set_params(pll, rate);
+	if (ret)
+		pll->scaling = 0;
+
+	return ret;
+}
+
+static int rockchip_rk3588_pll_enable(struct clk_hw *hw)
+{
+	struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);
+
+	writel(HIWORD_UPDATE(0, RK3588_PLLCON1_PWRDOWN, 0),
+	       pll->reg_base + RK3588_PLLCON(1));
+	rockchip_rk3588_pll_wait_lock(pll);
+
+	return 0;
+}
+
+static void rockchip_rk3588_pll_disable(struct clk_hw *hw)
+{
+	struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);
+
+	writel(HIWORD_UPDATE(RK3588_PLLCON1_PWRDOWN,
+			     RK3588_PLLCON1_PWRDOWN, 0),
+	       pll->reg_base + RK3588_PLLCON(1));
+}
+
+static int rockchip_rk3588_pll_is_enabled(struct clk_hw *hw)
+{
+	struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);
+	u32 pllcon = readl(pll->reg_base + RK3588_PLLCON(1));
+
+	return !(pllcon & RK3588_PLLCON1_PWRDOWN);
+}
+
+static int rockchip_rk3588_pll_init(struct clk_hw *hw)
+{
+	struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);
+
+	if (!(pll->flags & ROCKCHIP_PLL_SYNC_RATE))
+		return 0;
+
+	return 0;
+}
+
+static const struct clk_ops rockchip_rk3588_pll_clk_norate_ops = {
+	.recalc_rate = rockchip_rk3588_pll_recalc_rate,
+	.enable = rockchip_rk3588_pll_enable,
+	.disable = rockchip_rk3588_pll_disable,
+	.is_enabled = rockchip_rk3588_pll_is_enabled,
+};
+
+static const struct clk_ops rockchip_rk3588_pll_clk_ops = {
+	.recalc_rate = rockchip_rk3588_pll_recalc_rate,
+	.round_rate = rockchip_pll_round_rate,
+	.set_rate = rockchip_rk3588_pll_set_rate,
+	.enable = rockchip_rk3588_pll_enable,
+	.disable = rockchip_rk3588_pll_disable,
+	.is_enabled = rockchip_rk3588_pll_is_enabled,
+	.init = rockchip_rk3588_pll_init,
+};
+
+#ifdef CONFIG_ROCKCHIP_CLK_COMPENSATION
+int rockchip_pll_clk_compensation(struct clk *clk, int ppm)
+{
+	struct clk *parent = clk_get_parent(clk);
+	struct rockchip_clk_pll *pll;
+	static u32 frac, fbdiv;
+	bool negative;
+	u32 pllcon, pllcon0, pllcon2, fbdiv_mask, frac_mask, frac_shift;
+	u64 fracdiv, m, n;
+
+	if ((ppm > 1000) || (ppm < -1000))
+		return -EINVAL;
+
+	if (IS_ERR_OR_NULL(parent))
+		return -EINVAL;
+
+	pll = to_rockchip_clk_pll(__clk_get_hw(parent));
+	if (!pll)
+		return -EINVAL;
+
+	switch (pll->type) {
+	case pll_rk3036:
+	case pll_rk3328:
+		pllcon0 = RK3036_PLLCON(0);
+		pllcon2 = RK3036_PLLCON(2);
+		fbdiv_mask = RK3036_PLLCON0_FBDIV_MASK;
+		frac_mask = RK3036_PLLCON2_FRAC_MASK;
+		frac_shift = RK3036_PLLCON2_FRAC_SHIFT;
+		break;
+	case pll_rk3066:
+		return -EINVAL;
+	case pll_rk3399:
+		pllcon0 = RK3399_PLLCON(0);
+		pllcon2 = RK3399_PLLCON(2);
+		fbdiv_mask = RK3399_PLLCON0_FBDIV_MASK;
+		frac_mask = RK3399_PLLCON2_FRAC_MASK;
+		frac_shift = RK3399_PLLCON2_FRAC_SHIFT;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	negative = !!(ppm & BIT(31));
+	ppm = negative ? ~ppm + 1 : ppm;
+
+	if (!frac) {
+		frac = readl_relaxed(pll->reg_base + pllcon2) & frac_mask;
+		fbdiv = readl_relaxed(pll->reg_base + pllcon0) & fbdiv_mask;
+	}
+
+	/*
+	 *   delta frac              frac         ppm
+	 *   ----------  =  (fbdiv + --------)  * ---------
+	 *    1 << 24               1 << 24       1000000
+	 *
+	 */
+	m = div64_u64((uint64_t)frac * ppm, 1000000);
+	n = div64_u64((uint64_t)ppm << 24, 1000000) * fbdiv;
+
+	fracdiv = negative ? frac - (m + n) : frac + (m + n);
+
+	if (!frac || fracdiv > frac_mask)
+		return -EINVAL;
+
+	pllcon = readl_relaxed(pll->reg_base + pllcon2);
+	pllcon &= ~(frac_mask << frac_shift);
+	pllcon |= fracdiv << frac_shift;
+	writel_relaxed(pllcon, pll->reg_base + pllcon2);
+
+	return 0;
+}
+EXPORT_SYMBOL(rockchip_pll_clk_compensation);
+#endif
+
 /*
  * Common registering of pll clocks
  */
@@ -890,7 +1596,8 @@ struct clk *rockchip_clk_register_pll(struct rockchip_clk_provider *ctx,
 	if (pll_type == pll_rk3036 ||
 	    pll_type == pll_rk3066 ||
 	    pll_type == pll_rk3328 ||
-	    pll_type == pll_rk3399)
+	    pll_type == pll_rk3399 ||
+	    pll_type == pll_rk3588)
 		pll_mux->flags |= CLK_MUX_HIWORD_MASK;
 
 	/* the actual muxing is xin24m, pll-output, xin32k */
@@ -914,8 +1621,12 @@ struct clk *rockchip_clk_register_pll(struct rockchip_clk_provider *ctx,
 
 	/* now create the actual pll */
 	init.name = pll_name;
 
+#ifndef CONFIG_ROCKCHIP_LOW_PERFORMANCE
 	/* keep all plls untouched for now */
 	init.flags = flags | CLK_IGNORE_UNUSED;
+#else
+	init.flags = flags;
+#endif
 
 	init.parent_names = &parent_names[0];
 	init.num_parents = 1;
@@ -940,7 +1651,7 @@ struct clk *rockchip_clk_register_pll(struct rockchip_clk_provider *ctx,
 	switch (pll_type) {
 	case pll_rk3036:
 	case pll_rk3328:
-		if (!pll->rate_table)
+		if (!pll->rate_table || IS_ERR(ctx->grf))
 			init.ops = &rockchip_rk3036_pll_clk_norate_ops;
 		else
 			init.ops = &rockchip_rk3036_pll_clk_ops;
@@ -957,6 +1668,14 @@ struct clk *rockchip_clk_register_pll(struct rockchip_clk_provider *ctx,
 		else
 			init.ops = &rockchip_rk3399_pll_clk_ops;
 		break;
+	case pll_rk3588:
+	case pll_rk3588_core:
+		if (!pll->rate_table)
+			init.ops = &rockchip_rk3588_pll_clk_norate_ops;
+		else
+			init.ops = &rockchip_rk3588_pll_clk_ops;
+		init.flags = flags;
+		break;
 	default:
 		pr_warn("%s: Unknown pll type for pll clk %s\n", __func__,
 			name);
@@ -988,3 +1707,316 @@ struct clk *rockchip_clk_register_pll(struct rockchip_clk_provider *ctx,
 	kfree(pll);
 	return mux_clk;
 }
+
+static unsigned long rockchip_pll_con_to_rate(struct rockchip_clk_pll *pll,
+					      u32 con0, u32 con1)
+{
+	switch (pll->type) {
+	case pll_rk3036:
+	case pll_rk3328:
+		return rockchip_rk3036_pll_con_to_rate(pll, con0, con1);
+	case pll_rk3066:
+		break;
+	case pll_rk3399:
+		break;
+	default:
+		pr_warn("%s: Unknown pll type\n", __func__);
+	}
+
+	return 0;
+}
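[Editor's note, not part of the patch: worked numbers for the compensation formula in rockchip_pll_clk_compensation() above. With fbdiv = 50, frac = 0x800000 (an effective ratio of 50.5) and ppm = +100: m = frac*ppm/1e6 = 838, n = (ppm<<24)/1e6 * fbdiv = 83850, so fracdiv = frac + 84688 = 0x814ad0 — a ~100 ppm nudge of the feedback ratio. Sketch:]

	#include <linux/math64.h>

	/* Illustrative only: mirrors the delta-frac computation above. */
	static u64 demo_fracdiv(u32 fbdiv, u32 frac, int ppm)
	{
		u64 m = div64_u64((u64)frac * ppm, 1000000);
		u64 n = div64_u64((u64)ppm << 24, 1000000) * fbdiv;

		return frac + m + n;	/* 50, 0x800000, 100 -> 0x814ad0 */
	}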
+
+void rockchip_boost_init(struct clk_hw *hw)
+{
+	struct rockchip_clk_pll *pll;
+	struct device_node *np;
+	u32 value, con0, con1;
+
+	if (!hw)
+		return;
+	pll = to_rockchip_clk_pll(hw);
+	np = of_parse_phandle(pll->ctx->cru_node, "rockchip,boost", 0);
+	if (!np) {
+		pr_debug("%s: failed to get boost np\n", __func__);
+		return;
+	}
+	pll->boost = syscon_node_to_regmap(np);
+	if (IS_ERR(pll->boost)) {
+		pr_debug("%s: failed to get boost regmap\n", __func__);
+		return;
+	}
+
+	if (!of_property_read_u32(np, "rockchip,boost-low-con0", &con0) &&
+	    !of_property_read_u32(np, "rockchip,boost-low-con1", &con1)) {
+		pr_debug("boost-low-con=0x%x 0x%x\n", con0, con1);
+		regmap_write(pll->boost, BOOST_PLL_L_CON(0),
+			     HIWORD_UPDATE(con0, BOOST_PLL_CON_MASK, 0));
+		regmap_write(pll->boost, BOOST_PLL_L_CON(1),
+			     HIWORD_UPDATE(con1, BOOST_PLL_CON_MASK, 0));
+		pll->boost_low_rate = rockchip_pll_con_to_rate(pll, con0,
+							       con1);
+		pr_debug("boost-low-rate=%lu\n", pll->boost_low_rate);
+	}
+	if (!of_property_read_u32(np, "rockchip,boost-high-con0", &con0) &&
+	    !of_property_read_u32(np, "rockchip,boost-high-con1", &con1)) {
+		pr_debug("boost-high-con=0x%x 0x%x\n", con0, con1);
+		regmap_write(pll->boost, BOOST_PLL_H_CON(0),
+			     HIWORD_UPDATE(con0, BOOST_PLL_CON_MASK, 0));
+		regmap_write(pll->boost, BOOST_PLL_H_CON(1),
+			     HIWORD_UPDATE(con1, BOOST_PLL_CON_MASK, 0));
+		pll->boost_high_rate = rockchip_pll_con_to_rate(pll, con0,
+								con1);
+		pr_debug("boost-high-rate=%lu\n", pll->boost_high_rate);
+	}
+	if (!of_property_read_u32(np, "rockchip,boost-backup-pll", &value)) {
+		pr_debug("boost-backup-pll=0x%x\n", value);
+		regmap_write(pll->boost, BOOST_CLK_CON,
+			     HIWORD_UPDATE(value, BOOST_BACKUP_PLL_MASK,
+					   BOOST_BACKUP_PLL_SHIFT));
+	}
+	if (!of_property_read_u32(np, "rockchip,boost-backup-pll-usage",
+				  &pll->boost_backup_pll_usage)) {
+		pr_debug("boost-backup-pll-usage=0x%x\n",
+			 pll->boost_backup_pll_usage);
+		regmap_write(pll->boost, BOOST_CLK_CON,
+			     HIWORD_UPDATE(pll->boost_backup_pll_usage,
+					   BOOST_BACKUP_PLL_USAGE_MASK,
+					   BOOST_BACKUP_PLL_USAGE_SHIFT));
+	}
+	if (!of_property_read_u32(np, "rockchip,boost-switch-threshold",
+				  &value)) {
+		pr_debug("boost-switch-threshold=0x%x\n", value);
+		regmap_write(pll->boost, BOOST_SWITCH_THRESHOLD, value);
+	}
+	if (!of_property_read_u32(np, "rockchip,boost-statis-threshold",
+				  &value)) {
+		pr_debug("boost-statis-threshold=0x%x\n", value);
+		regmap_write(pll->boost, BOOST_STATIS_THRESHOLD, value);
+	}
+	if (!of_property_read_u32(np, "rockchip,boost-statis-enable",
+				  &value)) {
+		pr_debug("boost-statis-enable=0x%x\n", value);
+		regmap_write(pll->boost, BOOST_BOOST_CON,
+			     HIWORD_UPDATE(value, BOOST_STATIS_ENABLE_MASK,
+					   BOOST_STATIS_ENABLE_SHIFT));
+	}
+	if (!of_property_read_u32(np, "rockchip,boost-enable", &value)) {
+		pr_debug("boost-enable=0x%x\n", value);
+		regmap_write(pll->boost, BOOST_BOOST_CON,
+			     HIWORD_UPDATE(value, BOOST_ENABLE_MASK,
+					   BOOST_ENABLE_SHIFT));
+		if (value)
+			pll->boost_enabled = true;
+	}
+#ifdef CONFIG_DEBUG_FS
+	if (pll->boost_enabled) {
+		mutex_lock(&clk_boost_lock);
+		hlist_add_head(&pll->debug_node, &clk_boost_list);
+		mutex_unlock(&clk_boost_lock);
+	}
+#endif
+}
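[Editor's note, not part of the patch: the regmap writes above rely on Rockchip's write-mask register layout, where the upper 16 bits of a 32-bit register select which of the lower 16 bits a write may touch. HIWORD_UPDATE comes from the driver's clk.h; it is typically defined as shown — a hedged reproduction:]

	/* Assumed definition, mirroring drivers/clk/rockchip/clk.h: */
	#define DEMO_HIWORD_UPDATE(val, mask, shift) \
			((val) << (shift) | (mask) << ((shift) + 16))

	/* e.g. DEMO_HIWORD_UPDATE(1, 0x1, 4) == 0x00100010: bit 20 unlocks
	 * bit 4, so other fields in the register stay untouched without a
	 * read-modify-write cycle. */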
+
+void rockchip_boost_enable_recovery_sw_low(struct clk_hw *hw)
+{
+	struct rockchip_clk_pll *pll;
+	unsigned int val;
+
+	if (!hw)
+		return;
+	pll = to_rockchip_clk_pll(hw);
+	if (!pll->boost_enabled)
+		return;
+
+	regmap_write(pll->boost, BOOST_BOOST_CON,
+		     HIWORD_UPDATE(1, BOOST_RECOVERY_MASK,
+				   BOOST_RECOVERY_SHIFT));
+	do {
+		regmap_read(pll->boost, BOOST_FSM_STATUS, &val);
+	} while (!(val & BOOST_BUSY_STATE));
+
+	regmap_write(pll->boost, BOOST_BOOST_CON,
+		     HIWORD_UPDATE(1, BOOST_SW_CTRL_MASK,
+				   BOOST_SW_CTRL_SHIFT) |
+		     HIWORD_UPDATE(1, BOOST_LOW_FREQ_EN_MASK,
+				   BOOST_LOW_FREQ_EN_SHIFT));
+}
+
+static void rockchip_boost_disable_low(struct rockchip_clk_pll *pll)
+{
+	if (!pll->boost_enabled)
+		return;
+
+	regmap_write(pll->boost, BOOST_BOOST_CON,
+		     HIWORD_UPDATE(0, BOOST_LOW_FREQ_EN_MASK,
+				   BOOST_LOW_FREQ_EN_SHIFT));
+}
+
+void rockchip_boost_disable_recovery_sw(struct clk_hw *hw)
+{
+	struct rockchip_clk_pll *pll;
+
+	if (!hw)
+		return;
+	pll = to_rockchip_clk_pll(hw);
+	if (!pll->boost_enabled)
+		return;
+
+	regmap_write(pll->boost, BOOST_BOOST_CON,
+		     HIWORD_UPDATE(0, BOOST_RECOVERY_MASK,
+				   BOOST_RECOVERY_SHIFT));
+	regmap_write(pll->boost, BOOST_BOOST_CON,
+		     HIWORD_UPDATE(0, BOOST_SW_CTRL_MASK,
+				   BOOST_SW_CTRL_SHIFT));
+}
+
+void rockchip_boost_add_core_div(struct clk_hw *hw, unsigned long prate)
+{
+	struct rockchip_clk_pll *pll;
+	unsigned int div;
+
+	if (!hw)
+		return;
+	pll = to_rockchip_clk_pll(hw);
+	if (!pll->boost_enabled || pll->boost_backup_pll_rate == prate)
+		return;
+
+	/* todo */
+	if (pll->boost_backup_pll_usage == BOOST_BACKUP_PLL_USAGE_TARGET)
+		return;
+	/*
+	 * cpu clock rate should be less than or equal to
+	 * low rate when change pll rate in boost module
+	 */
+	if (pll->boost_low_rate && prate > pll->boost_low_rate) {
+		div = DIV_ROUND_UP(prate, pll->boost_low_rate) - 1;
+		regmap_write(pll->boost, BOOST_CLK_CON,
+			     HIWORD_UPDATE(div, BOOST_CORE_DIV_MASK,
+					   BOOST_CORE_DIV_SHIFT));
+		pll->boost_backup_pll_rate = prate;
+	}
+}
+
+#ifdef CONFIG_DEBUG_FS
+#include
+
+#ifndef MODULE
+static int boost_summary_show(struct seq_file *s, void *data)
+{
+	struct rockchip_clk_pll *pll = (struct rockchip_clk_pll *)s->private;
+	u32 boost_count = 0;
+	u32 freq_cnt0 = 0, freq_cnt1 = 0;
+	u64 freq_cnt = 0, high_freq_time = 0;
+	u32 short_count = 0, short_threshold = 0;
+	u32 interval_time = 0;
+
+	seq_puts(s, " device    boost_count  high_freq_count  high_freq_time  short_count  short_threshold  interval_count\n");
+	seq_puts(s, "------------------------------------------------------------------------------------------------------\n");
+	seq_printf(s, " %s\n", clk_hw_get_name(&pll->hw));
+
+	regmap_read(pll->boost, BOOST_SWITCH_CNT, &boost_count);
+
+	regmap_read(pll->boost, BOOST_HIGH_PERF_CNT0, &freq_cnt0);
+	regmap_read(pll->boost, BOOST_HIGH_PERF_CNT1, &freq_cnt1);
+	freq_cnt = ((u64)freq_cnt1 << 32) + (u64)freq_cnt0;
+	high_freq_time = freq_cnt;
+	do_div(high_freq_time, 24);
+
+	regmap_read(pll->boost, BOOST_SHORT_SWITCH_CNT, &short_count);
+	regmap_read(pll->boost, BOOST_STATIS_THRESHOLD, &short_threshold);
+	regmap_read(pll->boost, BOOST_SWITCH_THRESHOLD, &interval_time);
+
+	seq_printf(s, "%22u %17llu %15llu %12u %16u %15u\n",
+		   boost_count, freq_cnt, high_freq_time, short_count,
+		   short_threshold, interval_time);
+
+	return 0;
+}
+
+static int boost_summary_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, boost_summary_show, inode->i_private);
+}
+
+static const struct file_operations boost_summary_fops = {
+	.open		= boost_summary_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static int boost_config_show(struct seq_file *s, void *data)
+{
+	struct rockchip_clk_pll *pll = (struct rockchip_clk_pll *)s->private;
+
+	seq_printf(s, "boost_enabled:   %d\n", pll->boost_enabled);
+	seq_printf(s, "boost_low_rate:  %lu\n", pll->boost_low_rate);
+	seq_printf(s, "boost_high_rate: %lu\n", pll->boost_high_rate);
+
+	return 0;
+}
+
+static int boost_config_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, boost_config_show, inode->i_private);
+}
+
+static const struct file_operations boost_config_fops = {
+	.open		= boost_config_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static int boost_debug_create_one(struct rockchip_clk_pll *pll,
+				  struct dentry *rootdir)
+{
+	struct dentry *pdentry, *d;
+
+	pdentry = debugfs_lookup(clk_hw_get_name(&pll->hw), rootdir);
+	if (!pdentry) {
+		pr_err("%s: failed to lookup %s dentry\n", __func__,
+		       clk_hw_get_name(&pll->hw));
+		return -ENOMEM;
+	}
+
+	d = debugfs_create_file("boost_summary", 0444, pdentry,
+				pll, &boost_summary_fops);
+	if (!d) {
+		pr_err("%s: failed to create boost_summary file\n", __func__);
+		return -ENOMEM;
+	}
+
+	d = debugfs_create_file("boost_config", 0444, pdentry,
+				pll, &boost_config_fops);
+	if (!d) {
+		pr_err("%s: failed to create boost config file\n", __func__);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static int __init boost_debug_init(void)
+{
+	struct rockchip_clk_pll *pll;
+	struct dentry *rootdir;
+
+	rootdir = debugfs_lookup("clk", NULL);
+	if (!rootdir) {
+		pr_err("%s: failed to lookup clk dentry\n", __func__);
+		return -ENOMEM;
+	}
+
+	mutex_lock(&clk_boost_lock);
+
+	hlist_for_each_entry(pll, &clk_boost_list, debug_node)
+		boost_debug_create_one(pll, rootdir);
+
+	mutex_unlock(&clk_boost_lock);
+
+	return 0;
+}
+late_initcall(boost_debug_init);
+#endif /* MODULE */
+#endif /* CONFIG_DEBUG_FS */
diff --git a/drivers/clk/rockchip/clk.c b/drivers/clk/rockchip/clk.c
index b443169dd..6c8e47067 100644
--- a/drivers/clk/rockchip/clk.c
+++ b/drivers/clk/rockchip/clk.c
@@ -38,6 +38,7 @@ static struct clk *rockchip_clk_register_branch(const char *name,
 		const char *const *parent_names, u8 num_parents,
 		void __iomem *base,
 		int muxdiv_offset, u8 mux_shift, u8 mux_width, u8 mux_flags,
+		u32 *mux_table,
 		int div_offset, u8 div_shift, u8 div_width, u8 div_flags,
 		struct clk_div_table *div_table, int gate_offset,
 		u8 gate_shift, u8 gate_flags, unsigned long flags,
@@ -60,6 +61,7 @@ static struct clk *rockchip_clk_register_branch(const char *name,
 		mux->shift = mux_shift;
 		mux->mask = BIT(mux_width) - 1;
 		mux->flags = mux_flags;
+		mux->table = mux_table;
 		mux->lock = lock;
 		mux_ops = (mux_flags & CLK_MUX_READ_ONLY) ? &clk_mux_ro_ops
 							  : &clk_mux_ops;
@@ -182,12 +184,43 @@ static void rockchip_fractional_approximation(struct clk_hw *hw,
 	unsigned long p_rate, p_parent_rate;
 	struct clk_hw *p_parent;
 	unsigned long scale;
+	u32 div;
 
 	p_rate = clk_hw_get_rate(clk_hw_get_parent(hw));
-	if ((rate * 20 > p_rate) && (p_rate % rate != 0)) {
+	if (((rate * 20 > p_rate) && (p_rate % rate != 0)) ||
+	    (fd->max_prate && fd->max_prate < p_rate)) {
 		p_parent = clk_hw_get_parent(clk_hw_get_parent(hw));
-		p_parent_rate = clk_hw_get_rate(p_parent);
-		*parent_rate = p_parent_rate;
+		if (!p_parent) {
+			*parent_rate = p_rate;
+		} else {
+			p_parent_rate = clk_hw_get_rate(p_parent);
+			*parent_rate = p_parent_rate;
+			if (fd->max_prate && p_parent_rate > fd->max_prate) {
+				div = DIV_ROUND_UP(p_parent_rate,
+						   fd->max_prate);
+				*parent_rate = p_parent_rate / div;
+			}
+		}
+			 */
+			if (!(*parent_rate % rate)) {
+				*m = 1;
+				*n = *parent_rate / rate;
+				return;
+			} else if (!(fd->flags & CLK_FRAC_DIVIDER_NO_LIMIT)) {
+				pr_warn("%s: p_rate(%lu) is lower than rate(%lu)*20, use integer or half-div\n",
+					clk_hw_get_name(hw),
+					*parent_rate, rate);
+				*m = 0;
+				*n = 1;
+				return;
+			}
+		}
 	}
 
 	/*
@@ -210,7 +243,7 @@ static struct clk *rockchip_clk_register_frac_branch(
 		void __iomem *base, int muxdiv_offset, u8 div_flags,
 		int gate_offset, u8 gate_shift, u8 gate_flags,
 		unsigned long flags, struct rockchip_clk_branch *child,
-		spinlock_t *lock)
+		unsigned long max_prate, spinlock_t *lock)
 {
 	struct clk_hw *hw;
 	struct rockchip_clk_frac *frac;
@@ -251,6 +284,7 @@ static struct clk *rockchip_clk_register_frac_branch(
 	div->nmask = GENMASK(div->nwidth - 1, 0) << div->nshift;
 	div->lock = lock;
 	div->approximation = rockchip_fractional_approximation;
+	div->max_prate = max_prate;
 	div_ops = &clk_fractional_divider_ops;
 
 	hw = clk_hw_register_composite(NULL, name, parent_names, num_parents,
@@ -278,6 +312,8 @@ static struct clk *rockchip_clk_register_frac_branch(
 		frac_mux->shift = child->mux_shift;
 		frac_mux->mask = BIT(child->mux_width) - 1;
 		frac_mux->flags = child->mux_flags;
+		if (child->mux_table)
+			frac_mux->table = child->mux_table;
 		frac_mux->lock = lock;
 		frac_mux->hw.init = &init;
 
@@ -360,6 +396,61 @@ static struct clk *rockchip_clk_register_factor_branch(const char *name,
 	return hw->clk;
 }
 
+static struct clk *rockchip_clk_register_composite_brother_branch(
+		struct rockchip_clk_provider *ctx, const char *name,
+		const char *const *parent_names, u8 num_parents,
+		void __iomem *base, int muxdiv_offset, u8 mux_shift,
+		u8 mux_width, u8 mux_flags, u32 *mux_table,
+		int div_offset, u8 div_shift, u8 div_width, u8 div_flags,
+		struct clk_div_table *div_table, int gate_offset,
+		u8 gate_shift, u8 gate_flags, unsigned long flags,
+		struct rockchip_clk_branch *brother, spinlock_t *lock)
+{
+	struct clk *clk, *brother_clk;
+	struct clk_composite *composite, *brother_composite;
+	struct clk_hw *hw, *brother_hw;
+
+	if (!brother || brother->branch_type != branch_half_divider) {
+		pr_err("%s: composite brother for %s can only be a halfdiv\n",
+		       __func__, name);
+		return ERR_PTR(-EINVAL);
+	}
+
+	clk = rockchip_clk_register_branch(name, parent_names, num_parents,
+					   base, muxdiv_offset, mux_shift,
+					   mux_width, mux_flags, mux_table,
+					   div_offset, div_shift, div_width,
+					   div_flags, div_table,
+					   gate_offset, gate_shift, gate_flags,
+					   flags, lock);
+	if (IS_ERR(clk))
+		return clk;
+
+	brother_clk = rockchip_clk_register_halfdiv(brother->name,
+				brother->parent_names, brother->num_parents,
+				base, brother->muxdiv_offset,
+				brother->mux_shift, brother->mux_width,
+				brother->mux_flags, brother->div_offset,
+				brother->div_shift, brother->div_width,
+				brother->div_flags, brother->gate_offset,
+				brother->gate_shift, brother->gate_flags,
+				flags, lock);
+	if (IS_ERR(brother_clk))
+		return brother_clk;
+	rockchip_clk_add_lookup(ctx, brother_clk, brother->id);
+
+	hw = __clk_get_hw(clk);
+	brother_hw = __clk_get_hw(brother_clk);
+	if (hw && brother_hw) {
+		composite = to_clk_composite(hw);
+		brother_composite = to_clk_composite(brother_hw);
+		composite->brother_hw = brother_hw;
+		brother_composite->brother_hw = hw;
+	}
+
+	return clk;
+}
+
 struct rockchip_clk_provider *rockchip_clk_init(struct device_node *np,
 						void __iomem *base,
 						unsigned long nr_clks)
@@ -387,6 +478,8 @@ struct rockchip_clk_provider *rockchip_clk_init(struct device_node *np,
 	ctx->grf = syscon_regmap_lookup_by_phandle(ctx->cru_node,
"rockchip,grf"); + ctx->pmugrf = syscon_regmap_lookup_by_phandle(ctx->cru_node, + "rockchip,pmugrf"); return ctx; @@ -452,11 +545,22 @@ void rockchip_clk_register_branches(struct rockchip_clk_provider *ctx, /* catch simple muxes */ switch (list->branch_type) { case branch_mux: - clk = clk_register_mux(NULL, list->name, - list->parent_names, list->num_parents, - flags, ctx->reg_base + list->muxdiv_offset, - list->mux_shift, list->mux_width, - list->mux_flags, &ctx->lock); + if (list->mux_table) + clk = clk_register_mux_table(NULL, list->name, + list->parent_names, list->num_parents, + flags, + ctx->reg_base + list->muxdiv_offset, + list->mux_shift, + BIT(list->mux_width) - 1, + list->mux_flags, list->mux_table, + &ctx->lock); + else + clk = clk_register_mux(NULL, list->name, + list->parent_names, list->num_parents, + flags, + ctx->reg_base + list->muxdiv_offset, + list->mux_shift, list->mux_width, + list->mux_flags, &ctx->lock); break; case branch_muxgrf: clk = rockchip_clk_register_muxgrf(list->name, @@ -465,6 +569,13 @@ void rockchip_clk_register_branches(struct rockchip_clk_provider *ctx, list->mux_shift, list->mux_width, list->mux_flags); break; + case branch_muxpmugrf: + clk = rockchip_clk_register_muxgrf(list->name, + list->parent_names, list->num_parents, + flags, ctx->pmugrf, list->muxdiv_offset, + list->mux_shift, list->mux_width, + list->mux_flags); + break; case branch_divider: if (list->div_table) clk = clk_register_divider_table(NULL, @@ -488,17 +599,18 @@ void rockchip_clk_register_branches(struct rockchip_clk_provider *ctx, list->div_flags, list->gate_offset, list->gate_shift, list->gate_flags, flags, list->child, - &ctx->lock); + list->max_prate, &ctx->lock); break; case branch_half_divider: clk = rockchip_clk_register_halfdiv(list->name, list->parent_names, list->num_parents, ctx->reg_base, list->muxdiv_offset, list->mux_shift, list->mux_width, - list->mux_flags, list->div_shift, - list->div_width, list->div_flags, - list->gate_offset, list->gate_shift, - list->gate_flags, flags, &ctx->lock); + list->mux_flags, list->div_offset, + list->div_shift, list->div_width, + list->div_flags, list->gate_offset, + list->gate_shift, list->gate_flags, + flags, &ctx->lock); break; case branch_gate: flags |= CLK_SET_RATE_PARENT; @@ -514,11 +626,25 @@ void rockchip_clk_register_branches(struct rockchip_clk_provider *ctx, ctx->reg_base, list->muxdiv_offset, list->mux_shift, list->mux_width, list->mux_flags, - list->div_offset, list->div_shift, list->div_width, + list->mux_table, list->div_offset, + list->div_shift, list->div_width, list->div_flags, list->div_table, list->gate_offset, list->gate_shift, list->gate_flags, flags, &ctx->lock); break; + case branch_composite_brother: + clk = rockchip_clk_register_composite_brother_branch( + ctx, list->name, list->parent_names, + list->num_parents, ctx->reg_base, + list->muxdiv_offset, list->mux_shift, + list->mux_width, list->mux_flags, + list->mux_table, list->div_offset, + list->div_shift, list->div_width, + list->div_flags, list->div_table, + list->gate_offset, list->gate_shift, + list->gate_flags, flags, list->child, + &ctx->lock); + break; case branch_mmc: clk = rockchip_clk_register_mmc( list->name, @@ -549,7 +675,17 @@ void rockchip_clk_register_branches(struct rockchip_clk_provider *ctx, list->muxdiv_offset, list->mux_shift, list->mux_width, list->div_shift, list->div_width, list->div_flags, - ctx->reg_base, &ctx->lock); + ctx->reg_base); + break; + case branch_dclk_divider: + clk = rockchip_clk_register_dclk_branch(list->name, + 
list->parent_names, list->num_parents, + ctx->reg_base, list->muxdiv_offset, list->mux_shift, + list->mux_width, list->mux_flags, + list->div_offset, list->div_shift, list->div_width, + list->div_flags, list->div_table, + list->gate_offset, list->gate_shift, + list->gate_flags, flags, list->max_prate, &ctx->lock); break; } @@ -573,15 +709,17 @@ EXPORT_SYMBOL_GPL(rockchip_clk_register_branches); void rockchip_clk_register_armclk(struct rockchip_clk_provider *ctx, unsigned int lookup_id, - const char *name, const char *const *parent_names, + const char *name, u8 num_parents, + struct clk *parent, struct clk *alt_parent, const struct rockchip_cpuclk_reg_data *reg_data, const struct rockchip_cpuclk_rate_table *rates, int nrates) { struct clk *clk; - clk = rockchip_clk_register_cpuclk(name, parent_names, num_parents, + clk = rockchip_clk_register_cpuclk(name, num_parents, + parent, alt_parent, reg_data, rates, nrates, ctx->reg_base, &ctx->lock); if (IS_ERR(clk)) { @@ -594,20 +732,20 @@ void rockchip_clk_register_armclk(struct rockchip_clk_provider *ctx, } EXPORT_SYMBOL_GPL(rockchip_clk_register_armclk); -void rockchip_clk_protect_critical(const char *const clocks[], - int nclocks) -{ - int i; - - /* Protect the clocks that needs to stay on */ - for (i = 0; i < nclocks; i++) { - struct clk *clk = __clk_lookup(clocks[i]); +void (*rk_dump_cru)(void); +EXPORT_SYMBOL(rk_dump_cru); - if (clk) - clk_prepare_enable(clk); - } +static int rk_clk_panic(struct notifier_block *this, + unsigned long ev, void *ptr) +{ + if (rk_dump_cru) + rk_dump_cru(); + return NOTIFY_DONE; } -EXPORT_SYMBOL_GPL(rockchip_clk_protect_critical); + +static struct notifier_block rk_clk_panic_block = { + .notifier_call = rk_clk_panic, +}; static void __iomem *rst_base; static unsigned int reg_restart; @@ -641,5 +779,7 @@ rockchip_register_restart_notifier(struct rockchip_clk_provider *ctx, if (ret) pr_err("%s: cannot register restart handler, %d\n", __func__, ret); + atomic_notifier_chain_register(&panic_notifier_list, + &rk_clk_panic_block); } EXPORT_SYMBOL_GPL(rockchip_register_restart_notifier); diff --git a/drivers/clk/rockchip/clk.h b/drivers/clk/rockchip/clk.h index 2271a8412..26cf2553c 100644 --- a/drivers/clk/rockchip/clk.h +++ b/drivers/clk/rockchip/clk.h @@ -37,12 +37,25 @@ struct clk; #define BOOST_SWITCH_THRESHOLD 0x0024 #define BOOST_FSM_STATUS 0x0028 #define BOOST_PLL_L_CON(x) ((x) * 0x4 + 0x2c) +#define BOOST_PLL_CON_MASK 0xffff +#define BOOST_CORE_DIV_MASK 0x1f +#define BOOST_CORE_DIV_SHIFT 0 +#define BOOST_BACKUP_PLL_MASK 0x3 +#define BOOST_BACKUP_PLL_SHIFT 8 +#define BOOST_BACKUP_PLL_USAGE_MASK 0x1 +#define BOOST_BACKUP_PLL_USAGE_SHIFT 12 +#define BOOST_BACKUP_PLL_USAGE_BORROW 0 +#define BOOST_BACKUP_PLL_USAGE_TARGET 1 +#define BOOST_ENABLE_MASK 0x1 +#define BOOST_ENABLE_SHIFT 0 #define BOOST_RECOVERY_MASK 0x1 #define BOOST_RECOVERY_SHIFT 1 #define BOOST_SW_CTRL_MASK 0x1 #define BOOST_SW_CTRL_SHIFT 2 #define BOOST_LOW_FREQ_EN_MASK 0x1 #define BOOST_LOW_FREQ_EN_SHIFT 3 +#define BOOST_STATIS_ENABLE_MASK 0x1 +#define BOOST_STATIS_ENABLE_SHIFT 4 #define BOOST_BUSY_STATE BIT(8) #define PX30_PLL_CON(x) ((x) * 0x4) @@ -79,6 +92,51 @@ struct clk; #define RV1108_EMMC_CON0 0x1e8 #define RV1108_EMMC_CON1 0x1ec +#define RV1126_PMU_MODE 0x0 +#define RV1126_PMU_PLL_CON(x) ((x) * 0x4 + 0x10) +#define RV1126_PMU_CLKSEL_CON(x) ((x) * 0x4 + 0x100) +#define RV1126_PMU_CLKGATE_CON(x) ((x) * 0x4 + 0x180) +#define RV1126_PMU_SOFTRST_CON(x) ((x) * 0x4 + 0x200) +#define RV1126_PLL_CON(x) ((x) * 0x4) +#define RV1126_MODE_CON 0x90 
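+/* RV1126 branch controls sit above the PLL/mode block: sel +0x100, gate +0x280, rst +0x300 */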
+#define RV1126_CLKSEL_CON(x) ((x) * 0x4 + 0x100)
+#define RV1126_CLKGATE_CON(x) ((x) * 0x4 + 0x280)
+#define RV1126_SOFTRST_CON(x) ((x) * 0x4 + 0x300)
+#define RV1126_GLB_SRST_FST 0x408
+#define RV1126_GLB_SRST_SND 0x40c
+#define RV1126_SDMMC_CON0 0x440
+#define RV1126_SDMMC_CON1 0x444
+#define RV1126_SDIO_CON0 0x448
+#define RV1126_SDIO_CON1 0x44c
+#define RV1126_EMMC_CON0 0x450
+#define RV1126_EMMC_CON1 0x454
+
+/*
+ * register positions shared by RK1808, RK2928, RK3036,
+ * RK3066, RK3188 and RK3228
+ */
+
+#define RK1808_PLL_CON(x) ((x) * 0x4)
+#define RK1808_MODE_CON 0xa0
+#define RK1808_MISC_CON 0xa4
+#define RK1808_MISC1_CON 0xa8
+#define RK1808_GLB_SRST_FST 0xb8
+#define RK1808_GLB_SRST_SND 0xbc
+#define RK1808_CLKSEL_CON(x) ((x) * 0x4 + 0x100)
+#define RK1808_CLKGATE_CON(x) ((x) * 0x4 + 0x230)
+#define RK1808_SOFTRST_CON(x) ((x) * 0x4 + 0x300)
+#define RK1808_SDMMC_CON0 0x380
+#define RK1808_SDMMC_CON1 0x384
+#define RK1808_SDIO_CON0 0x388
+#define RK1808_SDIO_CON1 0x38c
+#define RK1808_EMMC_CON0 0x390
+#define RK1808_EMMC_CON1 0x394
+
+#define RK1808_PMU_PLL_CON(x) ((x) * 0x4 + 0x4000)
+#define RK1808_PMU_MODE_CON 0x4020
+#define RK1808_PMU_CLKSEL_CON(x) ((x) * 0x4 + 0x4040)
+#define RK1808_PMU_CLKGATE_CON(x) ((x) * 0x4 + 0x4080)
+
 #define RK2928_PLL_CON(x) ((x) * 0x4)
 #define RK2928_MODE_CON 0x40
 #define RK2928_CLKSEL_CON(x) ((x) * 0x4 + 0x44)
@@ -188,11 +246,83 @@ struct clk;
 #define RK3399_PMU_CLKGATE_CON(x) ((x) * 0x4 + 0x100)
 #define RK3399_PMU_SOFTRST_CON(x) ((x) * 0x4 + 0x110)
 
+#define RK3568_PLL_CON(x) RK2928_PLL_CON(x)
+#define RK3568_MODE_CON0 0xc0
+#define RK3568_MISC_CON0 0xc4
+#define RK3568_MISC_CON1 0xc8
+#define RK3568_MISC_CON2 0xcc
+#define RK3568_GLB_CNT_TH 0xd0
+#define RK3568_GLB_SRST_FST 0xd4
+#define RK3568_GLB_SRST_SND 0xd8
+#define RK3568_GLB_RST_CON 0xdc
+#define RK3568_GLB_RST_ST 0xe0
+#define RK3568_CLKSEL_CON(x) ((x) * 0x4 + 0x100)
+#define RK3568_CLKGATE_CON(x) ((x) * 0x4 + 0x300)
+#define RK3568_SOFTRST_CON(x) ((x) * 0x4 + 0x400)
+#define RK3568_SDMMC0_CON0 0x580
+#define RK3568_SDMMC0_CON1 0x584
+#define RK3568_SDMMC1_CON0 0x588
+#define RK3568_SDMMC1_CON1 0x58c
+#define RK3568_SDMMC2_CON0 0x590
+#define RK3568_SDMMC2_CON1 0x594
+#define RK3568_EMMC_CON0 0x598
+#define RK3568_EMMC_CON1 0x59c
+
+#define RK3568_PMU_PLL_CON(x) RK2928_PLL_CON(x)
+#define RK3568_PMU_MODE_CON0 0x80
+#define RK3568_PMU_CLKSEL_CON(x) ((x) * 0x4 + 0x100)
+#define RK3568_PMU_CLKGATE_CON(x) ((x) * 0x4 + 0x180)
+#define RK3568_PMU_SOFTRST_CON(x) ((x) * 0x4 + 0x200)
+
+#define RK3588_PHP_CRU_BASE 0x8000
+#define RK3588_PMU_CRU_BASE 0x30000
+#define RK3588_BIGCORE0_CRU_BASE 0x50000
+#define RK3588_BIGCORE1_CRU_BASE 0x52000
+#define RK3588_DSU_CRU_BASE 0x58000
+
+#define RK3588_PLL_CON(x) RK2928_PLL_CON(x)
+#define RK3588_MODE_CON0 0x280
+#define RK3588_CLKSEL_CON(x) ((x) * 0x4 + 0x300)
+#define RK3588_CLKGATE_CON(x) ((x) * 0x4 + 0x800)
+#define RK3588_SOFTRST_CON(x) ((x) * 0x4 + 0xa00)
+#define RK3588_GLB_CNT_TH 0xc00
+#define RK3588_GLB_SRST_FST 0xc08
+#define RK3588_GLB_SRST_SND 0xc0c
+#define RK3588_GLB_RST_CON 0xc10
+#define RK3588_GLB_RST_ST 0xc04
+#define RK3588_SDIO_CON0 0xc24
+#define RK3588_SDIO_CON1 0xc28
+#define RK3588_SDMMC_CON0 0xc30
+#define RK3588_SDMMC_CON1 0xc34
+
+#define RK3588_PHP_CLKGATE_CON(x) ((x) * 0x4 + RK3588_PHP_CRU_BASE + 0x800)
+#define RK3588_PHP_SOFTRST_CON(x) ((x) * 0x4 + RK3588_PHP_CRU_BASE + 0xa00)
+
+#define RK3588_PMU_PLL_CON(x) ((x) * 0x4 + RK3588_PHP_CRU_BASE)
+#define RK3588_PMU_CLKSEL_CON(x) ((x) * 0x4 + RK3588_PMU_CRU_BASE + 0x300)
+#define RK3588_PMU_CLKGATE_CON(x) ((x) * 0x4 + RK3588_PMU_CRU_BASE + 0x800) +#define RK3588_PMU_SOFTRST_CON(x) ((x) * 0x4 + RK3588_PMU_CRU_BASE + 0xa00) + +#define RK3588_B0_PLL_CON(x) ((x) * 0x4 + RK3588_BIGCORE0_CRU_BASE) +#define RK3588_BIGCORE0_CLKSEL_CON(x) ((x) * 0x4 + RK3588_BIGCORE0_CRU_BASE + 0x300) +#define RK3588_BIGCORE0_CLKGATE_CON(x) ((x) * 0x4 + RK3588_BIGCORE0_CRU_BASE + 0x800) +#define RK3588_BIGCORE0_SOFTRST_CON(x) ((x) * 0x4 + RK3588_BIGCORE0_CRU_BASE + 0xa00) +#define RK3588_B1_PLL_CON(x) ((x) * 0x4 + RK3588_BIGCORE1_CRU_BASE) +#define RK3588_BIGCORE1_CLKSEL_CON(x) ((x) * 0x4 + RK3588_BIGCORE1_CRU_BASE + 0x300) +#define RK3588_BIGCORE1_CLKGATE_CON(x) ((x) * 0x4 + RK3588_BIGCORE1_CRU_BASE + 0x800) +#define RK3588_BIGCORE1_SOFTRST_CON(x) ((x) * 0x4 + RK3588_BIGCORE1_CRU_BASE + 0xa00) +#define RK3588_LPLL_CON(x) ((x) * 0x4 + RK3588_DSU_CRU_BASE) +#define RK3588_DSU_CLKSEL_CON(x) ((x) * 0x4 + RK3588_DSU_CRU_BASE + 0x300) +#define RK3588_DSU_CLKGATE_CON(x) ((x) * 0x4 + RK3588_DSU_CRU_BASE + 0x800) +#define RK3588_DSU_SOFTRST_CON(x) ((x) * 0x4 + RK3588_DSU_CRU_BASE + 0xa00) + enum rockchip_pll_type { pll_rk3036, pll_rk3066, pll_rk3328, pll_rk3399, + pll_rk3588, + pll_rk3588_core, }; #define RK3036_PLL_RATE(_rate, _refdiv, _fbdiv, _postdiv1, \ @@ -225,6 +355,15 @@ enum rockchip_pll_type { .nb = _nb, \ } +#define RK3588_PLL_RATE(_rate, _p, _m, _s, _k) \ +{ \ + .rate = _rate##U, \ + .p = _p, \ + .m = _m, \ + .s = _s, \ + .k = _k, \ +} + /** * struct rockchip_clk_provider - information about clock provider * @reg_base: virtual address for the register base. @@ -238,22 +377,37 @@ struct rockchip_clk_provider { struct clk_onecell_data clk_data; struct device_node *cru_node; struct regmap *grf; + struct regmap *pmugrf; spinlock_t lock; }; struct rockchip_pll_rate_table { unsigned long rate; - unsigned int nr; - unsigned int nf; - unsigned int no; - unsigned int nb; - /* for RK3036/RK3399 */ - unsigned int fbdiv; - unsigned int postdiv1; - unsigned int refdiv; - unsigned int postdiv2; - unsigned int dsmpd; - unsigned int frac; + union { + struct { + /* for RK3066 */ + unsigned int nr; + unsigned int nf; + unsigned int no; + unsigned int nb; + }; + struct { + /* for RK3036/RK3399 */ + unsigned int fbdiv; + unsigned int postdiv1; + unsigned int refdiv; + unsigned int postdiv2; + unsigned int dsmpd; + unsigned int frac; + }; + struct { + /* for RK3588 */ + unsigned int m; + unsigned int p; + unsigned int s; + unsigned int k; + }; + }; }; /** @@ -317,39 +471,56 @@ struct clk *rockchip_clk_register_pll(struct rockchip_clk_provider *ctx, struct rockchip_pll_rate_table *rate_table, unsigned long flags, u8 clk_pll_flags); +void rockchip_boost_init(struct clk_hw *hw); + +void rockchip_boost_enable_recovery_sw_low(struct clk_hw *hw); + +void rockchip_boost_disable_recovery_sw(struct clk_hw *hw); + +void rockchip_boost_add_core_div(struct clk_hw *hw, unsigned long prate); + struct rockchip_cpuclk_clksel { int reg; u32 val; }; -#define ROCKCHIP_CPUCLK_NUM_DIVIDERS 2 +#define ROCKCHIP_CPUCLK_NUM_DIVIDERS 6 +#define ROCKCHIP_CPUCLK_MAX_CORES 4 struct rockchip_cpuclk_rate_table { unsigned long prate; struct rockchip_cpuclk_clksel divs[ROCKCHIP_CPUCLK_NUM_DIVIDERS]; + struct rockchip_cpuclk_clksel pre_muxs[ROCKCHIP_CPUCLK_NUM_DIVIDERS]; + struct rockchip_cpuclk_clksel post_muxs[ROCKCHIP_CPUCLK_NUM_DIVIDERS]; }; /** * struct rockchip_cpuclk_reg_data - register offsets and masks of the cpuclock - * @core_reg: register offset of the core settings register - * @div_core_shift: core divider offset used 
to divide the pll value - * @div_core_mask: core divider mask - * @mux_core_alt: mux value to select alternate parent + * @core_reg[]: register offset of the cores setting register + * @div_core_shift[]: cores divider offset used to divide the pll value + * @div_core_mask[]: cores divider mask + * @num_cores: number of cpu cores + * @mux_core_reg: register offset of the cores select parent + * @mux_core_alt: mux value to select alternate parent * @mux_core_main: mux value to select main parent of core * @mux_core_shift: offset of the core multiplexer * @mux_core_mask: core multiplexer mask */ struct rockchip_cpuclk_reg_data { - int core_reg; - u8 div_core_shift; - u32 div_core_mask; - u8 mux_core_alt; - u8 mux_core_main; - u8 mux_core_shift; - u32 mux_core_mask; + int core_reg[ROCKCHIP_CPUCLK_MAX_CORES]; + u8 div_core_shift[ROCKCHIP_CPUCLK_MAX_CORES]; + u32 div_core_mask[ROCKCHIP_CPUCLK_MAX_CORES]; + int num_cores; + int mux_core_reg; + u8 mux_core_alt; + u8 mux_core_main; + u8 mux_core_shift; + u32 mux_core_mask; + const char *pll_name; }; struct clk *rockchip_clk_register_cpuclk(const char *name, - const char *const *parent_names, u8 num_parents, + u8 num_parents, + struct clk *parent, struct clk *alt_parent, const struct rockchip_cpuclk_reg_data *reg_data, const struct rockchip_cpuclk_rate_table *rates, int nrates, void __iomem *reg_base, spinlock_t *lock); @@ -361,16 +532,21 @@ struct clk *rockchip_clk_register_mmc(const char *name, /* * DDRCLK flags, including method of setting the rate * ROCKCHIP_DDRCLK_SIP: use SIP call to bl31 to change ddrclk rate. + * ROCKCHIP_DDRCLK_SCPI: use SCPI APIs to let mcu change ddrclk rate. */ #define ROCKCHIP_DDRCLK_SIP BIT(0) +#define ROCKCHIP_DDRCLK_SCPI 0x02 +#define ROCKCHIP_DDRCLK_SIP_V2 0x03 + +void rockchip_set_ddrclk_params(void __iomem *params); +void rockchip_set_ddrclk_dmcfreq_wait_complete(int (*func)(void)); struct clk *rockchip_clk_register_ddrclk(const char *name, int flags, const char *const *parent_names, u8 num_parents, int mux_offset, int mux_shift, int mux_width, int div_shift, int div_width, - int ddr_flags, void __iomem *reg_base, - spinlock_t *lock); + int ddr_flags, void __iomem *reg_base); #define ROCKCHIP_INVERTER_HIWORD_MASK BIT(0) @@ -388,8 +564,10 @@ struct clk *rockchip_clk_register_muxgrf(const char *name, enum rockchip_clk_branch_type { branch_composite, + branch_composite_brother, branch_mux, branch_muxgrf, + branch_muxpmugrf, branch_divider, branch_fraction_divider, branch_gate, @@ -398,6 +576,7 @@ enum rockchip_clk_branch_type { branch_factor, branch_ddrclk, branch_half_divider, + branch_dclk_divider, }; struct rockchip_clk_branch { @@ -411,6 +590,7 @@ struct rockchip_clk_branch { u8 mux_shift; u8 mux_width; u8 mux_flags; + u32 *mux_table; int div_offset; u8 div_shift; u8 div_width; @@ -420,6 +600,7 @@ struct rockchip_clk_branch { u8 gate_shift; u8 gate_flags; struct rockchip_clk_branch *child; + unsigned long max_prate; }; #define COMPOSITE(_id, cname, pnames, f, mo, ms, mw, mf, ds, dw,\ @@ -443,6 +624,50 @@ struct rockchip_clk_branch { .gate_flags = gf, \ } +#define COMPOSITE_BROTHER(_id, cname, pnames, f, mo, ms, mw, mf,\ + ds, dw, df, go, gs, gf, bro) \ + { \ + .id = _id, \ + .branch_type = branch_composite_brother, \ + .name = cname, \ + .parent_names = pnames, \ + .num_parents = ARRAY_SIZE(pnames), \ + .flags = f, \ + .muxdiv_offset = mo, \ + .mux_shift = ms, \ + .mux_width = mw, \ + .mux_flags = mf, \ + .div_shift = ds, \ + .div_width = dw, \ + .div_flags = df, \ + .gate_offset = go, \ + .gate_shift = gs, \ + 
.gate_flags = gf, \ + .child = bro, \ + } + +#define COMPOSITE_MUXTBL(_id, cname, pnames, f, mo, ms, mw, mf, \ + mt, ds, dw, df, go, gs, gf) \ + { \ + .id = _id, \ + .branch_type = branch_composite, \ + .name = cname, \ + .parent_names = pnames, \ + .num_parents = ARRAY_SIZE(pnames), \ + .flags = f, \ + .muxdiv_offset = mo, \ + .mux_shift = ms, \ + .mux_width = mw, \ + .mux_flags = mf, \ + .mux_table = mt, \ + .div_shift = ds, \ + .div_width = dw, \ + .div_flags = df, \ + .gate_offset = go, \ + .gate_shift = gs, \ + .gate_flags = gf, \ + } + #define COMPOSITE_DIV_OFFSET(_id, cname, pnames, f, mo, ms, mw, \ mf, do, ds, dw, df, go, gs, gf) \ { \ @@ -539,6 +764,26 @@ struct rockchip_clk_branch { .gate_offset = -1, \ } +#define COMPOSITE_BROTHER_NOGATE(_id, cname, pnames, f, mo, ms, \ + mw, mf, ds, dw, df, bro) \ + { \ + .id = _id, \ + .branch_type = branch_composite_brother, \ + .name = cname, \ + .parent_names = pnames, \ + .num_parents = ARRAY_SIZE(pnames), \ + .flags = f, \ + .muxdiv_offset = mo, \ + .mux_shift = ms, \ + .mux_width = mw, \ + .mux_flags = mf, \ + .div_shift = ds, \ + .div_width = dw, \ + .div_flags = df, \ + .gate_offset = -1, \ + .child = bro, \ + } + #define COMPOSITE_NOGATE_DIVTBL(_id, cname, pnames, f, mo, ms, \ mw, mf, ds, dw, df, dt) \ { \ @@ -559,7 +804,7 @@ struct rockchip_clk_branch { .gate_offset = -1, \ } -#define COMPOSITE_FRAC(_id, cname, pname, f, mo, df, go, gs, gf)\ +#define COMPOSITE_FRAC(_id, cname, pname, f, mo, df, go, gs, gf, prate)\ { \ .id = _id, \ .branch_type = branch_fraction_divider, \ @@ -574,9 +819,10 @@ struct rockchip_clk_branch { .gate_offset = go, \ .gate_shift = gs, \ .gate_flags = gf, \ + .max_prate = prate, \ } -#define COMPOSITE_FRACMUX(_id, cname, pname, f, mo, df, go, gs, gf, ch) \ +#define COMPOSITE_FRACMUX(_id, cname, pname, f, mo, df, go, gs, gf, ch, prate) \ { \ .id = _id, \ .branch_type = branch_fraction_divider, \ @@ -592,9 +838,10 @@ struct rockchip_clk_branch { .gate_shift = gs, \ .gate_flags = gf, \ .child = ch, \ + .max_prate = prate, \ } -#define COMPOSITE_FRACMUX_NOGATE(_id, cname, pname, f, mo, df, ch) \ +#define COMPOSITE_FRACMUX_NOGATE(_id, cname, pname, f, mo, df, ch, prate) \ { \ .id = _id, \ .branch_type = branch_fraction_divider, \ @@ -608,6 +855,7 @@ struct rockchip_clk_branch { .div_flags = df, \ .gate_offset = -1, \ .child = ch, \ + .max_prate = prate, \ } #define COMPOSITE_DDRCLK(_id, cname, pnames, f, mo, ms, mw, \ @@ -643,6 +891,22 @@ struct rockchip_clk_branch { .gate_offset = -1, \ } +#define MUXTBL(_id, cname, pnames, f, o, s, w, mf, mt) \ + { \ + .id = _id, \ + .branch_type = branch_mux, \ + .name = cname, \ + .parent_names = pnames, \ + .num_parents = ARRAY_SIZE(pnames), \ + .flags = f, \ + .muxdiv_offset = o, \ + .mux_shift = s, \ + .mux_width = w, \ + .mux_flags = mf, \ + .gate_offset = -1, \ + .mux_table = mt, \ + } + #define MUXGRF(_id, cname, pnames, f, o, s, w, mf) \ { \ .id = _id, \ @@ -658,6 +922,21 @@ struct rockchip_clk_branch { .gate_offset = -1, \ } +#define MUXPMUGRF(_id, cname, pnames, f, o, s, w, mf) \ + { \ + .id = _id, \ + .branch_type = branch_muxpmugrf, \ + .name = cname, \ + .parent_names = pnames, \ + .num_parents = ARRAY_SIZE(pnames), \ + .flags = f, \ + .muxdiv_offset = o, \ + .mux_shift = s, \ + .mux_width = w, \ + .mux_flags = mf, \ + .gate_offset = -1, \ + } + #define DIV(_id, cname, pname, f, o, s, w, df) \ { \ .id = _id, \ @@ -772,6 +1051,28 @@ struct rockchip_clk_branch { .gate_flags = gf, \ } +#define COMPOSITE_HALFDIV_OFFSET(_id, cname, pnames, f, mo, ms, mw, mf, do,\ + ds, 
dw, df, go, gs, gf) \ + { \ + .id = _id, \ + .branch_type = branch_half_divider, \ + .name = cname, \ + .parent_names = pnames, \ + .num_parents = ARRAY_SIZE(pnames), \ + .flags = f, \ + .muxdiv_offset = mo, \ + .mux_shift = ms, \ + .mux_width = mw, \ + .mux_flags = mf, \ + .div_offset = do, \ + .div_shift = ds, \ + .div_width = dw, \ + .div_flags = df, \ + .gate_offset = go, \ + .gate_shift = gs, \ + .gate_flags = gf, \ + } + #define COMPOSITE_NOGATE_HALFDIV(_id, cname, pnames, f, mo, ms, mw, mf, \ ds, dw, df) \ { \ @@ -824,6 +1125,28 @@ struct rockchip_clk_branch { .gate_offset = -1, \ } +#define COMPOSITE_DCLK(_id, cname, pnames, f, mo, ms, mw, mf, ds, dw,\ + df, go, gs, gf, prate) \ + { \ + .id = _id, \ + .branch_type = branch_dclk_divider, \ + .name = cname, \ + .parent_names = pnames, \ + .num_parents = ARRAY_SIZE(pnames), \ + .flags = f, \ + .muxdiv_offset = mo, \ + .mux_shift = ms, \ + .mux_width = mw, \ + .mux_flags = mf, \ + .div_shift = ds, \ + .div_width = dw, \ + .div_flags = df, \ + .gate_offset = go, \ + .gate_shift = gs, \ + .gate_flags = gf, \ + .max_prate = prate, \ + } + /* SGRF clocks are only accessible from secure mode, so not controllable */ #define SGRF_GATE(_id, cname, pname) \ FACTOR(_id, cname, pname, 0, 1, 1) @@ -840,13 +1163,17 @@ void rockchip_clk_register_branches(struct rockchip_clk_provider *ctx, void rockchip_clk_register_plls(struct rockchip_clk_provider *ctx, struct rockchip_pll_clock *pll_list, unsigned int nr_pll, int grf_lock_offset); -void rockchip_clk_register_armclk(struct rockchip_clk_provider *ctx, - unsigned int lookup_id, const char *name, - const char *const *parent_names, u8 num_parents, - const struct rockchip_cpuclk_reg_data *reg_data, - const struct rockchip_cpuclk_rate_table *rates, - int nrates); -void rockchip_clk_protect_critical(const char *const clocks[], int nclocks); +void __init rockchip_clk_register_armclk(struct rockchip_clk_provider *ctx, + unsigned int lookup_id, + const char *name, + u8 num_parents, + struct clk *parent, struct clk *alt_parent, + const struct rockchip_cpuclk_reg_data *reg_data, + const struct rockchip_cpuclk_rate_table *rates, + int nrates); +int rockchip_pll_clk_rate_to_scale(struct clk *clk, unsigned long rate); +int rockchip_pll_clk_scale_to_rate(struct clk *clk, unsigned int scale); +int rockchip_pll_clk_adaptive_scaling(struct clk *clk, int sel); void rockchip_register_restart_notifier(struct rockchip_clk_provider *ctx, unsigned int reg, void (*cb)(void)); @@ -857,12 +1184,27 @@ struct clk *rockchip_clk_register_halfdiv(const char *name, u8 num_parents, void __iomem *base, int muxdiv_offset, u8 mux_shift, u8 mux_width, u8 mux_flags, - u8 div_shift, u8 div_width, - u8 div_flags, int gate_offset, - u8 gate_shift, u8 gate_flags, - unsigned long flags, + int div_offset, u8 div_shift, + u8 div_width, u8 div_flags, + int gate_offset, u8 gate_shift, + u8 gate_flags, unsigned long flags, spinlock_t *lock); +struct clk *rockchip_clk_register_dclk_branch(const char *name, + const char *const *parent_names, + u8 num_parents, + void __iomem *base, + int muxdiv_offset, u8 mux_shift, + u8 mux_width, u8 mux_flags, + int div_offset, u8 div_shift, + u8 div_width, u8 div_flags, + struct clk_div_table *div_table, + int gate_offset, + u8 gate_shift, u8 gate_flags, + unsigned long flags, + unsigned long max_prate, + spinlock_t *lock); + #ifdef CONFIG_RESET_CONTROLLER void rockchip_register_softrst(struct device_node *np, unsigned int num_regs, @@ -874,5 +1216,6 @@ static inline void rockchip_register_softrst(struct 
device_node *np, { } #endif +extern void (*rk_dump_cru)(void); #endif diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig index a0c6e88be..9d9cb5757 100644 --- a/drivers/clocksource/Kconfig +++ b/drivers/clocksource/Kconfig @@ -85,7 +85,9 @@ config IXP4XX_TIMER Enables support for the Intel XScale IXP4xx SoC timer. config ROCKCHIP_TIMER - bool "Rockchip timer driver" if COMPILE_TEST + tristate "Rockchip timer driver" + default ARCH_ROCKCHIP + depends on ARCH_ROCKCHIP || COMPILE_TEST depends on ARM || ARM64 select TIMER_OF select CLKSRC_MMIO diff --git a/drivers/clocksource/timer-rockchip.c b/drivers/clocksource/timer-rockchip.c index 1f95d0aca..2f4e970d7 100644 --- a/drivers/clocksource/timer-rockchip.c +++ b/drivers/clocksource/timer-rockchip.c @@ -8,11 +8,13 @@ #include #include #include +#include #include #include #include #include #include +#include #define TIMER_NAME "rk_timer" @@ -45,7 +47,9 @@ struct rk_clkevt { }; static struct rk_clkevt *rk_clkevt; +#ifndef MODULE static struct rk_timer *rk_clksrc; +#endif static inline struct rk_timer *rk_timer(struct clock_event_device *ce) { @@ -119,10 +123,12 @@ static irqreturn_t rk_timer_interrupt(int irq, void *dev_id) return IRQ_HANDLED; } +#ifndef MODULE static u64 notrace rk_timer_sched_read(void) { return ~readl_relaxed(rk_clksrc->base + TIMER_CURRENT_VALUE0); } +#endif static int __init rk_timer_probe(struct rk_timer *timer, struct device_node *np) @@ -250,6 +256,7 @@ static int __init rk_clkevt_init(struct device_node *np) return ret; } +#ifndef MODULE static int __init rk_clksrc_init(struct device_node *np) { int ret = -EINVAL; @@ -287,14 +294,17 @@ static int __init rk_clksrc_init(struct device_node *np) rk_clksrc = ERR_PTR(ret); return ret; } +#endif static int __init rk_timer_init(struct device_node *np) { if (!rk_clkevt) return rk_clkevt_init(np); +#ifndef MODULE if (!rk_clksrc) return rk_clksrc_init(np); +#endif pr_err("Too many timer definitions for '%s'\n", TIMER_NAME); return -EINVAL; @@ -302,3 +312,26 @@ static int __init rk_timer_init(struct device_node *np) TIMER_OF_DECLARE(rk3288_timer, "rockchip,rk3288-timer", rk_timer_init); TIMER_OF_DECLARE(rk3399_timer, "rockchip,rk3399-timer", rk_timer_init); + +#ifdef MODULE +static int __init rk_timer_driver_probe(struct platform_device *pdev) +{ + return rk_timer_init(pdev->dev.of_node); +} + +static const struct of_device_id rk_timer_match_table[] = { + { .compatible = "rockchip,rk3288-timer" }, + { .compatible = "rockchip,rk3399-timer" }, + { /* sentinel */ }, +}; + +static struct platform_driver rk_timer_driver = { + .driver = { + .name = TIMER_NAME, + .of_match_table = rk_timer_match_table, + }, +}; +module_platform_driver_probe(rk_timer_driver, rk_timer_driver_probe); + +MODULE_LICENSE("GPL"); +#endif diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c index aea285651..6e428043a 100644 --- a/drivers/cpufreq/cpufreq-dt-platdev.c +++ b/drivers/cpufreq/cpufreq-dt-platdev.c @@ -66,21 +66,6 @@ static const struct of_device_id whitelist[] __initconst = { { .compatible = "renesas,r8a7794", }, { .compatible = "renesas,sh73a0", }, - { .compatible = "rockchip,rk2928", }, - { .compatible = "rockchip,rk3036", }, - { .compatible = "rockchip,rk3066a", }, - { .compatible = "rockchip,rk3066b", }, - { .compatible = "rockchip,rk3188", }, - { .compatible = "rockchip,rk3228", }, - { .compatible = "rockchip,rk3288", }, - { .compatible = "rockchip,rk3328", }, - { .compatible = "rockchip,rk3366", }, - { .compatible = "rockchip,rk3368", }, - { 
.compatible = "rockchip,rk3399", - .data = &(struct cpufreq_dt_platform_data) - { .have_governor_per_policy = true, }, - }, - { .compatible = "st-ericsson,u8500", }, { .compatible = "st-ericsson,u8540", }, { .compatible = "st-ericsson,u9500", }, @@ -139,6 +124,30 @@ static const struct of_device_id blacklist[] __initconst = { { .compatible = "qcom,sdm845", }, { .compatible = "qcom,sm8150", }, + { .compatible = "rockchip,px30", }, + { .compatible = "rockchip,rk2928", }, + { .compatible = "rockchip,rk3036", }, + { .compatible = "rockchip,rk3066a", }, + { .compatible = "rockchip,rk3066b", }, + { .compatible = "rockchip,rk3126", }, + { .compatible = "rockchip,rk3128", }, + { .compatible = "rockchip,rk3188", }, + { .compatible = "rockchip,rk3228", }, + { .compatible = "rockchip,rk3229", }, + { .compatible = "rockchip,rk3288", }, + { .compatible = "rockchip,rk3288w", }, + { .compatible = "rockchip,rk3326", }, + { .compatible = "rockchip,rk3328", }, + { .compatible = "rockchip,rk3366", }, + { .compatible = "rockchip,rk3368", }, + { .compatible = "rockchip,rk3399", }, + { .compatible = "rockchip,rk3399pro", }, + { .compatible = "rockchip,rk3566", }, + { .compatible = "rockchip,rk3568", }, + { .compatible = "rockchip,rk3588", }, + { .compatible = "rockchip,rv1109", }, + { .compatible = "rockchip,rv1126", }, + { .compatible = "st,stih407", }, { .compatible = "st,stih410", }, { .compatible = "st,stih418", }, diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c index e363ae04a..bbf10ab42 100644 --- a/drivers/cpufreq/cpufreq-dt.c +++ b/drivers/cpufreq/cpufreq-dt.c @@ -23,6 +23,9 @@ #include #include "cpufreq-dt.h" +#ifdef CONFIG_ARCH_ROCKCHIP +#include +#endif struct private_data { struct list_head node; @@ -30,7 +33,7 @@ struct private_data { cpumask_var_t cpus; struct device *cpu_dev; struct opp_table *opp_table; - struct opp_table *reg_opp_table; + struct cpufreq_frequency_table *freq_table; bool have_static_opps; }; @@ -59,7 +62,11 @@ static int set_target(struct cpufreq_policy *policy, unsigned int index) struct private_data *priv = policy->driver_data; unsigned long freq = policy->freq_table[index].frequency; +#ifdef CONFIG_ARCH_ROCKCHIP + return rockchip_cpufreq_opp_set_rate(priv->cpu_dev, freq * 1000); +#else return dev_pm_opp_set_rate(priv->cpu_dev, freq * 1000); +#endif } /* @@ -102,7 +109,6 @@ static const char *find_supply_name(struct device *dev) static int cpufreq_init(struct cpufreq_policy *policy) { - struct cpufreq_frequency_table *freq_table; struct private_data *priv; struct device *cpu_dev; struct clk *cpu_clk; @@ -114,9 +120,7 @@ static int cpufreq_init(struct cpufreq_policy *policy) pr_err("failed to find data for cpu%d\n", policy->cpu); return -ENODEV; } - cpu_dev = priv->cpu_dev; - cpumask_copy(policy->cpus, priv->cpus); cpu_clk = clk_get(cpu_dev, NULL); if (IS_ERR(cpu_clk)) { @@ -125,67 +129,32 @@ static int cpufreq_init(struct cpufreq_policy *policy) return ret; } - /* - * Initialize OPP tables for all policy->cpus. They will be shared by - * all CPUs which have marked their CPUs shared with OPP bindings. - * - * For platforms not using operating-points-v2 bindings, we do this - * before updating policy->cpus. Otherwise, we will end up creating - * duplicate OPPs for policy->cpus. 
- * - * OPPs might be populated at runtime, don't check for error here - */ - if (!dev_pm_opp_of_cpumask_add_table(policy->cpus)) - priv->have_static_opps = true; - - /* - * But we need OPP table to function so if it is not there let's - * give platform code chance to provide it for us. - */ - ret = dev_pm_opp_get_opp_count(cpu_dev); - if (ret <= 0) { - dev_err(cpu_dev, "OPP table can't be empty\n"); - ret = -ENODEV; - goto out_free_opp; - } - - ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table); - if (ret) { - dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret); - goto out_free_opp; - } + transition_latency = dev_pm_opp_get_max_transition_latency(cpu_dev); + if (!transition_latency) + transition_latency = CPUFREQ_ETERNAL; + cpumask_copy(policy->cpus, priv->cpus); policy->driver_data = priv; policy->clk = cpu_clk; - policy->freq_table = freq_table; - + policy->freq_table = priv->freq_table; policy->suspend_freq = dev_pm_opp_get_suspend_opp_freq(cpu_dev) / 1000; + policy->cpuinfo.transition_latency = transition_latency; + policy->dvfs_possible_from_any_cpu = true; /* Support turbo/boost mode */ if (policy_has_boost_freq(policy)) { /* This gets disabled by core on driver unregister */ ret = cpufreq_enable_boost_support(); if (ret) - goto out_free_cpufreq_table; + goto out_clk_put; cpufreq_dt_attr[1] = &cpufreq_freq_attr_scaling_boost_freqs; } - transition_latency = dev_pm_opp_get_max_transition_latency(cpu_dev); - if (!transition_latency) - transition_latency = CPUFREQ_ETERNAL; - - policy->cpuinfo.transition_latency = transition_latency; - policy->dvfs_possible_from_any_cpu = true; - dev_pm_opp_of_register_em(cpu_dev, policy->cpus); return 0; -out_free_cpufreq_table: - dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table); -out_free_opp: - if (priv->have_static_opps) - dev_pm_opp_of_cpumask_remove_table(policy->cpus); +out_clk_put: clk_put(cpu_clk); return ret; @@ -208,11 +177,6 @@ static int cpufreq_offline(struct cpufreq_policy *policy) static int cpufreq_exit(struct cpufreq_policy *policy) { - struct private_data *priv = policy->driver_data; - - dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table); - if (priv->have_static_opps) - dev_pm_opp_of_cpumask_remove_table(policy->related_cpus); clk_put(policy->clk); return 0; } @@ -236,6 +200,7 @@ static int dt_cpufreq_early_init(struct device *dev, int cpu) { struct private_data *priv; struct device *cpu_dev; + bool fallback = false; const char *reg_name; int ret; @@ -254,68 +219,91 @@ static int dt_cpufreq_early_init(struct device *dev, int cpu) if (!alloc_cpumask_var(&priv->cpus, GFP_KERNEL)) return -ENOMEM; + cpumask_set_cpu(cpu, priv->cpus); priv->cpu_dev = cpu_dev; - /* Try to get OPP table early to ensure resources are available */ - priv->opp_table = dev_pm_opp_get_opp_table(cpu_dev); - if (IS_ERR(priv->opp_table)) { - ret = PTR_ERR(priv->opp_table); - if (ret != -EPROBE_DEFER) - dev_err(cpu_dev, "failed to get OPP table: %d\n", ret); - goto free_cpumask; - } - /* * OPP layer will be taking care of regulators now, but it needs to know * the name of the regulator first. 
*/ reg_name = find_supply_name(cpu_dev); if (reg_name) { - priv->reg_opp_table = dev_pm_opp_set_regulators(cpu_dev, - ®_name, 1); - if (IS_ERR(priv->reg_opp_table)) { - ret = PTR_ERR(priv->reg_opp_table); + priv->opp_table = dev_pm_opp_set_regulators(cpu_dev, ®_name, + 1); + if (IS_ERR(priv->opp_table)) { + ret = PTR_ERR(priv->opp_table); if (ret != -EPROBE_DEFER) dev_err(cpu_dev, "failed to set regulators: %d\n", ret); - goto put_table; + goto free_cpumask; } } - /* Find OPP sharing information so we can fill pri->cpus here */ /* Get OPP-sharing information from "operating-points-v2" bindings */ ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, priv->cpus); if (ret) { if (ret != -ENOENT) - goto put_reg; + goto out; /* * operating-points-v2 not supported, fallback to all CPUs share * OPP for backward compatibility if the platform hasn't set * sharing CPUs. */ - if (dev_pm_opp_get_sharing_cpus(cpu_dev, priv->cpus)) { - cpumask_setall(priv->cpus); - - /* - * OPP tables are initialized only for cpu, do it for - * others as well. - */ - ret = dev_pm_opp_set_sharing_cpus(cpu_dev, priv->cpus); - if (ret) - dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n", - __func__, ret); - } + if (dev_pm_opp_get_sharing_cpus(cpu_dev, priv->cpus)) + fallback = true; + } + + /* + * Initialize OPP tables for all priv->cpus. They will be shared by + * all CPUs which have marked their CPUs shared with OPP bindings. + * + * For platforms not using operating-points-v2 bindings, we do this + * before updating priv->cpus. Otherwise, we will end up creating + * duplicate OPPs for the CPUs. + * + * OPPs might be populated at runtime, don't check for error here. + */ + if (!dev_pm_opp_of_cpumask_add_table(priv->cpus)) + priv->have_static_opps = true; + + /* + * The OPP table must be initialized, statically or dynamically, by this + * point. 
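+ * Either the dev_pm_opp_of_cpumask_add_table() call above populated the
+ * table statically or the platform added OPPs at runtime; the
+ * dev_pm_opp_get_opp_count() check below rejects both failure modes
+ * with -ENODEV.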
+ */ + ret = dev_pm_opp_get_opp_count(cpu_dev); + if (ret <= 0) { + dev_err(cpu_dev, "OPP table can't be empty\n"); + ret = -ENODEV; + goto out; + } + + if (fallback) { + cpumask_setall(priv->cpus); + ret = dev_pm_opp_set_sharing_cpus(cpu_dev, priv->cpus); + if (ret) + dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n", + __func__, ret); + } + +#ifdef CONFIG_ARCH_ROCKCHIP + rockchip_cpufreq_adjust_power_scale(cpu_dev); +#endif + + ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &priv->freq_table); + if (ret) { + dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret); + goto out; } list_add(&priv->node, &priv_list); return 0; -put_reg: - if (priv->reg_opp_table) - dev_pm_opp_put_regulators(priv->reg_opp_table); -put_table: - dev_pm_opp_put_opp_table(priv->opp_table); +out: + if (priv->have_static_opps) + dev_pm_opp_of_cpumask_remove_table(priv->cpus); + if (priv->opp_table) + dev_pm_opp_put_regulators(priv->opp_table); free_cpumask: free_cpumask_var(priv->cpus); return ret; @@ -326,9 +314,11 @@ static void dt_cpufreq_release(void) struct private_data *priv, *tmp; list_for_each_entry_safe(priv, tmp, &priv_list, node) { - if (priv->reg_opp_table) - dev_pm_opp_put_regulators(priv->reg_opp_table); - dev_pm_opp_put_opp_table(priv->opp_table); + dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &priv->freq_table); + if (priv->have_static_opps) + dev_pm_opp_of_cpumask_remove_table(priv->cpus); + if (priv->opp_table) + dev_pm_opp_put_regulators(priv->opp_table); free_cpumask_var(priv->cpus); list_del(&priv->node); } diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 583423909..6926796dc 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -688,8 +688,12 @@ static ssize_t show_##file_name \ return sprintf(buf, "%u\n", policy->object); \ } +static ssize_t show_cpuinfo_max_freq(struct cpufreq_policy *policy, char *buf) +{ + unsigned int max_freq = policy->cpuinfo.max_freq; + return sprintf(buf, "%u\n", max_freq); +} show_one(cpuinfo_min_freq, cpuinfo.min_freq); -show_one(cpuinfo_max_freq, cpuinfo.max_freq); show_one(cpuinfo_transition_latency, cpuinfo.transition_latency); show_one(scaling_min_freq, min); show_one(scaling_max_freq, max); @@ -1400,7 +1404,7 @@ static int cpufreq_online(unsigned int cpu) ret = freq_qos_add_request(&policy->constraints, policy->min_freq_req, FREQ_QOS_MIN, - FREQ_QOS_MIN_DEFAULT_VALUE); + policy->min); if (ret < 0) { /* * So we don't call freq_qos_remove_request() for an @@ -1420,7 +1424,7 @@ static int cpufreq_online(unsigned int cpu) ret = freq_qos_add_request(&policy->constraints, policy->max_freq_req, FREQ_QOS_MAX, - FREQ_QOS_MAX_DEFAULT_VALUE); + policy->max); if (ret < 0) { policy->max_freq_req = NULL; goto out_destroy_policy; @@ -2535,6 +2539,7 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy, return ret; } +EXPORT_TRACEPOINT_SYMBOL_GPL(cpu_frequency_limits); /** * cpufreq_update_policy - Re-evaluate an existing cpufreq policy. 
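Aside, not part of the patch: the freq QoS hunks above seed each policy's MIN/MAX requests from policy->min and policy->max instead of the FREQ_QOS_*_DEFAULT_VALUE extremes, so limits already applied to the policy survive an offline/online cycle. For reference, a minimal sketch of how any other kernel code could stack its own ceiling onto the same per-policy constraints object; the request variable and the 1.2 GHz cap are invented for illustration:

	#include <linux/cpufreq.h>
	#include <linux/pm_qos.h>

	static struct freq_qos_request example_cap;	/* hypothetical consumer */

	static int example_cap_cpu0(void)
	{
		struct cpufreq_policy *policy = cpufreq_cpu_get(0);
		int ret;

		if (!policy)
			return -ENODEV;

		/* values are in kHz; the effective max is the lowest active cap */
		ret = freq_qos_add_request(&policy->constraints, &example_cap,
					   FREQ_QOS_MAX, 1200000);
		cpufreq_cpu_put(policy);

		return ret < 0 ? ret : 0;
	}

A later freq_qos_update_request() moves the cap and freq_qos_remove_request() drops it; the governor never needs to know who imposed the limit.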
diff --git a/drivers/cpufreq/cpufreq_userspace.c b/drivers/cpufreq/cpufreq_userspace.c
index 50a4d7846..1f001d281 100644
--- a/drivers/cpufreq/cpufreq_userspace.c
+++ b/drivers/cpufreq/cpufreq_userspace.c
@@ -78,20 +78,18 @@ static int cpufreq_userspace_policy_start(struct cpufreq_policy *policy)
 	mutex_lock(&userspace_mutex);
 	per_cpu(cpu_is_managed, policy->cpu) = 1;
-	*setspeed = policy->cur;
+	if (!*setspeed)
+		*setspeed = policy->cur;
 	mutex_unlock(&userspace_mutex);
 	return 0;
 }
 
 static void cpufreq_userspace_policy_stop(struct cpufreq_policy *policy)
 {
-	unsigned int *setspeed = policy->governor_data;
-
 	pr_debug("managing cpu %u stopped\n", policy->cpu);
 
 	mutex_lock(&userspace_mutex);
 	per_cpu(cpu_is_managed, policy->cpu) = 0;
-	*setspeed = 0;
 	mutex_unlock(&userspace_mutex);
 }
diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
index 4070e573b..557f59ac4 100644
--- a/drivers/cpuidle/driver.c
+++ b/drivers/cpuidle/driver.c
@@ -381,3 +381,4 @@ void cpuidle_driver_state_disabled(struct cpuidle_driver *drv, int idx,
 
 	mutex_unlock(&cpuidle_lock);
 }
+EXPORT_SYMBOL_GPL(cpuidle_driver_state_disabled);
diff --git a/drivers/cpuidle/governor.c b/drivers/cpuidle/governor.c
index 29acaf48e..0e51ed256 100644
--- a/drivers/cpuidle/governor.c
+++ b/drivers/cpuidle/governor.c
@@ -102,6 +102,7 @@ int cpuidle_register_governor(struct cpuidle_governor *gov)
 
 	return ret;
 }
+EXPORT_SYMBOL_GPL(cpuidle_register_governor);
 
 /**
  * cpuidle_governor_latency_req - Compute a latency constraint for CPU
@@ -118,3 +119,4 @@ s64 cpuidle_governor_latency_req(unsigned int cpu)
 
 	return (s64)device_req * NSEC_PER_USEC;
 }
+EXPORT_SYMBOL_GPL(cpuidle_governor_latency_req);
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index c6f460550..db0fe99c8 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -1761,6 +1761,40 @@ static ssize_t timer_store(struct device *dev, struct device_attribute *attr,
 }
 static DEVICE_ATTR_RW(timer);
 
+static ssize_t load_show(struct device *dev, struct device_attribute *attr,
+			 char *buf)
+{
+	int err;
+	struct devfreq *devfreq = to_devfreq(dev);
+	struct devfreq_dev_status stat;
+	unsigned long freq;
+	ssize_t len;
+
+	err = devfreq_update_stats(devfreq);
+	if (err)
+		return err;
+	stat = devfreq->last_status;
+
+	if (stat.total_time < stat.busy_time) {
+		err = devfreq_update_stats(devfreq);
+		if (err)
+			return err;
+		stat = devfreq->last_status;
+	}
+
+	if (!stat.total_time)
+		return 0;
+
+	len = sprintf(buf, "%lu", stat.busy_time * 100 / stat.total_time);
+
+	if (devfreq->profile->get_cur_freq &&
+	    !devfreq->profile->get_cur_freq(devfreq->dev.parent, &freq))
+		len += sprintf(buf + len, "@%luHz\n", freq);
+	else
+		len += sprintf(buf + len, "@%luHz\n", devfreq->previous_freq);
+
+	return len;
+}
+static DEVICE_ATTR_RO(load);
+
 static struct attribute *devfreq_attrs[] = {
 	&dev_attr_name.attr,
 	&dev_attr_governor.attr,
@@ -1773,6 +1807,7 @@ static struct attribute *devfreq_attrs[] = {
 	&dev_attr_max_freq.attr,
 	&dev_attr_trans_stat.attr,
 	&dev_attr_timer.attr,
+	&dev_attr_load.attr,
 	NULL,
 };
 ATTRIBUTE_GROUPS(devfreq);
diff --git a/drivers/devfreq/event/rockchip-dfi.c b/drivers/devfreq/event/rockchip-dfi.c
index 9a88faaf8..395790397 100644
--- a/drivers/devfreq/event/rockchip-dfi.c
+++ b/drivers/devfreq/event/rockchip-dfi.c
@@ -20,26 +20,81 @@
 #include
 
-#define RK3399_DMC_NUM_CH 2
-
+#define PX30_PMUGRF_OS_REG2 0x208
+#define PX30_PMUGRF_OS_REG3 0x20c
+
+#define RK3588_PMUGRF_OS_REG(n) (0x200 + (n) * 4)
+
+#define RK3128_GRF_SOC_CON0 0x140
+#define RK3128_GRF_OS_REG1 0x1cc
+#define RK3128_GRF_DFI_WRNUM 0x220
+#define RK3128_GRF_DFI_RDNUM 0x224
+#define RK3128_GRF_DFI_TIMERVAL 0x22c
+#define RK3128_DDR_MONITOR_EN ((1 << (16 + 6)) + (1 << 6))
+#define RK3128_DDR_MONITOR_DISB ((1 << (16 + 6)) + (0 << 6))
+
+#define RK3288_PMU_SYS_REG2 0x9c
+#define RK3288_GRF_SOC_CON4 0x254
+#define RK3288_GRF_SOC_STATUS(n) (0x280 + (n) * 4)
+#define RK3288_DFI_EN (0x30003 << 14)
+#define RK3288_DFI_DIS (0x30000 << 14)
+#define RK3288_LPDDR_SEL (0x10001 << 13)
+#define RK3288_DDR3_SEL (0x10000 << 13)
+
+#define RK3328_GRF_OS_REG2 0x5d0
+
+#define RK3368_GRF_DDRC0_CON0 0x600
+#define RK3368_GRF_SOC_STATUS5 0x494
+#define RK3368_GRF_SOC_STATUS6 0x498
+#define RK3368_GRF_SOC_STATUS8 0x4a0
+#define RK3368_GRF_SOC_STATUS9 0x4a4
+#define RK3368_GRF_SOC_STATUS10 0x4a8
+#define RK3368_DFI_EN (0x30003 << 5)
+#define RK3368_DFI_DIS (0x30000 << 5)
+
+#define MAX_DMC_NUM_CH 4
+#define READ_DRAMTYPE_INFO(n) (((n) >> 13) & 0x7)
+#define READ_CH_INFO(n) (((n) >> 28) & 0x3)
+#define READ_DRAMTYPE_INFO_V3(n, m) ((((n) >> 13) & 0x7) | ((((m) >> 12) & 0x3) << 3))
+#define READ_SYSREG_VERSION(m) (((m) >> 28) & 0xf)
+#define READ_LP5_BANK_MODE(m) (((m) >> 1) & 0x3)
+#define READ_LP5_CKR(m) (((m) >> 0) & 0x1)
 
 /* DDRMON_CTRL */
-#define DDRMON_CTRL 0x04
-#define CLR_DDRMON_CTRL (0x1f0000 << 0)
-#define LPDDR4_EN (0x10001 << 4)
-#define HARDWARE_EN (0x10001 << 3)
-#define LPDDR3_EN (0x10001 << 2)
-#define SOFTWARE_EN (0x10001 << 1)
-#define SOFTWARE_DIS (0x10000 << 1)
-#define TIME_CNT_EN (0x10001 << 0)
+#define DDRMON_CTRL 0x04
+#define CLR_DDRMON_CTRL (0xffff0000 << 0)
+#define LPDDR5_BANK_MODE(m) ((0x30000 | ((m) & 0x3)) << 7)
+#define LPDDR5_EN (0x10001 << 6)
+#define DDR4_EN (0x10001 << 5)
+#define LPDDR4_EN (0x10001 << 4)
+#define HARDWARE_EN (0x10001 << 3)
+#define LPDDR2_3_EN (0x10001 << 2)
+#define SOFTWARE_EN (0x10001 << 1)
+#define SOFTWARE_DIS (0x10000 << 1)
+#define TIME_CNT_EN (0x10001 << 0)
 
 #define DDRMON_CH0_COUNT_NUM 0x28
 #define DDRMON_CH0_DFI_ACCESS_NUM 0x2c
 #define DDRMON_CH1_COUNT_NUM 0x3c
 #define DDRMON_CH1_DFI_ACCESS_NUM 0x40
 
+/* pmu grf */
+#define PMUGRF_OS_REG2 0x308
+
+enum {
+	DDR4 = 0,
+	DDR3 = 3,
+	LPDDR2 = 5,
+	LPDDR3 = 6,
+	LPDDR4 = 7,
+	LPDDR4X = 8,
+	LPDDR5 = 9,
+	DDR5 = 10,
+	UNUSED = 0xFF
+};
+
 struct dmc_usage {
-	u32 access;
-	u32 total;
+	u64 access;
+	u64 total;
 };
 
 /*
@@ -50,44 +105,307 @@ struct dmc_usage {
 struct rockchip_dfi {
 	struct devfreq_event_dev *edev;
 	struct devfreq_event_desc *desc;
-	struct dmc_usage ch_usage[RK3399_DMC_NUM_CH];
+	struct dmc_usage ch_usage[MAX_DMC_NUM_CH];
 	struct device *dev;
 	void __iomem *regs;
 	struct regmap *regmap_pmu;
+	struct regmap *regmap_grf;
+	struct regmap *regmap_pmugrf;
 	struct clk *clk;
+	u32 dram_type;
+	u32 mon_idx;
+	u32 count_rate;
+	u32 dram_dynamic_info_reg;
+	/* 0: BG mode, 1: 16-bank mode, 2: 8-bank mode */
+	u32 lp5_bank_mode;
+	/* clk:dqs ratio; 0: 1:2, 1: 1:4 */
+	u32 lp5_ckr;
+	/*
+	 * channel availability mask; 1: available, 0: not available,
+	 * each bit represents a channel
+	 */
+	u32 ch_msk;
+};
+
+static void rk3128_dfi_start_hardware_counter(struct devfreq_event_dev *edev)
+{
+	struct rockchip_dfi *info = devfreq_event_get_drvdata(edev);
+
+	regmap_write(info->regmap_grf,
+		     RK3128_GRF_SOC_CON0,
+		     RK3128_DDR_MONITOR_EN);
+}
+
+static void rk3128_dfi_stop_hardware_counter(struct devfreq_event_dev *edev)
+{
+	struct rockchip_dfi *info = devfreq_event_get_drvdata(edev);
+
+	regmap_write(info->regmap_grf,
+		     RK3128_GRF_SOC_CON0,
+		     RK3128_DDR_MONITOR_DISB);
+}
+
+static int rk3128_dfi_disable(struct devfreq_event_dev *edev)
+{
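+	/* no clock to gate here: rk3128 DFI monitoring is a pure GRF toggle */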
rk3128_dfi_stop_hardware_counter(edev); + + return 0; +} + +static int rk3128_dfi_enable(struct devfreq_event_dev *edev) +{ + rk3128_dfi_start_hardware_counter(edev); + + return 0; +} + +static int rk3128_dfi_set_event(struct devfreq_event_dev *edev) +{ + return 0; +} + +static int rk3128_dfi_get_event(struct devfreq_event_dev *edev, + struct devfreq_event_data *edata) +{ + struct rockchip_dfi *info = devfreq_event_get_drvdata(edev); + unsigned long flags; + u32 dfi_wr, dfi_rd, dfi_timer; + + local_irq_save(flags); + + rk3128_dfi_stop_hardware_counter(edev); + + regmap_read(info->regmap_grf, RK3128_GRF_DFI_WRNUM, &dfi_wr); + regmap_read(info->regmap_grf, RK3128_GRF_DFI_RDNUM, &dfi_rd); + regmap_read(info->regmap_grf, RK3128_GRF_DFI_TIMERVAL, &dfi_timer); + + edata->load_count = (dfi_wr + dfi_rd) * 4; + edata->total_count = dfi_timer; + + rk3128_dfi_start_hardware_counter(edev); + + local_irq_restore(flags); + + return 0; +} + +static const struct devfreq_event_ops rk3128_dfi_ops = { + .disable = rk3128_dfi_disable, + .enable = rk3128_dfi_enable, + .get_event = rk3128_dfi_get_event, + .set_event = rk3128_dfi_set_event, +}; + +static void rk3288_dfi_start_hardware_counter(struct devfreq_event_dev *edev) +{ + struct rockchip_dfi *info = devfreq_event_get_drvdata(edev); + + regmap_write(info->regmap_grf, RK3288_GRF_SOC_CON4, RK3288_DFI_EN); +} + +static void rk3288_dfi_stop_hardware_counter(struct devfreq_event_dev *edev) +{ + struct rockchip_dfi *info = devfreq_event_get_drvdata(edev); + + regmap_write(info->regmap_grf, RK3288_GRF_SOC_CON4, RK3288_DFI_DIS); +} + +static int rk3288_dfi_disable(struct devfreq_event_dev *edev) +{ + rk3288_dfi_stop_hardware_counter(edev); + + return 0; +} + +static int rk3288_dfi_enable(struct devfreq_event_dev *edev) +{ + rk3288_dfi_start_hardware_counter(edev); + + return 0; +} + +static int rk3288_dfi_set_event(struct devfreq_event_dev *edev) +{ + return 0; +} + +static int rk3288_dfi_get_busier_ch(struct devfreq_event_dev *edev) +{ + struct rockchip_dfi *info = devfreq_event_get_drvdata(edev); + u32 tmp, max = 0; + u32 i, busier_ch = 0; + u32 rd_count, wr_count, total_count; + + rk3288_dfi_stop_hardware_counter(edev); + + /* Find out which channel is busier */ + for (i = 0; i < MAX_DMC_NUM_CH; i++) { + if (!(info->ch_msk & BIT(i))) + continue; + regmap_read(info->regmap_grf, + RK3288_GRF_SOC_STATUS(11 + i * 4), &wr_count); + regmap_read(info->regmap_grf, + RK3288_GRF_SOC_STATUS(12 + i * 4), &rd_count); + regmap_read(info->regmap_grf, + RK3288_GRF_SOC_STATUS(14 + i * 4), &total_count); + info->ch_usage[i].access = (wr_count + rd_count) * 4; + info->ch_usage[i].total = total_count; + tmp = info->ch_usage[i].access; + if (tmp > max) { + busier_ch = i; + max = tmp; + } + } + rk3288_dfi_start_hardware_counter(edev); + + return busier_ch; +} + +static int rk3288_dfi_get_event(struct devfreq_event_dev *edev, + struct devfreq_event_data *edata) +{ + struct rockchip_dfi *info = devfreq_event_get_drvdata(edev); + int busier_ch; + unsigned long flags; + + local_irq_save(flags); + busier_ch = rk3288_dfi_get_busier_ch(edev); + local_irq_restore(flags); + + edata->load_count = info->ch_usage[busier_ch].access; + edata->total_count = info->ch_usage[busier_ch].total; + + return 0; +} + +static const struct devfreq_event_ops rk3288_dfi_ops = { + .disable = rk3288_dfi_disable, + .enable = rk3288_dfi_enable, + .get_event = rk3288_dfi_get_event, + .set_event = rk3288_dfi_set_event, +}; + +static void rk3368_dfi_start_hardware_counter(struct devfreq_event_dev *edev) +{ + struct 
rockchip_dfi *info = devfreq_event_get_drvdata(edev); + + regmap_write(info->regmap_grf, RK3368_GRF_DDRC0_CON0, RK3368_DFI_EN); +} + +static void rk3368_dfi_stop_hardware_counter(struct devfreq_event_dev *edev) +{ + struct rockchip_dfi *info = devfreq_event_get_drvdata(edev); + + regmap_write(info->regmap_grf, RK3368_GRF_DDRC0_CON0, RK3368_DFI_DIS); +} + +static int rk3368_dfi_disable(struct devfreq_event_dev *edev) +{ + rk3368_dfi_stop_hardware_counter(edev); + + return 0; +} + +static int rk3368_dfi_enable(struct devfreq_event_dev *edev) +{ + rk3368_dfi_start_hardware_counter(edev); + + return 0; +} + +static int rk3368_dfi_set_event(struct devfreq_event_dev *edev) +{ + return 0; +} + +static int rk3368_dfi_get_event(struct devfreq_event_dev *edev, + struct devfreq_event_data *edata) +{ + struct rockchip_dfi *info = devfreq_event_get_drvdata(edev); + unsigned long flags; + u32 dfi0_wr, dfi0_rd, dfi1_wr, dfi1_rd, dfi_timer; + + local_irq_save(flags); + + rk3368_dfi_stop_hardware_counter(edev); + + regmap_read(info->regmap_grf, RK3368_GRF_SOC_STATUS5, &dfi0_wr); + regmap_read(info->regmap_grf, RK3368_GRF_SOC_STATUS6, &dfi0_rd); + regmap_read(info->regmap_grf, RK3368_GRF_SOC_STATUS9, &dfi1_wr); + regmap_read(info->regmap_grf, RK3368_GRF_SOC_STATUS10, &dfi1_rd); + regmap_read(info->regmap_grf, RK3368_GRF_SOC_STATUS8, &dfi_timer); + + edata->load_count = (dfi0_wr + dfi0_rd + dfi1_wr + dfi1_rd) * 2; + edata->total_count = dfi_timer; + + rk3368_dfi_start_hardware_counter(edev); + + local_irq_restore(flags); + + return 0; +} + +static const struct devfreq_event_ops rk3368_dfi_ops = { + .disable = rk3368_dfi_disable, + .enable = rk3368_dfi_enable, + .get_event = rk3368_dfi_get_event, + .set_event = rk3368_dfi_set_event, }; static void rockchip_dfi_start_hardware_counter(struct devfreq_event_dev *edev) { struct rockchip_dfi *info = devfreq_event_get_drvdata(edev); void __iomem *dfi_regs = info->regs; - u32 val; - u32 ddr_type; + u32 mon_idx = 0, val_6 = 0; + u32 i; - /* get ddr type */ - regmap_read(info->regmap_pmu, RK3399_PMUGRF_OS_REG2, &val); - ddr_type = (val >> RK3399_PMUGRF_DDRTYPE_SHIFT) & - RK3399_PMUGRF_DDRTYPE_MASK; + if (info->mon_idx) + mon_idx = info->mon_idx; - /* clear DDRMON_CTRL setting */ - writel_relaxed(CLR_DDRMON_CTRL, dfi_regs + DDRMON_CTRL); + if (info->dram_dynamic_info_reg) + regmap_read(info->regmap_pmugrf, info->dram_dynamic_info_reg, &val_6); - /* set ddr type to dfi */ - if (ddr_type == RK3399_PMUGRF_DDRTYPE_LPDDR3) - writel_relaxed(LPDDR3_EN, dfi_regs + DDRMON_CTRL); - else if (ddr_type == RK3399_PMUGRF_DDRTYPE_LPDDR4) - writel_relaxed(LPDDR4_EN, dfi_regs + DDRMON_CTRL); + if (info->dram_type == LPDDR5) { + info->lp5_bank_mode = READ_LP5_BANK_MODE(val_6); + info->lp5_ckr = READ_LP5_CKR(val_6); + } - /* enable count, use software mode */ - writel_relaxed(SOFTWARE_EN, dfi_regs + DDRMON_CTRL); + for (i = 0; i < MAX_DMC_NUM_CH; i++) { + if (!(info->ch_msk & BIT(i))) + continue; + /* clear DDRMON_CTRL setting */ + writel_relaxed(CLR_DDRMON_CTRL, dfi_regs + i * mon_idx + DDRMON_CTRL); + + /* set ddr type to dfi */ + if (info->dram_type == LPDDR3 || info->dram_type == LPDDR2) + writel_relaxed(LPDDR2_3_EN, dfi_regs + i * mon_idx + DDRMON_CTRL); + else if (info->dram_type == LPDDR4 || info->dram_type == LPDDR4X) + writel_relaxed(LPDDR4_EN, dfi_regs + i * mon_idx + DDRMON_CTRL); + else if (info->dram_type == DDR4) + writel_relaxed(DDR4_EN, dfi_regs + i * mon_idx + DDRMON_CTRL); + else if (info->dram_type == LPDDR5) + writel_relaxed(LPDDR5_EN | 
LPDDR5_BANK_MODE(info->lp5_bank_mode), + dfi_regs + i * mon_idx + DDRMON_CTRL); + + /* enable count, use software mode */ + writel_relaxed(SOFTWARE_EN, dfi_regs + i * mon_idx + DDRMON_CTRL); + } } static void rockchip_dfi_stop_hardware_counter(struct devfreq_event_dev *edev) { struct rockchip_dfi *info = devfreq_event_get_drvdata(edev); void __iomem *dfi_regs = info->regs; + u32 mon_idx = 0, i; - writel_relaxed(SOFTWARE_DIS, dfi_regs + DDRMON_CTRL); + if (info->mon_idx) + mon_idx = info->mon_idx; + + for (i = 0; i < MAX_DMC_NUM_CH; i++) { + if (!(info->ch_msk & BIT(i))) + continue; + writel_relaxed(SOFTWARE_DIS, dfi_regs + i * mon_idx + DDRMON_CTRL); + } } static int rockchip_dfi_get_busier_ch(struct devfreq_event_dev *edev) @@ -96,16 +414,35 @@ static int rockchip_dfi_get_busier_ch(struct devfreq_event_dev *edev) u32 tmp, max = 0; u32 i, busier_ch = 0; void __iomem *dfi_regs = info->regs; + u32 mon_idx = 0x20, count_rate = 1; rockchip_dfi_stop_hardware_counter(edev); + if (info->mon_idx) + mon_idx = info->mon_idx; + if (info->count_rate) + count_rate = info->count_rate; + /* Find out which channel is busier */ - for (i = 0; i < RK3399_DMC_NUM_CH; i++) { - info->ch_usage[i].access = readl_relaxed(dfi_regs + - DDRMON_CH0_DFI_ACCESS_NUM + i * 20) * 4; + for (i = 0; i < MAX_DMC_NUM_CH; i++) { + if (!(info->ch_msk & BIT(i))) + continue; + + /* The rk3588 DDRMON counter runs at the DFI clock rate */ info->ch_usage[i].total = readl_relaxed(dfi_regs + - DDRMON_CH0_COUNT_NUM + i * 20); - tmp = info->ch_usage[i].access; + DDRMON_CH0_COUNT_NUM + i * mon_idx) * count_rate; + + /* LPDDR5, LPDDR4 and LPDDR4X use BL = 16; other DDR types use BL = 8 */ + tmp = readl_relaxed(dfi_regs + + DDRMON_CH0_DFI_ACCESS_NUM + i * mon_idx); + if (info->dram_type == LPDDR4 || info->dram_type == LPDDR4X) + tmp *= 8; + else if (info->dram_type == LPDDR5) + tmp *= 16 / (4 << info->lp5_ckr); + else + tmp *= 4; + info->ch_usage[i].access = tmp; + if (tmp > max) { busier_ch = i; max = tmp; @@ -121,7 +458,8 @@ static int rockchip_dfi_disable(struct devfreq_event_dev *edev) struct rockchip_dfi *info = devfreq_event_get_drvdata(edev); rockchip_dfi_stop_hardware_counter(edev); - clk_disable_unprepare(info->clk); + if (info->clk) + clk_disable_unprepare(info->clk); return 0; } @@ -131,10 +469,13 @@ static int rockchip_dfi_enable(struct devfreq_event_dev *edev) struct rockchip_dfi *info = devfreq_event_get_drvdata(edev); int ret; - ret = clk_prepare_enable(info->clk); - if (ret) { - dev_err(&edev->dev, "failed to enable dfi clk: %d\n", ret); - return ret; + if (info->clk) { + ret = clk_prepare_enable(info->clk); + if (ret) { + dev_err(&edev->dev, "failed to enable dfi clk: %d\n", + ret); + return ret; + } } rockchip_dfi_start_hardware_counter(edev); @@ -151,8 +492,11 @@ static int rockchip_dfi_get_event(struct devfreq_event_dev *edev, { struct rockchip_dfi *info = devfreq_event_get_drvdata(edev); int busier_ch; + unsigned long flags; + local_irq_save(flags); busier_ch = rockchip_dfi_get_busier_ch(edev); + local_irq_restore(flags); edata->load_count = info->ch_usage[busier_ch].access; edata->total_count = info->ch_usage[busier_ch].total; @@ -167,22 +511,156 @@ static const struct devfreq_event_ops rockchip_dfi_ops = { .set_event = rockchip_dfi_set_event, }; -static const struct of_device_id rockchip_dfi_id_match[] = { - { .compatible = "rockchip,rk3399-dfi" }, - { }, -}; -MODULE_DEVICE_TABLE(of, rockchip_dfi_id_match);
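Across all of the SoC variants above, the contract of ->get_event() is the same: load_count accumulates DFI read/write transfers scaled by the burst length, total_count is a free-running monitor timer sampled over the same window, and their ratio is the DDR utilization. A minimal sketch of a consumer built only on the generic devfreq-event API (the helper name and the percentage policy are mine, not part of this patch):

    /* Hypothetical consumer: turn a DFI event read into a load percentage. */
    #include <linux/devfreq-event.h>
    #include <linux/math64.h>

    static int dfi_get_utilization(struct devfreq_event_dev *edev,
                                   unsigned int *percent)
    {
            struct devfreq_event_data edata;
            int ret;

            ret = devfreq_event_get_event(edev, &edata); /* ->get_event() above */
            if (ret < 0)
                    return ret;
            if (!edata.total_count)
                    return -EINVAL;

            *percent = div64_u64((u64)edata.load_count * 100, edata.total_count);
            return 0;
    }

A DDR devfreq governor would poll something like this periodically and raise or lower the DMC frequency around a target load.

+static __init int rk3588_dfi_init(struct platform_device *pdev, + struct rockchip_dfi *data, + struct devfreq_event_desc *desc) +{ + struct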
device_node *np = pdev->dev.of_node; + struct resource *res; + u32 val_2, val_3, val_4; -static int rockchip_dfi_probe(struct platform_device *pdev) + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + data->regs = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(data->regs)) + return PTR_ERR(data->regs); + + data->regmap_pmugrf = syscon_regmap_lookup_by_phandle(np, "rockchip,pmu_grf"); + if (IS_ERR(data->regmap_pmugrf)) + return PTR_ERR(data->regmap_pmugrf); + + regmap_read(data->regmap_pmugrf, RK3588_PMUGRF_OS_REG(2), &val_2); + regmap_read(data->regmap_pmugrf, RK3588_PMUGRF_OS_REG(3), &val_3); + regmap_read(data->regmap_pmugrf, RK3588_PMUGRF_OS_REG(4), &val_4); + if (READ_SYSREG_VERSION(val_3) >= 0x3) + data->dram_type = READ_DRAMTYPE_INFO_V3(val_2, val_3); + else + data->dram_type = READ_DRAMTYPE_INFO(val_2); + + data->mon_idx = 0x4000; + data->count_rate = 2; + data->dram_dynamic_info_reg = RK3588_PMUGRF_OS_REG(6); + data->ch_msk = READ_CH_INFO(val_2) | READ_CH_INFO(val_4) << 2; + data->clk = NULL; + + desc->ops = &rockchip_dfi_ops; + + return 0; +} + +static __init int px30_dfi_init(struct platform_device *pdev, + struct rockchip_dfi *data, + struct devfreq_event_desc *desc) { - struct device *dev = &pdev->dev; - struct rockchip_dfi *data; - struct devfreq_event_desc *desc; struct device_node *np = pdev->dev.of_node, *node; + struct resource *res; + u32 val_2, val_3; - data = devm_kzalloc(dev, sizeof(struct rockchip_dfi), GFP_KERNEL); - if (!data) - return -ENOMEM; + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + data->regs = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(data->regs)) + return PTR_ERR(data->regs); + + node = of_parse_phandle(np, "rockchip,pmugrf", 0); + if (node) { + data->regmap_pmugrf = syscon_node_to_regmap(node); + if (IS_ERR(data->regmap_pmugrf)) + return PTR_ERR(data->regmap_pmugrf); + } + + regmap_read(data->regmap_pmugrf, PX30_PMUGRF_OS_REG2, &val_2); + regmap_read(data->regmap_pmugrf, PX30_PMUGRF_OS_REG3, &val_3); + if (READ_SYSREG_VERSION(val_3) >= 0x3) + data->dram_type = READ_DRAMTYPE_INFO_V3(val_2, val_3); + else + data->dram_type = READ_DRAMTYPE_INFO(val_2); + data->ch_msk = 1; + data->clk = NULL; + + desc->ops = &rockchip_dfi_ops; + + return 0; +} + +static __init int rk3128_dfi_init(struct platform_device *pdev, + struct rockchip_dfi *data, + struct devfreq_event_desc *desc) +{ + struct device_node *np = pdev->dev.of_node, *node; + + node = of_parse_phandle(np, "rockchip,grf", 0); + if (node) { + data->regmap_grf = syscon_node_to_regmap(node); + if (IS_ERR(data->regmap_grf)) + return PTR_ERR(data->regmap_grf); + } + + desc->ops = &rk3128_dfi_ops; + + return 0; +} + +static __init int rk3288_dfi_init(struct platform_device *pdev, + struct rockchip_dfi *data, + struct devfreq_event_desc *desc) +{ + struct device_node *np = pdev->dev.of_node, *node; + u32 val; + + node = of_parse_phandle(np, "rockchip,pmu", 0); + if (node) { + data->regmap_pmu = syscon_node_to_regmap(node); + if (IS_ERR(data->regmap_pmu)) + return PTR_ERR(data->regmap_pmu); + } + + node = of_parse_phandle(np, "rockchip,grf", 0); + if (node) { + data->regmap_grf = syscon_node_to_regmap(node); + if (IS_ERR(data->regmap_grf)) + return PTR_ERR(data->regmap_grf); + } + + regmap_read(data->regmap_pmu, RK3288_PMU_SYS_REG2, &val); + data->dram_type = READ_DRAMTYPE_INFO(val); + data->ch_msk = READ_CH_INFO(val); + + if (data->dram_type == DDR3) + regmap_write(data->regmap_grf, RK3288_GRF_SOC_CON4, + RK3288_DDR3_SEL); + else + regmap_write(data->regmap_grf, 
RK3288_GRF_SOC_CON4, + RK3288_LPDDR_SEL); + + desc->ops = &rk3288_dfi_ops; + + return 0; +} + +static __init int rk3368_dfi_init(struct platform_device *pdev, + struct rockchip_dfi *data, + struct devfreq_event_desc *desc) +{ + struct device *dev = &pdev->dev; + + if (!dev->parent || !dev->parent->of_node) + return -EINVAL; + + data->regmap_grf = syscon_node_to_regmap(dev->parent->of_node); + if (IS_ERR(data->regmap_grf)) + return PTR_ERR(data->regmap_grf); + + desc->ops = &rk3368_dfi_ops; + + return 0; +} + +static __init int rockchip_dfi_init(struct platform_device *pdev, + struct rockchip_dfi *data, + struct devfreq_event_desc *desc) +{ + struct device *dev = &pdev->dev; + struct device_node *np = pdev->dev.of_node, *node; + u32 val; data->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(data->regs)) @@ -202,23 +680,101 @@ static int rockchip_dfi_probe(struct platform_device *pdev) if (IS_ERR(data->regmap_pmu)) return PTR_ERR(data->regmap_pmu); } - data->dev = dev; + + regmap_read(data->regmap_pmu, PMUGRF_OS_REG2, &val); + data->dram_type = READ_DRAMTYPE_INFO(val); + data->ch_msk = READ_CH_INFO(val); + + desc->ops = &rockchip_dfi_ops; + + return 0; +} + +static __init int rk3328_dfi_init(struct platform_device *pdev, + struct rockchip_dfi *data, + struct devfreq_event_desc *desc) +{ + struct device_node *np = pdev->dev.of_node, *node; + struct resource *res; + u32 val; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + data->regs = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(data->regs)) + return PTR_ERR(data->regs); + + node = of_parse_phandle(np, "rockchip,grf", 0); + if (node) { + data->regmap_grf = syscon_node_to_regmap(node); + if (IS_ERR(data->regmap_grf)) + return PTR_ERR(data->regmap_grf); + } + + regmap_read(data->regmap_grf, RK3328_GRF_OS_REG2, &val); + data->dram_type = READ_DRAMTYPE_INFO(val); + data->ch_msk = 1; + data->clk = NULL; + + desc->ops = &rockchip_dfi_ops; + + return 0; +} + +static const struct of_device_id rockchip_dfi_id_match[] = { + { .compatible = "rockchip,px30-dfi", .data = px30_dfi_init }, + { .compatible = "rockchip,rk1808-dfi", .data = px30_dfi_init }, + { .compatible = "rockchip,rk3128-dfi", .data = rk3128_dfi_init }, + { .compatible = "rockchip,rk3288-dfi", .data = rk3288_dfi_init }, + { .compatible = "rockchip,rk3328-dfi", .data = rk3328_dfi_init }, + { .compatible = "rockchip,rk3368-dfi", .data = rk3368_dfi_init }, + { .compatible = "rockchip,rk3399-dfi", .data = rockchip_dfi_init }, + { .compatible = "rockchip,rk3568-dfi", .data = px30_dfi_init }, + { .compatible = "rockchip,rk3588-dfi", .data = rk3588_dfi_init }, + { .compatible = "rockchip,rv1126-dfi", .data = px30_dfi_init }, + { }, +}; + +static int rockchip_dfi_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct rockchip_dfi *data; + struct devfreq_event_desc *desc; + struct device_node *np = pdev->dev.of_node; + const struct of_device_id *match; + int (*init)(struct platform_device *pdev, struct rockchip_dfi *data, + struct devfreq_event_desc *desc); + + data = devm_kzalloc(dev, sizeof(struct rockchip_dfi), GFP_KERNEL); + if (!data) + return -ENOMEM; desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL); if (!desc) return -ENOMEM; - desc->ops = &rockchip_dfi_ops; + match = of_match_node(rockchip_dfi_id_match, pdev->dev.of_node); + if (match) { + init = match->data; + if (init) { + if (init(pdev, data, desc)) + return -EINVAL; + } else { + return 0; + } + } else { + return 0; + } + desc->driver_data = data; desc->name = np->name; - 
data->desc = desc; - data->edev = devm_devfreq_event_add_edev(&pdev->dev, desc); + data->edev = devm_devfreq_event_add_edev(dev, desc); if (IS_ERR(data->edev)) { - dev_err(&pdev->dev, - "failed to add devfreq-event device\n"); + dev_err(dev, "failed to add devfreq-event device\n"); return PTR_ERR(data->edev); } + data->desc = desc; + data->dev = &pdev->dev; platform_set_drvdata(pdev, data); diff --git a/drivers/dma-buf/Kconfig b/drivers/dma-buf/Kconfig index 594b77d89..2d55b177a 100644 --- a/drivers/dma-buf/Kconfig +++ b/drivers/dma-buf/Kconfig @@ -18,10 +18,9 @@ config SYNC_FILE Documentation/driver-api/sync_file.rst. config SW_SYNC - bool "Sync File Validation Framework" + tristate "Sync File Validation Framework" default n depends on SYNC_FILE - depends on DEBUG_FS help A sync object driver that uses a 32bit counter to coordinate synchronization. Useful when there is no hardware primitive backing @@ -42,7 +41,6 @@ config UDMABUF config DMABUF_MOVE_NOTIFY bool "Move notify between drivers (EXPERIMENTAL)" default n - depends on DMA_SHARED_BUFFER help Don't pin buffers if the dynamic DMA-buf interface is available on both the exporter as well as the importer. This fixes a security @@ -56,19 +54,6 @@ config DMABUF_SELFTESTS default n depends on DMA_SHARED_BUFFER -config DMABUF_PROCESS_INFO - bool "Show dmabuf usage of all processes" - default n - depends on DMA_SHARED_BUFFER - depends on PROC_FS || DEBUG_FS - help - Choose this option to show dmabuf objects usage of all processes. - Firstly, with this option, when a process creates a dmabuf object, - its pid and task_comm will be recorded in the dmabuf. - Secondly, this option creates dma_buf/process_bufinfo file in - debugfs (if DEBUG_FS enabled) and process_dmabuf_info file in procfs - (if PROC_FS enabled) to show dmabuf objects usage of all processes. - menuconfig DMABUF_HEAPS bool "DMA-BUF Userland Memory Heaps" select DMA_SHARED_BUFFER @@ -80,7 +65,7 @@ menuconfig DMABUF_HEAPS menuconfig DMABUF_SYSFS_STATS bool "DMA-BUF sysfs statistics" - depends on DMA_SHARED_BUFFER + select DMA_SHARED_BUFFER help Choose this option to enable DMA-BUF sysfs statistics in location /sys/kernel/dmabuf/buffers. diff --git a/drivers/dma-buf/Makefile b/drivers/dma-buf/Makefile index cfbc5e3da..32757328b 100644 --- a/drivers/dma-buf/Makefile +++ b/drivers/dma-buf/Makefile @@ -4,7 +4,8 @@ obj-y := dma-buf.o dma-fence.o dma-fence-array.o dma-fence-chain.o \ obj-$(CONFIG_DMABUF_HEAPS) += dma-heap.o obj-$(CONFIG_DMABUF_HEAPS) += heaps/ obj-$(CONFIG_SYNC_FILE) += sync_file.o -obj-$(CONFIG_SW_SYNC) += sw_sync.o sync_debug.o +obj-$(CONFIG_SW_SYNC) += sw_sync.o +obj-$(CONFIG_SW_SYNC_DEBUG) += sync_debug.o obj-$(CONFIG_UDMABUF) += udmabuf.o obj-$(CONFIG_DMABUF_SYSFS_STATS) += dma-buf-sysfs-stats.o @@ -14,5 +15,3 @@ dmabuf_selftests-y := \ st-dma-fence-chain.o obj-$(CONFIG_DMABUF_SELFTESTS) += dmabuf_selftests.o - -obj-$(CONFIG_DMABUF_PROCESS_INFO) += dma-buf-process-info.o diff --git a/drivers/dma-buf/dma-buf-sysfs-stats.c b/drivers/dma-buf/dma-buf-sysfs-stats.c index 053baadca..2389a363b 100644 --- a/drivers/dma-buf/dma-buf-sysfs-stats.c +++ b/drivers/dma-buf/dma-buf-sysfs-stats.c @@ -16,40 +16,6 @@ #define to_dma_buf_entry_from_kobj(x) container_of(x, struct dma_buf_sysfs_entry, kobj) -/** - * DOC: overview - * - * ``/sys/kernel/debug/dma_buf/bufinfo`` provides an overview of every DMA-BUF - * in the system. However, since debugfs is not safe to be mounted in - * production, procfs and sysfs can be used to gather DMA-BUF statistics on - * production systems. 
- * - * The ``/proc//fdinfo/`` files in procfs can be used to gather - * information about DMA-BUF fds. Detailed documentation about the interface - * is present in Documentation/filesystems/proc.rst. - * - * Unfortunately, the existing procfs interfaces can only provide information - * about the DMA-BUFs for which processes hold fds or have the buffers mmapped - * into their address space. This necessitated the creation of the DMA-BUF sysfs - * statistics interface to provide per-buffer information on production systems. - * - * The interface at ``/sys/kernel/dma-buf/buffers`` exposes information about - * every DMA-BUF when ``CONFIG_DMABUF_SYSFS_STATS`` is enabled. - * - * The following stats are exposed by the interface: - * - * * ``/sys/kernel/dmabuf/buffers//exporter_name`` - * * ``/sys/kernel/dmabuf/buffers//size`` - * - * The information in the interface can also be used to derive per-exporter - * statistics. The data from the interface can be gathered on error conditions - * or other important events to provide a snapshot of DMA-BUF usage. - * It can also be collected periodically by telemetry to monitor various metrics. - * - * Detailed documentation about the interface is present in - * Documentation/ABI/testing/sysfs-kernel-dmabuf-buffers. - */ - struct dma_buf_stats_attribute { struct attribute attr; ssize_t (*show)(struct dma_buf *dmabuf, @@ -130,8 +96,9 @@ void dma_buf_stats_teardown(struct dma_buf *dmabuf) kobject_put(&sysfs_entry->kobj); } - -/* Statistics files do not need to send uevents. */ +/* + * Statistics files do not need to send uevents. + */ static int dmabuf_sysfs_uevent_filter(struct kset *kset, struct kobject *kobj) { return 0; } diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c index 89c10136b..068e0aeeb 100644 --- a/drivers/dma-buf/dma-buf.c +++ b/drivers/dma-buf/dma-buf.c @@ -30,9 +30,6 @@ #include #include #include "dma-buf-sysfs-stats.h" -#include "dma-buf-process-info.h" - -static inline int is_dma_buf_file(struct file *); struct dma_buf_list { struct list_head head; @@ -41,6 +38,30 @@ static struct dma_buf_list db_list; +/* + * Traverse db_list and invoke @callback on each dma-buf; a non-zero + * return from the callback stops the walk and is propagated to the + * caller.
+ */ +int get_each_dmabuf(int (*callback)(const struct dma_buf *dmabuf, + void *private), void *private) +{ + struct dma_buf *buf; + int ret = mutex_lock_interruptible(&db_list.lock); + + if (ret) + return ret; + + list_for_each_entry(buf, &db_list.head, list_node) { + ret = callback(buf, private); + if (ret) + break; + } + mutex_unlock(&db_list.lock); + return ret; +} +EXPORT_SYMBOL_GPL(get_each_dmabuf); + static char *dmabuffs_dname(struct dentry *dentry, char *buffer, int buflen) { struct dma_buf *dmabuf; @@ -60,6 +81,9 @@ static char *dmabuffs_dname(struct dentry *dentry, char *buffer, int buflen) static void dma_buf_release(struct dentry *dentry) { struct dma_buf *dmabuf; +#ifdef CONFIG_NO_GKI + int dtor_ret = 0; +#endif dmabuf = dentry->d_fsdata; if (unlikely(!dmabuf)) @@ -77,13 +101,19 @@ static void dma_buf_release(struct dentry *dentry) */ BUG_ON(dmabuf->cb_shared.active || dmabuf->cb_excl.active); - dmabuf->ops->release(dmabuf); + dma_buf_stats_teardown(dmabuf); +#ifdef CONFIG_NO_GKI + if (dmabuf->dtor) + dtor_ret = dmabuf->dtor(dmabuf, dmabuf->dtor_data); + + if (!dtor_ret) +#endif + dmabuf->ops->release(dmabuf); if (dmabuf->resv == (struct dma_resv *)&dmabuf[1]) dma_resv_fini(dmabuf->resv); - WARN_ON(!list_empty(&dmabuf->attachments)); - dma_buf_stats_teardown(dmabuf); + WARN_ON(!list_empty(&dmabuf->attachments)); module_put(dmabuf->owner); kfree(dmabuf->name); kfree(dmabuf); @@ -328,6 +358,25 @@ static __poll_t dma_buf_poll(struct file *file, poll_table *poll) return events; } +static long _dma_buf_set_name(struct dma_buf *dmabuf, const char *name) +{ + long ret = 0; + + dma_resv_lock(dmabuf->resv, NULL); + if (!list_empty(&dmabuf->attachments)) { + ret = -EBUSY; + goto out_unlock; + } + spin_lock(&dmabuf->name_lock); + kfree(dmabuf->name); + dmabuf->name = name; + spin_unlock(&dmabuf->name_lock); + +out_unlock: + dma_resv_unlock(dmabuf->resv); + return ret; +} + /** * dma_buf_set_name - Set a name to a specific dma_buf to track the usage. * The name of the dma-buf buffer can only be set when the dma-buf is not @@ -343,7 +392,23 @@ static __poll_t dma_buf_poll(struct file *file, poll_table *poll) * devices, return -EBUSY. 
* */ -static long dma_buf_set_name(struct dma_buf *dmabuf, const char __user *buf) +long dma_buf_set_name(struct dma_buf *dmabuf, const char *name) +{ + long ret = 0; + char *buf = kstrndup(name, DMA_BUF_NAME_LEN, GFP_KERNEL); + + if (!buf) + return -ENOMEM; + + ret = _dma_buf_set_name(dmabuf, buf); + if (ret) + kfree(buf); + + return ret; +} +EXPORT_SYMBOL_GPL(dma_buf_set_name); + +static long dma_buf_set_name_user(struct dma_buf *dmabuf, const char __user *buf) { char *name = strndup_user(buf, DMA_BUF_NAME_LEN); long ret = 0; @@ -351,19 +416,10 @@ static long dma_buf_set_name(struct dma_buf *dmabuf, const char __user *buf) if (IS_ERR(name)) return PTR_ERR(name); - dma_resv_lock(dmabuf->resv, NULL); - if (!list_empty(&dmabuf->attachments)) { - ret = -EBUSY; + ret = _dma_buf_set_name(dmabuf, name); + if (ret) kfree(name); - goto out_unlock; - } - spin_lock(&dmabuf->name_lock); - kfree(dmabuf->name); - dmabuf->name = name; - spin_unlock(&dmabuf->name_lock); -out_unlock: - dma_resv_unlock(dmabuf->resv); return ret; } @@ -372,6 +428,7 @@ static long dma_buf_ioctl(struct file *file, { struct dma_buf *dmabuf; struct dma_buf_sync sync; + struct dma_buf_sync_partial sync_p; enum dma_data_direction direction; int ret; @@ -408,7 +465,45 @@ static long dma_buf_ioctl(struct file *file, case DMA_BUF_SET_NAME_A: case DMA_BUF_SET_NAME_B: - return dma_buf_set_name(dmabuf, (const char __user *)arg); + return dma_buf_set_name_user(dmabuf, (const char __user *)arg); + + case DMA_BUF_IOCTL_SYNC_PARTIAL: + if (copy_from_user(&sync_p, (void __user *) arg, sizeof(sync_p))) + return -EFAULT; + + if (sync_p.len == 0) + return 0; + + if (sync_p.len > dmabuf->size || sync_p.offset > dmabuf->size - sync_p.len) + return -EINVAL; + + if (sync_p.flags & ~DMA_BUF_SYNC_VALID_FLAGS_MASK) + return -EINVAL; + + switch (sync_p.flags & DMA_BUF_SYNC_RW) { + case DMA_BUF_SYNC_READ: + direction = DMA_FROM_DEVICE; + break; + case DMA_BUF_SYNC_WRITE: + direction = DMA_TO_DEVICE; + break; + case DMA_BUF_SYNC_RW: + direction = DMA_BIDIRECTIONAL; + break; + default: + return -EINVAL; + } + + if (sync_p.flags & DMA_BUF_SYNC_END) + ret = dma_buf_end_cpu_access_partial(dmabuf, direction, + sync_p.offset, + sync_p.len); + else + ret = dma_buf_begin_cpu_access_partial(dmabuf, direction, + sync_p.offset, + sync_p.len); + + return ret; default: return -ENOTTY; @@ -442,10 +537,11 @@ static const struct file_operations dma_buf_fops = { /* * is_dma_buf_file - Check if struct file* is associated with dma_buf */ -static inline int is_dma_buf_file(struct file *file) +int is_dma_buf_file(struct file *file) { return file->f_op == &dma_buf_fops; } +EXPORT_SYMBOL_GPL(is_dma_buf_file); static struct file *dma_buf_getfile(struct dma_buf *dmabuf, int flags) { @@ -595,7 +691,6 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info) list_add(&dmabuf->list_node, &db_list.head); mutex_unlock(&db_list.lock); - init_dma_buf_task_info(dmabuf); return dmabuf; err_sysfs: @@ -1132,6 +1227,30 @@ int dma_buf_begin_cpu_access(struct dma_buf *dmabuf, } EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access); +int dma_buf_begin_cpu_access_partial(struct dma_buf *dmabuf, + enum dma_data_direction direction, + unsigned int offset, unsigned int len) +{ + int ret = 0; + + if (WARN_ON(!dmabuf)) + return -EINVAL; + + if (dmabuf->ops->begin_cpu_access_partial) + ret = dmabuf->ops->begin_cpu_access_partial(dmabuf, direction, + offset, len); + + /* Ensure that all fences are waited upon - but we first allow + * the native handler the chance to do so more efficiently 
if it + * chooses. A double invocation here will be a reasonably cheap no-op. + */ + if (ret == 0) + ret = __dma_buf_begin_cpu_access(dmabuf, direction); + + return ret; +} +EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access_partial); + /** * dma_buf_end_cpu_access - Must be called after accessing a dma_buf from the * cpu in the kernel context. Calls end_cpu_access to allow exporter-specific @@ -1158,6 +1277,21 @@ int dma_buf_end_cpu_access(struct dma_buf *dmabuf, } EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access); +int dma_buf_end_cpu_access_partial(struct dma_buf *dmabuf, + enum dma_data_direction direction, + unsigned int offset, unsigned int len) +{ + int ret = 0; + + if (WARN_ON(!dmabuf)) + return -EINVAL; + + if (dmabuf->ops->end_cpu_access_partial) + ret = dmabuf->ops->end_cpu_access_partial(dmabuf, direction, + offset, len); + + return ret; +} +EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access_partial); /** * dma_buf_mmap - Setup up a userspace mmap with the given vma @@ -1286,6 +1420,32 @@ void dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr) } EXPORT_SYMBOL_GPL(dma_buf_vunmap); +int dma_buf_get_flags(struct dma_buf *dmabuf, unsigned long *flags) +{ + int ret = 0; + + if (WARN_ON(!dmabuf) || !flags) + return -EINVAL; + + if (dmabuf->ops->get_flags) + ret = dmabuf->ops->get_flags(dmabuf, flags); + + return ret; +} +EXPORT_SYMBOL_GPL(dma_buf_get_flags); + +int dma_buf_get_uuid(struct dma_buf *dmabuf, uuid_t *uuid) +{ + if (WARN_ON(!dmabuf) || !uuid) + return -EINVAL; + + if (!dmabuf->ops->get_uuid) + return -ENODEV; + + return dmabuf->ops->get_uuid(dmabuf, uuid); +} +EXPORT_SYMBOL_GPL(dma_buf_get_uuid); + #ifdef CONFIG_DEBUG_FS static int dma_buf_debug_show(struct seq_file *s, void *unused) { @@ -1305,10 +1465,8 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused) return ret; seq_puts(s, "\nDma-buf Objects:\n"); - seq_printf(s, "%-8s\t%-8s\t%-8s\t%-8s\texp_name\t%-8s\t" - "%-16s\t%-16s\t%-16s\n", - "size", "flags", "mode", "count", "ino", - "buf_name", "exp_pid", "exp_task_comm"); + seq_printf(s, "%-8s\t%-8s\t%-8s\t%-8s\texp_name\t%-8s\n", + "size", "flags", "mode", "count", "ino"); list_for_each_entry(buf_obj, &db_list.head, list_node) { @@ -1316,16 +1474,15 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused) if (ret) goto error_unlock; - seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\t%08lu\t%s\t" - "%-16d\t%-16s\n", + spin_lock(&buf_obj->name_lock); + seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\t%08lu\t%s\n", buf_obj->size, buf_obj->file->f_flags, buf_obj->file->f_mode, file_count(buf_obj->file), buf_obj->exp_name, file_inode(buf_obj->file)->i_ino, - buf_obj->name ?: "NULL", - dma_buf_exp_pid(buf_obj), - dma_buf_exp_task_comm(buf_obj) ?: "NULL"); + buf_obj->name ?: ""); + spin_unlock(&buf_obj->name_lock); robj = buf_obj->resv; while (true) { @@ -1406,7 +1563,6 @@ static int dma_buf_init_debugfs(void) err = PTR_ERR(d); } - dma_buf_process_info_init_debugfs(dma_buf_debugfs_dir); return err; } @@ -1424,19 +1580,6 @@ static inline void dma_buf_uninit_debugfs(void) } #endif -#ifdef CONFIG_DMABUF_PROCESS_INFO -struct dma_buf *get_dma_buf_from_file(struct file *f) -{ - if (IS_ERR_OR_NULL(f)) - return NULL; - - if (!is_dma_buf_file(f)) - return NULL; - - return f->private_data; -} -#endif /* CONFIG_DMABUF_PROCESS_INFO */ - static int __init dma_buf_init(void) { int ret; @@ -1452,7 +1595,6 @@ static int __init dma_buf_init(void) mutex_init(&db_list.lock); INIT_LIST_HEAD(&db_list.head); dma_buf_init_debugfs(); - dma_buf_process_info_init_procfs(); return 0; } subsys_initcall(dma_buf_init);
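These partial begin/end helpers back the DMA_BUF_IOCTL_SYNC_PARTIAL command handled in dma_buf_ioctl() above, which lets userspace flush or invalidate only the sub-range it actually touched. A hedged userspace sketch: the flags/offset/len fields mirror the sync_p usage in the handler, but the exact uapi field widths and the header exporting the ioctl are assumptions, as that header is not part of this hunk:

    /* Hypothetical helper: CPU-sync only [offset, offset + len) of a dma-buf. */
    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/dma-buf.h> /* patched header that defines SYNC_PARTIAL */

    static int dmabuf_sync_range(int buf_fd, uint64_t offset, uint64_t len,
                                 uint64_t start_or_end)
    {
            struct dma_buf_sync_partial sp = {
                    .flags  = start_or_end | DMA_BUF_SYNC_WRITE,
                    .offset = offset,
                    .len    = len,
            };

            return ioctl(buf_fd, DMA_BUF_IOCTL_SYNC_PARTIAL, &sp);
    }

    /*
     * usage:
     *   dmabuf_sync_range(fd, off, n, DMA_BUF_SYNC_START);
     *   ... CPU writes to the mapped range ...
     *   dmabuf_sync_range(fd, off, n, DMA_BUF_SYNC_END);
     */

@@ -1462,6 +1604,5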
@@ static void __exit dma_buf_deinit(void) dma_buf_uninit_debugfs(); kern_unmount(dma_buf_mnt); dma_buf_uninit_sysfs_statistics(); - dma_buf_process_info_uninit_procfs(); } __exitcall(dma_buf_deinit); diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c index 7475e09b0..d64fc0392 100644 --- a/drivers/dma-buf/dma-fence.c +++ b/drivers/dma-buf/dma-fence.c @@ -312,22 +312,25 @@ void __dma_fence_might_wait(void) /** - * dma_fence_signal_locked - signal completion of a fence + * dma_fence_signal_timestamp_locked - signal completion of a fence * @fence: the fence to signal + * @timestamp: fence signal timestamp in kernel's CLOCK_MONOTONIC time domain * * Signal completion for software callbacks on a fence, this will unblock * dma_fence_wait() calls and run all the callbacks added with * dma_fence_add_callback(). Can be called multiple times, but since a fence * can only go from the unsignaled to the signaled state and not back, it will - * only be effective the first time. + * only be effective the first time. Set the timestamp provided as the fence + * signal timestamp. * - * Unlike dma_fence_signal(), this function must be called with &dma_fence.lock - * held. + * Unlike dma_fence_signal_timestamp(), this function must be called with + * &dma_fence.lock held. * * Returns 0 on success and a negative error value when @fence has been * signalled already. */ -int dma_fence_signal_locked(struct dma_fence *fence) +int dma_fence_signal_timestamp_locked(struct dma_fence *fence, + ktime_t timestamp) { struct dma_fence_cb *cur, *tmp; struct list_head cb_list; @@ -341,7 +344,7 @@ int dma_fence_signal_locked(struct dma_fence *fence) /* Stash the cb_list before replacing it with the timestamp */ list_replace(&fence->cb_list, &cb_list); - fence->timestamp = ktime_get(); + fence->timestamp = timestamp; set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags); trace_dma_fence_signaled(fence); @@ -352,6 +355,59 @@ int dma_fence_signal_locked(struct dma_fence *fence) return 0; } +EXPORT_SYMBOL(dma_fence_signal_timestamp_locked); + +/** + * dma_fence_signal_timestamp - signal completion of a fence + * @fence: the fence to signal + * @timestamp: fence signal timestamp in kernel's CLOCK_MONOTONIC time domain + * + * Signal completion for software callbacks on a fence, this will unblock + * dma_fence_wait() calls and run all the callbacks added with + * dma_fence_add_callback(). Can be called multiple times, but since a fence + * can only go from the unsignaled to the signaled state and not back, it will + * only be effective the first time. Set the timestamp provided as the fence + * signal timestamp. + * + * Returns 0 on success and a negative error value when @fence has been + * signalled already. + */ +int dma_fence_signal_timestamp(struct dma_fence *fence, ktime_t timestamp) +{ + unsigned long flags; + int ret; + + if (!fence) + return -EINVAL; + + spin_lock_irqsave(fence->lock, flags); + ret = dma_fence_signal_timestamp_locked(fence, timestamp); + spin_unlock_irqrestore(fence->lock, flags); + + return ret; +} +EXPORT_SYMBOL(dma_fence_signal_timestamp); + +/** + * dma_fence_signal_locked - signal completion of a fence + * @fence: the fence to signal + * + * Signal completion for software callbacks on a fence, this will unblock + * dma_fence_wait() calls and run all the callbacks added with + * dma_fence_add_callback(). Can be called multiple times, but since a fence + * can only go from the unsignaled to the signaled state and not back, it will + * only be effective the first time. 
+ * + * Unlike dma_fence_signal(), this function must be called with &dma_fence.lock + * held. + * + * Returns 0 on success and a negative error value when @fence has been + * signalled already. + */ +int dma_fence_signal_locked(struct dma_fence *fence) +{ + return dma_fence_signal_timestamp_locked(fence, ktime_get()); +} EXPORT_SYMBOL(dma_fence_signal_locked); /** @@ -379,7 +435,7 @@ int dma_fence_signal(struct dma_fence *fence) tmp = dma_fence_begin_signalling(); spin_lock_irqsave(fence->lock, flags); - ret = dma_fence_signal_locked(fence); + ret = dma_fence_signal_timestamp_locked(fence, ktime_get()); spin_unlock_irqrestore(fence->lock, flags); dma_fence_end_signalling(tmp); diff --git a/drivers/dma-buf/dma-heap.c b/drivers/dma-buf/dma-heap.c index bbbfa28b2..4fb22001b 100644 --- a/drivers/dma-buf/dma-heap.c +++ b/drivers/dma-buf/dma-heap.c @@ -14,7 +14,6 @@ #include #include #include -#include #include #include #include @@ -31,6 +30,7 @@ * @heap_devt heap device node * @list list head connecting to list of heaps * @heap_cdev heap char device + * @heap_dev heap device struct * * Represents a heap of memory from which buffers can be made. */ @@ -41,6 +41,8 @@ struct dma_heap { dev_t heap_devt; struct list_head list; struct cdev heap_cdev; + struct kref refcount; + struct device *heap_dev; }; static LIST_HEAD(heap_list); @@ -49,20 +51,72 @@ static dev_t dma_heap_devt; static struct class *dma_heap_class; static DEFINE_XARRAY_ALLOC(dma_heap_minors); -static int dma_heap_buffer_alloc(struct dma_heap *heap, size_t len, - unsigned int fd_flags, - unsigned int heap_flags) +struct dma_heap *dma_heap_find(const char *name) { + struct dma_heap *h; + + mutex_lock(&heap_list_lock); + list_for_each_entry(h, &heap_list, list) { + if (!strcmp(h->name, name)) { + kref_get(&h->refcount); + mutex_unlock(&heap_list_lock); + return h; + } + } + mutex_unlock(&heap_list_lock); + return NULL; +} +EXPORT_SYMBOL_GPL(dma_heap_find); + + +void dma_heap_buffer_free(struct dma_buf *dmabuf) +{ + dma_buf_put(dmabuf); +} +EXPORT_SYMBOL_GPL(dma_heap_buffer_free); + +struct dma_buf *dma_heap_buffer_alloc(struct dma_heap *heap, size_t len, + unsigned int fd_flags, + unsigned int heap_flags) +{ + if (fd_flags & ~DMA_HEAP_VALID_FD_FLAGS) + return ERR_PTR(-EINVAL); + + if (heap_flags & ~DMA_HEAP_VALID_HEAP_FLAGS) + return ERR_PTR(-EINVAL); /* * Allocations from all heaps have to begin * and end on page boundaries. 
*/ len = PAGE_ALIGN(len); if (!len) - return -EINVAL; + return ERR_PTR(-EINVAL); return heap->ops->allocate(heap, len, fd_flags, heap_flags); } +EXPORT_SYMBOL_GPL(dma_heap_buffer_alloc); + +int dma_heap_bufferfd_alloc(struct dma_heap *heap, size_t len, + unsigned int fd_flags, + unsigned int heap_flags) +{ + struct dma_buf *dmabuf; + int fd; + + dmabuf = dma_heap_buffer_alloc(heap, len, fd_flags, heap_flags); + + if (IS_ERR(dmabuf)) + return PTR_ERR(dmabuf); + + fd = dma_buf_fd(dmabuf, fd_flags); + if (fd < 0) { + dma_buf_put(dmabuf); + /* just return, as put will call release and that will free */ + } + return fd; +} +EXPORT_SYMBOL_GPL(dma_heap_bufferfd_alloc); static int dma_heap_open(struct inode *inode, struct file *file) { @@ -90,15 +144,9 @@ static long dma_heap_ioctl_allocate(struct file *file, void *data) if (heap_allocation->fd) return -EINVAL; - if (heap_allocation->fd_flags & ~DMA_HEAP_VALID_FD_FLAGS) - return -EINVAL; - - if (heap_allocation->heap_flags & ~DMA_HEAP_VALID_HEAP_FLAGS) - return -EINVAL; - - fd = dma_heap_buffer_alloc(heap, heap_allocation->len, - heap_allocation->fd_flags, - heap_allocation->heap_flags); + fd = dma_heap_bufferfd_alloc(heap, heap_allocation->len, + heap_allocation->fd_flags, + heap_allocation->heap_flags); if (fd < 0) return fd; @@ -124,7 +172,6 @@ static long dma_heap_ioctl(struct file *file, unsigned int ucmd, if (nr >= ARRAY_SIZE(dma_heap_ioctl_cmds)) return -EINVAL; - nr = array_index_nospec(nr, ARRAY_SIZE(dma_heap_ioctl_cmds)); /* Get the kernel ioctl cmd that matches */ kcmd = dma_heap_ioctl_cmds[nr]; @@ -191,6 +238,47 @@ void *dma_heap_get_drvdata(struct dma_heap *heap) { return heap->priv; } +EXPORT_SYMBOL_GPL(dma_heap_get_drvdata); + +static void dma_heap_release(struct kref *ref) +{ + struct dma_heap *heap = container_of(ref, struct dma_heap, refcount); + int minor = MINOR(heap->heap_devt); + + /* Note: we are already holding the heap_list_lock here */ + list_del(&heap->list); + + device_destroy(dma_heap_class, heap->heap_devt); + cdev_del(&heap->heap_cdev); + xa_erase(&dma_heap_minors, minor); + + kfree(heap); +} + +void dma_heap_put(struct dma_heap *h) +{ + /* + * Take the heap_list_lock now to avoid racing with code + * scanning the list and then taking a kref. + */ + mutex_lock(&heap_list_lock); + kref_put(&h->refcount, dma_heap_release); + mutex_unlock(&heap_list_lock); +} +EXPORT_SYMBOL_GPL(dma_heap_put);
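With dma_heap_find()/dma_heap_put() exported, other kernel modules can resolve a heap by name and allocate from it directly. A minimal sketch under stated assumptions (the heap name "system" and the error policy are illustrative; dma_heap_find() takes the reference that dma_heap_put() drops):

    /* Hypothetical in-kernel user of the exported heap lookup/alloc API. */
    #include <linux/dma-buf.h>
    #include <linux/dma-heap.h>
    #include <linux/err.h>
    #include <linux/fcntl.h>

    static struct dma_buf *alloc_from_named_heap(const char *name, size_t len)
    {
            struct dma_heap *heap;
            struct dma_buf *buf;

            heap = dma_heap_find(name); /* takes a heap reference */
            if (!heap)
                    return ERR_PTR(-ENODEV);

            buf = dma_heap_buffer_alloc(heap, len, O_RDWR | O_CLOEXEC, 0);
            dma_heap_put(heap); /* drop the lookup reference */
            return buf;
    }

The returned buffer is released with dma_heap_buffer_free() (the dma_buf_put() wrapper added above) once the caller is done with it.

+ +/** + * dma_heap_get_dev() - get device struct for the heap + * @heap: DMA-Heap to retrieve device struct from + * + * Returns: + * The device struct for the heap.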
+ */ +struct device *dma_heap_get_dev(struct dma_heap *heap) +{ + return heap->heap_dev; +} +EXPORT_SYMBOL_GPL(dma_heap_get_dev); /** * dma_heap_get_name() - get heap name @@ -203,11 +291,11 @@ const char *dma_heap_get_name(struct dma_heap *heap) { return heap->name; } +EXPORT_SYMBOL_GPL(dma_heap_get_name); struct dma_heap *dma_heap_add(const struct dma_heap_export_info *exp_info) { - struct dma_heap *heap, *h, *err_ret; - struct device *dev_ret; + struct dma_heap *heap, *err_ret; unsigned int minor; int ret; @@ -221,10 +309,20 @@ struct dma_heap *dma_heap_add(const struct dma_heap_export_info *exp_info) return ERR_PTR(-EINVAL); } + /* check the name is unique */ + heap = dma_heap_find(exp_info->name); + if (heap) { + pr_err("dma_heap: Already registered heap named %s\n", + exp_info->name); + dma_heap_put(heap); + return ERR_PTR(-EINVAL); + } + heap = kzalloc(sizeof(*heap), GFP_KERNEL); if (!heap) return ERR_PTR(-ENOMEM); + kref_init(&heap->refcount); heap->name = exp_info->name; heap->ops = exp_info->ops; heap->priv = exp_info->priv; @@ -249,37 +347,27 @@ struct dma_heap *dma_heap_add(const struct dma_heap_export_info *exp_info) goto err1; } - dev_ret = device_create(dma_heap_class, - NULL, - heap->heap_devt, - NULL, - heap->name); - if (IS_ERR(dev_ret)) { + heap->heap_dev = device_create(dma_heap_class, + NULL, + heap->heap_devt, + NULL, + heap->name); + if (IS_ERR(heap->heap_dev)) { pr_err("dma_heap: Unable to create device\n"); - err_ret = ERR_CAST(dev_ret); + err_ret = ERR_CAST(heap->heap_dev); goto err2; } - mutex_lock(&heap_list_lock); - /* check the name is unique */ - list_for_each_entry(h, &heap_list, list) { - if (!strcmp(h->name, exp_info->name)) { - mutex_unlock(&heap_list_lock); - pr_err("dma_heap: Already registered heap named %s\n", - exp_info->name); - err_ret = ERR_PTR(-EINVAL); - goto err3; - } - } + /* Make sure it doesn't disappear on us */ + heap->heap_dev = get_device(heap->heap_dev); /* Add heap to the list */ + mutex_lock(&heap_list_lock); list_add(&heap->list, &heap_list); mutex_unlock(&heap_list_lock); return heap; -err3: - device_destroy(dma_heap_class, heap->heap_devt); err2: cdev_del(&heap->heap_cdev); err1: @@ -288,27 +376,88 @@ struct dma_heap *dma_heap_add(const struct dma_heap_export_info *exp_info) kfree(heap); return err_ret; } +EXPORT_SYMBOL_GPL(dma_heap_add); static char *dma_heap_devnode(struct device *dev, umode_t *mode) { return kasprintf(GFP_KERNEL, "dma_heap/%s", dev_name(dev)); } +static ssize_t total_pools_kb_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + struct dma_heap *heap; + u64 total_pool_size = 0; + + mutex_lock(&heap_list_lock); + list_for_each_entry(heap, &heap_list, list) { + if (heap->ops->get_pool_size) + total_pool_size += heap->ops->get_pool_size(heap); + } + mutex_unlock(&heap_list_lock); + + return sysfs_emit(buf, "%llu\n", total_pool_size / 1024); +} + +static struct kobj_attribute total_pools_kb_attr = + __ATTR_RO(total_pools_kb); + +static struct attribute *dma_heap_sysfs_attrs[] = { + &total_pools_kb_attr.attr, + NULL, +}; + +ATTRIBUTE_GROUPS(dma_heap_sysfs); + +static struct kobject *dma_heap_kobject; + +static int dma_heap_sysfs_setup(void) +{ + int ret; + + dma_heap_kobject = kobject_create_and_add("dma_heap", kernel_kobj); + if (!dma_heap_kobject) + return -ENOMEM; + + ret = sysfs_create_groups(dma_heap_kobject, dma_heap_sysfs_groups); + if (ret) { + kobject_put(dma_heap_kobject); + return ret; + } + + return 0; +} + +static void dma_heap_sysfs_teardown(void) +{ + kobject_put(dma_heap_kobject); 
+} + static int dma_heap_init(void) { int ret; - ret = alloc_chrdev_region(&dma_heap_devt, 0, NUM_HEAP_MINORS, DEVNAME); + ret = dma_heap_sysfs_setup(); if (ret) return ret; + ret = alloc_chrdev_region(&dma_heap_devt, 0, NUM_HEAP_MINORS, DEVNAME); + if (ret) + goto err_chrdev; + dma_heap_class = class_create(THIS_MODULE, DEVNAME); if (IS_ERR(dma_heap_class)) { - unregister_chrdev_region(dma_heap_devt, NUM_HEAP_MINORS); - return PTR_ERR(dma_heap_class); + ret = PTR_ERR(dma_heap_class); + goto err_class; } dma_heap_class->devnode = dma_heap_devnode; return 0; + +err_class: + unregister_chrdev_region(dma_heap_devt, NUM_HEAP_MINORS); +err_chrdev: + dma_heap_sysfs_teardown(); + return ret; } subsys_initcall(dma_heap_init); diff --git a/drivers/dma-buf/heaps/Kconfig b/drivers/dma-buf/heaps/Kconfig index a5eef06c4..341d6d50f 100644 --- a/drivers/dma-buf/heaps/Kconfig +++ b/drivers/dma-buf/heaps/Kconfig @@ -1,12 +1,12 @@ config DMABUF_HEAPS_SYSTEM - bool "DMA-BUF System Heap" - depends on DMABUF_HEAPS + tristate "DMA-BUF System Heap" + depends on DMABUF_HEAPS && DMABUF_HEAPS_DEFERRED_FREE && DMABUF_HEAPS_PAGE_POOL help Choose this option to enable the system dmabuf heap. The system heap is backed by pages from the buddy allocator. If in doubt, say Y. config DMABUF_HEAPS_CMA - bool "DMA-BUF CMA Heap" + tristate "DMA-BUF CMA Heap" depends on DMABUF_HEAPS && DMA_CMA help Choose this option to enable dma-buf CMA heap. This heap is backed diff --git a/drivers/dma-buf/heaps/Makefile b/drivers/dma-buf/heaps/Makefile index 6e54cdec3..d06078f26 100644 --- a/drivers/dma-buf/heaps/Makefile +++ b/drivers/dma-buf/heaps/Makefile @@ -1,4 +1,4 @@ # SPDX-License-Identifier: GPL-2.0 -obj-y += heap-helpers.o + obj-$(CONFIG_DMABUF_HEAPS_SYSTEM) += system_heap.o obj-$(CONFIG_DMABUF_HEAPS_CMA) += cma_heap.o diff --git a/drivers/dma-buf/heaps/cma_heap.c b/drivers/dma-buf/heaps/cma_heap.c index e55384dc1..fd564aa70 100644 --- a/drivers/dma-buf/heaps/cma_heap.c +++ b/drivers/dma-buf/heaps/cma_heap.c @@ -2,76 +2,306 @@ /* * DMABUF CMA heap exporter * - * Copyright (C) 2012, 2019 Linaro Ltd. + * Copyright (C) 2012, 2019, 2020 Linaro Ltd. * Author: for ST-Ericsson. + * + * Also utilizing parts of Andrew Davis' SRAM heap: + * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/ + * Andrew F. 
Davis */ - #include -#include #include #include #include #include -#include #include +#include +#include #include -#include #include -#include +#include +#include -#include "heap-helpers.h" struct cma_heap { struct dma_heap *heap; struct cma *cma; }; -static void cma_heap_free(struct heap_helper_buffer *buffer) +struct cma_heap_buffer { + struct cma_heap *heap; + struct list_head attachments; + struct mutex lock; + unsigned long len; + struct page *cma_pages; + struct page **pages; + pgoff_t pagecount; + int vmap_cnt; + void *vaddr; +}; + +struct dma_heap_attachment { + struct device *dev; + struct sg_table table; + struct list_head list; + bool mapped; +}; + +static int cma_heap_attach(struct dma_buf *dmabuf, + struct dma_buf_attachment *attachment) +{ + struct cma_heap_buffer *buffer = dmabuf->priv; + struct dma_heap_attachment *a; + int ret; + + a = kzalloc(sizeof(*a), GFP_KERNEL); + if (!a) + return -ENOMEM; + + ret = sg_alloc_table_from_pages(&a->table, buffer->pages, + buffer->pagecount, 0, + buffer->pagecount << PAGE_SHIFT, + GFP_KERNEL); + if (ret) { + kfree(a); + return ret; + } + + a->dev = attachment->dev; + INIT_LIST_HEAD(&a->list); + a->mapped = false; + + attachment->priv = a; + + mutex_lock(&buffer->lock); + list_add(&a->list, &buffer->attachments); + mutex_unlock(&buffer->lock); + + return 0; +} + +static void cma_heap_detach(struct dma_buf *dmabuf, + struct dma_buf_attachment *attachment) +{ + struct cma_heap_buffer *buffer = dmabuf->priv; + struct dma_heap_attachment *a = attachment->priv; + + mutex_lock(&buffer->lock); + list_del(&a->list); + mutex_unlock(&buffer->lock); + + sg_free_table(&a->table); + kfree(a); +} + +static struct sg_table *cma_heap_map_dma_buf(struct dma_buf_attachment *attachment, + enum dma_data_direction direction) { - struct cma_heap *cma_heap = dma_heap_get_drvdata(buffer->heap); - unsigned long nr_pages = buffer->pagecount; - struct page *cma_pages = buffer->priv_virt; + struct dma_heap_attachment *a = attachment->priv; + struct sg_table *table = &a->table; + int attrs = attachment->dma_map_attrs; + int ret; + + ret = dma_map_sgtable(attachment->dev, table, direction, attrs); + if (ret) + return ERR_PTR(-ENOMEM); + a->mapped = true; + return table; +} + +static void cma_heap_unmap_dma_buf(struct dma_buf_attachment *attachment, + struct sg_table *table, + enum dma_data_direction direction) +{ + struct dma_heap_attachment *a = attachment->priv; + int attrs = attachment->dma_map_attrs; + + a->mapped = false; + dma_unmap_sgtable(attachment->dev, table, direction, attrs); +} + +static int cma_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, + enum dma_data_direction direction) +{ + struct cma_heap_buffer *buffer = dmabuf->priv; + struct dma_heap_attachment *a; + + if (buffer->vmap_cnt) + invalidate_kernel_vmap_range(buffer->vaddr, buffer->len); + + mutex_lock(&buffer->lock); + list_for_each_entry(a, &buffer->attachments, list) { + if (!a->mapped) + continue; + dma_sync_sgtable_for_cpu(a->dev, &a->table, direction); + } + mutex_unlock(&buffer->lock); + + return 0; +} + +static int cma_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf, + enum dma_data_direction direction) +{ + struct cma_heap_buffer *buffer = dmabuf->priv; + struct dma_heap_attachment *a; + + if (buffer->vmap_cnt) + flush_kernel_vmap_range(buffer->vaddr, buffer->len); + + mutex_lock(&buffer->lock); + list_for_each_entry(a, &buffer->attachments, list) { + if (!a->mapped) + continue; + dma_sync_sgtable_for_device(a->dev, &a->table, direction); + } + mutex_unlock(&buffer->lock); + + 
return 0; +} + +static vm_fault_t cma_heap_vm_fault(struct vm_fault *vmf) +{ + struct vm_area_struct *vma = vmf->vma; + struct cma_heap_buffer *buffer = vma->vm_private_data; + + if (vmf->pgoff >= buffer->pagecount) + return VM_FAULT_SIGBUS; + + vmf->page = buffer->pages[vmf->pgoff]; + get_page(vmf->page); + + return 0; +} + +static const struct vm_operations_struct dma_heap_vm_ops = { + .fault = cma_heap_vm_fault, +}; + +static int cma_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma) +{ + struct cma_heap_buffer *buffer = dmabuf->priv; + + if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0) + return -EINVAL; + + vma->vm_ops = &dma_heap_vm_ops; + vma->vm_private_data = buffer; + + return 0; +} + +static void *cma_heap_do_vmap(struct cma_heap_buffer *buffer) +{ + void *vaddr; + + vaddr = vmap(buffer->pages, buffer->pagecount, VM_MAP, PAGE_KERNEL); + if (!vaddr) + return ERR_PTR(-ENOMEM); + + return vaddr; +} + +static void *cma_heap_vmap(struct dma_buf *dmabuf) +{ + struct cma_heap_buffer *buffer = dmabuf->priv; + void *vaddr; + + mutex_lock(&buffer->lock); + if (buffer->vmap_cnt) { + buffer->vmap_cnt++; + vaddr = buffer->vaddr; + goto out; + } + + vaddr = cma_heap_do_vmap(buffer); + if (IS_ERR(vaddr)) + goto out; + + buffer->vaddr = vaddr; + buffer->vmap_cnt++; +out: + mutex_unlock(&buffer->lock); + + return vaddr; +} + +static void cma_heap_vunmap(struct dma_buf *dmabuf, void *vaddr) +{ + struct cma_heap_buffer *buffer = dmabuf->priv; + + mutex_lock(&buffer->lock); + if (!--buffer->vmap_cnt) { + vunmap(buffer->vaddr); + buffer->vaddr = NULL; + } + mutex_unlock(&buffer->lock); +} + +static void cma_heap_dma_buf_release(struct dma_buf *dmabuf) +{ + struct cma_heap_buffer *buffer = dmabuf->priv; + struct cma_heap *cma_heap = buffer->heap; + + if (buffer->vmap_cnt > 0) { + WARN(1, "%s: buffer still mapped in the kernel\n", __func__); + vunmap(buffer->vaddr); + } /* free page list */ kfree(buffer->pages); /* release memory */ - cma_release(cma_heap->cma, cma_pages, nr_pages); + cma_release(cma_heap->cma, buffer->cma_pages, buffer->pagecount); kfree(buffer); } -/* dmabuf heap CMA operations functions */ -static int cma_heap_allocate(struct dma_heap *heap, - unsigned long len, - unsigned long fd_flags, - unsigned long heap_flags) +static const struct dma_buf_ops cma_heap_buf_ops = { + .attach = cma_heap_attach, + .detach = cma_heap_detach, + .map_dma_buf = cma_heap_map_dma_buf, + .unmap_dma_buf = cma_heap_unmap_dma_buf, + .begin_cpu_access = cma_heap_dma_buf_begin_cpu_access, + .end_cpu_access = cma_heap_dma_buf_end_cpu_access, + .mmap = cma_heap_mmap, + .vmap = cma_heap_vmap, + .vunmap = cma_heap_vunmap, + .release = cma_heap_dma_buf_release, +}; + +static struct dma_buf *cma_heap_allocate(struct dma_heap *heap, + unsigned long len, + unsigned long fd_flags, + unsigned long heap_flags) { struct cma_heap *cma_heap = dma_heap_get_drvdata(heap); - struct heap_helper_buffer *helper_buffer; - struct page *cma_pages; + struct cma_heap_buffer *buffer; + DEFINE_DMA_BUF_EXPORT_INFO(exp_info); size_t size = PAGE_ALIGN(len); - unsigned long nr_pages = size >> PAGE_SHIFT; + pgoff_t pagecount = size >> PAGE_SHIFT; unsigned long align = get_order(size); + struct page *cma_pages; struct dma_buf *dmabuf; int ret = -ENOMEM; pgoff_t pg; - if (align > CONFIG_CMA_ALIGNMENT) - align = CONFIG_CMA_ALIGNMENT; + buffer = kzalloc(sizeof(*buffer), GFP_KERNEL); + if (!buffer) + return ERR_PTR(-ENOMEM); - helper_buffer = kzalloc(sizeof(*helper_buffer), GFP_KERNEL); - if (!helper_buffer) - return -ENOMEM;
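A note on the fault-based mapping above: cma_heap_mmap() installs dma_heap_vm_ops and rejects non-shared mappings, so pages are faulted in from buffer->pages on demand and userspace must map the dma-buf fd with MAP_SHARED. A small illustrative userspace sketch (buf_fd is assumed to be a dma-buf fd obtained from a heap allocation):

    /* Hypothetical userspace mapping of a CMA-heap dma-buf. */
    #include <stddef.h>
    #include <sys/mman.h>

    static void *map_heap_buffer(int buf_fd, size_t len)
    {
            /* MAP_PRIVATE would be rejected by cma_heap_mmap() above */
            return mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
                        buf_fd, 0);
    }

+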
INIT_LIST_HEAD(&buffer->attachments); + mutex_init(&buffer->lock); + buffer->len = size; - init_heap_helper_buffer(helper_buffer, cma_heap_free); - helper_buffer->heap = heap; - helper_buffer->size = len; + if (align > CONFIG_CMA_ALIGNMENT) + align = CONFIG_CMA_ALIGNMENT; - cma_pages = cma_alloc(cma_heap->cma, nr_pages, align, false); + cma_pages = cma_alloc(cma_heap->cma, pagecount, align, GFP_KERNEL); if (!cma_pages) - goto free_buf; + goto free_buffer; + /* Clear the cma pages */ if (PageHighMem(cma_pages)) { - unsigned long nr_clear_pages = nr_pages; + unsigned long nr_clear_pages = pagecount; struct page *page = cma_pages; while (nr_clear_pages > 0) { @@ -85,7 +315,6 @@ static int cma_heap_allocate(struct dma_heap *heap, */ if (fatal_signal_pending(current)) goto free_cma; - page++; nr_clear_pages--; } @@ -93,44 +322,41 @@ static int cma_heap_allocate(struct dma_heap *heap, memset(page_address(cma_pages), 0, size); } - helper_buffer->pagecount = nr_pages; - helper_buffer->pages = kmalloc_array(helper_buffer->pagecount, - sizeof(*helper_buffer->pages), - GFP_KERNEL); - if (!helper_buffer->pages) { + buffer->pages = kmalloc_array(pagecount, sizeof(*buffer->pages), GFP_KERNEL); + if (!buffer->pages) { ret = -ENOMEM; goto free_cma; } - for (pg = 0; pg < helper_buffer->pagecount; pg++) - helper_buffer->pages[pg] = &cma_pages[pg]; + for (pg = 0; pg < pagecount; pg++) + buffer->pages[pg] = &cma_pages[pg]; + + buffer->cma_pages = cma_pages; + buffer->heap = cma_heap; + buffer->pagecount = pagecount; /* create the dmabuf */ - dmabuf = heap_helper_export_dmabuf(helper_buffer, fd_flags); + exp_info.exp_name = dma_heap_get_name(heap); + exp_info.ops = &cma_heap_buf_ops; + exp_info.size = buffer->len; + exp_info.flags = fd_flags; + exp_info.priv = buffer; + dmabuf = dma_buf_export(&exp_info); if (IS_ERR(dmabuf)) { ret = PTR_ERR(dmabuf); goto free_pages; } - helper_buffer->dmabuf = dmabuf; - helper_buffer->priv_virt = cma_pages; - - ret = dma_buf_fd(dmabuf, fd_flags); - if (ret < 0) { - dma_buf_put(dmabuf); - /* just return, as put will call release and that will free */ - return ret; - } - - return ret; + return dmabuf; free_pages: - kfree(helper_buffer->pages); + kfree(buffer->pages); free_cma: - cma_release(cma_heap->cma, cma_pages, nr_pages); -free_buf: - kfree(helper_buffer); - return ret; + cma_release(cma_heap->cma, cma_pages, pagecount); +free_buffer: + kfree(buffer); + + return ERR_PTR(ret); } static const struct dma_heap_ops cma_heap_ops = { diff --git a/drivers/dma-buf/heaps/system_heap.c b/drivers/dma-buf/heaps/system_heap.c index 0bf688e3c..18f55f954 100644 --- a/drivers/dma-buf/heaps/system_heap.c +++ b/drivers/dma-buf/heaps/system_heap.c @@ -3,7 +3,11 @@ * DMABUF System heap exporter * * Copyright (C) 2011 Google, Inc. - * Copyright (C) 2019 Linaro Ltd. + * Copyright (C) 2019, 2020 Linaro Ltd. + * + * Portions based off of Andrew Davis' SRAM heap: + * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/ + * Andrew F. 
Davis */ #include @@ -15,99 +19,804 @@ #include #include #include -#include -#include +#include +#include +#include + +#include +#include + +#define CONFIG_SYSTEM_HEAP_FORCE_DMA_SYNC + +static struct dma_heap *sys_heap; +static struct dma_heap *sys_dma32_heap; +static struct dma_heap *sys_uncached_heap; +static struct dma_heap *sys_uncached_dma32_heap; + +/* Default setting */ +static u32 bank_bit_first = 12; +static u32 bank_bit_mask = 0x7; + +struct system_heap_buffer { + struct dma_heap *heap; + struct list_head attachments; + struct mutex lock; + unsigned long len; + struct sg_table sg_table; + int vmap_cnt; + void *vaddr; + struct deferred_freelist_item deferred_free; + + bool uncached; +}; -#include "heap-helpers.h" +struct dma_heap_attachment { + struct device *dev; + struct sg_table *table; + struct list_head list; + bool mapped; -struct dma_heap *sys_heap; + bool uncached; +}; -static void system_heap_free(struct heap_helper_buffer *buffer) +#define LOW_ORDER_GFP (GFP_HIGHUSER | __GFP_ZERO | __GFP_COMP) +#define MID_ORDER_GFP (LOW_ORDER_GFP | __GFP_NOWARN) +#define HIGH_ORDER_GFP (((GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN \ + | __GFP_NORETRY) & ~__GFP_RECLAIM) \ + | __GFP_COMP) +static gfp_t order_flags[] = {HIGH_ORDER_GFP, MID_ORDER_GFP, LOW_ORDER_GFP}; +/* + * The selection of the orders used for allocation (1MB, 64K, 4K) is designed + * to match the sizes often found in IOMMUs. Using order 4 pages instead + * of order 0 pages can significantly improve the performance of many IOMMUs + * by reducing TLB pressure and time spent updating page tables. + */ +static unsigned int orders[] = {8, 4, 0}; +#define NUM_ORDERS ARRAY_SIZE(orders) +struct dmabuf_page_pool *pools[NUM_ORDERS]; +struct dmabuf_page_pool *dma32_pools[NUM_ORDERS]; + +static struct sg_table *dup_sg_table(struct sg_table *table) { - pgoff_t pg; + struct sg_table *new_table; + int ret, i; + struct scatterlist *sg, *new_sg; - for (pg = 0; pg < buffer->pagecount; pg++) - __free_page(buffer->pages[pg]); - kfree(buffer->pages); - kfree(buffer); + new_table = kzalloc(sizeof(*new_table), GFP_KERNEL); + if (!new_table) + return ERR_PTR(-ENOMEM); + + ret = sg_alloc_table(new_table, table->orig_nents, GFP_KERNEL); + if (ret) { + kfree(new_table); + return ERR_PTR(-ENOMEM); + } + + new_sg = new_table->sgl; + for_each_sgtable_sg(table, sg, i) { + sg_set_page(new_sg, sg_page(sg), sg->length, sg->offset); + new_sg = sg_next(new_sg); + } + + return new_table; } -static int system_heap_allocate(struct dma_heap *heap, - unsigned long len, - unsigned long fd_flags, - unsigned long heap_flags) +static int system_heap_attach(struct dma_buf *dmabuf, + struct dma_buf_attachment *attachment) { - struct heap_helper_buffer *helper_buffer; - struct dma_buf *dmabuf; - int ret = -ENOMEM; - pgoff_t pg; + struct system_heap_buffer *buffer = dmabuf->priv; + struct dma_heap_attachment *a; + struct sg_table *table; + + a = kzalloc(sizeof(*a), GFP_KERNEL); + if (!a) + return -ENOMEM; - helper_buffer = kzalloc(sizeof(*helper_buffer), GFP_KERNEL); - if (!helper_buffer) + table = dup_sg_table(&buffer->sg_table); + if (IS_ERR(table)) { + kfree(a); return -ENOMEM; + } + + a->table = table; + a->dev = attachment->dev; + INIT_LIST_HEAD(&a->list); + a->mapped = false; + a->uncached = buffer->uncached; + attachment->priv = a; + + mutex_lock(&buffer->lock); + list_add(&a->list, &buffer->attachments); + mutex_unlock(&buffer->lock); + + return 0; +}
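The orders comment above explains the why; to make the how concrete, the allocator further below walks {8, 4, 0} greedily, always taking the largest order that still fits the remaining size. An illustrative stand-alone reimplementation of that walk (not part of the patch; assumes a page-aligned size):

    /* Sketch: the greedy decomposition behind the system heap's order walk.
     * A 1MB + 72K request yields one order-8 block, one order-4 block and
     * two order-0 pages. */
    #include <linux/kernel.h>
    #include <linux/mm.h>

    static unsigned int count_blocks(unsigned long size)
    {
            static const unsigned int ords[] = {8, 4, 0};
            unsigned int i, n = 0;

            while (size) {
                    for (i = 0; i < ARRAY_SIZE(ords); i++) {
                            if (size < (PAGE_SIZE << ords[i]))
                                    continue;
                            size -= PAGE_SIZE << ords[i];
                            n++;
                            break;
                    }
            }
            return n;
    }

The real allocator additionally clamps each step to the order of the previously allocated page (max_order), so the resulting scatterlist runs from largest to smallest block.

+ +static void system_heap_detach(struct dma_buf *dmabuf, + struct dma_buf_attachment *attachment) +{ +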
struct system_heap_buffer *buffer = dmabuf->priv; + struct dma_heap_attachment *a = attachment->priv; + + mutex_lock(&buffer->lock); + list_del(&a->list); + mutex_unlock(&buffer->lock); + + sg_free_table(a->table); + kfree(a->table); + kfree(a); +} + +static struct sg_table *system_heap_map_dma_buf(struct dma_buf_attachment *attachment, + enum dma_data_direction direction) +{ + struct dma_heap_attachment *a = attachment->priv; + struct sg_table *table = a->table; + int attr = attachment->dma_map_attrs; + int ret; + + if (a->uncached) + attr |= DMA_ATTR_SKIP_CPU_SYNC; + + ret = dma_map_sgtable(attachment->dev, table, direction, attr); + if (ret) + return ERR_PTR(ret); + + a->mapped = true; + return table; +} + +static void system_heap_unmap_dma_buf(struct dma_buf_attachment *attachment, + struct sg_table *table, + enum dma_data_direction direction) +{ + struct dma_heap_attachment *a = attachment->priv; + int attr = attachment->dma_map_attrs; + + if (a->uncached) + attr |= DMA_ATTR_SKIP_CPU_SYNC; + a->mapped = false; + dma_unmap_sgtable(attachment->dev, table, direction, attr); +} + +static int system_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, + enum dma_data_direction direction) +{ + struct system_heap_buffer *buffer = dmabuf->priv; + struct dma_heap_attachment *a; + + mutex_lock(&buffer->lock); + + if (buffer->vmap_cnt) + invalidate_kernel_vmap_range(buffer->vaddr, buffer->len); + + if (!buffer->uncached) { + list_for_each_entry(a, &buffer->attachments, list) { + if (!a->mapped) + continue; + dma_sync_sgtable_for_cpu(a->dev, a->table, direction); + } + } + mutex_unlock(&buffer->lock); + + return 0; +} + +static int system_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf, + enum dma_data_direction direction) +{ + struct system_heap_buffer *buffer = dmabuf->priv; + struct dma_heap_attachment *a; + + mutex_lock(&buffer->lock); + + if (buffer->vmap_cnt) + flush_kernel_vmap_range(buffer->vaddr, buffer->len); + + if (!buffer->uncached) { + list_for_each_entry(a, &buffer->attachments, list) { + if (!a->mapped) + continue; + dma_sync_sgtable_for_device(a->dev, a->table, direction); + } + } + mutex_unlock(&buffer->lock); + + return 0; +} + +static int system_heap_sgl_sync_range(struct device *dev, + struct scatterlist *sgl, + unsigned int nents, + unsigned int offset, + unsigned int length, + enum dma_data_direction dir, + bool for_cpu) +{ + struct scatterlist *sg; + unsigned int len = 0; + dma_addr_t sg_dma_addr; + int i; + + for_each_sg(sgl, sg, nents, i) { + unsigned int sg_offset, sg_left, size = 0; + + sg_dma_addr = sg_dma_address(sg); + + len += sg->length; + if (len <= offset) + continue; + + sg_left = len - offset; + sg_offset = sg->length - sg_left; + + size = (length < sg_left) ? 
length : sg_left; + if (for_cpu) + dma_sync_single_range_for_cpu(dev, sg_dma_addr, + sg_offset, size, dir); + else + dma_sync_single_range_for_device(dev, sg_dma_addr, + sg_offset, size, dir); + + offset += size; + length -= size; + + if (length == 0) + break; + } + + return 0; +} + +static int +system_heap_dma_buf_begin_cpu_access_partial(struct dma_buf *dmabuf, + enum dma_data_direction direction, + unsigned int offset, + unsigned int len) +{ + struct system_heap_buffer *buffer = dmabuf->priv; + struct dma_heap_attachment *a; + int ret = 0; + + if (direction == DMA_TO_DEVICE) + return 0; + + mutex_lock(&buffer->lock); + if (IS_ENABLED(CONFIG_SYSTEM_HEAP_FORCE_DMA_SYNC)) { + struct dma_heap *heap = buffer->heap; + struct sg_table *table = &buffer->sg_table; + + ret = system_heap_sgl_sync_range(dma_heap_get_dev(heap), + table->sgl, + table->nents, + offset, len, + direction, true); + goto unlock; + } + + if (buffer->vmap_cnt) + invalidate_kernel_vmap_range(buffer->vaddr, buffer->len); + + if (!buffer->uncached) + goto unlock; + + list_for_each_entry(a, &buffer->attachments, list) { + if (!a->mapped) + continue; + + ret = system_heap_sgl_sync_range(a->dev, a->table->sgl, + a->table->nents, + offset, len, + direction, true); + } + +unlock: + mutex_unlock(&buffer->lock); + + return ret; +} + +static int +system_heap_dma_buf_end_cpu_access_partial(struct dma_buf *dmabuf, + enum dma_data_direction direction, + unsigned int offset, + unsigned int len) +{ + struct system_heap_buffer *buffer = dmabuf->priv; + struct dma_heap_attachment *a; + int ret = 0; + + mutex_lock(&buffer->lock); + if (IS_ENABLED(CONFIG_SYSTEM_HEAP_FORCE_DMA_SYNC)) { + struct dma_heap *heap = buffer->heap; + struct sg_table *table = &buffer->sg_table; + + ret = system_heap_sgl_sync_range(dma_heap_get_dev(heap), + table->sgl, + table->nents, + offset, len, + direction, false); + goto unlock; + } + + if (buffer->vmap_cnt) + flush_kernel_vmap_range(buffer->vaddr, buffer->len); + + if (!buffer->uncached) + goto unlock; + + list_for_each_entry(a, &buffer->attachments, list) { + if (!a->mapped) + continue; + + ret = system_heap_sgl_sync_range(a->dev, a->table->sgl, + a->table->nents, + offset, len, + direction, false); + } +unlock: + mutex_unlock(&buffer->lock); + + return ret; +} + +static int system_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma) +{ + struct system_heap_buffer *buffer = dmabuf->priv; + struct sg_table *table = &buffer->sg_table; + unsigned long addr = vma->vm_start; + struct sg_page_iter piter; + int ret; + + if (buffer->uncached) + vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); + + for_each_sgtable_page(table, &piter, vma->vm_pgoff) { + struct page *page = sg_page_iter_page(&piter); + + ret = remap_pfn_range(vma, addr, page_to_pfn(page), PAGE_SIZE, + vma->vm_page_prot); + if (ret) + return ret; + addr += PAGE_SIZE; + if (addr >= vma->vm_end) + return 0; + } + return 0; +} + +static void *system_heap_do_vmap(struct system_heap_buffer *buffer) +{ + struct sg_table *table = &buffer->sg_table; + int npages = PAGE_ALIGN(buffer->len) / PAGE_SIZE; + struct page **pages = vmalloc(sizeof(struct page *) * npages); + struct page **tmp = pages; + struct sg_page_iter piter; + pgprot_t pgprot = PAGE_KERNEL; + void *vaddr; + + if (!pages) + return ERR_PTR(-ENOMEM); + + if (buffer->uncached) + pgprot = pgprot_writecombine(PAGE_KERNEL); + + for_each_sgtable_page(table, &piter, 0) { + WARN_ON(tmp - pages >= npages); + *tmp++ = sg_page_iter_page(&piter); + } - init_heap_helper_buffer(helper_buffer, 
system_heap_free); - helper_buffer->heap = heap; - helper_buffer->size = len; + vaddr = vmap(pages, npages, VM_MAP, pgprot); + vfree(pages); - helper_buffer->pagecount = len / PAGE_SIZE; - helper_buffer->pages = kmalloc_array(helper_buffer->pagecount, - sizeof(*helper_buffer->pages), - GFP_KERNEL); - if (!helper_buffer->pages) { - ret = -ENOMEM; - goto err0; + if (!vaddr) + return ERR_PTR(-ENOMEM); + + return vaddr; +} + +static void *system_heap_vmap(struct dma_buf *dmabuf) +{ + struct system_heap_buffer *buffer = dmabuf->priv; + void *vaddr; + + mutex_lock(&buffer->lock); + if (buffer->vmap_cnt) { + buffer->vmap_cnt++; + vaddr = buffer->vaddr; + goto out; + } + + vaddr = system_heap_do_vmap(buffer); + if (IS_ERR(vaddr)) + goto out; + + buffer->vaddr = vaddr; + buffer->vmap_cnt++; +out: + mutex_unlock(&buffer->lock); + + return vaddr; +} + +static void system_heap_vunmap(struct dma_buf *dmabuf, void *vaddr) +{ + struct system_heap_buffer *buffer = dmabuf->priv; + + mutex_lock(&buffer->lock); + if (!--buffer->vmap_cnt) { + vunmap(buffer->vaddr); + buffer->vaddr = NULL; + } + mutex_unlock(&buffer->lock); +} + +static int system_heap_zero_buffer(struct system_heap_buffer *buffer) +{ + struct sg_table *sgt = &buffer->sg_table; + struct sg_page_iter piter; + struct page *p; + void *vaddr; + int ret = 0; + + for_each_sgtable_page(sgt, &piter, 0) { + p = sg_page_iter_page(&piter); + vaddr = kmap_atomic(p); + memset(vaddr, 0, PAGE_SIZE); + kunmap_atomic(vaddr); + } + + return ret; +} + +static void system_heap_buf_free(struct deferred_freelist_item *item, + enum df_reason reason) +{ + struct system_heap_buffer *buffer; + struct sg_table *table; + struct scatterlist *sg; + int i, j; + + buffer = container_of(item, struct system_heap_buffer, deferred_free); + /* Zero the buffer pages before adding back to the pool */ + if (reason == DF_NORMAL) + if (system_heap_zero_buffer(buffer)) + reason = DF_UNDER_PRESSURE; // On failure, just free + + table = &buffer->sg_table; + for_each_sg(table->sgl, sg, table->nents, i) { + struct page *page = sg_page(sg); + + if (reason == DF_UNDER_PRESSURE) { + __free_pages(page, compound_order(page)); + } else { + for (j = 0; j < NUM_ORDERS; j++) { + if (compound_order(page) == orders[j]) + break; + } + dmabuf_page_pool_free(pools[j], page); + } } + sg_free_table(table); + kfree(buffer); +} + +static void system_heap_dma_buf_release(struct dma_buf *dmabuf) +{ + struct system_heap_buffer *buffer = dmabuf->priv; + int npages = PAGE_ALIGN(buffer->len) / PAGE_SIZE; + + deferred_free(&buffer->deferred_free, system_heap_buf_free, npages); +} + +static const struct dma_buf_ops system_heap_buf_ops = { + .attach = system_heap_attach, + .detach = system_heap_detach, + .map_dma_buf = system_heap_map_dma_buf, + .unmap_dma_buf = system_heap_unmap_dma_buf, + .begin_cpu_access = system_heap_dma_buf_begin_cpu_access, + .end_cpu_access = system_heap_dma_buf_end_cpu_access, + .begin_cpu_access_partial = system_heap_dma_buf_begin_cpu_access_partial, + .end_cpu_access_partial = system_heap_dma_buf_end_cpu_access_partial, + .mmap = system_heap_mmap, + .vmap = system_heap_vmap, + .vunmap = system_heap_vunmap, + .release = system_heap_dma_buf_release, +}; + +static struct page *system_heap_alloc_largest_available(struct dma_heap *heap, + unsigned long size, + unsigned int max_order) +{ + struct page *page; + int i; + const char *name = dma_heap_get_name(heap); + struct dmabuf_page_pool **pool; + + pool = strstr(name, "dma32") ? 
dma32_pools : pools; + for (i = 0; i < NUM_ORDERS; i++) { + if (size < (PAGE_SIZE << orders[i])) + continue; + if (max_order < orders[i]) + continue; + page = dmabuf_page_pool_alloc(pool[i]); + if (!page) + continue; + return page; + } + return NULL; +} + +static struct dma_buf *system_heap_do_allocate(struct dma_heap *heap, + unsigned long len, + unsigned long fd_flags, + unsigned long heap_flags, + bool uncached) +{ + struct system_heap_buffer *buffer; + DEFINE_DMA_BUF_EXPORT_INFO(exp_info); + unsigned long size_remaining = len; + unsigned int max_order = orders[0]; + struct dma_buf *dmabuf; + struct sg_table *table; + struct scatterlist *sg; + struct list_head pages; + struct page *page, *tmp_page; + int i, ret = -ENOMEM; + struct list_head lists[8]; + unsigned int block_index[8] = {0}; + unsigned int block_1M = 0; + unsigned int block_64K = 0; + unsigned int maximum; + int j; + + buffer = kzalloc(sizeof(*buffer), GFP_KERNEL); + if (!buffer) + return ERR_PTR(-ENOMEM); + + INIT_LIST_HEAD(&buffer->attachments); + mutex_init(&buffer->lock); + buffer->heap = heap; + buffer->len = len; + buffer->uncached = uncached; - for (pg = 0; pg < helper_buffer->pagecount; pg++) { + INIT_LIST_HEAD(&pages); + for (i = 0; i < 8; i++) + INIT_LIST_HEAD(&lists[i]); + i = 0; + while (size_remaining > 0) { /* * Avoid trying to allocate memory if the process - * has been killed by by SIGKILL + * has been killed by SIGKILL */ if (fatal_signal_pending(current)) - goto err1; + goto free_buffer; + + page = system_heap_alloc_largest_available(heap, size_remaining, max_order); + if (!page) + goto free_buffer; + + size_remaining -= page_size(page); + max_order = compound_order(page); + if (max_order) { + if (max_order == 8) + block_1M++; + if (max_order == 4) + block_64K++; + list_add_tail(&page->lru, &pages); + } else { + dma_addr_t phys = page_to_phys(page); + unsigned int bit_index = ((phys >> bank_bit_first) & bank_bit_mask) & 0x7; + + list_add_tail(&page->lru, &lists[bit_index]); + block_index[bit_index]++; + } + i++; + } + + table = &buffer->sg_table; + if (sg_alloc_table(table, i, GFP_KERNEL)) + goto free_buffer; - helper_buffer->pages[pg] = alloc_page(GFP_KERNEL | __GFP_ZERO); - if (!helper_buffer->pages[pg]) - goto err1; + maximum = block_index[0]; + for (i = 1; i < 8; i++) + maximum = max(maximum, block_index[i]); + sg = table->sgl; + list_for_each_entry_safe(page, tmp_page, &pages, lru) { + sg_set_page(sg, page, page_size(page), 0); + sg = sg_next(sg); + list_del(&page->lru); + } + for (i = 0; i < maximum; i++) { + for (j = 0; j < 8; j++) { + if (!list_empty(&lists[j])) { + page = list_first_entry(&lists[j], struct page, lru); + sg_set_page(sg, page, PAGE_SIZE, 0); + sg = sg_next(sg); + list_del(&page->lru); + } + } } /* create the dmabuf */ - dmabuf = heap_helper_export_dmabuf(helper_buffer, fd_flags); + exp_info.exp_name = dma_heap_get_name(heap); + exp_info.ops = &system_heap_buf_ops; + exp_info.size = buffer->len; + exp_info.flags = fd_flags; + exp_info.priv = buffer; + dmabuf = dma_buf_export(&exp_info); if (IS_ERR(dmabuf)) { ret = PTR_ERR(dmabuf); - goto err1; + goto free_pages; } - helper_buffer->dmabuf = dmabuf; + /* + * For uncached buffers, we need to initially flush cpu cache, since + * the __GFP_ZERO on the allocation means the zeroing was done by the + * cpu and thus it is likely cached. Map (and implicitly flush) and + * unmap it now so we don't get corruption later on. + */ + if (buffer->uncached) { + dma_map_sgtable(dma_heap_get_dev(heap), table, DMA_BIDIRECTIONAL, 0); + dma_unmap_sgtable(dma_heap_get_dev(heap), table, DMA_BIDIRECTIONAL, 0); + } - ret = dma_buf_fd(dmabuf, fd_flags); - if (ret < 0) { - dma_buf_put(dmabuf); - /* just return, as put will call release and that will free */ - return ret; + return dmabuf; + +free_pages: + for_each_sgtable_sg(table, sg, i) { + struct page *p = sg_page(sg); + + __free_pages(p, compound_order(p)); + } + sg_free_table(table); +free_buffer: + list_for_each_entry_safe(page, tmp_page, &pages, lru) + __free_pages(page, compound_order(page)); + for (i = 0; i < 8; i++) { + list_for_each_entry_safe(page, tmp_page, &lists[i], lru) + __free_pages(page, compound_order(page)); } + kfree(buffer); - return ret; + return ERR_PTR(ret); +}
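/*
 * Illustrative sketch, not part of the patch: system_heap_do_allocate()
 * above bins every order-0 page by DRAM bank before building the sg_table,
 * then drains the bins round-robin so consecutive PAGE_SIZE entries land
 * in different banks. The bank is taken from the physical address using
 * the bit layout reported by the SiP firmware (sip_smc_get_dram_map());
 * the helper below just restates that arithmetic with assumed parameter
 * names.
 */
static inline unsigned int example_dram_bank_bin(unsigned long phys,
						 unsigned int bank_bit_first,
						 unsigned int bank_bit_mask)
{
	/* Same expression as the allocator: at most 8 bins are used. */
	return ((phys >> bank_bit_first) & bank_bit_mask) & 0x7;
}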
-err1: - while (pg > 0) - __free_page(helper_buffer->pages[--pg]); - kfree(helper_buffer->pages); -err0: - kfree(helper_buffer); +static struct dma_buf *system_heap_allocate(struct dma_heap *heap, + unsigned long len, + unsigned long fd_flags, + unsigned long heap_flags) +{ + return system_heap_do_allocate(heap, len, fd_flags, heap_flags, false); +} - return ret; +static long system_get_pool_size(struct dma_heap *heap) +{ + int i; + long num_pages = 0; + struct dmabuf_page_pool **pool; + const char *name = dma_heap_get_name(heap); + + pool = pools; + if (!strcmp(name, "system-dma32") || !strcmp(name, "system-uncached-dma32")) + pool = dma32_pools; + for (i = 0; i < NUM_ORDERS; i++, pool++) { + num_pages += ((*pool)->count[POOL_LOWPAGE] + + (*pool)->count[POOL_HIGHPAGE]) << (*pool)->order; + } + + return num_pages << PAGE_SHIFT; } static const struct dma_heap_ops system_heap_ops = { .allocate = system_heap_allocate, + .get_pool_size = system_get_pool_size, +}; + +static struct dma_buf *system_uncached_heap_allocate(struct dma_heap *heap, + unsigned long len, + unsigned long fd_flags, + unsigned long heap_flags) +{ + return system_heap_do_allocate(heap, len, fd_flags, heap_flags, true); +} + +/* Dummy function to be used until we can call coerce_mask_and_coherent */ +static struct dma_buf *system_uncached_heap_not_initialized(struct dma_heap *heap, + unsigned long len, + unsigned long fd_flags, + unsigned long heap_flags) +{ + return ERR_PTR(-EBUSY); +} + +static struct dma_heap_ops system_uncached_heap_ops = { + /* After system_heap_create is complete, we will swap this */ + .allocate = system_uncached_heap_not_initialized, }; +static int set_heap_dev_dma(struct device *heap_dev) +{ + int err = 0; + + if (!heap_dev) + return -EINVAL; + + dma_coerce_mask_and_coherent(heap_dev, DMA_BIT_MASK(64)); + + if (!heap_dev->dma_parms) { + heap_dev->dma_parms = devm_kzalloc(heap_dev, + sizeof(*heap_dev->dma_parms), + GFP_KERNEL); + if (!heap_dev->dma_parms) + return -ENOMEM; + + err = dma_set_max_seg_size(heap_dev, (unsigned int)DMA_BIT_MASK(64)); + if (err) { + devm_kfree(heap_dev, heap_dev->dma_parms); + dev_err(heap_dev, "Failed to set DMA segment size, err:%d\n", err); + return err; + } + } + + return 0; +} + static int system_heap_create(void) { struct dma_heap_export_info exp_info; - int ret = 0; + int i, err = 0; + struct dram_addrmap_info *ddr_map_info; + + /* + * Since swiotlb limits the size of a single mapping, calculate + * the maximum allocation size locally. + * + * If swiotlb_max_segment() returns non-zero, total RAM is larger than + * 4GiB and swiotlb is not in force mode; in that case the system heap + * should limit its largest allocation. + * + * FIXME: clamp the orders[] as a workaround.
+ */ + if (swiotlb_max_segment()) { + unsigned int max_size = (1 << IO_TLB_SHIFT) * IO_TLB_SEGSIZE; + int max_order = MAX_ORDER; + int i; + + max_size = max_t(unsigned int, max_size, PAGE_SIZE) >> PAGE_SHIFT; + max_order = min(max_order, ilog2(max_size)); + for (i = 0; i < NUM_ORDERS; i++) { + if (max_order < orders[i]) + orders[i] = max_order; + pr_info("system_heap: orders[%d] = %u\n", i, orders[i]); + } + } + + for (i = 0; i < NUM_ORDERS; i++) { + pools[i] = dmabuf_page_pool_create(order_flags[i], orders[i]); + + if (!pools[i]) { + int j; + + pr_err("%s: page pool creation failed!\n", __func__); + for (j = 0; j < i; j++) + dmabuf_page_pool_destroy(pools[j]); + return -ENOMEM; + } + } + + for (i = 0; i < NUM_ORDERS; i++) { + dma32_pools[i] = dmabuf_page_pool_create(order_flags[i] | GFP_DMA32, orders[i]); + + if (!dma32_pools[i]) { + int j; + + pr_err("%s: page dma32 pool creation failed!\n", __func__); + for (j = 0; j < i; j++) + dmabuf_page_pool_destroy(dma32_pools[j]); + goto err_dma32_pool; + } + } exp_info.name = "system"; exp_info.ops = &system_heap_ops; @@ -115,9 +824,56 @@ static int system_heap_create(void) sys_heap = dma_heap_add(&exp_info); if (IS_ERR(sys_heap)) - ret = PTR_ERR(sys_heap); + return PTR_ERR(sys_heap); - return ret; + exp_info.name = "system-dma32"; + exp_info.ops = &system_heap_ops; + exp_info.priv = NULL; + + sys_dma32_heap = dma_heap_add(&exp_info); + if (IS_ERR(sys_dma32_heap)) + return PTR_ERR(sys_dma32_heap); + + exp_info.name = "system-uncached"; + exp_info.ops = &system_uncached_heap_ops; + exp_info.priv = NULL; + + sys_uncached_heap = dma_heap_add(&exp_info); + if (IS_ERR(sys_uncached_heap)) + return PTR_ERR(sys_uncached_heap); + + err = set_heap_dev_dma(dma_heap_get_dev(sys_uncached_heap)); + if (err) + return err; + + exp_info.name = "system-uncached-dma32"; + exp_info.ops = &system_uncached_heap_ops; + exp_info.priv = NULL; + + sys_uncached_dma32_heap = dma_heap_add(&exp_info); + if (IS_ERR(sys_uncached_dma32_heap)) + return PTR_ERR(sys_uncached_dma32_heap); + + err = set_heap_dev_dma(dma_heap_get_dev(sys_uncached_dma32_heap)); + if (err) + return err; + dma_coerce_mask_and_coherent(dma_heap_get_dev(sys_uncached_dma32_heap), DMA_BIT_MASK(32)); + + mb(); /* make sure we only set allocate after dma_mask is set */ + system_uncached_heap_ops.allocate = system_uncached_heap_allocate; + + ddr_map_info = sip_smc_get_dram_map(); + if (ddr_map_info) { + bank_bit_first = ddr_map_info->bank_bit_first; + bank_bit_mask = ddr_map_info->bank_bit_mask; + } + + return 0; +err_dma32_pool: + for (i = 0; i < NUM_ORDERS; i++) + dmabuf_page_pool_destroy(pools[i]); + + return -ENOMEM; } module_init(system_heap_create); MODULE_LICENSE("GPL v2"); diff --git a/drivers/dma-buf/sw_sync.c b/drivers/dma-buf/sw_sync.c index 348b3a917..3daa6c76b 100644 --- a/drivers/dma-buf/sw_sync.c +++ b/drivers/dma-buf/sw_sync.c @@ -7,6 +7,8 @@ #include #include +#include +#include #include #include #include @@ -410,3 +412,13 @@ const struct file_operations sw_sync_debugfs_fops = { .unlocked_ioctl = sw_sync_ioctl, .compat_ioctl = compat_ptr_ioctl, }; + +static struct miscdevice sw_sync_dev = { + .minor = MISC_DYNAMIC_MINOR, + .name = "sw_sync", + .fops = &sw_sync_debugfs_fops, +}; + +module_misc_device(sw_sync_dev); + +MODULE_LICENSE("GPL v2"); diff --git a/drivers/dma-buf/sync_debug.h b/drivers/dma-buf/sync_debug.h index 6176e52ba..fb676da19 100644 --- a/drivers/dma-buf/sync_debug.h +++ b/drivers/dma-buf/sync_debug.h @@ -64,9 +64,16 @@ struct sync_pt { extern const struct file_operations 
sw_sync_debugfs_fops; +#ifdef CONFIG_SW_SYNC_DEBUG void sync_timeline_debug_add(struct sync_timeline *obj); void sync_timeline_debug_remove(struct sync_timeline *obj); void sync_file_debug_add(struct sync_file *fence); void sync_file_debug_remove(struct sync_file *fence); +#else +static inline void sync_timeline_debug_add(struct sync_timeline *obj) {} +static inline void sync_timeline_debug_remove(struct sync_timeline *obj) {} +static inline void sync_file_debug_add(struct sync_file *fence) {} +static inline void sync_file_debug_remove(struct sync_file *fence) {} +#endif #endif /* _LINUX_SYNC_H */ diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c index 2e63274a4..ab666917b 100644 --- a/drivers/gpio/gpiolib-of.c +++ b/drivers/gpio/gpiolib-of.c @@ -1046,3 +1046,14 @@ void of_gpiochip_remove(struct gpio_chip *chip) { of_node_put(chip->of_node); } + +void of_gpio_dev_init(struct gpio_chip *gc, struct gpio_device *gdev) +{ + /* If the gpiochip has an assigned OF node this takes precedence */ + if (gc->of_node) + gdev->dev.of_node = gc->of_node; + else + gc->of_node = gdev->dev.of_node; + if (gdev->dev.of_node) + gdev->dev.fwnode = of_fwnode_handle(gdev->dev.of_node); +} diff --git a/drivers/gpio/gpiolib-of.h b/drivers/gpio/gpiolib-of.h index ed26664f1..8af2bc899 100644 --- a/drivers/gpio/gpiolib-of.h +++ b/drivers/gpio/gpiolib-of.h @@ -15,6 +15,7 @@ int of_gpiochip_add(struct gpio_chip *gc); void of_gpiochip_remove(struct gpio_chip *gc); int of_gpio_get_count(struct device *dev, const char *con_id); bool of_gpio_need_valid_mask(const struct gpio_chip *gc); +void of_gpio_dev_init(struct gpio_chip *gc, struct gpio_device *gdev); #else static inline struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id, @@ -33,6 +34,10 @@ static inline bool of_gpio_need_valid_mask(const struct gpio_chip *gc) { return false; } +static inline void of_gpio_dev_init(struct gpio_chip *gc, + struct gpio_device *gdev) +{ +} #endif /* CONFIG_OF_GPIO */ extern struct notifier_block gpio_of_notifier; diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index 4e9b3a95f..95994151a 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -32,6 +32,10 @@ config DRM_MIPI_DBI depends on DRM select DRM_KMS_HELPER +config DRM_IGNORE_IOTCL_PERMIT + bool "Ignore drm ioctl permission" + depends on DRM && ANDROID && NO_GKI + config DRM_MIPI_DSI bool depends on DRM diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c index e8baa0745..0bc97715e 100644 --- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c +++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include #include @@ -40,6 +41,20 @@ struct bridge_init { struct device_node *node; }; +static bool analogix_dp_bandwidth_ok(struct analogix_dp_device *dp, + const struct drm_display_mode *mode, + unsigned int rate, unsigned int lanes) +{ + u32 max_bw, req_bw, bpp = 24; + + req_bw = mode->clock * bpp / 8; + max_bw = lanes * rate; + if (req_bw > max_bw) + return false; + + return true; +} + static int analogix_dp_init_dp(struct analogix_dp_device *dp) { int ret; @@ -64,6 +79,46 @@ static int analogix_dp_init_dp(struct analogix_dp_device *dp) return 0; } +static int analogix_dp_panel_prepare(struct analogix_dp_device *dp) +{ + int ret; + + mutex_lock(&dp->panel_lock); + + if (dp->panel_is_prepared) + goto out; + + ret = drm_panel_prepare(dp->plat_data->panel); + if (ret) + goto out; + + 
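/*
 * Worked example for the analogix_dp_bandwidth_ok() check introduced above,
 * with assumed numbers: mode->clock is in kHz and the rate argument comes
 * from drm_dp_bw_code_to_link_rate(), so both sides of the comparison end
 * up in kilobytes per second (each 8b/10b symbol carries one data byte per
 * lane per symbol clock).
 */
static inline bool example_4k60_fits_4_lanes_hbr2(void)
{
	const u32 clock_khz = 533250;	/* assumed 4K@60 reduced-blanking mode */
	const u32 rate = 540000;	/* HBR2: DP_LINK_BW_5_4 (0x14) * 27000 */
	const u32 lanes = 4, bpp = 24;
	u32 req_bw = clock_khz * bpp / 8;	/* 1599750 */
	u32 max_bw = lanes * rate;		/* 2160000 */

	return req_bw <= max_bw;	/* fits; on 2 lanes (1080000) it would not */
}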
dp->panel_is_prepared = true; + +out: + mutex_unlock(&dp->panel_lock); + return 0; +} + +static int analogix_dp_panel_unprepare(struct analogix_dp_device *dp) +{ + int ret; + + mutex_lock(&dp->panel_lock); + + if (!dp->panel_is_prepared) + goto out; + + ret = drm_panel_unprepare(dp->plat_data->panel); + if (ret) + goto out; + + dp->panel_is_prepared = false; + +out: + mutex_unlock(&dp->panel_lock); + return 0; +} + static int analogix_dp_detect_hpd(struct analogix_dp_device *dp) { int timeout_loop = 0; @@ -108,6 +163,9 @@ static bool analogix_dp_detect_sink_psr(struct analogix_dp_device *dp) unsigned char psr_version; int ret; + if (!device_property_read_bool(dp->dev, "support-psr")) + return 0; + ret = drm_dp_dpcd_readb(&dp->aux, DP_PSR_SUPPORT, &psr_version); if (ret != 1) { dev_err(dp->dev, "failed to get PSR version, disable it\n"); @@ -216,8 +274,24 @@ static int analogix_dp_set_enhanced_mode(struct analogix_dp_device *dp) if (ret < 0) return ret; + if (!data) { + /* + * A setting of 1 indicates that this is an eDP device that + * uses only Enhanced Framing, independently of the setting by + * the source of ENHANCED_FRAME_EN + */ + ret = drm_dp_dpcd_readb(&dp->aux, DP_EDP_CONFIGURATION_CAP, + &data); + if (ret < 0) + return ret; + + data = !!(data & DP_FRAMING_CHANGE_CAP); + } + analogix_dp_enable_enhanced_mode(dp, data); + dp->link_train.enhanced_framing = data; + return 0; } @@ -233,32 +307,10 @@ static int analogix_dp_training_pattern_dis(struct analogix_dp_device *dp) return ret < 0 ? ret : 0; } -static void -analogix_dp_set_lane_lane_pre_emphasis(struct analogix_dp_device *dp, - int pre_emphasis, int lane) -{ - switch (lane) { - case 0: - analogix_dp_set_lane0_pre_emphasis(dp, pre_emphasis); - break; - case 1: - analogix_dp_set_lane1_pre_emphasis(dp, pre_emphasis); - break; - - case 2: - analogix_dp_set_lane2_pre_emphasis(dp, pre_emphasis); - break; - - case 3: - analogix_dp_set_lane3_pre_emphasis(dp, pre_emphasis); - break; - } -} - static int analogix_dp_link_start(struct analogix_dp_device *dp) { u8 buf[4]; - int lane, lane_count, pll_tries, retval; + int lane, lane_count, retval; lane_count = dp->link_train.lane_count; @@ -278,6 +330,14 @@ static int analogix_dp_link_start(struct analogix_dp_device *dp) retval = drm_dp_dpcd_write(&dp->aux, DP_LINK_BW_SET, buf, 2); if (retval < 0) return retval; + + /* Spread AMP if required, enable 8b/10b coding */ + buf[0] = analogix_dp_ssc_supported(dp) ? 
DP_SPREAD_AMP_0_5 : 0; + buf[1] = DP_SET_ANSI_8B10B; + retval = drm_dp_dpcd_write(&dp->aux, DP_DOWNSPREAD_CTRL, buf, 2); + if (retval < 0) + return retval; + /* set enhanced mode if available */ retval = analogix_dp_set_enhanced_mode(dp); if (retval < 0) { @@ -285,22 +345,12 @@ static int analogix_dp_link_start(struct analogix_dp_device *dp) return retval; } - /* Set TX pre-emphasis to minimum */ + /* Set TX voltage-swing and pre-emphasis to minimum */ for (lane = 0; lane < lane_count; lane++) - analogix_dp_set_lane_lane_pre_emphasis(dp, - PRE_EMPHASIS_LEVEL_0, lane); - - /* Wait for PLL lock */ - pll_tries = 0; - while (analogix_dp_get_pll_lock_status(dp) == PLL_UNLOCKED) { - if (pll_tries == DP_TIMEOUT_LOOP_COUNT) { - dev_err(dp->dev, "Wait for PLL lock timed out\n"); - return -ETIMEDOUT; - } - - pll_tries++; - usleep_range(90, 120); - } + dp->link_train.training_lane[lane] = + DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | + DP_TRAIN_PRE_EMPH_LEVEL_0; + analogix_dp_set_lane_link_training(dp); /* Set training pattern 1 */ analogix_dp_set_training_pattern(dp, TRAINING_PTN1); @@ -383,54 +433,6 @@ static unsigned char analogix_dp_get_adjust_request_pre_emphasis( return ((link_value >> shift) & 0xc) >> 2; } -static void analogix_dp_set_lane_link_training(struct analogix_dp_device *dp, - u8 training_lane_set, int lane) -{ - switch (lane) { - case 0: - analogix_dp_set_lane0_link_training(dp, training_lane_set); - break; - case 1: - analogix_dp_set_lane1_link_training(dp, training_lane_set); - break; - - case 2: - analogix_dp_set_lane2_link_training(dp, training_lane_set); - break; - - case 3: - analogix_dp_set_lane3_link_training(dp, training_lane_set); - break; - } -} - -static unsigned int -analogix_dp_get_lane_link_training(struct analogix_dp_device *dp, - int lane) -{ - u32 reg; - - switch (lane) { - case 0: - reg = analogix_dp_get_lane0_link_training(dp); - break; - case 1: - reg = analogix_dp_get_lane1_link_training(dp); - break; - case 2: - reg = analogix_dp_get_lane2_link_training(dp); - break; - case 3: - reg = analogix_dp_get_lane3_link_training(dp); - break; - default: - WARN_ON(1); - return 0; - } - - return reg; -} - static void analogix_dp_reduce_link_rate(struct analogix_dp_device *dp) { analogix_dp_training_pattern_dis(dp); @@ -463,13 +465,27 @@ static void analogix_dp_get_adjust_training_lane(struct analogix_dp_device *dp, } } +static bool analogix_dp_tps3_supported(struct analogix_dp_device *dp) +{ + bool source_tps3_supported, sink_tps3_supported; + u8 dpcd = 0; + + source_tps3_supported = + dp->video_info.max_link_rate == DP_LINK_BW_5_4; + drm_dp_dpcd_readb(&dp->aux, DP_MAX_LANE_COUNT, &dpcd); + sink_tps3_supported = dpcd & DP_TPS3_SUPPORTED; + + return source_tps3_supported && sink_tps3_supported; +} + static int analogix_dp_process_clock_recovery(struct analogix_dp_device *dp) { int lane, lane_count, retval; u8 voltage_swing, pre_emphasis, training_lane; u8 link_status[2], adjust_request[2]; + u8 training_pattern = TRAINING_PTN2; - usleep_range(100, 101); + drm_dp_link_train_clock_recovery_delay(dp->dpcd); lane_count = dp->link_train.lane_count; @@ -483,12 +499,16 @@ static int analogix_dp_process_clock_recovery(struct analogix_dp_device *dp) return retval; if (analogix_dp_clock_recovery_ok(link_status, lane_count) == 0) { - /* set training pattern 2 for EQ */ - analogix_dp_set_training_pattern(dp, TRAINING_PTN2); + if (analogix_dp_tps3_supported(dp)) + training_pattern = TRAINING_PTN3; + + /* set training pattern for EQ */ + analogix_dp_set_training_pattern(dp, training_pattern); 
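/*
 * Channel equalization moves to TRAINING_PTN3 only when both ends allow it:
 * the sink advertises TPS3 in bit 6 of DPCD 0x002 (DP_MAX_LANE_COUNT), and
 * the source must be HBR2-capable (see analogix_dp_tps3_supported() above).
 * The DPCD write below mirrors the controller-side pattern choice, keeping
 * scrambling disabled for the duration of link training.
 */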
retval = drm_dp_dpcd_writeb(&dp->aux, DP_TRAINING_PATTERN_SET, DP_LINK_SCRAMBLING_DISABLE | - DP_TRAINING_PATTERN_2); + (training_pattern == TRAINING_PTN3 ? + DP_TRAINING_PATTERN_3 : DP_TRAINING_PATTERN_2)); if (retval < 0) return retval; @@ -522,10 +542,7 @@ static int analogix_dp_process_clock_recovery(struct analogix_dp_device *dp) } analogix_dp_get_adjust_training_lane(dp, adjust_request); - - for (lane = 0; lane < lane_count; lane++) - analogix_dp_set_lane_link_training(dp, - dp->link_train.training_lane[lane], lane); + analogix_dp_set_lane_link_training(dp); retval = drm_dp_dpcd_write(&dp->aux, DP_TRAINING_LANE0_SET, dp->link_train.training_lane, lane_count); @@ -537,11 +554,11 @@ static int analogix_dp_process_clock_recovery(struct analogix_dp_device *dp) static int analogix_dp_process_equalizer_training(struct analogix_dp_device *dp) { - int lane, lane_count, retval; + int lane_count, retval; u32 reg; u8 link_align, link_status[2], adjust_request[2]; - usleep_range(400, 401); + drm_dp_link_train_channel_eq_delay(dp->dpcd); lane_count = dp->link_train.lane_count; @@ -597,9 +614,7 @@ static int analogix_dp_process_equalizer_training(struct analogix_dp_device *dp) return -EIO; } - for (lane = 0; lane < lane_count; lane++) - analogix_dp_set_lane_link_training(dp, - dp->link_train.training_lane[lane], lane); + analogix_dp_set_lane_link_training(dp); retval = drm_dp_dpcd_write(&dp->aux, DP_TRAINING_LANE0_SET, dp->link_train.training_lane, lane_count); @@ -609,10 +624,11 @@ static int analogix_dp_process_equalizer_training(struct analogix_dp_device *dp) return 0; } -static void analogix_dp_get_max_rx_bandwidth(struct analogix_dp_device *dp, - u8 *bandwidth) +static int analogix_dp_get_max_rx_bandwidth(struct analogix_dp_device *dp, + u8 *bandwidth) { u8 data; + int ret; /* * For DP rev.1.1, Maximum link rate of Main Link lanes @@ -620,28 +636,41 @@ static void analogix_dp_get_max_rx_bandwidth(struct analogix_dp_device *dp, * For DP rev.1.2, Maximum link rate of Main Link lanes * 0x06 = 1.62 Gbps, 0x0a = 2.7 Gbps, 0x14 = 5.4Gbps */ - drm_dp_dpcd_readb(&dp->aux, DP_MAX_LINK_RATE, &data); + ret = drm_dp_dpcd_readb(&dp->aux, DP_MAX_LINK_RATE, &data); + if (ret < 0) + return ret; + *bandwidth = data; + + return 0; } -static void analogix_dp_get_max_rx_lane_count(struct analogix_dp_device *dp, - u8 *lane_count) +static int analogix_dp_get_max_rx_lane_count(struct analogix_dp_device *dp, + u8 *lane_count) { u8 data; + int ret; /* * For DP rev.1.1, Maximum number of Main Link lanes * 0x01 = 1 lane, 0x02 = 2 lanes, 0x04 = 4 lanes */ - drm_dp_dpcd_readb(&dp->aux, DP_MAX_LANE_COUNT, &data); + ret = drm_dp_dpcd_readb(&dp->aux, DP_MAX_LANE_COUNT, &data); + if (ret < 0) + return ret; + *lane_count = DPCD_MAX_LANE_COUNT(data); + + return 0; } static int analogix_dp_full_link_train(struct analogix_dp_device *dp, u32 max_lanes, u32 max_rate) { + struct video_info *video = &dp->video_info; int retval = 0; bool training_finished = false; + u8 dpcd; /* * MACRO_RST must be applied after the PLL_LOCK to avoid @@ -667,6 +696,16 @@ static int analogix_dp_full_link_train(struct analogix_dp_device *dp, dp->link_train.lane_count = (u8)LANE_COUNT1; } + if (!analogix_dp_bandwidth_ok(dp, &video->mode, + drm_dp_bw_code_to_link_rate(dp->link_train.link_rate), + dp->link_train.lane_count)) { + dev_err(dp->dev, "bandwidth overflow\n"); + return -EINVAL; + } + + drm_dp_dpcd_readb(&dp->aux, DP_MAX_DOWNSPREAD, &dpcd); + dp->link_train.ssc = !!(dpcd & DP_MAX_DOWNSPREAD_0_5); + /* Setup TX lane count & rate */ if 
(dp->link_train.lane_count > max_lanes) dp->link_train.lane_count = max_lanes; @@ -711,27 +750,15 @@ static int analogix_dp_full_link_train(struct analogix_dp_device *dp, static int analogix_dp_fast_link_train(struct analogix_dp_device *dp) { - int i, ret; + int ret; u8 link_align, link_status[2]; - enum pll_status status; analogix_dp_reset_macro(dp); analogix_dp_set_link_bandwidth(dp, dp->link_train.link_rate); analogix_dp_set_lane_count(dp, dp->link_train.lane_count); - - for (i = 0; i < dp->link_train.lane_count; i++) { - analogix_dp_set_lane_link_training(dp, - dp->link_train.training_lane[i], i); - } - - ret = readx_poll_timeout(analogix_dp_get_pll_lock_status, dp, status, - status != PLL_UNLOCKED, 120, - 120 * DP_TIMEOUT_LOOP_COUNT); - if (ret) { - DRM_DEV_ERROR(dp->dev, "Wait for pll lock failed %d\n", ret); - return ret; - } + analogix_dp_set_lane_link_training(dp); + analogix_dp_enable_enhanced_mode(dp, dp->link_train.enhanced_framing); /* source Set training pattern 1 */ analogix_dp_set_training_pattern(dp, TRAINING_PTN1); @@ -742,7 +769,6 @@ static int analogix_dp_fast_link_train(struct analogix_dp_device *dp) /* From DP spec, pattern must be on-screen for a minimum 500us */ usleep_range(500, 600); - /* TODO: enhanced_mode?*/ analogix_dp_set_training_pattern(dp, DP_NONE); /* @@ -884,25 +910,44 @@ static int analogix_dp_enable_scramble(struct analogix_dp_device *dp, return ret < 0 ? ret : 0; } +static irqreturn_t analogix_dp_hpd_irq_handler(int irq, void *arg) +{ + struct analogix_dp_device *dp = arg; + + if (dp->drm_dev) + drm_helper_hpd_irq_event(dp->drm_dev); + + return IRQ_HANDLED; +} + static irqreturn_t analogix_dp_hardirq(int irq, void *arg) { struct analogix_dp_device *dp = arg; - irqreturn_t ret = IRQ_NONE; enum dp_irq_type irq_type; + int ret; + + ret = pm_runtime_get_sync(dp->dev); + if (ret < 0) + return IRQ_NONE; irq_type = analogix_dp_get_irq_type(dp); - if (irq_type != DP_IRQ_TYPE_UNKNOWN) { + if (irq_type != DP_IRQ_TYPE_UNKNOWN) analogix_dp_mute_hpd_interrupt(dp); - ret = IRQ_WAKE_THREAD; - } - return ret; + pm_runtime_put_sync(dp->dev); + + return IRQ_WAKE_THREAD; } static irqreturn_t analogix_dp_irq_thread(int irq, void *arg) { struct analogix_dp_device *dp = arg; enum dp_irq_type irq_type; + int ret; + + ret = pm_runtime_get_sync(dp->dev); + if (ret < 0) + return IRQ_NONE; irq_type = analogix_dp_get_irq_type(dp); if (irq_type & DP_IRQ_TYPE_HP_CABLE_IN || @@ -917,6 +962,8 @@ static irqreturn_t analogix_dp_irq_thread(int irq, void *arg) analogix_dp_unmute_hpd_interrupt(dp); } + pm_runtime_put_sync(dp->dev); + return IRQ_HANDLED; } @@ -936,16 +983,73 @@ static int analogix_dp_fast_link_train_detection(struct analogix_dp_device *dp) return 0; } +static int analogix_dp_link_power_up(struct analogix_dp_device *dp) +{ + u8 value; + int ret; + + if (dp->dpcd[DP_DPCD_REV] < 0x11) + return 0; + + ret = drm_dp_dpcd_readb(&dp->aux, DP_SET_POWER, &value); + if (ret < 0) + return ret; + + value &= ~DP_SET_POWER_MASK; + value |= DP_SET_POWER_D0; + + ret = drm_dp_dpcd_writeb(&dp->aux, DP_SET_POWER, value); + if (ret < 0) + return ret; + + usleep_range(1000, 2000); + + return 0; +} + +static int analogix_dp_link_power_down(struct analogix_dp_device *dp) +{ + u8 value; + int ret; + + if (dp->dpcd[DP_DPCD_REV] < 0x11) + return 0; + + ret = drm_dp_dpcd_readb(&dp->aux, DP_SET_POWER, &value); + if (ret < 0) + return ret; + + value &= ~DP_SET_POWER_MASK; + value |= DP_SET_POWER_D3; + + ret = drm_dp_dpcd_writeb(&dp->aux, DP_SET_POWER, value); + if (ret < 0) + return ret; + + return 0; 
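/*
 * Note on the SET_POWER helpers above: DPCD 0x600 only exists from DPCD
 * revision 1.1 onward, hence the early return. After writing D0 the sink
 * may take up to 1 ms to wake, which is what the usleep_range() in
 * analogix_dp_link_power_up() accounts for; no settling delay is needed
 * when entering D3. The bridge only issues the D3 write while the sink is
 * still plugged, since DPCD access is gone once the cable is pulled (see
 * analogix_dp_bridge_disable() below).
 */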
+} + static int analogix_dp_commit(struct analogix_dp_device *dp) { + struct video_info *video = &dp->video_info; int ret; - /* Keep the panel disabled while we configure video */ - if (dp->plat_data->panel) { - if (drm_panel_disable(dp->plat_data->panel)) - DRM_ERROR("failed to disable the panel\n"); + ret = drm_dp_read_dpcd_caps(&dp->aux, dp->dpcd); + if (ret < 0) { + dev_err(dp->dev, "failed to read dpcd caps: %d\n", ret); + return ret; + } + + ret = analogix_dp_link_power_up(dp); + if (ret) { + dev_err(dp->dev, "failed to power up link: %d\n", ret); + return ret; } + if (device_property_read_bool(dp->dev, "panel-self-test")) + return drm_dp_dpcd_writeb(&dp->aux, DP_EDP_CONFIGURATION_SET, + DP_PANEL_SELF_TEST_ENABLE); + ret = analogix_dp_train_link(dp); if (ret) { dev_err(dp->dev, "unable to do link train, ret=%d\n", ret); @@ -959,21 +1063,17 @@ static int analogix_dp_commit(struct analogix_dp_device *dp) } analogix_dp_init_video(dp); + analogix_dp_set_video_format(dp); + + if (video->video_bist_enable) + analogix_dp_video_bist_enable(dp); + ret = analogix_dp_config_video(dp); if (ret) { dev_err(dp->dev, "unable to config video\n"); return ret; } - /* Safe to enable the panel now */ - if (dp->plat_data->panel) { - ret = drm_panel_enable(dp->plat_data->panel); - if (ret) { - DRM_ERROR("failed to enable the panel\n"); - return ret; - } - } - /* Check whether panel supports fast training */ ret = analogix_dp_fast_link_train_detection(dp); if (ret) @@ -1058,66 +1158,18 @@ static int analogix_dp_disable_psr(struct analogix_dp_device *dp) return analogix_dp_send_psr_spd(dp, &psr_vsc, true); } -/* - * This function is a bit of a catch-all for panel preparation, hopefully - * simplifying the logic of functions that need to prepare/unprepare the panel - * below. - * - * If @prepare is true, this function will prepare the panel. Conversely, if it - * is false, the panel will be unprepared. - * - * If @is_modeset_prepare is true, the function will disregard the current state - * of the panel and either prepare/unprepare the panel based on @prepare. Once - * it finishes, it will update dp->panel_is_modeset to reflect the current state - * of the panel. - */ -static int analogix_dp_prepare_panel(struct analogix_dp_device *dp, - bool prepare, bool is_modeset_prepare) -{ - int ret = 0; - - if (!dp->plat_data->panel) - return 0; - - mutex_lock(&dp->panel_lock); - - /* - * Exit early if this is a temporary prepare/unprepare and we're already - * modeset (since we neither want to prepare twice or unprepare early). 
- */ - if (dp->panel_is_modeset && !is_modeset_prepare) - goto out; - - if (prepare) - ret = drm_panel_prepare(dp->plat_data->panel); - else - ret = drm_panel_unprepare(dp->plat_data->panel); - - if (ret) - goto out; - - if (is_modeset_prepare) - dp->panel_is_modeset = prepare; - -out: - mutex_unlock(&dp->panel_lock); - return ret; -} - static int analogix_dp_get_modes(struct drm_connector *connector) { struct analogix_dp_device *dp = to_dp(connector); struct edid *edid; - int ret, num_modes = 0; + int num_modes = 0; - if (dp->plat_data->panel) { + if (dp->plat_data->panel) num_modes += drm_panel_get_modes(dp->plat_data->panel, connector); - } else { - ret = analogix_dp_prepare_panel(dp, true, false); - if (ret) { - DRM_ERROR("Failed to prepare panel (%d)\n", ret); - return 0; - } + + if (!num_modes) { + if (dp->plat_data->panel) + analogix_dp_panel_prepare(dp); pm_runtime_get_sync(dp->dev); edid = drm_get_edid(connector, &dp->aux.ddc); @@ -1128,15 +1180,18 @@ static int analogix_dp_get_modes(struct drm_connector *connector) num_modes += drm_add_edid_modes(&dp->connector, edid); kfree(edid); } - - ret = analogix_dp_prepare_panel(dp, false, false); - if (ret) - DRM_ERROR("Failed to unprepare panel (%d)\n", ret); } if (dp->plat_data->get_modes) num_modes += dp->plat_data->get_modes(dp->plat_data, connector); + if (num_modes > 0 && dp->plat_data->split_mode) { + struct drm_display_mode *mode; + + list_for_each_entry(mode, &connector->probed_modes, head) + dp->plat_data->convert_to_split_mode(mode); + } + return num_modes; } @@ -1182,34 +1237,52 @@ static const struct drm_connector_helper_funcs analogix_dp_connector_helper_func }; static enum drm_connector_status -analogix_dp_detect(struct drm_connector *connector, bool force) +analogix_dp_detect(struct analogix_dp_device *dp) { - struct analogix_dp_device *dp = to_dp(connector); enum drm_connector_status status = connector_status_disconnected; int ret; if (dp->plat_data->panel) - return connector_status_connected; + analogix_dp_panel_prepare(dp); - ret = analogix_dp_prepare_panel(dp, true, false); - if (ret) { - DRM_ERROR("Failed to prepare panel (%d)\n", ret); - return connector_status_disconnected; - } + pm_runtime_get_sync(dp->dev); + + if (!analogix_dp_detect_hpd(dp)) { + ret = analogix_dp_get_max_rx_bandwidth(dp, &dp->link_train.link_rate); + if (ret) { + dev_err(dp->dev, "failed to read max link rate\n"); + goto out; + } + + ret = analogix_dp_get_max_rx_lane_count(dp, &dp->link_train.lane_count); + if (ret) { + dev_err(dp->dev, "failed to read max lane count\n"); + goto out; + } - if (!analogix_dp_detect_hpd(dp)) status = connector_status_connected; + } - ret = analogix_dp_prepare_panel(dp, false, false); - if (ret) - DRM_ERROR("Failed to unprepare panel (%d)\n", ret); +out: + pm_runtime_put(dp->dev); return status; } +static enum drm_connector_status +analogix_dp_connector_detect(struct drm_connector *connector, bool force) +{ + struct analogix_dp_device *dp = to_dp(connector); + + if (dp->plat_data->right && analogix_dp_detect(dp->plat_data->right) != connector_status_connected) + return connector_status_disconnected; + + return analogix_dp_detect(dp); +} + static const struct drm_connector_funcs analogix_dp_connector_funcs = { .fill_modes = drm_helper_probe_single_connector_modes, - .detect = analogix_dp_detect, + .detect = analogix_dp_connector_detect, .destroy = drm_connector_cleanup, .reset = drm_atomic_helper_connector_reset, .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, @@ -1224,10 +1297,8 @@ static int 
analogix_dp_bridge_attach(struct drm_bridge *bridge, struct drm_connector *connector = NULL; int ret = 0; - if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) { - DRM_ERROR("Fix bridge driver to make connector optional!"); - return -EINVAL; - } + if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) + return 0; if (!bridge->encoder) { DRM_ERROR("Parent encoder object not found"); @@ -1268,23 +1339,12 @@ static int analogix_dp_bridge_attach(struct drm_bridge *bridge, return 0; } -static -struct drm_crtc *analogix_dp_get_old_crtc(struct analogix_dp_device *dp, - struct drm_atomic_state *state) +static void analogix_dp_bridge_detach(struct drm_bridge *bridge) { - struct drm_encoder *encoder = dp->encoder; - struct drm_connector *connector; - struct drm_connector_state *conn_state; - - connector = drm_atomic_get_old_connector_for_encoder(state, encoder); - if (!connector) - return NULL; - - conn_state = drm_atomic_get_old_connector_state(state, connector); - if (!conn_state) - return NULL; + struct analogix_dp_device *dp = bridge->driver_private; - return conn_state->crtc; + if (dp->plat_data->detach) + dp->plat_data->detach(dp->plat_data, bridge); } static @@ -1314,20 +1374,20 @@ analogix_dp_bridge_atomic_pre_enable(struct drm_bridge *bridge, struct analogix_dp_device *dp = bridge->driver_private; struct drm_crtc *crtc; struct drm_crtc_state *old_crtc_state; - int ret; - crtc = analogix_dp_get_new_crtc(dp, old_state); - if (!crtc) - return; + if (dp->psr_supported) { + crtc = analogix_dp_get_new_crtc(dp, old_state); + if (!crtc) + return; - old_crtc_state = drm_atomic_get_old_crtc_state(old_state, crtc); - /* Don't touch the panel if we're coming back from PSR */ - if (old_crtc_state && old_crtc_state->self_refresh_active) - return; + old_crtc_state = drm_atomic_get_old_crtc_state(old_state, crtc); + /* Don't touch the panel if we're coming back from PSR */ + if (old_crtc_state && old_crtc_state->self_refresh_active) + return; + } - ret = analogix_dp_prepare_panel(dp, true, true); - if (ret) - DRM_ERROR("failed to setup the panel ret = %d\n", ret); + if (dp->plat_data->panel) + analogix_dp_panel_prepare(dp); } static int analogix_dp_set_bridge(struct analogix_dp_device *dp) @@ -1336,16 +1396,10 @@ static int analogix_dp_set_bridge(struct analogix_dp_device *dp) pm_runtime_get_sync(dp->dev); - ret = clk_prepare_enable(dp->clock); - if (ret < 0) { - DRM_ERROR("Failed to prepare_enable the clock clk [%d]\n", ret); - goto out_dp_clk_pre; - } - if (dp->plat_data->power_on_start) dp->plat_data->power_on_start(dp->plat_data); - phy_power_on(dp->phy); + analogix_dp_phy_power_on(dp); ret = analogix_dp_init_dp(dp); if (ret) @@ -1363,11 +1417,14 @@ static int analogix_dp_set_bridge(struct analogix_dp_device *dp) } ret = analogix_dp_commit(dp); - if (ret) { + if (ret < 0) { DRM_ERROR("dp commit error, ret = %d\n", ret); goto out_dp_init; } + if (dp->plat_data->panel) + drm_panel_enable(dp->plat_data->panel); + if (dp->plat_data->power_on_end) dp->plat_data->power_on_end(dp->plat_data); @@ -1375,11 +1432,9 @@ static int analogix_dp_set_bridge(struct analogix_dp_device *dp) return 0; out_dp_init: - phy_power_off(dp->phy); + analogix_dp_phy_power_off(dp); if (dp->plat_data->power_off) dp->plat_data->power_off(dp->plat_data); - clk_disable_unprepare(dp->clock); -out_dp_clk_pre: pm_runtime_put_sync(dp->dev); return ret; @@ -1396,17 +1451,19 @@ analogix_dp_bridge_atomic_enable(struct drm_bridge *bridge, int timeout_loop = 0; int ret; - crtc = analogix_dp_get_new_crtc(dp, old_state); - if (!crtc) - return; + if 
(dp->psr_supported) { + crtc = analogix_dp_get_new_crtc(dp, old_state); + if (!crtc) + return; - old_crtc_state = drm_atomic_get_old_crtc_state(old_state, crtc); - /* Not a full enable, just disable PSR and continue */ - if (old_crtc_state && old_crtc_state->self_refresh_active) { - ret = analogix_dp_disable_psr(dp); - if (ret) - DRM_ERROR("Failed to disable psr %d\n", ret); - return; + old_crtc_state = drm_atomic_get_old_crtc_state(old_state, crtc); + /* Not a full enable, just disable PSR and continue */ + if (old_crtc_state && old_crtc_state->self_refresh_active) { + ret = analogix_dp_disable_psr(dp); + if (ret) + DRM_ERROR("Failed to disable psr %d\n", ret); + return; + } } if (dp->dpms_mode == DRM_MODE_DPMS_ON) @@ -1428,7 +1485,6 @@ analogix_dp_bridge_atomic_enable(struct drm_bridge *bridge, static void analogix_dp_bridge_disable(struct drm_bridge *bridge) { struct analogix_dp_device *dp = bridge->driver_private; - int ret; if (dp->dpms_mode != DRM_MODE_DPMS_ON) return; @@ -1440,21 +1496,22 @@ static void analogix_dp_bridge_disable(struct drm_bridge *bridge) } } + if (!analogix_dp_get_plug_in_status(dp)) + analogix_dp_link_power_down(dp); + disable_irq(dp->irq); if (dp->plat_data->power_off) dp->plat_data->power_off(dp->plat_data); + analogix_dp_reset_aux(dp); analogix_dp_set_analog_power_down(dp, POWER_ALL, 1); - phy_power_off(dp->phy); - - clk_disable_unprepare(dp->clock); + analogix_dp_phy_power_off(dp); pm_runtime_put_sync(dp->dev); - ret = analogix_dp_prepare_panel(dp, false, true); - if (ret) - DRM_ERROR("failed to setup the panel ret = %d\n", ret); + if (dp->plat_data->panel) + analogix_dp_panel_unprepare(dp); dp->fast_train_enable = false; dp->psr_supported = false; @@ -1467,16 +1524,14 @@ analogix_dp_bridge_atomic_disable(struct drm_bridge *bridge, { struct drm_atomic_state *old_state = old_bridge_state->base.state; struct analogix_dp_device *dp = bridge->driver_private; - struct drm_crtc *old_crtc, *new_crtc; - struct drm_crtc_state *old_crtc_state = NULL; + struct drm_crtc *crtc; struct drm_crtc_state *new_crtc_state = NULL; - int ret; - new_crtc = analogix_dp_get_new_crtc(dp, old_state); - if (!new_crtc) + crtc = analogix_dp_get_new_crtc(dp, old_state); + if (!crtc) goto out; - new_crtc_state = drm_atomic_get_new_crtc_state(old_state, new_crtc); + new_crtc_state = drm_atomic_get_new_crtc_state(old_state, crtc); if (!new_crtc_state) goto out; @@ -1485,19 +1540,6 @@ analogix_dp_bridge_atomic_disable(struct drm_bridge *bridge, return; out: - old_crtc = analogix_dp_get_old_crtc(dp, old_state); - if (old_crtc) { - old_crtc_state = drm_atomic_get_old_crtc_state(old_state, - old_crtc); - - /* When moving from PSR to fully disabled, exit PSR first. 
*/ - if (old_crtc_state && old_crtc_state->self_refresh_active) { - ret = analogix_dp_disable_psr(dp); - if (ret) - DRM_ERROR("Failed to disable psr (%d)\n", ret); - } - } - analogix_dp_bridge_disable(bridge); } @@ -1526,14 +1568,19 @@ analogix_dp_bridge_atomic_post_disable(struct drm_bridge *bridge, static void analogix_dp_bridge_mode_set(struct drm_bridge *bridge, const struct drm_display_mode *orig_mode, - const struct drm_display_mode *mode) + const struct drm_display_mode *adj_mode) { struct analogix_dp_device *dp = bridge->driver_private; struct drm_display_info *display_info = &dp->connector.display_info; struct video_info *video = &dp->video_info; + struct drm_display_mode *mode = &video->mode; struct device_node *dp_node = dp->dev->of_node; int vic; + drm_mode_copy(mode, adj_mode); + if (dp->plat_data->split_mode) + dp->plat_data->convert_to_origin_mode(mode); + /* Input video interlaces & hsync pol & vsync pol */ video->interlaced = !!(mode->flags & DRM_MODE_FLAG_INTERLACE); video->v_sync_polarity = !!(mode->flags & DRM_MODE_FLAG_NVSYNC); @@ -1601,6 +1648,27 @@ static void analogix_dp_bridge_mode_set(struct drm_bridge *bridge, video->interlaced = true; } +static enum drm_mode_status +analogix_dp_bridge_mode_valid(struct drm_bridge *bridge, + const struct drm_display_info *info, + const struct drm_display_mode *mode) +{ + struct analogix_dp_device *dp = bridge->driver_private; + struct drm_display_mode m; + + drm_mode_copy(&m, mode); + + if (dp->plat_data->split_mode) + dp->plat_data->convert_to_origin_mode(&m); + + if (!analogix_dp_bandwidth_ok(dp, &m, + drm_dp_bw_code_to_link_rate(dp->link_train.link_rate), + dp->link_train.lane_count)) + return MODE_BAD; + + return MODE_OK; +} + static const struct drm_bridge_funcs analogix_dp_bridge_funcs = { .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, @@ -1611,29 +1679,30 @@ static const struct drm_bridge_funcs analogix_dp_bridge_funcs = { .atomic_post_disable = analogix_dp_bridge_atomic_post_disable, .mode_set = analogix_dp_bridge_mode_set, .attach = analogix_dp_bridge_attach, + .detach = analogix_dp_bridge_detach, + .mode_valid = analogix_dp_bridge_mode_valid, }; -static int analogix_dp_create_bridge(struct drm_device *drm_dev, - struct analogix_dp_device *dp) +static int analogix_dp_bridge_init(struct analogix_dp_device *dp) { - struct drm_bridge *bridge; + struct drm_bridge *bridge = &dp->bridge; int ret; - bridge = devm_kzalloc(drm_dev->dev, sizeof(*bridge), GFP_KERNEL); - if (!bridge) { - DRM_ERROR("failed to allocate for drm bridge\n"); - return -ENOMEM; + if (!dp->plat_data->left) { + ret = drm_bridge_attach(dp->encoder, bridge, NULL, 0); + if (ret) { + DRM_ERROR("failed to attach drm bridge\n"); + return ret; + } } - dp->bridge = bridge; + if (dp->plat_data->right) { + struct analogix_dp_device *secondary = dp->plat_data->right; - bridge->driver_private = dp; - bridge->funcs = &analogix_dp_bridge_funcs; - - ret = drm_bridge_attach(dp->encoder, bridge, NULL, 0); - if (ret) { - DRM_ERROR("failed to attach drm bridge\n"); - return -EINVAL; + ret = drm_bridge_attach(dp->encoder, &secondary->bridge, bridge, + DRM_BRIDGE_ATTACH_NO_CONNECTOR); + if (ret) + return ret; } return 0; @@ -1646,7 +1715,7 @@ static int analogix_dp_dt_parse_pdata(struct analogix_dp_device *dp) switch (dp->plat_data->dev_type) { case RK3288_DP: - case RK3399_EDP: + case RK3568_EDP: /* * Like Rk3288 DisplayPort TRM indicate that "Main link * containing 4 physical lanes of 2.7/1.62 
Gbps/lane". @@ -1654,6 +1723,11 @@ video_info->max_link_rate = 0x0A; video_info->max_lane_count = 0x04; break; + case RK3399_EDP: + case RK3588_EDP: + video_info->max_link_rate = 0x14; + video_info->max_lane_count = 0x04; + break; case EXYNOS_DP: /* * NOTE: this property parsing code is used for @@ -1666,6 +1740,9 @@ break; } + video_info->video_bist_enable = + of_property_read_bool(dp_node, "analogix,video-bist-enable"); + return 0; } @@ -1673,20 +1750,69 @@ static ssize_t analogix_dpaux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) { struct analogix_dp_device *dp = to_dp(aux); + + return analogix_dp_transfer(dp, msg); +} + +int analogix_dp_audio_hw_params(struct analogix_dp_device *dp, + struct hdmi_codec_daifmt *daifmt, + struct hdmi_codec_params *params) +{ + switch (daifmt->fmt) { + case HDMI_SPDIF: + analogix_dp_audio_config_spdif(dp); + break; + case HDMI_I2S: + analogix_dp_audio_config_i2s(dp); + break; + default: + DRM_DEV_ERROR(dp->dev, "invalid daifmt %d\n", daifmt->fmt); + return -EINVAL; + } + + return 0; +} +EXPORT_SYMBOL_GPL(analogix_dp_audio_hw_params); + +void analogix_dp_audio_shutdown(struct analogix_dp_device *dp) +{ + analogix_dp_audio_disable(dp); +} +EXPORT_SYMBOL_GPL(analogix_dp_audio_shutdown); + +int analogix_dp_audio_startup(struct analogix_dp_device *dp) +{ + analogix_dp_audio_enable(dp); + + return 0; +} +EXPORT_SYMBOL_GPL(analogix_dp_audio_startup); + +int analogix_dp_audio_get_eld(struct analogix_dp_device *dp, u8 *buf, size_t len) +{ + memcpy(buf, dp->connector.eld, min(sizeof(dp->connector.eld), len)); + + return 0; +} +EXPORT_SYMBOL_GPL(analogix_dp_audio_get_eld); + +int analogix_dp_loader_protect(struct analogix_dp_device *dp) +{ int ret; - pm_runtime_get_sync(dp->dev); + ret = pm_runtime_resume_and_get(dp->dev); + if (ret) { + dev_err(dp->dev, "failed to get runtime PM: %d\n", ret); + return ret; + } - ret = analogix_dp_detect_hpd(dp); - if (ret) - goto out; + analogix_dp_phy_power_on(dp); - ret = analogix_dp_transfer(dp, msg); -out: - pm_runtime_put(dp->dev); + dp->dpms_mode = DRM_MODE_DPMS_ON; - return ret; + return 0; } +EXPORT_SYMBOL_GPL(analogix_dp_loader_protect); struct analogix_dp_device * analogix_dp_probe(struct device *dev, struct analogix_dp_plat_data *plat_data) @@ -1694,7 +1820,6 @@ analogix_dp_probe(struct device *dev, struct analogix_dp_plat_data *plat_data) struct platform_device *pdev = to_platform_device(dev); struct analogix_dp_device *dp; struct resource *res; - unsigned int irq_flags; int ret; if (!plat_data) { @@ -1710,7 +1835,7 @@ analogix_dp_probe(struct device *dev, struct analogix_dp_plat_data *plat_data) dp->dpms_mode = DRM_MODE_DPMS_OFF; mutex_init(&dp->panel_lock); - dp->panel_is_modeset = false; + dp->panel_is_prepared = false; /* * platform dp driver need container_of the plat_data to get @@ -1739,21 +1864,19 @@ } } - dp->clock = devm_clk_get(&pdev->dev, "dp"); - if (IS_ERR(dp->clock)) { - dev_err(&pdev->dev, "failed to get clock\n"); - return ERR_CAST(dp->clock); + ret = devm_clk_bulk_get_all(dev, &dp->clks); + if (ret < 0) { + dev_err(dev, "failed to get clocks %d\n", ret); + return ERR_PTR(ret); } - clk_prepare_enable(dp->clock); + dp->nr_clks = ret; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); dp->reg_base = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(dp->reg_base)) { - ret = PTR_ERR(dp->reg_base); - goto err_disable_clk; - } + if (IS_ERR(dp->reg_base)) + return ERR_CAST(dp->reg_base); dp->force_hpd = of_property_read_bool(dev->of_node, "force-hpd");
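/*
 * The probe and runtime-PM hunks here move from a single named "dp" clock
 * to the clk_bulk API: devm_clk_bulk_get_all() grabs every clock listed in
 * the device-tree node and returns how many it found. A minimal sketch of
 * the same pattern, with an assumed private struct:
 */
#include <linux/clk.h>

struct example_dp_priv {
	struct clk_bulk_data *clks;
	int nr_clks;
};

static int example_dp_clocks_init(struct device *dev,
				  struct example_dp_priv *priv)
{
	int ret = devm_clk_bulk_get_all(dev, &priv->clks);

	if (ret < 0)
		return ret;
	priv->nr_clks = ret;	/* number of clocks found in the DT node */

	/* Runtime resume/suspend then become symmetric bulk calls. */
	return clk_bulk_prepare_enable(priv->nr_clks, priv->clks);
}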
@@ -1765,46 +1888,44 @@ if (IS_ERR(dp->hpd_gpiod)) { dev_err(dev, "error getting HPD GPIO: %ld\n", PTR_ERR(dp->hpd_gpiod)); - ret = PTR_ERR(dp->hpd_gpiod); - goto err_disable_clk; + return ERR_CAST(dp->hpd_gpiod); } if (dp->hpd_gpiod) { - /* - * Set up the hotplug GPIO from the device tree as an interrupt. - * Simply specifying a different interrupt in the device tree - * doesn't work since we handle hotplug rather differently when - * using a GPIO. We also need the actual GPIO specifier so - * that we can get the current state of the GPIO. - */ - dp->irq = gpiod_to_irq(dp->hpd_gpiod); - irq_flags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING; - } else { - dp->irq = platform_get_irq(pdev, 0); - irq_flags = 0; + ret = devm_request_threaded_irq(dev, + gpiod_to_irq(dp->hpd_gpiod), + NULL, + analogix_dp_hpd_irq_handler, + IRQF_TRIGGER_RISING | + IRQF_TRIGGER_FALLING | + IRQF_ONESHOT, + "analogix-hpd", dp); + if (ret) { + dev_err(dev, "failed to request hpd IRQ: %d\n", ret); + return ERR_PTR(ret); + } } + dp->irq = platform_get_irq(pdev, 0); if (dp->irq == -ENXIO) { dev_err(&pdev->dev, "failed to get irq\n"); - ret = -ENODEV; - goto err_disable_clk; + return ERR_PTR(-ENODEV); } + irq_set_status_flags(dp->irq, IRQ_NOAUTOEN); ret = devm_request_threaded_irq(&pdev->dev, dp->irq, analogix_dp_hardirq, analogix_dp_irq_thread, - irq_flags, "analogix-dp", dp); + 0, "analogix-dp", dp); if (ret) { dev_err(&pdev->dev, "failed to request irq\n"); - goto err_disable_clk; + return ERR_PTR(ret); } - disable_irq(dp->irq); - return dp; + dp->bridge.driver_private = dp; + dp->bridge.funcs = &analogix_dp_bridge_funcs; -err_disable_clk: - clk_disable_unprepare(dp->clock); - return ERR_PTR(ret); + return dp; } EXPORT_SYMBOL_GPL(analogix_dp_probe); @@ -1825,9 +1946,9 @@ int analogix_dp_bind(struct analogix_dp_device *dp, struct drm_device *drm_dev) pm_runtime_enable(dp->dev); - ret = analogix_dp_create_bridge(drm_dev, dp); + ret = analogix_dp_bridge_init(dp); if (ret) { - DRM_ERROR("failed to create bridge (%d)\n", ret); + DRM_ERROR("failed to init bridge (%d)\n", ret); goto err_disable_pm_runtime; } @@ -1842,14 +1963,7 @@ EXPORT_SYMBOL_GPL(analogix_dp_bind); void analogix_dp_unbind(struct analogix_dp_device *dp) { - analogix_dp_bridge_disable(dp->bridge); dp->connector.funcs->destroy(&dp->connector); - - if (dp->plat_data->panel) { - if (drm_panel_unprepare(dp->plat_data->panel)) - DRM_ERROR("failed to turnoff the panel\n"); - } - drm_dp_aux_unregister(&dp->aux); pm_runtime_disable(dp->dev); } @@ -1857,32 +1971,22 @@ EXPORT_SYMBOL_GPL(analogix_dp_unbind); void analogix_dp_remove(struct analogix_dp_device *dp) { - clk_disable_unprepare(dp->clock); } EXPORT_SYMBOL_GPL(analogix_dp_remove); -#ifdef CONFIG_PM -int analogix_dp_suspend(struct analogix_dp_device *dp) +int analogix_dp_runtime_suspend(struct analogix_dp_device *dp) { - clk_disable_unprepare(dp->clock); + clk_bulk_disable_unprepare(dp->nr_clks, dp->clks); + return 0; } -EXPORT_SYMBOL_GPL(analogix_dp_suspend); +EXPORT_SYMBOL_GPL(analogix_dp_runtime_suspend); -int analogix_dp_resume(struct analogix_dp_device *dp) +int analogix_dp_runtime_resume(struct analogix_dp_device *dp) { - int ret; - - ret = clk_prepare_enable(dp->clock); - if (ret < 0) { - DRM_ERROR("Failed to prepare_enable the clock clk [%d]\n",
ret); - return ret; - } - - return 0; + return clk_bulk_prepare_enable(dp->nr_clks, dp->clks); } -EXPORT_SYMBOL_GPL(analogix_dp_resume); -#endif +EXPORT_SYMBOL_GPL(analogix_dp_runtime_resume); int analogix_dp_start_crc(struct drm_connector *connector) { diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h index c051502d7..804a87d59 100644 --- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h +++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h @@ -10,6 +10,7 @@ #define _ANALOGIX_DP_CORE_H #include +#include #include #define DP_TIMEOUT_LOOP_COUNT 100 @@ -69,6 +70,7 @@ enum pattern_set { D10_2, TRAINING_PTN1, TRAINING_PTN2, + TRAINING_PTN3, DP_NONE }; @@ -129,6 +131,7 @@ enum dp_irq_type { struct video_info { char *name; + struct drm_display_mode mode; bool h_sync_polarity; bool v_sync_polarity; @@ -141,6 +144,8 @@ struct video_info { int max_link_rate; enum link_lane_count_type max_lane_count; + + bool video_bist_enable; }; struct link_train { @@ -150,6 +155,8 @@ struct link_train { u8 link_rate; u8 lane_count; u8 training_lane[4]; + bool ssc; + bool enhanced_framing; enum link_training_state lt_state; }; @@ -159,15 +166,17 @@ struct analogix_dp_device { struct device *dev; struct drm_device *drm_dev; struct drm_connector connector; - struct drm_bridge *bridge; + struct drm_bridge bridge; struct drm_dp_aux aux; - struct clk *clock; + struct clk_bulk_data *clks; + int nr_clks; unsigned int irq; void __iomem *reg_base; struct video_info video_info; struct link_train link_train; struct phy *phy; + bool phy_enabled; int dpms_mode; struct gpio_desc *hpd_gpiod; bool force_hpd; @@ -175,8 +184,9 @@ struct analogix_dp_device { bool psr_supported; struct mutex panel_lock; - bool panel_is_modeset; + bool panel_is_prepared; + u8 dpcd[DP_RECEIVER_CAP_SIZE]; struct analogix_dp_plat_data *plat_data; }; @@ -213,26 +223,8 @@ void analogix_dp_enable_enhanced_mode(struct analogix_dp_device *dp, bool enable); void analogix_dp_set_training_pattern(struct analogix_dp_device *dp, enum pattern_set pattern); -void analogix_dp_set_lane0_pre_emphasis(struct analogix_dp_device *dp, - u32 level); -void analogix_dp_set_lane1_pre_emphasis(struct analogix_dp_device *dp, - u32 level); -void analogix_dp_set_lane2_pre_emphasis(struct analogix_dp_device *dp, - u32 level); -void analogix_dp_set_lane3_pre_emphasis(struct analogix_dp_device *dp, - u32 level); -void analogix_dp_set_lane0_link_training(struct analogix_dp_device *dp, - u32 training_lane); -void analogix_dp_set_lane1_link_training(struct analogix_dp_device *dp, - u32 training_lane); -void analogix_dp_set_lane2_link_training(struct analogix_dp_device *dp, - u32 training_lane); -void analogix_dp_set_lane3_link_training(struct analogix_dp_device *dp, - u32 training_lane); -u32 analogix_dp_get_lane0_link_training(struct analogix_dp_device *dp); -u32 analogix_dp_get_lane1_link_training(struct analogix_dp_device *dp); -u32 analogix_dp_get_lane2_link_training(struct analogix_dp_device *dp); -u32 analogix_dp_get_lane3_link_training(struct analogix_dp_device *dp); +void analogix_dp_set_lane_link_training(struct analogix_dp_device *dp); +u32 analogix_dp_get_lane_link_training(struct analogix_dp_device *dp, u8 lane); void analogix_dp_reset_macro(struct analogix_dp_device *dp); void analogix_dp_init_video(struct analogix_dp_device *dp); @@ -255,5 +247,14 @@ int analogix_dp_send_psr_spd(struct analogix_dp_device *dp, struct dp_sdp *vsc, bool blocking); ssize_t analogix_dp_transfer(struct analogix_dp_device 
*dp, struct drm_dp_aux_msg *msg); +void analogix_dp_set_video_format(struct analogix_dp_device *dp); +void analogix_dp_video_bist_enable(struct analogix_dp_device *dp); +bool analogix_dp_ssc_supported(struct analogix_dp_device *dp); +void analogix_dp_phy_power_on(struct analogix_dp_device *dp); +void analogix_dp_phy_power_off(struct analogix_dp_device *dp); +void analogix_dp_audio_config_spdif(struct analogix_dp_device *dp); +void analogix_dp_audio_config_i2s(struct analogix_dp_device *dp); +void analogix_dp_audio_enable(struct analogix_dp_device *dp); +void analogix_dp_audio_disable(struct analogix_dp_device *dp); #endif /* _ANALOGIX_DP_CORE_H */ diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c index cab3f5c4e..e76c66c7c 100644 --- a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c +++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c @@ -11,6 +11,7 @@ #include #include #include +#include #include @@ -21,20 +22,37 @@ #define COMMON_INT_MASK_2 0 #define COMMON_INT_MASK_3 0 #define COMMON_INT_MASK_4 (HOTPLUG_CHG | HPD_LOST | PLUG) -#define INT_STA_MASK INT_HPD + +static void analogix_dp_write(struct analogix_dp_device *dp, u32 reg, u32 val) +{ + if (dp->plat_data && is_rockchip(dp->plat_data->dev_type)) { + readl(dp->reg_base); + writel(val, dp->reg_base + reg); + } + + writel(val, dp->reg_base + reg); +} + +static u32 analogix_dp_read(struct analogix_dp_device *dp, u32 reg) +{ + if (dp->plat_data && is_rockchip(dp->plat_data->dev_type)) + readl(dp->reg_base + reg); + + return readl(dp->reg_base + reg); +} void analogix_dp_enable_video_mute(struct analogix_dp_device *dp, bool enable) { u32 reg; if (enable) { - reg = readl(dp->reg_base + ANALOGIX_DP_VIDEO_CTL_1); + reg = analogix_dp_read(dp, ANALOGIX_DP_VIDEO_CTL_1); reg |= HDCP_VIDEO_MUTE; - writel(reg, dp->reg_base + ANALOGIX_DP_VIDEO_CTL_1); + analogix_dp_write(dp, ANALOGIX_DP_VIDEO_CTL_1, reg); } else { - reg = readl(dp->reg_base + ANALOGIX_DP_VIDEO_CTL_1); + reg = analogix_dp_read(dp, ANALOGIX_DP_VIDEO_CTL_1); reg &= ~HDCP_VIDEO_MUTE; - writel(reg, dp->reg_base + ANALOGIX_DP_VIDEO_CTL_1); + analogix_dp_write(dp, ANALOGIX_DP_VIDEO_CTL_1, reg); } } @@ -42,9 +60,9 @@ void analogix_dp_stop_video(struct analogix_dp_device *dp) { u32 reg; - reg = readl(dp->reg_base + ANALOGIX_DP_VIDEO_CTL_1); + reg = analogix_dp_read(dp, ANALOGIX_DP_VIDEO_CTL_1); reg &= ~VIDEO_EN; - writel(reg, dp->reg_base + ANALOGIX_DP_VIDEO_CTL_1); + analogix_dp_write(dp, ANALOGIX_DP_VIDEO_CTL_1, reg); } void analogix_dp_lane_swap(struct analogix_dp_device *dp, bool enable) @@ -58,7 +76,7 @@ void analogix_dp_lane_swap(struct analogix_dp_device *dp, bool enable) reg = LANE3_MAP_LOGIC_LANE_3 | LANE2_MAP_LOGIC_LANE_2 | LANE1_MAP_LOGIC_LANE_1 | LANE0_MAP_LOGIC_LANE_0; - writel(reg, dp->reg_base + ANALOGIX_DP_LANE_MAP); + analogix_dp_write(dp, ANALOGIX_DP_LANE_MAP, reg); } void analogix_dp_init_analog_param(struct analogix_dp_device *dp) @@ -66,53 +84,54 @@ void analogix_dp_init_analog_param(struct analogix_dp_device *dp) u32 reg; reg = TX_TERMINAL_CTRL_50_OHM; - writel(reg, dp->reg_base + ANALOGIX_DP_ANALOG_CTL_1); + analogix_dp_write(dp, ANALOGIX_DP_ANALOG_CTL_1, reg); reg = SEL_24M | TX_DVDD_BIT_1_0625V; - writel(reg, dp->reg_base + ANALOGIX_DP_ANALOG_CTL_2); + analogix_dp_write(dp, ANALOGIX_DP_ANALOG_CTL_2, reg); if (dp->plat_data && is_rockchip(dp->plat_data->dev_type)) { reg = REF_CLK_24M; if (dp->plat_data->dev_type == RK3288_DP) reg ^= REF_CLK_MASK; - writel(reg, dp->reg_base + ANALOGIX_DP_PLL_REG_1); 
- writel(0x95, dp->reg_base + ANALOGIX_DP_PLL_REG_2); - writel(0x40, dp->reg_base + ANALOGIX_DP_PLL_REG_3); - writel(0x58, dp->reg_base + ANALOGIX_DP_PLL_REG_4); - writel(0x22, dp->reg_base + ANALOGIX_DP_PLL_REG_5); + analogix_dp_write(dp, ANALOGIX_DP_PLL_REG_1, reg); + analogix_dp_write(dp, ANALOGIX_DP_PLL_REG_2, 0x99); + analogix_dp_write(dp, ANALOGIX_DP_PLL_REG_3, 0x40); + analogix_dp_write(dp, ANALOGIX_DP_PLL_REG_4, 0x58); + analogix_dp_write(dp, ANALOGIX_DP_PLL_REG_5, 0x22); + analogix_dp_write(dp, ANALOGIX_DP_BIAS, 0x44); } reg = DRIVE_DVDD_BIT_1_0625V | VCO_BIT_600_MICRO; - writel(reg, dp->reg_base + ANALOGIX_DP_ANALOG_CTL_3); + analogix_dp_write(dp, ANALOGIX_DP_ANALOG_CTL_3, reg); reg = PD_RING_OSC | AUX_TERMINAL_CTRL_50_OHM | TX_CUR1_2X | TX_CUR_16_MA; - writel(reg, dp->reg_base + ANALOGIX_DP_PLL_FILTER_CTL_1); + analogix_dp_write(dp, ANALOGIX_DP_PLL_FILTER_CTL_1, reg); reg = CH3_AMP_400_MV | CH2_AMP_400_MV | CH1_AMP_400_MV | CH0_AMP_400_MV; - writel(reg, dp->reg_base + ANALOGIX_DP_TX_AMP_TUNING_CTL); + analogix_dp_write(dp, ANALOGIX_DP_TX_AMP_TUNING_CTL, reg); } void analogix_dp_init_interrupt(struct analogix_dp_device *dp) { /* Set interrupt pin assertion polarity as high */ - writel(INT_POL1 | INT_POL0, dp->reg_base + ANALOGIX_DP_INT_CTL); + analogix_dp_write(dp, ANALOGIX_DP_INT_CTL, INT_POL1 | INT_POL0); /* Clear pending regisers */ - writel(0xff, dp->reg_base + ANALOGIX_DP_COMMON_INT_STA_1); - writel(0x4f, dp->reg_base + ANALOGIX_DP_COMMON_INT_STA_2); - writel(0xe0, dp->reg_base + ANALOGIX_DP_COMMON_INT_STA_3); - writel(0xe7, dp->reg_base + ANALOGIX_DP_COMMON_INT_STA_4); - writel(0x63, dp->reg_base + ANALOGIX_DP_INT_STA); + analogix_dp_write(dp, ANALOGIX_DP_COMMON_INT_STA_1, 0xff); + analogix_dp_write(dp, ANALOGIX_DP_COMMON_INT_STA_2, 0x4f); + analogix_dp_write(dp, ANALOGIX_DP_COMMON_INT_STA_3, 0xe0); + analogix_dp_write(dp, ANALOGIX_DP_COMMON_INT_STA_4, 0xe7); + analogix_dp_write(dp, ANALOGIX_DP_INT_STA, 0x63); /* 0:mask,1: unmask */ - writel(0x00, dp->reg_base + ANALOGIX_DP_COMMON_INT_MASK_1); - writel(0x00, dp->reg_base + ANALOGIX_DP_COMMON_INT_MASK_2); - writel(0x00, dp->reg_base + ANALOGIX_DP_COMMON_INT_MASK_3); - writel(0x00, dp->reg_base + ANALOGIX_DP_COMMON_INT_MASK_4); - writel(0x00, dp->reg_base + ANALOGIX_DP_INT_STA_MASK); + analogix_dp_write(dp, ANALOGIX_DP_COMMON_INT_MASK_1, 0x00); + analogix_dp_write(dp, ANALOGIX_DP_COMMON_INT_MASK_2, 0x00); + analogix_dp_write(dp, ANALOGIX_DP_COMMON_INT_MASK_3, 0x00); + analogix_dp_write(dp, ANALOGIX_DP_COMMON_INT_MASK_4, 0x00); + analogix_dp_write(dp, ANALOGIX_DP_INT_STA_MASK, 0x00); } void analogix_dp_reset(struct analogix_dp_device *dp) @@ -130,44 +149,44 @@ void analogix_dp_reset(struct analogix_dp_device *dp) AUD_FIFO_FUNC_EN_N | AUD_FUNC_EN_N | HDCP_FUNC_EN_N | SW_FUNC_EN_N; - writel(reg, dp->reg_base + ANALOGIX_DP_FUNC_EN_1); + analogix_dp_write(dp, ANALOGIX_DP_FUNC_EN_1, reg); reg = SSC_FUNC_EN_N | AUX_FUNC_EN_N | SERDES_FIFO_FUNC_EN_N | LS_CLK_DOMAIN_FUNC_EN_N; - writel(reg, dp->reg_base + ANALOGIX_DP_FUNC_EN_2); + analogix_dp_write(dp, ANALOGIX_DP_FUNC_EN_2, reg); usleep_range(20, 30); analogix_dp_lane_swap(dp, 0); - writel(0x0, dp->reg_base + ANALOGIX_DP_SYS_CTL_1); - writel(0x40, dp->reg_base + ANALOGIX_DP_SYS_CTL_2); - writel(0x0, dp->reg_base + ANALOGIX_DP_SYS_CTL_3); - writel(0x0, dp->reg_base + ANALOGIX_DP_SYS_CTL_4); + analogix_dp_write(dp, ANALOGIX_DP_SYS_CTL_1, 0x0); + analogix_dp_write(dp, ANALOGIX_DP_SYS_CTL_2, 0x40); + analogix_dp_write(dp, ANALOGIX_DP_SYS_CTL_3, 0x0); + analogix_dp_write(dp, 
ANALOGIX_DP_SYS_CTL_4, 0x0); - writel(0x0, dp->reg_base + ANALOGIX_DP_PKT_SEND_CTL); - writel(0x0, dp->reg_base + ANALOGIX_DP_HDCP_CTL); + analogix_dp_write(dp, ANALOGIX_DP_PKT_SEND_CTL, 0x0); + analogix_dp_write(dp, ANALOGIX_DP_HDCP_CTL, 0x0); - writel(0x5e, dp->reg_base + ANALOGIX_DP_HPD_DEGLITCH_L); - writel(0x1a, dp->reg_base + ANALOGIX_DP_HPD_DEGLITCH_H); + analogix_dp_write(dp, ANALOGIX_DP_HPD_DEGLITCH_L, 0x5e); + analogix_dp_write(dp, ANALOGIX_DP_HPD_DEGLITCH_H, 0x1a); - writel(0x10, dp->reg_base + ANALOGIX_DP_LINK_DEBUG_CTL); + analogix_dp_write(dp, ANALOGIX_DP_LINK_DEBUG_CTL, 0x10); - writel(0x0, dp->reg_base + ANALOGIX_DP_PHY_TEST); + analogix_dp_write(dp, ANALOGIX_DP_PHY_TEST, 0x0); - writel(0x0, dp->reg_base + ANALOGIX_DP_VIDEO_FIFO_THRD); - writel(0x20, dp->reg_base + ANALOGIX_DP_AUDIO_MARGIN); + analogix_dp_write(dp, ANALOGIX_DP_VIDEO_FIFO_THRD, 0x0); + analogix_dp_write(dp, ANALOGIX_DP_AUDIO_MARGIN, 0x20); - writel(0x4, dp->reg_base + ANALOGIX_DP_M_VID_GEN_FILTER_TH); - writel(0x2, dp->reg_base + ANALOGIX_DP_M_AUD_GEN_FILTER_TH); + analogix_dp_write(dp, ANALOGIX_DP_M_VID_GEN_FILTER_TH, 0x4); + analogix_dp_write(dp, ANALOGIX_DP_M_AUD_GEN_FILTER_TH, 0x2); - writel(0x00000101, dp->reg_base + ANALOGIX_DP_SOC_GENERAL_CTL); + analogix_dp_write(dp, ANALOGIX_DP_SOC_GENERAL_CTL, 0x00000101); } void analogix_dp_swreset(struct analogix_dp_device *dp) { - writel(RESET_DP_TX, dp->reg_base + ANALOGIX_DP_TX_SW_RESET); + analogix_dp_write(dp, ANALOGIX_DP_TX_SW_RESET, RESET_DP_TX); } void analogix_dp_config_interrupt(struct analogix_dp_device *dp) @@ -176,19 +195,18 @@ void analogix_dp_config_interrupt(struct analogix_dp_device *dp) /* 0: mask, 1: unmask */ reg = COMMON_INT_MASK_1; - writel(reg, dp->reg_base + ANALOGIX_DP_COMMON_INT_MASK_1); + analogix_dp_write(dp, ANALOGIX_DP_COMMON_INT_MASK_1, reg); reg = COMMON_INT_MASK_2; - writel(reg, dp->reg_base + ANALOGIX_DP_COMMON_INT_MASK_2); + analogix_dp_write(dp, ANALOGIX_DP_COMMON_INT_MASK_2, reg); reg = COMMON_INT_MASK_3; - writel(reg, dp->reg_base + ANALOGIX_DP_COMMON_INT_MASK_3); + analogix_dp_write(dp, ANALOGIX_DP_COMMON_INT_MASK_3, reg); - reg = COMMON_INT_MASK_4; - writel(reg, dp->reg_base + ANALOGIX_DP_COMMON_INT_MASK_4); - - reg = INT_STA_MASK; - writel(reg, dp->reg_base + ANALOGIX_DP_INT_STA_MASK); + if (dp->force_hpd || dp->hpd_gpiod) + analogix_dp_mute_hpd_interrupt(dp); + else + analogix_dp_unmute_hpd_interrupt(dp); } void analogix_dp_mute_hpd_interrupt(struct analogix_dp_device *dp) @@ -196,13 +214,13 @@ void analogix_dp_mute_hpd_interrupt(struct analogix_dp_device *dp) u32 reg; /* 0: mask, 1: unmask */ - reg = readl(dp->reg_base + ANALOGIX_DP_COMMON_INT_MASK_4); + reg = analogix_dp_read(dp, ANALOGIX_DP_COMMON_INT_MASK_4); reg &= ~COMMON_INT_MASK_4; - writel(reg, dp->reg_base + ANALOGIX_DP_COMMON_INT_MASK_4); + analogix_dp_write(dp, ANALOGIX_DP_COMMON_INT_MASK_4, reg); - reg = readl(dp->reg_base + ANALOGIX_DP_INT_STA_MASK); - reg &= ~INT_STA_MASK; - writel(reg, dp->reg_base + ANALOGIX_DP_INT_STA_MASK); + reg = analogix_dp_read(dp, ANALOGIX_DP_INT_STA_MASK); + reg &= ~INT_HPD; + analogix_dp_write(dp, ANALOGIX_DP_INT_STA_MASK, reg); } void analogix_dp_unmute_hpd_interrupt(struct analogix_dp_device *dp) @@ -211,17 +229,18 @@ void analogix_dp_unmute_hpd_interrupt(struct analogix_dp_device *dp) /* 0: mask, 1: unmask */ reg = COMMON_INT_MASK_4; - writel(reg, dp->reg_base + ANALOGIX_DP_COMMON_INT_MASK_4); + analogix_dp_write(dp, ANALOGIX_DP_COMMON_INT_MASK_4, reg); - reg = INT_STA_MASK; - writel(reg, dp->reg_base + 
ANALOGIX_DP_INT_STA_MASK); + reg = analogix_dp_read(dp, ANALOGIX_DP_INT_STA_MASK); + reg |= INT_HPD; + analogix_dp_write(dp, ANALOGIX_DP_INT_STA_MASK, reg); } enum pll_status analogix_dp_get_pll_lock_status(struct analogix_dp_device *dp) { u32 reg; - reg = readl(dp->reg_base + ANALOGIX_DP_DEBUG_CTL); + reg = analogix_dp_read(dp, ANALOGIX_DP_DEBUG_CTL); if (reg & PLL_LOCK) return PLL_LOCKED; else @@ -239,12 +258,12 @@ void analogix_dp_set_pll_power_down(struct analogix_dp_device *dp, bool enable) mask = RK_PLL_PD; } - reg = readl(dp->reg_base + pd_addr); + reg = analogix_dp_read(dp, pd_addr); if (enable) reg |= mask; else reg &= ~mask; - writel(reg, dp->reg_base + pd_addr); + analogix_dp_write(dp, pd_addr, reg); } void analogix_dp_set_analog_power_down(struct analogix_dp_device *dp, @@ -265,52 +284,54 @@ void analogix_dp_set_analog_power_down(struct analogix_dp_device *dp, else mask = AUX_PD; - reg = readl(dp->reg_base + phy_pd_addr); - if (enable) + reg = analogix_dp_read(dp, phy_pd_addr); + if (enable) { + reg &= ~(DP_INC_BG | DP_EXP_BG); reg |= mask; - else + } else { reg &= ~mask; - writel(reg, dp->reg_base + phy_pd_addr); + } + analogix_dp_write(dp, phy_pd_addr, reg); break; case CH0_BLOCK: mask = CH0_PD; - reg = readl(dp->reg_base + phy_pd_addr); + reg = analogix_dp_read(dp, phy_pd_addr); if (enable) reg |= mask; else reg &= ~mask; - writel(reg, dp->reg_base + phy_pd_addr); + analogix_dp_write(dp, phy_pd_addr, reg); break; case CH1_BLOCK: mask = CH1_PD; - reg = readl(dp->reg_base + phy_pd_addr); + reg = analogix_dp_read(dp, phy_pd_addr); if (enable) reg |= mask; else reg &= ~mask; - writel(reg, dp->reg_base + phy_pd_addr); + analogix_dp_write(dp, phy_pd_addr, reg); break; case CH2_BLOCK: mask = CH2_PD; - reg = readl(dp->reg_base + phy_pd_addr); + reg = analogix_dp_read(dp, phy_pd_addr); if (enable) reg |= mask; else reg &= ~mask; - writel(reg, dp->reg_base + phy_pd_addr); + analogix_dp_write(dp, phy_pd_addr, reg); break; case CH3_BLOCK: mask = CH3_PD; - reg = readl(dp->reg_base + phy_pd_addr); + reg = analogix_dp_read(dp, phy_pd_addr); if (enable) reg |= mask; else reg &= ~mask; - writel(reg, dp->reg_base + phy_pd_addr); + analogix_dp_write(dp, phy_pd_addr, reg); break; case ANALOG_TOTAL: /* @@ -323,29 +344,29 @@ void analogix_dp_set_analog_power_down(struct analogix_dp_device *dp, else mask = DP_PHY_PD; - reg = readl(dp->reg_base + phy_pd_addr); + reg = analogix_dp_read(dp, phy_pd_addr); if (enable) reg |= mask; else reg &= ~mask; - writel(reg, dp->reg_base + phy_pd_addr); + analogix_dp_write(dp, phy_pd_addr, reg); if (dp->plat_data && is_rockchip(dp->plat_data->dev_type)) usleep_range(10, 15); break; case POWER_ALL: if (enable) { reg = DP_ALL_PD; - writel(reg, dp->reg_base + phy_pd_addr); + analogix_dp_write(dp, phy_pd_addr, reg); } else { reg = DP_ALL_PD; - writel(reg, dp->reg_base + phy_pd_addr); + analogix_dp_write(dp, phy_pd_addr, reg); usleep_range(10, 15); reg &= ~DP_INC_BG; - writel(reg, dp->reg_base + phy_pd_addr); + analogix_dp_write(dp, phy_pd_addr, reg); usleep_range(10, 15); - writel(0x00, dp->reg_base + phy_pd_addr); + analogix_dp_write(dp, phy_pd_addr, 0x00); } break; default: @@ -356,36 +377,24 @@ void analogix_dp_set_analog_power_down(struct analogix_dp_device *dp, int analogix_dp_init_analog_func(struct analogix_dp_device *dp) { u32 reg; - int timeout_loop = 0; analogix_dp_set_analog_power_down(dp, POWER_ALL, 0); reg = PLL_LOCK_CHG; - writel(reg, dp->reg_base + ANALOGIX_DP_COMMON_INT_STA_1); + analogix_dp_write(dp, ANALOGIX_DP_COMMON_INT_STA_1, reg); - reg = 
readl(dp->reg_base + ANALOGIX_DP_DEBUG_CTL); + reg = analogix_dp_read(dp, ANALOGIX_DP_DEBUG_CTL); reg &= ~(F_PLL_LOCK | PLL_LOCK_CTRL); - writel(reg, dp->reg_base + ANALOGIX_DP_DEBUG_CTL); + analogix_dp_write(dp, ANALOGIX_DP_DEBUG_CTL, reg); /* Power up PLL */ - if (analogix_dp_get_pll_lock_status(dp) == PLL_UNLOCKED) { - analogix_dp_set_pll_power_down(dp, 0); - - while (analogix_dp_get_pll_lock_status(dp) == PLL_UNLOCKED) { - timeout_loop++; - if (DP_TIMEOUT_LOOP_COUNT < timeout_loop) { - dev_err(dp->dev, "failed to get pll lock status\n"); - return -ETIMEDOUT; - } - usleep_range(10, 20); - } - } + analogix_dp_set_pll_power_down(dp, 0); /* Enable Serdes FIFO function and Link symbol clock domain module */ - reg = readl(dp->reg_base + ANALOGIX_DP_FUNC_EN_2); + reg = analogix_dp_read(dp, ANALOGIX_DP_FUNC_EN_2); reg &= ~(SERDES_FIFO_FUNC_EN_N | LS_CLK_DOMAIN_FUNC_EN_N | AUX_FUNC_EN_N); - writel(reg, dp->reg_base + ANALOGIX_DP_FUNC_EN_2); + analogix_dp_write(dp, ANALOGIX_DP_FUNC_EN_2, reg); return 0; } @@ -397,10 +406,10 @@ void analogix_dp_clear_hotplug_interrupts(struct analogix_dp_device *dp) return; reg = HOTPLUG_CHG | HPD_LOST | PLUG; - writel(reg, dp->reg_base + ANALOGIX_DP_COMMON_INT_STA_4); + analogix_dp_write(dp, ANALOGIX_DP_COMMON_INT_STA_4, reg); reg = INT_HPD; - writel(reg, dp->reg_base + ANALOGIX_DP_INT_STA); + analogix_dp_write(dp, ANALOGIX_DP_INT_STA, reg); } void analogix_dp_init_hpd(struct analogix_dp_device *dp) @@ -412,45 +421,37 @@ void analogix_dp_init_hpd(struct analogix_dp_device *dp) analogix_dp_clear_hotplug_interrupts(dp); - reg = readl(dp->reg_base + ANALOGIX_DP_SYS_CTL_3); + reg = analogix_dp_read(dp, ANALOGIX_DP_SYS_CTL_3); reg &= ~(F_HPD | HPD_CTRL); - writel(reg, dp->reg_base + ANALOGIX_DP_SYS_CTL_3); + analogix_dp_write(dp, ANALOGIX_DP_SYS_CTL_3, reg); } void analogix_dp_force_hpd(struct analogix_dp_device *dp) { u32 reg; - reg = readl(dp->reg_base + ANALOGIX_DP_SYS_CTL_3); - reg = (F_HPD | HPD_CTRL); - writel(reg, dp->reg_base + ANALOGIX_DP_SYS_CTL_3); + reg = analogix_dp_read(dp, ANALOGIX_DP_SYS_CTL_3); + reg |= (F_HPD | HPD_CTRL); + analogix_dp_write(dp, ANALOGIX_DP_SYS_CTL_3, reg); } enum dp_irq_type analogix_dp_get_irq_type(struct analogix_dp_device *dp) { u32 reg; - if (dp->hpd_gpiod) { - reg = gpiod_get_value(dp->hpd_gpiod); - if (reg) - return DP_IRQ_TYPE_HP_CABLE_IN; - else - return DP_IRQ_TYPE_HP_CABLE_OUT; - } else { - /* Parse hotplug interrupt status register */ - reg = readl(dp->reg_base + ANALOGIX_DP_COMMON_INT_STA_4); + /* Parse hotplug interrupt status register */ + reg = analogix_dp_read(dp, ANALOGIX_DP_COMMON_INT_STA_4); - if (reg & PLUG) - return DP_IRQ_TYPE_HP_CABLE_IN; + if (reg & PLUG) + return DP_IRQ_TYPE_HP_CABLE_IN; - if (reg & HPD_LOST) - return DP_IRQ_TYPE_HP_CABLE_OUT; + if (reg & HPD_LOST) + return DP_IRQ_TYPE_HP_CABLE_OUT; - if (reg & HOTPLUG_CHG) - return DP_IRQ_TYPE_HP_CHANGE; + if (reg & HOTPLUG_CHG) + return DP_IRQ_TYPE_HP_CHANGE; - return DP_IRQ_TYPE_UNKNOWN; - } + return DP_IRQ_TYPE_UNKNOWN; } void analogix_dp_reset_aux(struct analogix_dp_device *dp) @@ -458,9 +459,9 @@ void analogix_dp_reset_aux(struct analogix_dp_device *dp) u32 reg; /* Disable AUX channel module */ - reg = readl(dp->reg_base + ANALOGIX_DP_FUNC_EN_2); + reg = analogix_dp_read(dp, ANALOGIX_DP_FUNC_EN_2); reg |= AUX_FUNC_EN_N; - writel(reg, dp->reg_base + ANALOGIX_DP_FUNC_EN_2); + analogix_dp_write(dp, ANALOGIX_DP_FUNC_EN_2, reg); } void analogix_dp_init_aux(struct analogix_dp_device *dp) @@ -469,7 +470,7 @@ void analogix_dp_init_aux(struct 
analogix_dp_device *dp) /* Clear inerrupts related to AUX channel */ reg = RPLY_RECEIV | AUX_ERR; - writel(reg, dp->reg_base + ANALOGIX_DP_INT_STA); + analogix_dp_write(dp, ANALOGIX_DP_INT_STA, reg); analogix_dp_set_analog_power_down(dp, AUX_BLOCK, true); usleep_range(10, 11); @@ -487,16 +488,17 @@ void analogix_dp_init_aux(struct analogix_dp_device *dp) reg |= AUX_HW_RETRY_COUNT_SEL(0) | AUX_HW_RETRY_INTERVAL_600_MICROSECONDS; - writel(reg, dp->reg_base + ANALOGIX_DP_AUX_HW_RETRY_CTL); + analogix_dp_write(dp, ANALOGIX_DP_AUX_HW_RETRY_CTL, reg); /* Receive AUX Channel DEFER commands equal to DEFFER_COUNT*64 */ reg = DEFER_CTRL_EN | DEFER_COUNT(1); - writel(reg, dp->reg_base + ANALOGIX_DP_AUX_CH_DEFER_CTL); + analogix_dp_write(dp, ANALOGIX_DP_AUX_CH_DEFER_CTL, reg); /* Enable AUX channel module */ - reg = readl(dp->reg_base + ANALOGIX_DP_FUNC_EN_2); + analogix_dp_enable_sw_function(dp); + reg = analogix_dp_read(dp, ANALOGIX_DP_FUNC_EN_2); reg &= ~AUX_FUNC_EN_N; - writel(reg, dp->reg_base + ANALOGIX_DP_FUNC_EN_2); + analogix_dp_write(dp, ANALOGIX_DP_FUNC_EN_2, reg); } int analogix_dp_get_plug_in_status(struct analogix_dp_device *dp) @@ -507,7 +509,7 @@ int analogix_dp_get_plug_in_status(struct analogix_dp_device *dp) if (gpiod_get_value(dp->hpd_gpiod)) return 0; } else { - reg = readl(dp->reg_base + ANALOGIX_DP_SYS_CTL_3); + reg = analogix_dp_read(dp, ANALOGIX_DP_SYS_CTL_3); if (reg & HPD_STATUS) return 0; } @@ -519,145 +521,181 @@ void analogix_dp_enable_sw_function(struct analogix_dp_device *dp) { u32 reg; - reg = readl(dp->reg_base + ANALOGIX_DP_FUNC_EN_1); + reg = analogix_dp_read(dp, ANALOGIX_DP_FUNC_EN_1); reg &= ~SW_FUNC_EN_N; - writel(reg, dp->reg_base + ANALOGIX_DP_FUNC_EN_1); + analogix_dp_write(dp, ANALOGIX_DP_FUNC_EN_1, reg); } -int analogix_dp_start_aux_transaction(struct analogix_dp_device *dp) +static void analogix_dp_ssc_enable(struct analogix_dp_device *dp) { - int reg; - int retval = 0; - int timeout_loop = 0; - - /* Enable AUX CH operation */ - reg = readl(dp->reg_base + ANALOGIX_DP_AUX_CH_CTL_2); - reg |= AUX_EN; - writel(reg, dp->reg_base + ANALOGIX_DP_AUX_CH_CTL_2); - - /* Is AUX CH command reply received? */ - reg = readl(dp->reg_base + ANALOGIX_DP_INT_STA); - while (!(reg & RPLY_RECEIV)) { - timeout_loop++; - if (DP_TIMEOUT_LOOP_COUNT < timeout_loop) { - dev_err(dp->dev, "AUX CH command reply failed!\n"); - return -ETIMEDOUT; - } - reg = readl(dp->reg_base + ANALOGIX_DP_INT_STA); - usleep_range(10, 11); - } - - /* Clear interrupt source for AUX CH command reply */ - writel(RPLY_RECEIV, dp->reg_base + ANALOGIX_DP_INT_STA); - - /* Clear interrupt source for AUX CH access error */ - reg = readl(dp->reg_base + ANALOGIX_DP_INT_STA); - if (reg & AUX_ERR) { - writel(AUX_ERR, dp->reg_base + ANALOGIX_DP_INT_STA); - return -EREMOTEIO; - } - - /* Check AUX CH error access status */ - reg = readl(dp->reg_base + ANALOGIX_DP_AUX_CH_STA); - if ((reg & AUX_STATUS_MASK) != 0) { - dev_err(dp->dev, "AUX CH error happens: %d\n\n", - reg & AUX_STATUS_MASK); - return -EREMOTEIO; - } + u32 reg; - return retval; + /* 4500ppm */ + writel(0x19, dp->reg_base + ANALOIGX_DP_SSC_REG); + /* + * To apply updated SSC parameters into SSC operation, + * firmware must disable and enable this bit. 
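+ * (SSC_FUNC_EN_N is an active-low function enable, so the set/clear + * sequence below first disables SSC, then re-enables it so that the + * new 4500ppm setting takes effect.)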
+ */ + reg = readl(dp->reg_base + ANALOGIX_DP_FUNC_EN_2); + reg |= SSC_FUNC_EN_N; + writel(reg, dp->reg_base + ANALOGIX_DP_FUNC_EN_2); + reg &= ~SSC_FUNC_EN_N; + writel(reg, dp->reg_base + ANALOGIX_DP_FUNC_EN_2); } -int analogix_dp_write_byte_to_dpcd(struct analogix_dp_device *dp, - unsigned int reg_addr, - unsigned char data) +static void analogix_dp_ssc_disable(struct analogix_dp_device *dp) { u32 reg; - int i; - int retval; - - for (i = 0; i < 3; i++) { - /* Clear AUX CH data buffer */ - reg = BUF_CLR; - writel(reg, dp->reg_base + ANALOGIX_DP_BUFFER_DATA_CTL); - - /* Select DPCD device address */ - reg = AUX_ADDR_7_0(reg_addr); - writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_7_0); - reg = AUX_ADDR_15_8(reg_addr); - writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_15_8); - reg = AUX_ADDR_19_16(reg_addr); - writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_19_16); - - /* Write data buffer */ - reg = (unsigned int)data; - writel(reg, dp->reg_base + ANALOGIX_DP_BUF_DATA_0); - /* - * Set DisplayPort transaction and write 1 byte - * If bit 3 is 1, DisplayPort transaction. - * If Bit 3 is 0, I2C transaction. - */ - reg = AUX_TX_COMM_DP_TRANSACTION | AUX_TX_COMM_WRITE; - writel(reg, dp->reg_base + ANALOGIX_DP_AUX_CH_CTL_1); - - /* Start AUX transaction */ - retval = analogix_dp_start_aux_transaction(dp); - if (retval == 0) - break; - - dev_dbg(dp->dev, "%s: Aux Transaction fail!\n", __func__); - } + reg = readl(dp->reg_base + ANALOGIX_DP_FUNC_EN_2); + reg |= SSC_FUNC_EN_N; + writel(reg, dp->reg_base + ANALOGIX_DP_FUNC_EN_2); +} - return retval; +bool analogix_dp_ssc_supported(struct analogix_dp_device *dp) +{ + /* Check if SSC is supported by both sides */ + return dp->plat_data->ssc && dp->link_train.ssc; } void analogix_dp_set_link_bandwidth(struct analogix_dp_device *dp, u32 bwtype) { - u32 reg; + u32 status; + int ret; - reg = bwtype; - if ((bwtype == DP_LINK_BW_2_7) || (bwtype == DP_LINK_BW_1_62)) - writel(reg, dp->reg_base + ANALOGIX_DP_LINK_BW_SET); + analogix_dp_write(dp, ANALOGIX_DP_LINK_BW_SET, bwtype); + + if (dp->phy) { + union phy_configure_opts phy_cfg = {0}; + + phy_cfg.dp.lanes = dp->link_train.lane_count; + phy_cfg.dp.link_rate = + drm_dp_bw_code_to_link_rate(dp->link_train.link_rate) / 100; + phy_cfg.dp.ssc = analogix_dp_ssc_supported(dp); + phy_cfg.dp.set_lanes = false; + phy_cfg.dp.set_rate = true; + phy_cfg.dp.set_voltages = false; + ret = phy_configure(dp->phy, &phy_cfg); + if (ret && ret != -EOPNOTSUPP) { + dev_err(dp->dev, "%s: phy_configure failed: %d\n", + __func__, ret); + return; + } + } else { + if (analogix_dp_ssc_supported(dp)) + analogix_dp_ssc_enable(dp); + else + analogix_dp_ssc_disable(dp); + } + + ret = readx_poll_timeout(analogix_dp_get_pll_lock_status, dp, status, + status != PLL_UNLOCKED, 120, + 120 * DP_TIMEOUT_LOOP_COUNT); + if (ret) { + dev_err(dp->dev, "Wait for pll lock failed %d\n", ret); + return; + } } void analogix_dp_get_link_bandwidth(struct analogix_dp_device *dp, u32 *bwtype) { u32 reg; - reg = readl(dp->reg_base + ANALOGIX_DP_LINK_BW_SET); + reg = analogix_dp_read(dp, ANALOGIX_DP_LINK_BW_SET); *bwtype = reg; } void analogix_dp_set_lane_count(struct analogix_dp_device *dp, u32 count) { u32 reg; + int ret; reg = count; - writel(reg, dp->reg_base + ANALOGIX_DP_LANE_COUNT_SET); + analogix_dp_write(dp, ANALOGIX_DP_LANE_COUNT_SET, reg); + + if (dp->phy) { + union phy_configure_opts phy_cfg = {0}; + + phy_cfg.dp.lanes = dp->link_train.lane_count; + phy_cfg.dp.set_lanes = true; + phy_cfg.dp.set_rate = false; + phy_cfg.dp.set_voltages = false; + ret = 
phy_configure(dp->phy, &phy_cfg); + if (ret && ret != -EOPNOTSUPP) { + dev_err(dp->dev, "%s: phy_configure() failed: %d\n", + __func__, ret); + return; + } + } } void analogix_dp_get_lane_count(struct analogix_dp_device *dp, u32 *count) { u32 reg; - reg = readl(dp->reg_base + ANALOGIX_DP_LANE_COUNT_SET); + reg = analogix_dp_read(dp, ANALOGIX_DP_LANE_COUNT_SET); *count = reg; } +void analogix_dp_set_lane_link_training(struct analogix_dp_device *dp) +{ + u8 lane; + int ret; + + for (lane = 0; lane < dp->link_train.lane_count; lane++) + analogix_dp_write(dp, + ANALOGIX_DP_LN0_LINK_TRAINING_CTL + 4 * lane, + dp->link_train.training_lane[lane]); + + if (dp->phy) { + union phy_configure_opts phy_cfg = {0}; + + for (lane = 0; lane < dp->link_train.lane_count; lane++) { + u8 training_lane = dp->link_train.training_lane[lane]; + u8 vs, pe; + + vs = (training_lane & DP_TRAIN_VOLTAGE_SWING_MASK) >> + DP_TRAIN_VOLTAGE_SWING_SHIFT; + pe = (training_lane & DP_TRAIN_PRE_EMPHASIS_MASK) >> + DP_TRAIN_PRE_EMPHASIS_SHIFT; + phy_cfg.dp.voltage[lane] = vs; + phy_cfg.dp.pre[lane] = pe; + } + + phy_cfg.dp.lanes = dp->link_train.lane_count; + phy_cfg.dp.link_rate = + drm_dp_bw_code_to_link_rate(dp->link_train.link_rate) / 100; + phy_cfg.dp.set_lanes = false; + phy_cfg.dp.set_rate = false; + phy_cfg.dp.set_voltages = true; + ret = phy_configure(dp->phy, &phy_cfg); + if (ret && ret != -EOPNOTSUPP) { + dev_err(dp->dev, "%s: phy_configure() failed: %d\n", + __func__, ret); + return; + } + } +} + +u32 analogix_dp_get_lane_link_training(struct analogix_dp_device *dp, u8 lane) +{ + return analogix_dp_read(dp, + ANALOGIX_DP_LN0_LINK_TRAINING_CTL + 4 * lane); +} + void analogix_dp_enable_enhanced_mode(struct analogix_dp_device *dp, bool enable) { u32 reg; if (enable) { - reg = readl(dp->reg_base + ANALOGIX_DP_SYS_CTL_4); + reg = analogix_dp_read(dp, ANALOGIX_DP_SYS_CTL_4); reg |= ENHANCED; - writel(reg, dp->reg_base + ANALOGIX_DP_SYS_CTL_4); + analogix_dp_write(dp, ANALOGIX_DP_SYS_CTL_4, reg); } else { - reg = readl(dp->reg_base + ANALOGIX_DP_SYS_CTL_4); + reg = analogix_dp_read(dp, ANALOGIX_DP_SYS_CTL_4); reg &= ~ENHANCED; - writel(reg, dp->reg_base + ANALOGIX_DP_SYS_CTL_4); + analogix_dp_write(dp, ANALOGIX_DP_SYS_CTL_4, reg); } } @@ -669,144 +707,48 @@ void analogix_dp_set_training_pattern(struct analogix_dp_device *dp, switch (pattern) { case PRBS7: reg = SCRAMBLING_ENABLE | LINK_QUAL_PATTERN_SET_PRBS7; - writel(reg, dp->reg_base + ANALOGIX_DP_TRAINING_PTN_SET); + analogix_dp_write(dp, ANALOGIX_DP_TRAINING_PTN_SET, reg); break; case D10_2: reg = SCRAMBLING_ENABLE | LINK_QUAL_PATTERN_SET_D10_2; - writel(reg, dp->reg_base + ANALOGIX_DP_TRAINING_PTN_SET); + analogix_dp_write(dp, ANALOGIX_DP_TRAINING_PTN_SET, reg); break; case TRAINING_PTN1: reg = SCRAMBLING_DISABLE | SW_TRAINING_PATTERN_SET_PTN1; - writel(reg, dp->reg_base + ANALOGIX_DP_TRAINING_PTN_SET); + analogix_dp_write(dp, ANALOGIX_DP_TRAINING_PTN_SET, reg); break; case TRAINING_PTN2: reg = SCRAMBLING_DISABLE | SW_TRAINING_PATTERN_SET_PTN2; - writel(reg, dp->reg_base + ANALOGIX_DP_TRAINING_PTN_SET); + analogix_dp_write(dp, ANALOGIX_DP_TRAINING_PTN_SET, reg); + break; + case TRAINING_PTN3: + reg = SCRAMBLING_DISABLE | SW_TRAINING_PATTERN_SET_PTN3; + analogix_dp_write(dp, ANALOGIX_DP_TRAINING_PTN_SET, reg); break; case DP_NONE: reg = SCRAMBLING_ENABLE | LINK_QUAL_PATTERN_SET_DISABLE | SW_TRAINING_PATTERN_SET_NORMAL; - writel(reg, dp->reg_base + ANALOGIX_DP_TRAINING_PTN_SET); + analogix_dp_write(dp, ANALOGIX_DP_TRAINING_PTN_SET, reg); break; default: break; } } -void 
analogix_dp_set_lane0_pre_emphasis(struct analogix_dp_device *dp, - u32 level) -{ - u32 reg; - - reg = readl(dp->reg_base + ANALOGIX_DP_LN0_LINK_TRAINING_CTL); - reg &= ~PRE_EMPHASIS_SET_MASK; - reg |= level << PRE_EMPHASIS_SET_SHIFT; - writel(reg, dp->reg_base + ANALOGIX_DP_LN0_LINK_TRAINING_CTL); -} - -void analogix_dp_set_lane1_pre_emphasis(struct analogix_dp_device *dp, - u32 level) -{ - u32 reg; - - reg = readl(dp->reg_base + ANALOGIX_DP_LN1_LINK_TRAINING_CTL); - reg &= ~PRE_EMPHASIS_SET_MASK; - reg |= level << PRE_EMPHASIS_SET_SHIFT; - writel(reg, dp->reg_base + ANALOGIX_DP_LN1_LINK_TRAINING_CTL); -} - -void analogix_dp_set_lane2_pre_emphasis(struct analogix_dp_device *dp, - u32 level) -{ - u32 reg; - - reg = readl(dp->reg_base + ANALOGIX_DP_LN2_LINK_TRAINING_CTL); - reg &= ~PRE_EMPHASIS_SET_MASK; - reg |= level << PRE_EMPHASIS_SET_SHIFT; - writel(reg, dp->reg_base + ANALOGIX_DP_LN2_LINK_TRAINING_CTL); -} - -void analogix_dp_set_lane3_pre_emphasis(struct analogix_dp_device *dp, - u32 level) -{ - u32 reg; - - reg = readl(dp->reg_base + ANALOGIX_DP_LN3_LINK_TRAINING_CTL); - reg &= ~PRE_EMPHASIS_SET_MASK; - reg |= level << PRE_EMPHASIS_SET_SHIFT; - writel(reg, dp->reg_base + ANALOGIX_DP_LN3_LINK_TRAINING_CTL); -} - -void analogix_dp_set_lane0_link_training(struct analogix_dp_device *dp, - u32 training_lane) -{ - u32 reg; - - reg = training_lane; - writel(reg, dp->reg_base + ANALOGIX_DP_LN0_LINK_TRAINING_CTL); -} - -void analogix_dp_set_lane1_link_training(struct analogix_dp_device *dp, - u32 training_lane) -{ - u32 reg; - - reg = training_lane; - writel(reg, dp->reg_base + ANALOGIX_DP_LN1_LINK_TRAINING_CTL); -} - -void analogix_dp_set_lane2_link_training(struct analogix_dp_device *dp, - u32 training_lane) -{ - u32 reg; - - reg = training_lane; - writel(reg, dp->reg_base + ANALOGIX_DP_LN2_LINK_TRAINING_CTL); -} - -void analogix_dp_set_lane3_link_training(struct analogix_dp_device *dp, - u32 training_lane) -{ - u32 reg; - - reg = training_lane; - writel(reg, dp->reg_base + ANALOGIX_DP_LN3_LINK_TRAINING_CTL); -} - -u32 analogix_dp_get_lane0_link_training(struct analogix_dp_device *dp) -{ - return readl(dp->reg_base + ANALOGIX_DP_LN0_LINK_TRAINING_CTL); -} - -u32 analogix_dp_get_lane1_link_training(struct analogix_dp_device *dp) -{ - return readl(dp->reg_base + ANALOGIX_DP_LN1_LINK_TRAINING_CTL); -} - -u32 analogix_dp_get_lane2_link_training(struct analogix_dp_device *dp) -{ - return readl(dp->reg_base + ANALOGIX_DP_LN2_LINK_TRAINING_CTL); -} - -u32 analogix_dp_get_lane3_link_training(struct analogix_dp_device *dp) -{ - return readl(dp->reg_base + ANALOGIX_DP_LN3_LINK_TRAINING_CTL); -} - void analogix_dp_reset_macro(struct analogix_dp_device *dp) { u32 reg; - reg = readl(dp->reg_base + ANALOGIX_DP_PHY_TEST); + reg = analogix_dp_read(dp, ANALOGIX_DP_PHY_TEST); reg |= MACRO_RST; - writel(reg, dp->reg_base + ANALOGIX_DP_PHY_TEST); + analogix_dp_write(dp, ANALOGIX_DP_PHY_TEST, reg); /* 10 us is the minimum reset time. 
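* The 10-20 us range passed to usleep_range() below keeps the reset pulse just above that minimum.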
*/ usleep_range(10, 20); reg &= ~MACRO_RST; - writel(reg, dp->reg_base + ANALOGIX_DP_PHY_TEST); + analogix_dp_write(dp, ANALOGIX_DP_PHY_TEST, reg); } void analogix_dp_init_video(struct analogix_dp_device *dp) @@ -814,19 +756,19 @@ void analogix_dp_init_video(struct analogix_dp_device *dp) u32 reg; reg = VSYNC_DET | VID_FORMAT_CHG | VID_CLK_CHG; - writel(reg, dp->reg_base + ANALOGIX_DP_COMMON_INT_STA_1); + analogix_dp_write(dp, ANALOGIX_DP_COMMON_INT_STA_1, reg); reg = 0x0; - writel(reg, dp->reg_base + ANALOGIX_DP_SYS_CTL_1); + analogix_dp_write(dp, ANALOGIX_DP_SYS_CTL_1, reg); reg = CHA_CRI(4) | CHA_CTRL; - writel(reg, dp->reg_base + ANALOGIX_DP_SYS_CTL_2); + analogix_dp_write(dp, ANALOGIX_DP_SYS_CTL_2, reg); reg = 0x0; - writel(reg, dp->reg_base + ANALOGIX_DP_SYS_CTL_3); + analogix_dp_write(dp, ANALOGIX_DP_SYS_CTL_3, reg); reg = VID_HRES_TH(2) | VID_VRES_TH(0); - writel(reg, dp->reg_base + ANALOGIX_DP_VIDEO_CTL_8); + analogix_dp_write(dp, ANALOGIX_DP_VIDEO_CTL_8, reg); } void analogix_dp_set_video_color_format(struct analogix_dp_device *dp) @@ -837,36 +779,36 @@ void analogix_dp_set_video_color_format(struct analogix_dp_device *dp) reg = (dp->video_info.dynamic_range << IN_D_RANGE_SHIFT) | (dp->video_info.color_depth << IN_BPC_SHIFT) | (dp->video_info.color_space << IN_COLOR_F_SHIFT); - writel(reg, dp->reg_base + ANALOGIX_DP_VIDEO_CTL_2); + analogix_dp_write(dp, ANALOGIX_DP_VIDEO_CTL_2, reg); /* Set Input Color YCbCr Coefficients to ITU601 or ITU709 */ - reg = readl(dp->reg_base + ANALOGIX_DP_VIDEO_CTL_3); + reg = analogix_dp_read(dp, ANALOGIX_DP_VIDEO_CTL_3); reg &= ~IN_YC_COEFFI_MASK; if (dp->video_info.ycbcr_coeff) reg |= IN_YC_COEFFI_ITU709; else reg |= IN_YC_COEFFI_ITU601; - writel(reg, dp->reg_base + ANALOGIX_DP_VIDEO_CTL_3); + analogix_dp_write(dp, ANALOGIX_DP_VIDEO_CTL_3, reg); } int analogix_dp_is_slave_video_stream_clock_on(struct analogix_dp_device *dp) { u32 reg; - reg = readl(dp->reg_base + ANALOGIX_DP_SYS_CTL_1); - writel(reg, dp->reg_base + ANALOGIX_DP_SYS_CTL_1); + reg = analogix_dp_read(dp, ANALOGIX_DP_SYS_CTL_1); + analogix_dp_write(dp, ANALOGIX_DP_SYS_CTL_1, reg); - reg = readl(dp->reg_base + ANALOGIX_DP_SYS_CTL_1); + reg = analogix_dp_read(dp, ANALOGIX_DP_SYS_CTL_1); if (!(reg & DET_STA)) { dev_dbg(dp->dev, "Input stream clock not detected.\n"); return -EINVAL; } - reg = readl(dp->reg_base + ANALOGIX_DP_SYS_CTL_2); - writel(reg, dp->reg_base + ANALOGIX_DP_SYS_CTL_2); + reg = analogix_dp_read(dp, ANALOGIX_DP_SYS_CTL_2); + analogix_dp_write(dp, ANALOGIX_DP_SYS_CTL_2, reg); - reg = readl(dp->reg_base + ANALOGIX_DP_SYS_CTL_2); + reg = analogix_dp_read(dp, ANALOGIX_DP_SYS_CTL_2); dev_dbg(dp->dev, "wait SYS_CTL_2.\n"); if (reg & CHA_STA) { @@ -884,30 +826,30 @@ void analogix_dp_set_video_cr_mn(struct analogix_dp_device *dp, u32 reg; if (type == REGISTER_M) { - reg = readl(dp->reg_base + ANALOGIX_DP_SYS_CTL_4); + reg = analogix_dp_read(dp, ANALOGIX_DP_SYS_CTL_4); reg |= FIX_M_VID; - writel(reg, dp->reg_base + ANALOGIX_DP_SYS_CTL_4); + analogix_dp_write(dp, ANALOGIX_DP_SYS_CTL_4, reg); reg = m_value & 0xff; - writel(reg, dp->reg_base + ANALOGIX_DP_M_VID_0); + analogix_dp_write(dp, ANALOGIX_DP_M_VID_0, reg); reg = (m_value >> 8) & 0xff; - writel(reg, dp->reg_base + ANALOGIX_DP_M_VID_1); + analogix_dp_write(dp, ANALOGIX_DP_M_VID_1, reg); reg = (m_value >> 16) & 0xff; - writel(reg, dp->reg_base + ANALOGIX_DP_M_VID_2); + analogix_dp_write(dp, ANALOGIX_DP_M_VID_2, reg); reg = n_value & 0xff; - writel(reg, dp->reg_base + ANALOGIX_DP_N_VID_0); + analogix_dp_write(dp, 
ANALOGIX_DP_N_VID_0, reg); reg = (n_value >> 8) & 0xff; - writel(reg, dp->reg_base + ANALOGIX_DP_N_VID_1); + analogix_dp_write(dp, ANALOGIX_DP_N_VID_1, reg); reg = (n_value >> 16) & 0xff; - writel(reg, dp->reg_base + ANALOGIX_DP_N_VID_2); + analogix_dp_write(dp, ANALOGIX_DP_N_VID_2, reg); } else { - reg = readl(dp->reg_base + ANALOGIX_DP_SYS_CTL_4); + reg = analogix_dp_read(dp, ANALOGIX_DP_SYS_CTL_4); reg &= ~FIX_M_VID; - writel(reg, dp->reg_base + ANALOGIX_DP_SYS_CTL_4); + analogix_dp_write(dp, ANALOGIX_DP_SYS_CTL_4, reg); - writel(0x00, dp->reg_base + ANALOGIX_DP_N_VID_0); - writel(0x80, dp->reg_base + ANALOGIX_DP_N_VID_1); - writel(0x00, dp->reg_base + ANALOGIX_DP_N_VID_2); + analogix_dp_write(dp, ANALOGIX_DP_N_VID_0, 0x00); + analogix_dp_write(dp, ANALOGIX_DP_N_VID_1, 0x80); + analogix_dp_write(dp, ANALOGIX_DP_N_VID_2, 0x00); } } @@ -916,13 +858,13 @@ void analogix_dp_set_video_timing_mode(struct analogix_dp_device *dp, u32 type) u32 reg; if (type == VIDEO_TIMING_FROM_CAPTURE) { - reg = readl(dp->reg_base + ANALOGIX_DP_VIDEO_CTL_10); + reg = analogix_dp_read(dp, ANALOGIX_DP_VIDEO_CTL_10); reg &= ~FORMAT_SEL; - writel(reg, dp->reg_base + ANALOGIX_DP_VIDEO_CTL_10); + analogix_dp_write(dp, ANALOGIX_DP_VIDEO_CTL_10, reg); } else { - reg = readl(dp->reg_base + ANALOGIX_DP_VIDEO_CTL_10); + reg = analogix_dp_read(dp, ANALOGIX_DP_VIDEO_CTL_10); reg |= FORMAT_SEL; - writel(reg, dp->reg_base + ANALOGIX_DP_VIDEO_CTL_10); + analogix_dp_write(dp, ANALOGIX_DP_VIDEO_CTL_10, reg); } } @@ -931,15 +873,15 @@ void analogix_dp_enable_video_master(struct analogix_dp_device *dp, bool enable) u32 reg; if (enable) { - reg = readl(dp->reg_base + ANALOGIX_DP_SOC_GENERAL_CTL); + reg = analogix_dp_read(dp, ANALOGIX_DP_SOC_GENERAL_CTL); reg &= ~VIDEO_MODE_MASK; reg |= VIDEO_MASTER_MODE_EN | VIDEO_MODE_MASTER_MODE; - writel(reg, dp->reg_base + ANALOGIX_DP_SOC_GENERAL_CTL); + analogix_dp_write(dp, ANALOGIX_DP_SOC_GENERAL_CTL, reg); } else { - reg = readl(dp->reg_base + ANALOGIX_DP_SOC_GENERAL_CTL); + reg = analogix_dp_read(dp, ANALOGIX_DP_SOC_GENERAL_CTL); reg &= ~VIDEO_MODE_MASK; reg |= VIDEO_MODE_SLAVE_MODE; - writel(reg, dp->reg_base + ANALOGIX_DP_SOC_GENERAL_CTL); + analogix_dp_write(dp, ANALOGIX_DP_SOC_GENERAL_CTL, reg); } } @@ -947,19 +889,19 @@ void analogix_dp_start_video(struct analogix_dp_device *dp) { u32 reg; - reg = readl(dp->reg_base + ANALOGIX_DP_VIDEO_CTL_1); + reg = analogix_dp_read(dp, ANALOGIX_DP_VIDEO_CTL_1); reg |= VIDEO_EN; - writel(reg, dp->reg_base + ANALOGIX_DP_VIDEO_CTL_1); + analogix_dp_write(dp, ANALOGIX_DP_VIDEO_CTL_1, reg); } int analogix_dp_is_video_stream_on(struct analogix_dp_device *dp) { u32 reg; - reg = readl(dp->reg_base + ANALOGIX_DP_SYS_CTL_3); - writel(reg, dp->reg_base + ANALOGIX_DP_SYS_CTL_3); + reg = analogix_dp_read(dp, ANALOGIX_DP_SYS_CTL_3); + analogix_dp_write(dp, ANALOGIX_DP_SYS_CTL_3, reg); - reg = readl(dp->reg_base + ANALOGIX_DP_SYS_CTL_3); + reg = analogix_dp_read(dp, ANALOGIX_DP_SYS_CTL_3); if (!(reg & STRM_VALID)) { dev_dbg(dp->dev, "Input video stream is not detected.\n"); return -EINVAL; @@ -972,55 +914,55 @@ void analogix_dp_config_video_slave_mode(struct analogix_dp_device *dp) { u32 reg; - reg = readl(dp->reg_base + ANALOGIX_DP_FUNC_EN_1); + reg = analogix_dp_read(dp, ANALOGIX_DP_FUNC_EN_1); if (dp->plat_data && is_rockchip(dp->plat_data->dev_type)) { reg &= ~(RK_VID_CAP_FUNC_EN_N | RK_VID_FIFO_FUNC_EN_N); } else { reg &= ~(MASTER_VID_FUNC_EN_N | SLAVE_VID_FUNC_EN_N); reg |= MASTER_VID_FUNC_EN_N; } - writel(reg, dp->reg_base + ANALOGIX_DP_FUNC_EN_1); + 
analogix_dp_write(dp, ANALOGIX_DP_FUNC_EN_1, reg); - reg = readl(dp->reg_base + ANALOGIX_DP_VIDEO_CTL_10); + reg = analogix_dp_read(dp, ANALOGIX_DP_VIDEO_CTL_10); reg &= ~INTERACE_SCAN_CFG; reg |= (dp->video_info.interlaced << 2); - writel(reg, dp->reg_base + ANALOGIX_DP_VIDEO_CTL_10); + analogix_dp_write(dp, ANALOGIX_DP_VIDEO_CTL_10, reg); - reg = readl(dp->reg_base + ANALOGIX_DP_VIDEO_CTL_10); + reg = analogix_dp_read(dp, ANALOGIX_DP_VIDEO_CTL_10); reg &= ~VSYNC_POLARITY_CFG; reg |= (dp->video_info.v_sync_polarity << 1); - writel(reg, dp->reg_base + ANALOGIX_DP_VIDEO_CTL_10); + analogix_dp_write(dp, ANALOGIX_DP_VIDEO_CTL_10, reg); - reg = readl(dp->reg_base + ANALOGIX_DP_VIDEO_CTL_10); + reg = analogix_dp_read(dp, ANALOGIX_DP_VIDEO_CTL_10); reg &= ~HSYNC_POLARITY_CFG; reg |= (dp->video_info.h_sync_polarity << 0); - writel(reg, dp->reg_base + ANALOGIX_DP_VIDEO_CTL_10); + analogix_dp_write(dp, ANALOGIX_DP_VIDEO_CTL_10, reg); reg = AUDIO_MODE_SPDIF_MODE | VIDEO_MODE_SLAVE_MODE; - writel(reg, dp->reg_base + ANALOGIX_DP_SOC_GENERAL_CTL); + analogix_dp_write(dp, ANALOGIX_DP_SOC_GENERAL_CTL, reg); } void analogix_dp_enable_scrambling(struct analogix_dp_device *dp) { u32 reg; - reg = readl(dp->reg_base + ANALOGIX_DP_TRAINING_PTN_SET); + reg = analogix_dp_read(dp, ANALOGIX_DP_TRAINING_PTN_SET); reg &= ~SCRAMBLING_DISABLE; - writel(reg, dp->reg_base + ANALOGIX_DP_TRAINING_PTN_SET); + analogix_dp_write(dp, ANALOGIX_DP_TRAINING_PTN_SET, reg); } void analogix_dp_disable_scrambling(struct analogix_dp_device *dp) { u32 reg; - reg = readl(dp->reg_base + ANALOGIX_DP_TRAINING_PTN_SET); + reg = analogix_dp_read(dp, ANALOGIX_DP_TRAINING_PTN_SET); reg |= SCRAMBLING_DISABLE; - writel(reg, dp->reg_base + ANALOGIX_DP_TRAINING_PTN_SET); + analogix_dp_write(dp, ANALOGIX_DP_TRAINING_PTN_SET, reg); } void analogix_dp_enable_psr_crc(struct analogix_dp_device *dp) { - writel(PSR_VID_CRC_ENABLE, dp->reg_base + ANALOGIX_DP_CRC_CON); + analogix_dp_write(dp, ANALOGIX_DP_CRC_CON, PSR_VID_CRC_ENABLE); } static ssize_t analogix_dp_get_psr_status(struct analogix_dp_device *dp) @@ -1044,63 +986,53 @@ int analogix_dp_send_psr_spd(struct analogix_dp_device *dp, ssize_t psr_status; /* don't send info frame */ - val = readl(dp->reg_base + ANALOGIX_DP_PKT_SEND_CTL); + val = analogix_dp_read(dp, ANALOGIX_DP_PKT_SEND_CTL); val &= ~IF_EN; - writel(val, dp->reg_base + ANALOGIX_DP_PKT_SEND_CTL); + analogix_dp_write(dp, ANALOGIX_DP_PKT_SEND_CTL, val); /* configure single frame update mode */ - writel(PSR_FRAME_UP_TYPE_BURST | PSR_CRC_SEL_HARDWARE, - dp->reg_base + ANALOGIX_DP_PSR_FRAME_UPDATE_CTRL); + analogix_dp_write(dp, ANALOGIX_DP_PSR_FRAME_UPDATE_CTRL, + PSR_FRAME_UP_TYPE_BURST | PSR_CRC_SEL_HARDWARE); /* configure VSC HB0~HB3 */ - writel(vsc->sdp_header.HB0, dp->reg_base + ANALOGIX_DP_SPD_HB0); - writel(vsc->sdp_header.HB1, dp->reg_base + ANALOGIX_DP_SPD_HB1); - writel(vsc->sdp_header.HB2, dp->reg_base + ANALOGIX_DP_SPD_HB2); - writel(vsc->sdp_header.HB3, dp->reg_base + ANALOGIX_DP_SPD_HB3); + analogix_dp_write(dp, ANALOGIX_DP_SPD_HB0, vsc->sdp_header.HB0); + analogix_dp_write(dp, ANALOGIX_DP_SPD_HB1, vsc->sdp_header.HB1); + analogix_dp_write(dp, ANALOGIX_DP_SPD_HB2, vsc->sdp_header.HB2); + analogix_dp_write(dp, ANALOGIX_DP_SPD_HB3, vsc->sdp_header.HB3); /* configure reused VSC PB0~PB3, magic number from vendor */ - writel(0x00, dp->reg_base + ANALOGIX_DP_SPD_PB0); - writel(0x16, dp->reg_base + ANALOGIX_DP_SPD_PB1); - writel(0xCE, dp->reg_base + ANALOGIX_DP_SPD_PB2); - writel(0x5D, dp->reg_base + ANALOGIX_DP_SPD_PB3); + 
analogix_dp_write(dp, ANALOGIX_DP_SPD_PB0, 0x00); + analogix_dp_write(dp, ANALOGIX_DP_SPD_PB1, 0x16); + analogix_dp_write(dp, ANALOGIX_DP_SPD_PB2, 0xCE); + analogix_dp_write(dp, ANALOGIX_DP_SPD_PB3, 0x5D); /* configure DB0 / DB1 values */ - writel(vsc->db[0], dp->reg_base + ANALOGIX_DP_VSC_SHADOW_DB0); - writel(vsc->db[1], dp->reg_base + ANALOGIX_DP_VSC_SHADOW_DB1); + analogix_dp_write(dp, ANALOGIX_DP_VSC_SHADOW_DB0, vsc->db[0]); + analogix_dp_write(dp, ANALOGIX_DP_VSC_SHADOW_DB1, vsc->db[1]); /* set reuse spd inforframe */ - val = readl(dp->reg_base + ANALOGIX_DP_VIDEO_CTL_3); + val = analogix_dp_read(dp, ANALOGIX_DP_VIDEO_CTL_3); val |= REUSE_SPD_EN; - writel(val, dp->reg_base + ANALOGIX_DP_VIDEO_CTL_3); + analogix_dp_write(dp, ANALOGIX_DP_VIDEO_CTL_3, val); /* mark info frame update */ - val = readl(dp->reg_base + ANALOGIX_DP_PKT_SEND_CTL); + val = analogix_dp_read(dp, ANALOGIX_DP_PKT_SEND_CTL); val = (val | IF_UP) & ~IF_EN; - writel(val, dp->reg_base + ANALOGIX_DP_PKT_SEND_CTL); + analogix_dp_write(dp, ANALOGIX_DP_PKT_SEND_CTL, val); /* send info frame */ - val = readl(dp->reg_base + ANALOGIX_DP_PKT_SEND_CTL); + val = analogix_dp_read(dp, ANALOGIX_DP_PKT_SEND_CTL); val |= IF_EN; - writel(val, dp->reg_base + ANALOGIX_DP_PKT_SEND_CTL); + analogix_dp_write(dp, ANALOGIX_DP_PKT_SEND_CTL, val); if (!blocking) return 0; - /* - * db[1]!=0: entering PSR, wait for fully active remote frame buffer. - * db[1]==0: exiting PSR, wait for either - * (a) ACTIVE_RESYNC - the sink "must display the - * incoming active frames from the Source device with no visible - * glitches and/or artifacts", even though timings may still be - * re-synchronizing; or - * (b) INACTIVE - the transition is fully complete. - */ ret = readx_poll_timeout(analogix_dp_get_psr_status, dp, psr_status, psr_status >= 0 && ((vsc->db[1] && psr_status == DP_PSR_SINK_ACTIVE_RFB) || - (!vsc->db[1] && (psr_status == DP_PSR_SINK_ACTIVE_RESYNC || - psr_status == DP_PSR_SINK_INACTIVE))), - 1500, DP_TIMEOUT_PSR_LOOP_MS * 1000); + (!vsc->db[1] && psr_status == DP_PSR_SINK_INACTIVE)), 1500, + DP_TIMEOUT_PSR_LOOP_MS * 1000); if (ret) { dev_warn(dp->dev, "Failed to apply PSR %d\n", ret); return ret; @@ -1108,11 +1040,43 @@ int analogix_dp_send_psr_spd(struct analogix_dp_device *dp, return 0; } +void analogix_dp_phy_power_on(struct analogix_dp_device *dp) +{ + if (dp->phy_enabled) + return; + + phy_set_mode(dp->phy, PHY_MODE_DP); + phy_power_on(dp->phy); + + dp->phy_enabled = true; +} + +void analogix_dp_phy_power_off(struct analogix_dp_device *dp) +{ + if (!dp->phy_enabled) + return; + + phy_power_off(dp->phy); + + dp->phy_enabled = false; +} + +enum { + AUX_STATUS_OK, + AUX_STATUS_NACK_ERROR, + AUX_STATUS_TIMEOUT_ERROR, + AUX_STATUS_UNKNOWN_ERROR, + AUX_STATUS_MUCH_DEFER_ERROR, + AUX_STATUS_TX_SHORT_ERROR, + AUX_STATUS_RX_SHORT_ERROR, + AUX_STATUS_NACK_WITHOUT_M_ERROR, + AUX_STATUS_I2C_NACK_ERROR +}; + ssize_t analogix_dp_transfer(struct analogix_dp_device *dp, struct drm_dp_aux_msg *msg) { u32 reg; - u32 status_reg; u8 *buffer = msg->buffer; unsigned int i; int num_transferred = 0; @@ -1122,9 +1086,15 @@ ssize_t analogix_dp_transfer(struct analogix_dp_device *dp, if (WARN_ON(msg->size > 16)) return -E2BIG; + reg = analogix_dp_read(dp, ANALOGIX_DP_FUNC_EN_2); + if (reg & AUX_FUNC_EN_N) { + analogix_dp_phy_power_on(dp); + analogix_dp_init_aux(dp); + } + /* Clear AUX CH data buffer */ reg = BUF_CLR; - writel(reg, dp->reg_base + ANALOGIX_DP_BUFFER_DATA_CTL); + analogix_dp_write(dp, ANALOGIX_DP_BUFFER_DATA_CTL, reg); switch (msg->request & 
~DP_AUX_I2C_MOT) { case DP_AUX_I2C_WRITE: @@ -1152,21 +1122,21 @@ ssize_t analogix_dp_transfer(struct analogix_dp_device *dp, } reg |= AUX_LENGTH(msg->size); - writel(reg, dp->reg_base + ANALOGIX_DP_AUX_CH_CTL_1); + analogix_dp_write(dp, ANALOGIX_DP_AUX_CH_CTL_1, reg); /* Select DPCD device address */ reg = AUX_ADDR_7_0(msg->address); - writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_7_0); + analogix_dp_write(dp, ANALOGIX_DP_AUX_ADDR_7_0, reg); reg = AUX_ADDR_15_8(msg->address); - writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_15_8); + analogix_dp_write(dp, ANALOGIX_DP_AUX_ADDR_15_8, reg); reg = AUX_ADDR_19_16(msg->address); - writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_19_16); + analogix_dp_write(dp, ANALOGIX_DP_AUX_ADDR_19_16, reg); if (!(msg->request & DP_AUX_I2C_READ)) { for (i = 0; i < msg->size; i++) { reg = buffer[i]; - writel(reg, dp->reg_base + ANALOGIX_DP_BUF_DATA_0 + - 4 * i); + analogix_dp_write(dp, ANALOGIX_DP_BUF_DATA_0 + 4 * i, + reg); num_transferred++; } } @@ -1178,7 +1148,7 @@ ssize_t analogix_dp_transfer(struct analogix_dp_device *dp, if (msg->size < 1) reg |= ADDR_ONLY; - writel(reg, dp->reg_base + ANALOGIX_DP_AUX_CH_CTL_2); + analogix_dp_write(dp, ANALOGIX_DP_AUX_CH_CTL_2, reg); ret = readx_poll_timeout(readl, dp->reg_base + ANALOGIX_DP_AUX_CH_CTL_2, reg, !(reg & AUX_EN), 25, 500 * 1000); @@ -1197,30 +1167,31 @@ ssize_t analogix_dp_transfer(struct analogix_dp_device *dp, } /* Clear interrupt source for AUX CH command reply */ - writel(RPLY_RECEIV, dp->reg_base + ANALOGIX_DP_INT_STA); - - /* Clear interrupt source for AUX CH access error */ - reg = readl(dp->reg_base + ANALOGIX_DP_INT_STA); - status_reg = readl(dp->reg_base + ANALOGIX_DP_AUX_CH_STA); - if ((reg & AUX_ERR) || (status_reg & AUX_STATUS_MASK)) { - writel(AUX_ERR, dp->reg_base + ANALOGIX_DP_INT_STA); + analogix_dp_write(dp, ANALOGIX_DP_INT_STA, RPLY_RECEIV); - dev_warn(dp->dev, "AUX CH error happened: %#x (%d)\n", - status_reg & AUX_STATUS_MASK, !!(reg & AUX_ERR)); - goto aux_error; - } + reg = analogix_dp_read(dp, ANALOGIX_DP_AUX_CH_STA); + if ((reg & AUX_STATUS_MASK) == AUX_STATUS_TIMEOUT_ERROR) + return -ETIMEDOUT; if (msg->request & DP_AUX_I2C_READ) { + size_t buf_data_count; + + reg = analogix_dp_read(dp, ANALOGIX_DP_BUFFER_DATA_CTL); + buf_data_count = BUF_DATA_COUNT(reg); + + if (buf_data_count != msg->size) + return -EBUSY; + for (i = 0; i < msg->size; i++) { - reg = readl(dp->reg_base + ANALOGIX_DP_BUF_DATA_0 + - 4 * i); + reg = analogix_dp_read(dp, ANALOGIX_DP_BUF_DATA_0 + + 4 * i); buffer[i] = (unsigned char)reg; num_transferred++; } } /* Check if Rx sends defer */ - reg = readl(dp->reg_base + ANALOGIX_DP_AUX_RX_COMM); + reg = analogix_dp_read(dp, ANALOGIX_DP_AUX_RX_COMM); if (reg == AUX_RX_COMM_AUX_DEFER) msg->reply = DP_AUX_NATIVE_REPLY_DEFER; else if (reg == AUX_RX_COMM_I2C_DEFER) @@ -1232,7 +1203,7 @@ ssize_t analogix_dp_transfer(struct analogix_dp_device *dp, (msg->request & ~DP_AUX_I2C_MOT) == DP_AUX_NATIVE_READ) msg->reply = DP_AUX_NATIVE_REPLY_ACK; - return num_transferred > 0 ? num_transferred : -EBUSY; + return (num_transferred == msg->size) ? 
num_transferred : -EBUSY; aux_error: /* if aux err happen, reset aux */ @@ -1240,3 +1211,119 @@ ssize_t analogix_dp_transfer(struct analogix_dp_device *dp, return -EREMOTEIO; } + +void analogix_dp_set_video_format(struct analogix_dp_device *dp) +{ + struct video_info *video = &dp->video_info; + const struct drm_display_mode *mode = &video->mode; + unsigned int hsw, hfp, hbp, vsw, vfp, vbp; + + hsw = mode->hsync_end - mode->hsync_start; + hfp = mode->hsync_start - mode->hdisplay; + hbp = mode->htotal - mode->hsync_end; + vsw = mode->vsync_end - mode->vsync_start; + vfp = mode->vsync_start - mode->vdisplay; + vbp = mode->vtotal - mode->vsync_end; + + /* Set Video Format Parameters */ + analogix_dp_write(dp, ANALOGIX_DP_TOTAL_LINE_CFG_L, + TOTAL_LINE_CFG_L(mode->vtotal)); + analogix_dp_write(dp, ANALOGIX_DP_TOTAL_LINE_CFG_H, + TOTAL_LINE_CFG_H(mode->vtotal >> 8)); + analogix_dp_write(dp, ANALOGIX_DP_ACTIVE_LINE_CFG_L, + ACTIVE_LINE_CFG_L(mode->vdisplay)); + analogix_dp_write(dp, ANALOGIX_DP_ACTIVE_LINE_CFG_H, + ACTIVE_LINE_CFG_H(mode->vdisplay >> 8)); + analogix_dp_write(dp, ANALOGIX_DP_V_F_PORCH_CFG, + V_F_PORCH_CFG(vfp)); + analogix_dp_write(dp, ANALOGIX_DP_V_SYNC_WIDTH_CFG, + V_SYNC_WIDTH_CFG(vsw)); + analogix_dp_write(dp, ANALOGIX_DP_V_B_PORCH_CFG, + V_B_PORCH_CFG(vbp)); + analogix_dp_write(dp, ANALOGIX_DP_TOTAL_PIXEL_CFG_L, + TOTAL_PIXEL_CFG_L(mode->htotal)); + analogix_dp_write(dp, ANALOGIX_DP_TOTAL_PIXEL_CFG_H, + TOTAL_PIXEL_CFG_H(mode->htotal >> 8)); + analogix_dp_write(dp, ANALOGIX_DP_ACTIVE_PIXEL_CFG_L, + ACTIVE_PIXEL_CFG_L(mode->hdisplay)); + analogix_dp_write(dp, ANALOGIX_DP_ACTIVE_PIXEL_CFG_H, + ACTIVE_PIXEL_CFG_H(mode->hdisplay >> 8)); + analogix_dp_write(dp, ANALOGIX_DP_H_F_PORCH_CFG_L, + H_F_PORCH_CFG_L(hfp)); + analogix_dp_write(dp, ANALOGIX_DP_H_F_PORCH_CFG_H, + H_F_PORCH_CFG_H(hfp >> 8)); + analogix_dp_write(dp, ANALOGIX_DP_H_SYNC_CFG_L, + H_SYNC_CFG_L(hsw)); + analogix_dp_write(dp, ANALOGIX_DP_H_SYNC_CFG_H, + H_SYNC_CFG_H(hsw >> 8)); + analogix_dp_write(dp, ANALOGIX_DP_H_B_PORCH_CFG_L, + H_B_PORCH_CFG_L(hbp)); + analogix_dp_write(dp, ANALOGIX_DP_H_B_PORCH_CFG_H, + H_B_PORCH_CFG_H(hbp >> 8)); +} + +void analogix_dp_video_bist_enable(struct analogix_dp_device *dp) +{ + u32 reg; + + /* Enable Video BIST */ + analogix_dp_write(dp, ANALOGIX_DP_VIDEO_CTL_4, BIST_EN); + + /* + * Note that if BIST_EN is set to 1, F_SEL must be cleared to 0 + * although video format information comes from registers set by user. 
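+ * (Here those registers are the ANALOGIX_DP_*_CFG timing registers + * programmed by analogix_dp_set_video_format() above.)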
+ */ + reg = analogix_dp_read(dp, ANALOGIX_DP_VIDEO_CTL_10); + reg &= ~FORMAT_SEL; + analogix_dp_write(dp, ANALOGIX_DP_VIDEO_CTL_10, reg); +} + +void analogix_dp_audio_config_i2s(struct analogix_dp_device *dp) +{ + u32 reg; + + reg = analogix_dp_read(dp, ANALOGIX_DP_SYS_CTL_4); + reg &= ~FIX_M_AUD; + analogix_dp_write(dp, ANALOGIX_DP_SYS_CTL_4, reg); + + reg = analogix_dp_read(dp, ANALOGIX_DP_I2S_CTRL); + reg |= I2S_EN; + analogix_dp_write(dp, ANALOGIX_DP_I2S_CTRL, reg); +} + +void analogix_dp_audio_config_spdif(struct analogix_dp_device *dp) +{ + u32 reg; + + reg = analogix_dp_read(dp, ANALOGIX_DP_SYS_CTL_4); + reg &= ~FIX_M_AUD; + analogix_dp_write(dp, ANALOGIX_DP_SYS_CTL_4, reg); + + reg = analogix_dp_read(dp, ANALOGIX_DP_SPDIF_AUDIO_CTL_0); + reg |= AUD_SPDIF_EN; + analogix_dp_write(dp, ANALOGIX_DP_SPDIF_AUDIO_CTL_0, reg); +} + +void analogix_dp_audio_enable(struct analogix_dp_device *dp) +{ + u32 reg; + + reg = analogix_dp_read(dp, ANALOGIX_DP_FUNC_EN_1); + reg &= ~(AUD_FIFO_FUNC_EN_N | AUD_FUNC_EN_N); + analogix_dp_write(dp, ANALOGIX_DP_FUNC_EN_1, reg); + + reg = analogix_dp_read(dp, ANALOGIX_DP_AUD_CTL); + reg |= MISC_CTRL_RESET | DP_AUDIO_EN; + analogix_dp_write(dp, ANALOGIX_DP_AUD_CTL, reg); +} + +void analogix_dp_audio_disable(struct analogix_dp_device *dp) +{ + u32 reg; + + analogix_dp_write(dp, ANALOGIX_DP_AUD_CTL, 0); + + reg = analogix_dp_read(dp, ANALOGIX_DP_FUNC_EN_1); + reg |= AUD_FIFO_FUNC_EN_N | AUD_FUNC_EN_N; + analogix_dp_write(dp, ANALOGIX_DP_FUNC_EN_1, reg); +} diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.h b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.h index e284ee8da..df88f1ad0 100644 --- a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.h +++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.h @@ -15,9 +15,27 @@ #define ANALOGIX_DP_VIDEO_CTL_1 0x20 #define ANALOGIX_DP_VIDEO_CTL_2 0x24 #define ANALOGIX_DP_VIDEO_CTL_3 0x28 +#define ANALOGIX_DP_VIDEO_CTL_4 0x2C #define ANALOGIX_DP_VIDEO_CTL_8 0x3C #define ANALOGIX_DP_VIDEO_CTL_10 0x44 +#define ANALOGIX_DP_TOTAL_LINE_CFG_L 0x48 +#define ANALOGIX_DP_TOTAL_LINE_CFG_H 0x4C +#define ANALOGIX_DP_ACTIVE_LINE_CFG_L 0x50 +#define ANALOGIX_DP_ACTIVE_LINE_CFG_H 0x54 +#define ANALOGIX_DP_V_F_PORCH_CFG 0x58 +#define ANALOGIX_DP_V_SYNC_WIDTH_CFG 0x5C +#define ANALOGIX_DP_V_B_PORCH_CFG 0x60 +#define ANALOGIX_DP_TOTAL_PIXEL_CFG_L 0x64 +#define ANALOGIX_DP_TOTAL_PIXEL_CFG_H 0x68 +#define ANALOGIX_DP_ACTIVE_PIXEL_CFG_L 0x6C +#define ANALOGIX_DP_ACTIVE_PIXEL_CFG_H 0x70 +#define ANALOGIX_DP_H_F_PORCH_CFG_L 0x74 +#define ANALOGIX_DP_H_F_PORCH_CFG_H 0x78 +#define ANALOGIX_DP_H_SYNC_CFG_L 0x7C +#define ANALOGIX_DP_H_SYNC_CFG_H 0x80 +#define ANALOGIX_DP_H_B_PORCH_CFG_L 0x84 +#define ANALOGIX_DP_H_B_PORCH_CFG_H 0x88 #define ANALOGIX_DP_SPDIF_AUDIO_CTL_0 0xD8 @@ -27,6 +45,8 @@ #define ANALOGIX_DP_PLL_REG_4 0x9ec #define ANALOGIX_DP_PLL_REG_5 0xa00 +#define ANALOIGX_DP_SSC_REG 0x104 +#define ANALOGIX_DP_BIAS 0x124 #define ANALOGIX_DP_PD 0x12c #define ANALOGIX_DP_IF_TYPE 0x244 @@ -70,7 +90,7 @@ #define ANALOGIX_DP_SYS_CTL_2 0x604 #define ANALOGIX_DP_SYS_CTL_3 0x608 #define ANALOGIX_DP_SYS_CTL_4 0x60C - +#define ANALOGIX_DP_AUD_CTL 0x618 #define ANALOGIX_DP_PKT_SEND_CTL 0x640 #define ANALOGIX_DP_HDCP_CTL 0x648 @@ -116,8 +136,9 @@ #define ANALOGIX_DP_BUF_DATA_0 0x7C0 #define ANALOGIX_DP_SOC_GENERAL_CTL 0x800 - +#define ANALOGIX_DP_AUD_CHANNEL_CTL 0x834 #define ANALOGIX_DP_CRC_CON 0x890 +#define ANALOGIX_DP_I2S_CTRL 0x9C8 /* ANALOGIX_DP_TX_SW_RESET */ #define RESET_DP_TX (0x1 << 0) @@ -171,6 +192,11 @@ #define 
VID_CHK_UPDATE_TYPE_0 (0x0 << 4) #define REUSE_SPD_EN (0x1 << 3) +/* ANALOGIX_DP_VIDEO_CTL_4 */ +#define BIST_EN (0x1 << 3) +#define BIST_WIDTH(x) (((x) & 0x1) << 2) +#define BIST_TYPE(x) (((x) & 0x3) << 0) + /* ANALOGIX_DP_VIDEO_CTL_8 */ #define VID_HRES_TH(x) (((x) & 0xf) << 4) #define VID_VRES_TH(x) (((x) & 0xf) << 0) @@ -181,6 +207,60 @@ #define VSYNC_POLARITY_CFG (0x1 << 1) #define HSYNC_POLARITY_CFG (0x1 << 0) +/* ANALOGIX_DP_TOTAL_LINE_CFG_L */ +#define TOTAL_LINE_CFG_L(x) (((x) & 0xff) << 0) + +/* ANALOGIX_DP_TOTAL_LINE_CFG_H */ +#define TOTAL_LINE_CFG_H(x) (((x) & 0xf) << 0) + +/* ANALOGIX_DP_ACTIVE_LINE_CFG_L */ +#define ACTIVE_LINE_CFG_L(x) (((x) & 0xff) << 0) + +/* ANALOGIX_DP_ACTIVE_LINE_CFG_H */ +#define ACTIVE_LINE_CFG_H(x) (((x) & 0xf) << 0) + +/* ANALOGIX_DP_V_F_PORCH_CFG */ +#define V_F_PORCH_CFG(x) (((x) & 0xff) << 0) + +/* ANALOGIX_DP_V_SYNC_WIDTH_CFG */ +#define V_SYNC_WIDTH_CFG(x) (((x) & 0xff) << 0) + +/* ANALOGIX_DP_V_B_PORCH_CFG */ +#define V_B_PORCH_CFG(x) (((x) & 0xff) << 0) + +/* ANALOGIX_DP_TOTAL_PIXEL_CFG_L */ +#define TOTAL_PIXEL_CFG_L(x) (((x) & 0xff) << 0) + +/* ANALOGIX_DP_TOTAL_PIXEL_CFG_H */ +#define TOTAL_PIXEL_CFG_H(x) (((x) & 0x3f) << 0) + +/* ANALOGIX_DP_ACTIVE_PIXEL_CFG_L */ +#define ACTIVE_PIXEL_CFG_L(x) (((x) & 0xff) << 0) + +/* ANALOGIX_DP_ACTIVE_PIXEL_CFG_H */ +#define ACTIVE_PIXEL_CFG_H(x) (((x) & 0x3f) << 0) + +/* ANALOGIX_DP_H_F_PORCH_CFG_L */ +#define H_F_PORCH_CFG_L(x) (((x) & 0xff) << 0) + +/* ANALOGIX_DP_H_F_PORCH_CFG_H */ +#define H_F_PORCH_CFG_H(x) (((x) & 0xf) << 0) + +/* ANALOGIX_DP_H_SYNC_CFG_L */ +#define H_SYNC_CFG_L(x) (((x) & 0xff) << 0) + +/* ANALOGIX_DP_H_SYNC_CFG_H */ +#define H_SYNC_CFG_H(x) (((x) & 0xf) << 0) + +/* ANALOGIX_DP_H_B_PORCH_CFG_L */ +#define H_B_PORCH_CFG_L(x) (((x) & 0xff) << 0) + +/* ANALOGIX_DP_H_B_PORCH_CFG_H */ +#define H_B_PORCH_CFG_H(x) (((x) & 0xf) << 0) + +/* ANALOGIX_DP_SPDIF_AUDIO_CTL_0 */ +#define AUD_SPDIF_EN (0x1 << 7) + /* ANALOGIX_DP_PLL_REG_1 */ #define REF_CLK_24M (0x1 << 0) #define REF_CLK_27M (0x0 << 0) @@ -309,6 +389,10 @@ #define FIX_M_VID (0x1 << 2) #define M_VID_UPDATE_CTRL (0x3 << 0) +/* ANALOGIX_DP_AUD_CTL */ +#define MISC_CTRL_RESET (0x1 << 4) +#define DP_AUDIO_EN (0x1 << 0) + /* ANALOGIX_DP_TRAINING_PTN_SET */ #define SCRAMBLER_TYPE (0x1 << 9) #define HW_LINK_TRAINING_PATTERN (0x1 << 8) @@ -319,6 +403,7 @@ #define LINK_QUAL_PATTERN_SET_D10_2 (0x1 << 2) #define LINK_QUAL_PATTERN_SET_DISABLE (0x0 << 2) #define SW_TRAINING_PATTERN_SET_MASK (0x3 << 0) +#define SW_TRAINING_PATTERN_SET_PTN3 (0x3 << 0) #define SW_TRAINING_PATTERN_SET_PTN2 (0x2 << 0) #define SW_TRAINING_PATTERN_SET_PTN1 (0x1 << 0) #define SW_TRAINING_PATTERN_SET_NORMAL (0x0 << 0) @@ -406,6 +491,11 @@ #define VIDEO_MODE_SLAVE_MODE (0x1 << 0) #define VIDEO_MODE_MASTER_MODE (0x0 << 0) +/* ANALOGIX_DP_AUD_CHANNEL_CTL */ +#define AUD_CHANNEL_COUNT_6 (0x5 << 0) +#define AUD_CHANNEL_COUNT_4 (0x3 << 0) +#define AUD_CHANNEL_COUNT_2 (0x1 << 0) + /* ANALOGIX_DP_PKT_SEND_CTL */ #define IF_UP (0x1 << 4) #define IF_EN (0x1 << 0) @@ -414,4 +504,7 @@ #define PSR_VID_CRC_FLUSH (0x1 << 2) #define PSR_VID_CRC_ENABLE (0x1 << 0) +/* ANALOGIX_DP_I2S_CTRL */ +#define I2S_EN (0x1 << 4) + #endif /* _ANALOGIX_DP_REG_H */ diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-audio.h b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-audio.h index f72d27208..20c818225 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-audio.h +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-audio.h @@ -10,10 +10,12 @@ struct dw_hdmi_audio_data { int irq; struct dw_hdmi *hdmi; u8 
*(*get_eld)(struct dw_hdmi *hdmi); + u8 *eld; }; struct dw_hdmi_i2s_audio_data { struct dw_hdmi *hdmi; + u8 *eld; void (*write)(struct dw_hdmi *hdmi, u8 val, int offset); u8 (*read)(struct dw_hdmi *hdmi, int offset); diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.c index 70ab4fbdc..48fc36d56 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.c @@ -12,6 +12,7 @@ #include #include +#include #include #include @@ -262,6 +263,8 @@ static int dw_hdmi_cec_probe(struct platform_device *pdev) if (IS_ERR(cec->adap)) return PTR_ERR(cec->adap); + dw_hdmi_set_cec_adap(cec->hdmi, cec->adap); + /* override the module pointer */ cec->adap->owner = THIS_MODULE; diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c index 2c3c743df..268ecdf3c 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c @@ -9,6 +9,8 @@ #include #include #include +#include +#include #include #include #include @@ -18,6 +20,7 @@ #include #include #include +#include #include @@ -36,6 +39,7 @@ #include "dw-hdmi-audio.h" #include "dw-hdmi-cec.h" +#include "dw-hdmi-hdcp.h" #include "dw-hdmi.h" #define DDC_CI_ADDR 0x37 @@ -48,6 +52,11 @@ #define HDMI14_MAX_TMDSCLK 340000000 +static const unsigned int dw_hdmi_cable[] = { + EXTCON_DISP_HDMI, + EXTCON_NONE, +}; + enum hdmi_datamap { RGB444_8B = 0x01, RGB444_10B = 0x03, @@ -62,6 +71,61 @@ enum hdmi_datamap { YCbCr422_12B = 0x12, }; +/* + * Unless otherwise noted, entries in this table are 100% optimization. + * Values can be obtained from hdmi_compute_n() but that function is + * slow so we pre-compute values we expect to see. + * + * All 32k and 48k values are expected to be the same (due to the way + * the math works) for any rate that's an exact kHz. 
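+ *
+ * As a quick sanity check: the ideal N used by hdmi_compute_n() is
+ * 128 * fs / 1000, so fs = 32 kHz gives 128 * 32000 / 1000 = 4096 and
+ * fs = 48 kHz gives 6144, matching every entry below 297 MHz.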
+ */
+static const struct dw_hdmi_audio_tmds_n common_tmds_n_table[] = {
+ { .tmds = 25175000, .n_32k = 4096, .n_44k1 = 12854, .n_48k = 6144, },
+ { .tmds = 25200000, .n_32k = 4096, .n_44k1 = 5656, .n_48k = 6144, },
+ { .tmds = 27000000, .n_32k = 4096, .n_44k1 = 5488, .n_48k = 6144, },
+ { .tmds = 28320000, .n_32k = 4096, .n_44k1 = 5586, .n_48k = 6144, },
+ { .tmds = 30240000, .n_32k = 4096, .n_44k1 = 5642, .n_48k = 6144, },
+ { .tmds = 31500000, .n_32k = 4096, .n_44k1 = 5600, .n_48k = 6144, },
+ { .tmds = 32000000, .n_32k = 4096, .n_44k1 = 5733, .n_48k = 6144, },
+ { .tmds = 33750000, .n_32k = 4096, .n_44k1 = 6272, .n_48k = 6144, },
+ { .tmds = 36000000, .n_32k = 4096, .n_44k1 = 5684, .n_48k = 6144, },
+ { .tmds = 40000000, .n_32k = 4096, .n_44k1 = 5733, .n_48k = 6144, },
+ { .tmds = 49500000, .n_32k = 4096, .n_44k1 = 5488, .n_48k = 6144, },
+ { .tmds = 50000000, .n_32k = 4096, .n_44k1 = 5292, .n_48k = 6144, },
+ { .tmds = 54000000, .n_32k = 4096, .n_44k1 = 5684, .n_48k = 6144, },
+ { .tmds = 65000000, .n_32k = 4096, .n_44k1 = 7056, .n_48k = 6144, },
+ { .tmds = 68250000, .n_32k = 4096, .n_44k1 = 5376, .n_48k = 6144, },
+ { .tmds = 71000000, .n_32k = 4096, .n_44k1 = 7056, .n_48k = 6144, },
+ { .tmds = 72000000, .n_32k = 4096, .n_44k1 = 5635, .n_48k = 6144, },
+ { .tmds = 73250000, .n_32k = 4096, .n_44k1 = 14112, .n_48k = 6144, },
+ { .tmds = 74250000, .n_32k = 4096, .n_44k1 = 6272, .n_48k = 6144, },
+ { .tmds = 75000000, .n_32k = 4096, .n_44k1 = 5880, .n_48k = 6144, },
+ { .tmds = 78750000, .n_32k = 4096, .n_44k1 = 5600, .n_48k = 6144, },
+ { .tmds = 78800000, .n_32k = 4096, .n_44k1 = 5292, .n_48k = 6144, },
+ { .tmds = 79500000, .n_32k = 4096, .n_44k1 = 4704, .n_48k = 6144, },
+ { .tmds = 83500000, .n_32k = 4096, .n_44k1 = 7056, .n_48k = 6144, },
+ { .tmds = 85500000, .n_32k = 4096, .n_44k1 = 5488, .n_48k = 6144, },
+ { .tmds = 88750000, .n_32k = 4096, .n_44k1 = 14112, .n_48k = 6144, },
+ { .tmds = 97750000, .n_32k = 4096, .n_44k1 = 14112, .n_48k = 6144, },
+ { .tmds = 101000000, .n_32k = 4096, .n_44k1 = 7056, .n_48k = 6144, },
+ { .tmds = 106500000, .n_32k = 4096, .n_44k1 = 4704, .n_48k = 6144, },
+ { .tmds = 108000000, .n_32k = 4096, .n_44k1 = 5684, .n_48k = 6144, },
+ { .tmds = 115500000, .n_32k = 4096, .n_44k1 = 5712, .n_48k = 6144, },
+ { .tmds = 119000000, .n_32k = 4096, .n_44k1 = 5544, .n_48k = 6144, },
+ { .tmds = 135000000, .n_32k = 4096, .n_44k1 = 5488, .n_48k = 6144, },
+ { .tmds = 146250000, .n_32k = 4096, .n_44k1 = 6272, .n_48k = 6144, },
+ { .tmds = 148500000, .n_32k = 4096, .n_44k1 = 5488, .n_48k = 6144, },
+ { .tmds = 154000000, .n_32k = 4096, .n_44k1 = 5544, .n_48k = 6144, },
+ { .tmds = 162000000, .n_32k = 4096, .n_44k1 = 5684, .n_48k = 6144, },
+
+ /* At 297 MHz and above, the HDMI spec uses different rules for setting N */
+ { .tmds = 297000000, .n_32k = 3073, .n_44k1 = 4704, .n_48k = 5120, },
+ { .tmds = 594000000, .n_32k = 3073, .n_44k1 = 9408, .n_48k = 10240, },
+
+ /* End of table */
+ { .tmds = 0, .n_32k = 0, .n_44k1 = 0, .n_48k = 0, },
+};
+
 static const u16 csc_coeff_default[3][4] = {
 { 0x2000, 0x0000, 0x0000, 0x0000 },
 { 0x0000, 0x2000, 0x0000, 0x0000 },
@@ -98,12 +162,47 @@ static const u16 csc_coeff_rgb_full_to_rgb_limited[3][4] = {
 { 0x0000, 0x0000, 0x1b7c, 0x0020 }
 };
+static const struct drm_display_mode dw_hdmi_default_modes[] = {
+ /* 4 - 1280x720@60Hz 16:9 */
+ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1390,
+ 1430, 1650, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ /* 16 - 1920x1080@60Hz 16:9 */ + { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008, + 2052, 2200, 0, 1080, 1084, 1089, 1125, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + /* 31 - 1920x1080@50Hz 16:9 */ + { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448, + 2492, 2640, 0, 1080, 1084, 1089, 1125, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + /* 19 - 1280x720@50Hz 16:9 */ + { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1720, + 1760, 1980, 0, 720, 725, 730, 750, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + /* 17 - 720x576@50Hz 4:3 */ + { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732, + 796, 864, 0, 576, 581, 586, 625, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, }, + /* 2 - 720x480@60Hz 4:3 */ + { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736, + 798, 858, 0, 480, 489, 495, 525, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, }, +}; + struct hdmi_vmode { bool mdataenablepolarity; + unsigned int previous_pixelclock; unsigned int mpixelclock; unsigned int mpixelrepetitioninput; unsigned int mpixelrepetitionoutput; + unsigned int previous_tmdsclock; unsigned int mtmdsclock; }; @@ -112,8 +211,8 @@ struct hdmi_data_info { unsigned int enc_out_bus_format; unsigned int enc_in_encoding; unsigned int enc_out_encoding; + unsigned int quant_range; unsigned int pix_repet_factor; - unsigned int hdcp_enable; struct hdmi_vmode video_mode; bool rgb_limited_range; }; @@ -128,6 +227,9 @@ struct dw_hdmi_i2c { u8 slave_reg; bool is_regaddr; bool is_segment; + + unsigned int scl_high_ns; + unsigned int scl_low_ns; }; struct dw_hdmi_phy_data { @@ -143,6 +245,8 @@ struct dw_hdmi_phy_data { struct dw_hdmi { struct drm_connector connector; struct drm_bridge bridge; + struct drm_bridge *next_bridge; + struct platform_device *hdcp_dev; unsigned int version; @@ -156,8 +260,10 @@ struct dw_hdmi { struct hdmi_data_info hdmi_data; const struct dw_hdmi_plat_data *plat_data; + struct dw_hdcp *hdcp; int vic; + int irq; u8 edid[HDMI_EDID_LEN]; @@ -174,6 +280,13 @@ struct dw_hdmi { void __iomem *regs; bool sink_is_hdmi; bool sink_has_audio; + bool hpd_state; + bool support_hdmi; + bool force_logo; + int force_output; + + struct delayed_work work; + struct workqueue_struct *workqueue; struct pinctrl *pinctrl; struct pinctrl_state *default_state; @@ -190,10 +303,14 @@ struct dw_hdmi { spinlock_t audio_lock; struct mutex audio_mutex; + struct dentry *debugfs_dir; unsigned int sample_rate; unsigned int audio_cts; unsigned int audio_n; bool audio_enable; + bool scramble_low_rates; + + struct extcon_dev *extcon; unsigned int reg_shift; struct regmap *regm; @@ -202,10 +319,12 @@ struct dw_hdmi { struct mutex cec_notifier_mutex; struct cec_notifier *cec_notifier; + struct cec_adapter *cec_adap; hdmi_codec_plugged_cb plugged_cb; struct device *codec_dev; enum drm_connector_status last_connector_result; + bool initialized; /* hdmi is enabled before bind */ }; #define HDMI_IH_PHY_STAT0_RX_SENSE \ @@ -263,6 +382,124 @@ static void hdmi_mask_writeb(struct dw_hdmi *hdmi, u8 data, unsigned int reg, hdmi_modb(hdmi, data << shift, mask, reg); } +static bool dw_hdmi_check_output_type_changed(struct dw_hdmi *hdmi) +{ + bool sink_hdmi; + + sink_hdmi = hdmi->sink_is_hdmi; + + if 
(hdmi->force_output == 1) + hdmi->sink_is_hdmi = true; + else if (hdmi->force_output == 2) + hdmi->sink_is_hdmi = false; + else + hdmi->sink_is_hdmi = hdmi->support_hdmi; + + if (sink_hdmi != hdmi->sink_is_hdmi) + return true; + + return false; +} + +static void repo_hpd_event(struct work_struct *p_work) +{ + struct dw_hdmi *hdmi = container_of(p_work, struct dw_hdmi, work.work); + enum drm_connector_status status = hdmi->hpd_state ? + connector_status_connected : connector_status_disconnected; + u8 phy_stat = hdmi_readb(hdmi, HDMI_PHY_STAT0); + + mutex_lock(&hdmi->mutex); + if (!(phy_stat & HDMI_PHY_RX_SENSE)) + hdmi->rxsense = false; + if (phy_stat & HDMI_PHY_HPD) + hdmi->rxsense = true; + mutex_unlock(&hdmi->mutex); + + if (hdmi->bridge.dev) { + bool change; + + change = drm_helper_hpd_irq_event(hdmi->bridge.dev); + if (change && hdmi->cec_adap && + hdmi->cec_adap->devnode.registered) + cec_queue_pin_hpd_event(hdmi->cec_adap, + hdmi->hpd_state, + ktime_get()); + drm_bridge_hpd_notify(&hdmi->bridge, status); + } +} + +static bool check_hdmi_irq(struct dw_hdmi *hdmi, int intr_stat, + int phy_int_pol) +{ + int msecs; + + /* To determine whether interrupt type is HPD */ + if (!(intr_stat & HDMI_IH_PHY_STAT0_HPD)) + return false; + + if (phy_int_pol & HDMI_PHY_HPD) { + dev_dbg(hdmi->dev, "dw hdmi plug in\n"); + msecs = 150; + hdmi->hpd_state = true; + } else { + dev_dbg(hdmi->dev, "dw hdmi plug out\n"); + msecs = 20; + hdmi->hpd_state = false; + } + mod_delayed_work(hdmi->workqueue, &hdmi->work, msecs_to_jiffies(msecs)); + + return true; +} + +static void init_hpd_work(struct dw_hdmi *hdmi) +{ + hdmi->workqueue = create_workqueue("hpd_queue"); + INIT_DELAYED_WORK(&hdmi->work, repo_hpd_event); +} + +static void dw_hdmi_i2c_set_divs(struct dw_hdmi *hdmi) +{ + unsigned long clk_rate_khz; + unsigned long low_ns, high_ns; + unsigned long div_low, div_high; + + /* Standard-mode */ + if (hdmi->i2c->scl_high_ns < 4000) + high_ns = 4708; + else + high_ns = hdmi->i2c->scl_high_ns; + + if (hdmi->i2c->scl_low_ns < 4700) + low_ns = 4916; + else + low_ns = hdmi->i2c->scl_low_ns; + + /* Adjust to avoid overflow */ + clk_rate_khz = DIV_ROUND_UP(clk_get_rate(hdmi->isfr_clk), 1000); + + div_low = (clk_rate_khz * low_ns) / 1000000; + if ((clk_rate_khz * low_ns) % 1000000) + div_low++; + + div_high = (clk_rate_khz * high_ns) / 1000000; + if ((clk_rate_khz * high_ns) % 1000000) + div_high++; + + /* Maximum divider supported by hw is 0xffff */ + if (div_low > 0xffff) + div_low = 0xffff; + + if (div_high > 0xffff) + div_high = 0xffff; + + hdmi_writeb(hdmi, div_high & 0xff, HDMI_I2CM_SS_SCL_HCNT_0_ADDR); + hdmi_writeb(hdmi, (div_high >> 8) & 0xff, + HDMI_I2CM_SS_SCL_HCNT_1_ADDR); + hdmi_writeb(hdmi, div_low & 0xff, HDMI_I2CM_SS_SCL_LCNT_0_ADDR); + hdmi_writeb(hdmi, (div_low >> 8) & 0xff, + HDMI_I2CM_SS_SCL_LCNT_1_ADDR); +} + static void dw_hdmi_i2c_init(struct dw_hdmi *hdmi) { hdmi_writeb(hdmi, HDMI_PHY_I2CM_INT_ADDR_DONE_POL, @@ -276,7 +513,8 @@ static void dw_hdmi_i2c_init(struct dw_hdmi *hdmi) hdmi_writeb(hdmi, 0x00, HDMI_I2CM_SOFTRSTZ); /* Set Standard Mode speed (determined to be 100KHz on iMX6) */ - hdmi_writeb(hdmi, 0x00, HDMI_I2CM_DIV); + hdmi_modb(hdmi, HDMI_I2CM_DIV_STD_MODE, + HDMI_I2CM_DIV_FAST_STD_MODE, HDMI_I2CM_DIV); /* Set done, not acknowledged and arbitration interrupt polarities */ hdmi_writeb(hdmi, HDMI_I2CM_INT_DONE_POL, HDMI_I2CM_INT); @@ -290,6 +528,11 @@ static void dw_hdmi_i2c_init(struct dw_hdmi *hdmi) /* Mute DONE and ERROR interrupts */ hdmi_writeb(hdmi, HDMI_IH_I2CM_STAT0_ERROR | 
HDMI_IH_I2CM_STAT0_DONE, HDMI_IH_MUTE_I2CM_STAT0);
+
+ /* set SDA high level holding time */
+ hdmi_writeb(hdmi, 0x48, HDMI_I2CM_SDA_HOLD);
+
+ dw_hdmi_i2c_set_divs(hdmi);
 }
 static bool dw_hdmi_i2c_unwedge(struct dw_hdmi *hdmi)
@@ -461,6 +704,8 @@ static int dw_hdmi_i2c_xfer(struct i2c_adapter *adap,
 hdmi_writeb(hdmi, 0x00, HDMI_IH_MUTE_I2CM_STAT0);
 /* Set slave device address taken from the first I2C message */
+ if (addr == DDC_SEGMENT_ADDR && msgs[0].len == 1)
+ addr = DDC_ADDR;
 hdmi_writeb(hdmi, addr, HDMI_I2CM_SLAVE);
 /* Set slave device register address on transfer */
@@ -570,60 +815,117 @@ static void hdmi_set_cts_n(struct dw_hdmi *hdmi, unsigned int cts,
 hdmi_writeb(hdmi, n & 0xff, HDMI_AUD_N1);
 }
-static unsigned int hdmi_compute_n(unsigned int freq, unsigned long pixel_clk)
+static int hdmi_match_tmds_n_table(struct dw_hdmi *hdmi,
+ unsigned long pixel_clk,
+ unsigned long freq)
 {
- unsigned int n = (128 * freq) / 1000;
- unsigned int mult = 1;
+ const struct dw_hdmi_plat_data *plat_data = hdmi->plat_data;
+ const struct dw_hdmi_audio_tmds_n *tmds_n = NULL;
+ int i;
+
+ if (plat_data->tmds_n_table) {
+ for (i = 0; plat_data->tmds_n_table[i].tmds != 0; i++) {
+ if (pixel_clk == plat_data->tmds_n_table[i].tmds) {
+ tmds_n = &plat_data->tmds_n_table[i];
+ break;
+ }
+ }
+ }
- while (freq > 48000) {
- mult *= 2;
- freq /= 2;
+ if (tmds_n == NULL) {
+ for (i = 0; common_tmds_n_table[i].tmds != 0; i++) {
+ if (pixel_clk == common_tmds_n_table[i].tmds) {
+ tmds_n = &common_tmds_n_table[i];
+ break;
+ }
+ }
 }
+ if (tmds_n == NULL)
+ return -ENOENT;
+
 switch (freq) {
 case 32000:
- if (pixel_clk == 25175000)
- n = 4576;
- else if (pixel_clk == 27027000)
- n = 4096;
- else if (pixel_clk == 74176000 || pixel_clk == 148352000)
- n = 11648;
- else
- n = 4096;
- n *= mult;
- break;
-
+ return tmds_n->n_32k;
 case 44100:
- if (pixel_clk == 25175000)
- n = 7007;
- else if (pixel_clk == 74176000)
- n = 17836;
- else if (pixel_clk == 148352000)
- n = 8918;
- else
- n = 6272;
- n *= mult;
- break;
-
+ case 88200:
+ case 176400:
+ return (freq / 44100) * tmds_n->n_44k1;
 case 48000:
- if (pixel_clk == 25175000)
- n = 6864;
- else if (pixel_clk == 27027000)
- n = 6144;
- else if (pixel_clk == 74176000)
- n = 11648;
- else if (pixel_clk == 148352000)
- n = 5824;
- else
- n = 6144;
- n *= mult;
- break;
-
+ case 96000:
+ case 192000:
+ return (freq / 48000) * tmds_n->n_48k;
 default:
- break;
+ return -ENOENT;
+ }
+}
+
+static u64 hdmi_audio_math_diff(unsigned int freq, unsigned int n,
+ unsigned int pixel_clk)
+{
+ u64 final, diff;
+ u64 cts;
+
+ final = (u64)pixel_clk * n;
+
+ cts = final;
+ do_div(cts, 128 * freq);
+
+ diff = final - (u64)cts * (128 * freq);
+
+ return diff;
+}
+
+static unsigned int hdmi_compute_n(struct dw_hdmi *hdmi,
+ unsigned long pixel_clk,
+ unsigned long freq)
+{
+ unsigned int min_n = DIV_ROUND_UP((128 * freq), 1500);
+ unsigned int max_n = (128 * freq) / 300;
+ unsigned int ideal_n = (128 * freq) / 1000;
+ unsigned int best_n_distance = ideal_n;
+ unsigned int best_n = 0;
+ u64 best_diff = U64_MAX;
+ int n;
+
+ /* If the ideal N could satisfy the audio math, then just take it */
+ if (hdmi_audio_math_diff(freq, ideal_n, pixel_clk) == 0)
+ return ideal_n;
+
+ for (n = min_n; n <= max_n; n++) {
+ u64 diff = hdmi_audio_math_diff(freq, n, pixel_clk);
+
+ if (diff < best_diff || (diff == best_diff &&
+ abs(n - ideal_n) < best_n_distance)) {
+ best_n = n;
+ best_diff = diff;
+ best_n_distance = abs(best_n - ideal_n);
+ }
+
+ /*
+ * The best N already satisfies the audio math and would also
be + * the closest value to ideal N, so just cut the loop. + */ + if ((best_diff == 0) && (abs(n - ideal_n) > best_n_distance)) + break; } - return n; + return best_n; +} + +static unsigned int hdmi_find_n(struct dw_hdmi *hdmi, unsigned long pixel_clk, + unsigned long sample_rate) +{ + int n; + + n = hdmi_match_tmds_n_table(hdmi, pixel_clk, sample_rate); + if (n > 0) + return n; + + dev_warn(hdmi->dev, "Rate %lu missing; compute N dynamically\n", + pixel_clk); + + return hdmi_compute_n(hdmi, pixel_clk, sample_rate); } /* @@ -654,7 +956,7 @@ static void hdmi_set_clk_regenerator(struct dw_hdmi *hdmi, u8 config3; u64 tmp; - n = hdmi_compute_n(sample_rate, pixel_clk); + n = hdmi_find_n(hdmi, pixel_clk, sample_rate); config3 = hdmi_readb(hdmi, HDMI_CONFIG3_ID); @@ -756,14 +1058,6 @@ static void hdmi_enable_audio_clk(struct dw_hdmi *hdmi, bool enable) hdmi_writeb(hdmi, hdmi->mc_clkdis, HDMI_MC_CLKDIS); } -static u8 *hdmi_audio_get_eld(struct dw_hdmi *hdmi) -{ - if (!hdmi->curr_conn) - return NULL; - - return hdmi->curr_conn->eld; -} - static void dw_hdmi_ahb_audio_enable(struct dw_hdmi *hdmi) { hdmi_set_cts_n(hdmi, hdmi->audio_cts, hdmi->audio_n); @@ -1013,6 +1307,15 @@ static bool is_csc_needed(struct dw_hdmi *hdmi) is_color_space_interpolation(hdmi); } +static bool is_rgb_full_to_limited_needed(struct dw_hdmi *hdmi) +{ + if (hdmi->hdmi_data.quant_range == HDMI_QUANTIZATION_RANGE_LIMITED || + (!hdmi->hdmi_data.quant_range && hdmi->hdmi_data.rgb_limited_range)) + return true; + + return false; +} + static void dw_hdmi_update_csc_coeffs(struct dw_hdmi *hdmi) { const u16 (*csc_coeff)[3][4] = &csc_coeff_default; @@ -1035,7 +1338,7 @@ static void dw_hdmi_update_csc_coeffs(struct dw_hdmi *hdmi) csc_coeff = &csc_coeff_rgb_in_eitu709; csc_scale = 0; } else if (is_input_rgb && is_output_rgb && - hdmi->hdmi_data.rgb_limited_range) { + is_rgb_full_to_limited_needed(hdmi)) { csc_coeff = &csc_coeff_rgb_full_to_rgb_limited; } @@ -1067,7 +1370,7 @@ static void hdmi_video_csc(struct dw_hdmi *hdmi) if (is_color_space_interpolation(hdmi)) interpolation = HDMI_CSC_CFG_INTMODE_CHROMA_INT_FORMULA1; else if (is_color_space_decimation(hdmi)) - decimation = HDMI_CSC_CFG_DECMODE_CHROMA_INT_FORMULA3; + decimation = HDMI_CSC_CFG_DECMODE_CHROMA_INT_FORMULA1; switch (hdmi_bus_fmt_color_depth(hdmi->hdmi_data.enc_out_bus_format)) { case 8: @@ -1114,7 +1417,7 @@ static void hdmi_video_packetize(struct dw_hdmi *hdmi) switch (hdmi_bus_fmt_color_depth( hdmi->hdmi_data.enc_out_bus_format)) { case 8: - color_depth = 4; + color_depth = 0; output_select = HDMI_VP_CONF_OUTPUT_SELECTOR_BYPASS; break; case 10: @@ -1152,18 +1455,15 @@ static void hdmi_video_packetize(struct dw_hdmi *hdmi) } /* set the packetizer registers */ - val = ((color_depth << HDMI_VP_PR_CD_COLOR_DEPTH_OFFSET) & - HDMI_VP_PR_CD_COLOR_DEPTH_MASK) | - ((hdmi_data->pix_repet_factor << - HDMI_VP_PR_CD_DESIRED_PR_FACTOR_OFFSET) & - HDMI_VP_PR_CD_DESIRED_PR_FACTOR_MASK); + val = (color_depth << HDMI_VP_PR_CD_COLOR_DEPTH_OFFSET) & + HDMI_VP_PR_CD_COLOR_DEPTH_MASK; hdmi_writeb(hdmi, val, HDMI_VP_PR_CD); hdmi_modb(hdmi, HDMI_VP_STUFF_PR_STUFFING_STUFFING_MODE, HDMI_VP_STUFF_PR_STUFFING_MASK, HDMI_VP_STUFF); /* Data from pixel repeater block */ - if (hdmi_data->pix_repet_factor > 1) { + if (hdmi_data->pix_repet_factor > 0) { vp_conf = HDMI_VP_CONF_PR_EN_ENABLE | HDMI_VP_CONF_BYPASS_SELECT_PIX_REPEATER; } else { /* data from packetizer block */ @@ -1175,8 +1475,13 @@ static void hdmi_video_packetize(struct dw_hdmi *hdmi) HDMI_VP_CONF_PR_EN_MASK | 
HDMI_VP_CONF_BYPASS_SELECT_MASK, HDMI_VP_CONF); - hdmi_modb(hdmi, 1 << HDMI_VP_STUFF_IDEFAULT_PHASE_OFFSET, - HDMI_VP_STUFF_IDEFAULT_PHASE_MASK, HDMI_VP_STUFF); + if ((color_depth == 5 && hdmi->previous_mode.htotal % 4) || + (color_depth == 6 && hdmi->previous_mode.htotal % 2)) + hdmi_modb(hdmi, 0, HDMI_VP_STUFF_IDEFAULT_PHASE_MASK, + HDMI_VP_STUFF); + else + hdmi_modb(hdmi, 1 << HDMI_VP_STUFF_IDEFAULT_PHASE_OFFSET, + HDMI_VP_STUFF_IDEFAULT_PHASE_MASK, HDMI_VP_STUFF); hdmi_writeb(hdmi, remap_size, HDMI_VP_REMAP); @@ -1277,6 +1582,23 @@ static bool dw_hdmi_support_scdc(struct dw_hdmi *hdmi, return true; } +static int hdmi_phy_i2c_read(struct dw_hdmi *hdmi, unsigned char addr) +{ + int val; + + hdmi_writeb(hdmi, 0xFF, HDMI_IH_I2CMPHY_STAT0); + hdmi_writeb(hdmi, addr, HDMI_PHY_I2CM_ADDRESS_ADDR); + hdmi_writeb(hdmi, 0, HDMI_PHY_I2CM_DATAI_1_ADDR); + hdmi_writeb(hdmi, 0, HDMI_PHY_I2CM_DATAI_0_ADDR); + hdmi_writeb(hdmi, HDMI_PHY_I2CM_OPERATION_ADDR_READ, + HDMI_PHY_I2CM_OPERATION_ADDR); + hdmi_phy_wait_i2c_done(hdmi, 1000); + val = hdmi_readb(hdmi, HDMI_PHY_I2CM_DATAI_1_ADDR); + val = (val & 0xff) << 8; + val += hdmi_readb(hdmi, HDMI_PHY_I2CM_DATAI_0_ADDR) & 0xff; + return val; +} + /* * HDMI2.0 Specifies the following procedure for High TMDS Bit Rates: * - The Source shall suspend transmission of the TMDS clock and data @@ -1454,6 +1776,13 @@ static int hdmi_phy_configure_dwc_hdmi_3d_tx(struct dw_hdmi *hdmi, const struct dw_hdmi_mpll_config *mpll_config = pdata->mpll_cfg; const struct dw_hdmi_curr_ctrl *curr_ctrl = pdata->cur_ctr; const struct dw_hdmi_phy_config *phy_config = pdata->phy_config; + unsigned int tmdsclock = hdmi->hdmi_data.video_mode.mtmdsclock; + unsigned int depth = + hdmi_bus_fmt_color_depth(hdmi->hdmi_data.enc_out_bus_format); + + if (hdmi_bus_fmt_is_yuv420(hdmi->hdmi_data.enc_out_bus_format) && + pdata->mpll_cfg_420) + mpll_config = pdata->mpll_cfg_420; /* TOFIX Will need 420 specific PHY configuration tables */ @@ -1463,11 +1792,11 @@ static int hdmi_phy_configure_dwc_hdmi_3d_tx(struct dw_hdmi *hdmi, break; for (; curr_ctrl->mpixelclock != ~0UL; curr_ctrl++) - if (mpixelclock <= curr_ctrl->mpixelclock) + if (tmdsclock <= curr_ctrl->mpixelclock) break; for (; phy_config->mpixelclock != ~0UL; phy_config++) - if (mpixelclock <= phy_config->mpixelclock) + if (tmdsclock <= phy_config->mpixelclock) break; if (mpll_config->mpixelclock == ~0UL || @@ -1475,11 +1804,18 @@ static int hdmi_phy_configure_dwc_hdmi_3d_tx(struct dw_hdmi *hdmi, phy_config->mpixelclock == ~0UL) return -EINVAL; - dw_hdmi_phy_i2c_write(hdmi, mpll_config->res[0].cpce, + if (!hdmi_bus_fmt_is_yuv422(hdmi->hdmi_data.enc_out_bus_format)) + depth = fls(depth - 8); + else + depth = 0; + if (depth) + depth--; + + dw_hdmi_phy_i2c_write(hdmi, mpll_config->res[depth].cpce, HDMI_3D_TX_PHY_CPCE_CTRL); - dw_hdmi_phy_i2c_write(hdmi, mpll_config->res[0].gmp, + dw_hdmi_phy_i2c_write(hdmi, mpll_config->res[depth].gmp, HDMI_3D_TX_PHY_GMPCTRL); - dw_hdmi_phy_i2c_write(hdmi, curr_ctrl->curr[0], + dw_hdmi_phy_i2c_write(hdmi, curr_ctrl->curr[depth], HDMI_3D_TX_PHY_CURRCTRL); dw_hdmi_phy_i2c_write(hdmi, 0, HDMI_3D_TX_PHY_PLLPHBYCTRL); @@ -1492,10 +1828,6 @@ static int hdmi_phy_configure_dwc_hdmi_3d_tx(struct dw_hdmi *hdmi, dw_hdmi_phy_i2c_write(hdmi, phy_config->vlev_ctr, HDMI_3D_TX_PHY_VLEVCTRL); - /* Override and disable clock termination. 
*/ - dw_hdmi_phy_i2c_write(hdmi, HDMI_3D_TX_PHY_CKCALCTRL_OVERRIDE, - HDMI_3D_TX_PHY_CKCALCTRL); - return 0; } @@ -1597,14 +1929,16 @@ void dw_hdmi_phy_setup_hpd(struct dw_hdmi *hdmi, void *data) hdmi_writeb(hdmi, HDMI_IH_PHY_STAT0_HPD | HDMI_IH_PHY_STAT0_RX_SENSE, HDMI_IH_PHY_STAT0); - /* Enable cable hot plug irq. */ - hdmi_writeb(hdmi, hdmi->phy_mask, HDMI_PHY_MASK0); + if (!hdmi->next_bridge) { + /* Enable cable hot plug irq. */ + hdmi_writeb(hdmi, hdmi->phy_mask, HDMI_PHY_MASK0); - /* Clear and unmute interrupts. */ - hdmi_writeb(hdmi, HDMI_IH_PHY_STAT0_HPD | HDMI_IH_PHY_STAT0_RX_SENSE, - HDMI_IH_PHY_STAT0); - hdmi_writeb(hdmi, ~(HDMI_IH_PHY_STAT0_HPD | HDMI_IH_PHY_STAT0_RX_SENSE), - HDMI_IH_MUTE_PHY_STAT0); + /* Clear and unmute interrupts. */ + hdmi_writeb(hdmi, HDMI_IH_PHY_STAT0_HPD | HDMI_IH_PHY_STAT0_RX_SENSE, + HDMI_IH_PHY_STAT0); + hdmi_writeb(hdmi, ~(HDMI_IH_PHY_STAT0_HPD | HDMI_IH_PHY_STAT0_RX_SENSE), + HDMI_IH_MUTE_PHY_STAT0); + } } EXPORT_SYMBOL_GPL(dw_hdmi_phy_setup_hpd); @@ -1620,23 +1954,36 @@ static const struct dw_hdmi_phy_ops dw_hdmi_synopsys_phy_ops = { * HDMI TX Setup */ -static void hdmi_tx_hdcp_config(struct dw_hdmi *hdmi) +static void hdmi_tx_hdcp_config(struct dw_hdmi *hdmi, + const struct drm_display_mode *mode) { - u8 de; - - if (hdmi->hdmi_data.video_mode.mdataenablepolarity) - de = HDMI_A_VIDPOLCFG_DATAENPOL_ACTIVE_HIGH; - else - de = HDMI_A_VIDPOLCFG_DATAENPOL_ACTIVE_LOW; - - /* disable rx detect */ - hdmi_modb(hdmi, HDMI_A_HDCPCFG0_RXDETECT_DISABLE, - HDMI_A_HDCPCFG0_RXDETECT_MASK, HDMI_A_HDCPCFG0); - - hdmi_modb(hdmi, de, HDMI_A_VIDPOLCFG_DATAENPOL_MASK, HDMI_A_VIDPOLCFG); - - hdmi_modb(hdmi, HDMI_A_HDCPCFG1_ENCRYPTIONDISABLE_DISABLE, - HDMI_A_HDCPCFG1_ENCRYPTIONDISABLE_MASK, HDMI_A_HDCPCFG1); + struct hdmi_vmode *vmode = &hdmi->hdmi_data.video_mode; + u8 vsync_pol, hsync_pol, data_pol, hdmi_dvi; + + /* Configure the video polarity */ + vsync_pol = mode->flags & DRM_MODE_FLAG_PVSYNC ? + HDMI_A_VIDPOLCFG_VSYNCPOL_ACTIVE_HIGH : + HDMI_A_VIDPOLCFG_VSYNCPOL_ACTIVE_LOW; + hsync_pol = mode->flags & DRM_MODE_FLAG_PHSYNC ? + HDMI_A_VIDPOLCFG_HSYNCPOL_ACTIVE_HIGH : + HDMI_A_VIDPOLCFG_HSYNCPOL_ACTIVE_LOW; + data_pol = vmode->mdataenablepolarity ? + HDMI_A_VIDPOLCFG_DATAENPOL_ACTIVE_HIGH : + HDMI_A_VIDPOLCFG_DATAENPOL_ACTIVE_LOW; + hdmi_modb(hdmi, vsync_pol | hsync_pol | data_pol, + HDMI_A_VIDPOLCFG_VSYNCPOL_MASK | + HDMI_A_VIDPOLCFG_HSYNCPOL_MASK | + HDMI_A_VIDPOLCFG_DATAENPOL_MASK, + HDMI_A_VIDPOLCFG); + + /* Config the display mode */ + hdmi_dvi = hdmi->sink_is_hdmi ? HDMI_A_HDCPCFG0_HDMIDVI_HDMI : + HDMI_A_HDCPCFG0_HDMIDVI_DVI; + hdmi_modb(hdmi, hdmi_dvi, HDMI_A_HDCPCFG0_HDMIDVI_MASK, + HDMI_A_HDCPCFG0); + + if (hdmi->hdcp && hdmi->hdcp->hdcp_start) + hdmi->hdcp->hdcp_start(hdmi->hdcp); } static void hdmi_config_AVI(struct dw_hdmi *hdmi, @@ -1650,10 +1997,15 @@ static void hdmi_config_AVI(struct dw_hdmi *hdmi, drm_hdmi_avi_infoframe_from_display_mode(&frame, connector, mode); if (hdmi_bus_fmt_is_rgb(hdmi->hdmi_data.enc_out_bus_format)) { - drm_hdmi_avi_infoframe_quant_range(&frame, connector, mode, - hdmi->hdmi_data.rgb_limited_range ? - HDMI_QUANTIZATION_RANGE_LIMITED : - HDMI_QUANTIZATION_RANGE_FULL); + /* default range */ + if (!hdmi->hdmi_data.quant_range) + drm_hdmi_avi_infoframe_quant_range(&frame, connector, mode, + hdmi->hdmi_data.rgb_limited_range ? 
+ HDMI_QUANTIZATION_RANGE_LIMITED :
+ HDMI_QUANTIZATION_RANGE_FULL);
+ else
+ drm_hdmi_avi_infoframe_quant_range(&frame, connector, mode,
+ hdmi->hdmi_data.quant_range);
 } else {
 frame.quantization_range = HDMI_QUANTIZATION_RANGE_DEFAULT;
 frame.ycc_quantization_range =
@@ -1688,6 +2040,14 @@ static void hdmi_config_AVI(struct dw_hdmi *hdmi,
 frame.extended_colorimetry =
 HDMI_EXTENDED_COLORIMETRY_XV_YCC_709;
 break;
+ case V4L2_YCBCR_ENC_BT2020:
+ if (hdmi->hdmi_data.enc_in_encoding == V4L2_YCBCR_ENC_BT2020)
+ frame.colorimetry = HDMI_COLORIMETRY_EXTENDED;
+ else
+ frame.colorimetry = HDMI_COLORIMETRY_ITU_709;
+ frame.extended_colorimetry =
+ HDMI_EXTENDED_COLORIMETRY_BT2020;
+ break;
 default: /* Carries no data */
 frame.colorimetry = HDMI_COLORIMETRY_ITU_601;
 frame.extended_colorimetry =
@@ -1824,17 +2184,44 @@ static void hdmi_config_drm_infoframe(struct dw_hdmi *hdmi,
 const struct drm_connector *connector)
 {
 const struct drm_connector_state *conn_state = connector->state;
+ struct hdr_output_metadata *hdr_metadata;
 struct hdmi_drm_infoframe frame;
 u8 buffer[30];
 ssize_t err;
 int i;
+ /* The Dynamic Range and Mastering infoframe was introduced in v2.11a. */
+ if (hdmi->version < 0x211a) {
+ DRM_ERROR("DRM infoframe not supported\n");
+ return;
+ }
+
 if (!hdmi->plat_data->use_drm_infoframe)
 return;
 hdmi_modb(hdmi, HDMI_FC_PACKET_TX_EN_DRM_DISABLE,
 HDMI_FC_PACKET_TX_EN_DRM_MASK, HDMI_FC_PACKET_TX_EN);
+ if (!hdmi->connector.hdr_sink_metadata.hdmi_type1.eotf) {
+ DRM_DEBUG("No need to set HDR metadata in infoframe\n");
+ return;
+ }
+
+ if (!conn_state->hdr_output_metadata) {
+ DRM_DEBUG("source metadata not set yet\n");
+ return;
+ }
+
+ hdr_metadata = (struct hdr_output_metadata *)
+ conn_state->hdr_output_metadata->data;
+
+ if (!(hdmi->connector.hdr_sink_metadata.hdmi_type1.eotf &
+ BIT(hdr_metadata->hdmi_metadata_type1.eotf))) {
+ DRM_ERROR("EOTF %d not supported\n",
+ hdr_metadata->hdmi_metadata_type1.eotf);
+ return;
+ }
+
 err = drm_hdmi_infoframe_set_hdr_metadata(&frame, conn_state);
 if (err < 0)
 return;
@@ -1854,51 +2241,66 @@ static void hdmi_config_drm_infoframe(struct dw_hdmi *hdmi,
 hdmi_writeb(hdmi, 1, HDMI_FC_DRM_UP);
 hdmi_modb(hdmi, HDMI_FC_PACKET_TX_EN_DRM_ENABLE,
 HDMI_FC_PACKET_TX_EN_DRM_MASK, HDMI_FC_PACKET_TX_EN);
+
+ DRM_DEBUG("%s eotf %d end\n", __func__,
+ hdr_metadata->hdmi_metadata_type1.eotf);
 }
-static void hdmi_av_composer(struct dw_hdmi *hdmi,
- const struct drm_display_info *display,
- const struct drm_display_mode *mode)
+static unsigned int
+hdmi_get_tmdsclock(struct dw_hdmi *hdmi, unsigned long mpixelclock)
 {
- u8 inv_val, bytes;
- const struct drm_hdmi_info *hdmi_info = &display->hdmi;
- struct hdmi_vmode *vmode = &hdmi->hdmi_data.video_mode;
- int hblank, vblank, h_de_hs, v_de_vs, hsync_len, vsync_len;
- unsigned int vdisplay, hdisplay;
-
- vmode->mpixelclock = mode->clock * 1000;
-
- dev_dbg(hdmi->dev, "final pixclk = %d\n", vmode->mpixelclock);
-
- vmode->mtmdsclock = vmode->mpixelclock;
+ unsigned int tmdsclock = mpixelclock;
+ unsigned int depth =
+ hdmi_bus_fmt_color_depth(hdmi->hdmi_data.enc_out_bus_format);
 if (!hdmi_bus_fmt_is_yuv422(hdmi->hdmi_data.enc_out_bus_format)) {
- switch (hdmi_bus_fmt_color_depth(
- hdmi->hdmi_data.enc_out_bus_format)) {
+ switch (depth) {
 case 16:
- vmode->mtmdsclock = vmode->mpixelclock * 2;
+ tmdsclock = mpixelclock * 2;
 break;
 case 12:
- vmode->mtmdsclock = vmode->mpixelclock * 3 / 2;
+ tmdsclock = mpixelclock * 3 / 2;
 break;
 case 10:
- vmode->mtmdsclock = vmode->mpixelclock * 5 / 4;
+ tmdsclock = mpixelclock * 5 / 4;
+ break;
+
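+ /* e.g. 1920x1080p60 at 10 bpc: 148500 kHz * 5 / 4 = 185625 kHz on the TMDS lines */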
 default:
 break;
 }
 }
+
+ return tmdsclock;
+}
+
+static void hdmi_av_composer(struct dw_hdmi *hdmi,
+ const struct drm_display_info *display,
+ const struct drm_display_mode *mode)
+{
+ u8 inv_val, bytes;
+ const struct drm_hdmi_info *hdmi_info = &display->hdmi;
+ struct hdmi_vmode *vmode = &hdmi->hdmi_data.video_mode;
+ int hblank, vblank, h_de_hs, v_de_vs, hsync_len, vsync_len;
+ unsigned int vdisplay, hdisplay;
+
+ vmode->previous_pixelclock = vmode->mpixelclock;
+ vmode->mpixelclock = mode->crtc_clock * 1000;
+ if ((mode->flags & DRM_MODE_FLAG_3D_MASK) ==
+ DRM_MODE_FLAG_3D_FRAME_PACKING)
+ vmode->mpixelclock *= 2;
+ dev_dbg(hdmi->dev, "final pixclk = %d\n", vmode->mpixelclock);
+
+ vmode->previous_tmdsclock = vmode->mtmdsclock;
+ vmode->mtmdsclock = hdmi_get_tmdsclock(hdmi, vmode->mpixelclock);
 if (hdmi_bus_fmt_is_yuv420(hdmi->hdmi_data.enc_out_bus_format))
 vmode->mtmdsclock /= 2;
-
 dev_dbg(hdmi->dev, "final tmdsclock = %d\n", vmode->mtmdsclock);
-
- /* Set up HDMI_FC_INVIDCONF */
- inv_val = (hdmi->hdmi_data.hdcp_enable ||
- (dw_hdmi_support_scdc(hdmi, display) &&
- (vmode->mtmdsclock > HDMI14_MAX_TMDSCLK ||
- hdmi_info->scdc.scrambling.low_rates)) ?
- HDMI_FC_INVIDCONF_HDCP_KEEPOUT_ACTIVE :
- HDMI_FC_INVIDCONF_HDCP_KEEPOUT_INACTIVE);
+ /* Set up HDMI_FC_INVIDCONF
+ * Some display equipment requires that the interval
+ * between the video data and the data island be at least 58 pixels;
+ * setting fc_invidconf.HDCP_keepout (1'b1) meets that requirement.
+ */
+ inv_val = HDMI_FC_INVIDCONF_HDCP_KEEPOUT_ACTIVE;
 inv_val |= mode->flags & DRM_MODE_FLAG_PVSYNC ?
 HDMI_FC_INVIDCONF_VSYNC_IN_POLARITY_ACTIVE_HIGH :
@@ -1964,7 +2366,8 @@ static void hdmi_av_composer(struct dw_hdmi *hdmi,
 /* Scrambling Control */
 if (dw_hdmi_support_scdc(hdmi, display)) {
 if (vmode->mtmdsclock > HDMI14_MAX_TMDSCLK ||
- hdmi_info->scdc.scrambling.low_rates) {
+ (hdmi_info->scdc.scrambling.low_rates &&
+ hdmi->scramble_low_rates)) {
 /*
 * HDMI2.0 Specifies the following procedure:
 * After the Source Device has determined that
@@ -1998,6 +2401,8 @@ static void hdmi_av_composer(struct dw_hdmi *hdmi,
 HDMI_MC_SWRSTZ);
 drm_scdc_set_scrambling(hdmi->ddc, 0);
 }
+ } else {
+ hdmi_writeb(hdmi, 0, HDMI_FC_SCRAMBLER_CTRL);
 }
 /* Set up horizontal active pixel width */
@@ -2055,6 +2460,12 @@ static void dw_hdmi_enable_video_path(struct dw_hdmi *hdmi)
 hdmi->mc_clkdis &= ~HDMI_MC_CLKDIS_TMDSCLK_DISABLE;
 hdmi_writeb(hdmi, hdmi->mc_clkdis, HDMI_MC_CLKDIS);
+ /* Enable pixel repetition path */
+ if (hdmi->hdmi_data.video_mode.mpixelrepetitioninput) {
+ hdmi->mc_clkdis &= ~HDMI_MC_CLKDIS_PREPCLK_DISABLE;
+ hdmi_writeb(hdmi, hdmi->mc_clkdis, HDMI_MC_CLKDIS);
+ }
+
 /* Enable csc path */
 if (is_csc_needed(hdmi)) {
 hdmi->mc_clkdis &= ~HDMI_MC_CLKDIS_CSCCLK_DISABLE;
@@ -2130,6 +2541,7 @@ static int dw_hdmi_setup(struct dw_hdmi *hdmi,
 const struct drm_display_mode *mode)
 {
 int ret;
+ void *data = hdmi->plat_data->phy_data;
 hdmi_disable_overflow_interrupts(hdmi);
@@ -2141,48 +2553,91 @@ static int dw_hdmi_setup(struct dw_hdmi *hdmi,
 dev_dbg(hdmi->dev, "CEA mode used vic=%d\n", hdmi->vic);
 }
- if ((hdmi->vic == 6) || (hdmi->vic == 7) ||
- (hdmi->vic == 21) || (hdmi->vic == 22) ||
- (hdmi->vic == 2) || (hdmi->vic == 3) ||
- (hdmi->vic == 17) || (hdmi->vic == 18))
+ if (hdmi->plat_data->get_enc_out_encoding)
+ hdmi->hdmi_data.enc_out_encoding =
+ hdmi->plat_data->get_enc_out_encoding(data);
+ else if ((hdmi->vic == 6) || (hdmi->vic == 7) ||
+ (hdmi->vic == 21) || (hdmi->vic == 22) ||
+ (hdmi->vic == 2) || (hdmi->vic == 3) ||
+ (hdmi->vic == 17) ||
(hdmi->vic == 18)) hdmi->hdmi_data.enc_out_encoding = V4L2_YCBCR_ENC_601; else hdmi->hdmi_data.enc_out_encoding = V4L2_YCBCR_ENC_709; - hdmi->hdmi_data.video_mode.mpixelrepetitionoutput = 0; - hdmi->hdmi_data.video_mode.mpixelrepetitioninput = 0; + if (mode->flags & DRM_MODE_FLAG_DBLCLK) { + hdmi->hdmi_data.video_mode.mpixelrepetitionoutput = 1; + hdmi->hdmi_data.video_mode.mpixelrepetitioninput = 1; + } else { + hdmi->hdmi_data.video_mode.mpixelrepetitionoutput = 0; + hdmi->hdmi_data.video_mode.mpixelrepetitioninput = 0; + } + /* TOFIX: Get input format from plat data or fallback to RGB888 */ + if (hdmi->plat_data->get_input_bus_format) + hdmi->hdmi_data.enc_in_bus_format = + hdmi->plat_data->get_input_bus_format(data); + else if (hdmi->plat_data->input_bus_format) + hdmi->hdmi_data.enc_in_bus_format = + hdmi->plat_data->input_bus_format; + else + hdmi->hdmi_data.enc_in_bus_format = + MEDIA_BUS_FMT_RGB888_1X24; - if (hdmi->hdmi_data.enc_in_bus_format == MEDIA_BUS_FMT_FIXED) - hdmi->hdmi_data.enc_in_bus_format = MEDIA_BUS_FMT_RGB888_1X24; + /* TOFIX: Default to RGB888 output format */ + if (hdmi->plat_data->get_output_bus_format) + hdmi->hdmi_data.enc_out_bus_format = + hdmi->plat_data->get_output_bus_format(data); + else + hdmi->hdmi_data.enc_out_bus_format = + MEDIA_BUS_FMT_RGB888_1X24; /* TOFIX: Get input encoding from plat data or fallback to none */ - if (hdmi->plat_data->input_bus_encoding) + if (hdmi->plat_data->get_enc_in_encoding) + hdmi->hdmi_data.enc_in_encoding = + hdmi->plat_data->get_enc_in_encoding(data); + else if (hdmi->plat_data->input_bus_encoding) hdmi->hdmi_data.enc_in_encoding = hdmi->plat_data->input_bus_encoding; else hdmi->hdmi_data.enc_in_encoding = V4L2_YCBCR_ENC_DEFAULT; - if (hdmi->hdmi_data.enc_out_bus_format == MEDIA_BUS_FMT_FIXED) - hdmi->hdmi_data.enc_out_bus_format = MEDIA_BUS_FMT_RGB888_1X24; + + if (hdmi->plat_data->get_quant_range) + hdmi->hdmi_data.quant_range = + hdmi->plat_data->get_quant_range(data); hdmi->hdmi_data.rgb_limited_range = hdmi->sink_is_hdmi && drm_default_rgb_quant_range(mode) == HDMI_QUANTIZATION_RANGE_LIMITED; - hdmi->hdmi_data.pix_repet_factor = 0; - hdmi->hdmi_data.hdcp_enable = 0; + if (!hdmi->sink_is_hdmi) + hdmi->hdmi_data.quant_range = HDMI_QUANTIZATION_RANGE_FULL; + + /* + * According to the dw-hdmi specification 6.4.2 + * vp_pr_cd[3:0]: + * 0000b: No pixel repetition (pixel sent only once) + * 0001b: Pixel sent two times (pixel repeated once) + */ + hdmi->hdmi_data.pix_repet_factor = + (mode->flags & DRM_MODE_FLAG_DBLCLK) ? 
1 : 0; hdmi->hdmi_data.video_mode.mdataenablepolarity = true; /* HDMI Initialization Step B.1 */ hdmi_av_composer(hdmi, &connector->display_info, mode); /* HDMI Initializateion Step B.2 */ - ret = hdmi->phy.ops->init(hdmi, hdmi->phy.data, - &connector->display_info, - &hdmi->previous_mode); - if (ret) - return ret; - hdmi->phy.enabled = true; + if (!hdmi->phy.enabled || + hdmi->hdmi_data.video_mode.previous_pixelclock != + hdmi->hdmi_data.video_mode.mpixelclock || + hdmi->hdmi_data.video_mode.previous_tmdsclock != + hdmi->hdmi_data.video_mode.mtmdsclock) { + ret = hdmi->phy.ops->init(hdmi, hdmi->phy.data, + &connector->display_info, + &hdmi->previous_mode); + if (ret) + return ret; + hdmi->phy.enabled = true; + } /* HDMI Initialization Step B.3 */ dw_hdmi_enable_video_path(hdmi); @@ -2210,7 +2665,7 @@ static int dw_hdmi_setup(struct dw_hdmi *hdmi, hdmi_video_packetize(hdmi); hdmi_video_csc(hdmi); hdmi_video_sample(hdmi); - hdmi_tx_hdcp_config(hdmi); + hdmi_tx_hdcp_config(hdmi, mode); dw_hdmi_clear_overflow(hdmi); @@ -2286,6 +2741,8 @@ static void dw_hdmi_poweroff(struct dw_hdmi *hdmi) hdmi->phy.enabled = false; } + if (hdmi->hdcp && hdmi->hdcp->hdcp_stop) + hdmi->hdcp->hdcp_stop(hdmi->hdcp); hdmi->bridge_is_on = false; } @@ -2303,6 +2760,10 @@ static void dw_hdmi_update_power(struct dw_hdmi *hdmi) } if (force == DRM_FORCE_OFF) { + if (hdmi->initialized) { + hdmi->initialized = false; + hdmi->disabled = true; + } if (hdmi->bridge_is_on) dw_hdmi_poweroff(hdmi); } else { @@ -2335,8 +2796,15 @@ static enum drm_connector_status dw_hdmi_detect(struct dw_hdmi *hdmi) { enum drm_connector_status result; - result = hdmi->phy.ops->read_hpd(hdmi, hdmi->phy.data); + if (!hdmi->force_logo) { + mutex_lock(&hdmi->mutex); + hdmi->force = DRM_FORCE_UNSPECIFIED; + dw_hdmi_update_power(hdmi); + dw_hdmi_update_phy_mask(hdmi); + mutex_unlock(&hdmi->mutex); + } + result = hdmi->phy.ops->read_hpd(hdmi, hdmi->phy.data); mutex_lock(&hdmi->mutex); if (result != hdmi->last_connector_result) { dev_dbg(hdmi->dev, "read_hpd result: %d", result); @@ -2346,6 +2814,11 @@ static enum drm_connector_status dw_hdmi_detect(struct dw_hdmi *hdmi) } mutex_unlock(&hdmi->mutex); + if (result == connector_status_connected) + extcon_set_state_sync(hdmi->extcon, EXTCON_DISP_HDMI, true); + else + extcon_set_state_sync(hdmi->extcon, EXTCON_DISP_HDMI, false); + return result; } @@ -2366,7 +2839,7 @@ static struct edid *dw_hdmi_get_edid(struct dw_hdmi *hdmi, dev_dbg(hdmi->dev, "got edid: width[%d] x height[%d]\n", edid->width_cm, edid->height_cm); - hdmi->sink_is_hdmi = drm_detect_hdmi_monitor(edid); + hdmi->support_hdmi = drm_detect_hdmi_monitor(edid); hdmi->sink_has_audio = drm_detect_monitor_audio(edid); return edid; @@ -2384,21 +2857,105 @@ dw_hdmi_connector_detect(struct drm_connector *connector, bool force) return dw_hdmi_detect(hdmi); } +static int +dw_hdmi_update_hdr_property(struct drm_connector *connector) +{ + struct drm_device *dev = connector->dev; + struct dw_hdmi *hdmi = container_of(connector, struct dw_hdmi, + connector); + void *data = hdmi->plat_data->phy_data; + const struct hdr_static_metadata *metadata = + &connector->hdr_sink_metadata.hdmi_type1; + size_t size = sizeof(*metadata); + struct drm_property *property; + struct drm_property_blob *blob; + int ret; + + if (hdmi->plat_data->get_hdr_property) + property = hdmi->plat_data->get_hdr_property(data); + else + return -EINVAL; + + if (hdmi->plat_data->get_hdr_blob) + blob = hdmi->plat_data->get_hdr_blob(data); + else + return -EINVAL; + + ret = 
drm_property_replace_global_blob(dev, &blob, size, metadata,
+ &connector->base, property);
+ return ret;
+}
+
 static int dw_hdmi_connector_get_modes(struct drm_connector *connector)
 {
 struct dw_hdmi *hdmi = container_of(connector, struct dw_hdmi,
 connector);
+ struct hdr_static_metadata *metadata =
+ &connector->hdr_sink_metadata.hdmi_type1;
 struct edid *edid;
- int ret;
+ struct drm_display_mode *mode;
+ struct drm_display_info *info = &connector->display_info;
+ int i, ret = 0;
+ memset(metadata, 0, sizeof(*metadata));
 edid = dw_hdmi_get_edid(hdmi, connector);
- if (!edid)
- return 0;
+ if (edid) {
+ dev_dbg(hdmi->dev, "got edid: width[%d] x height[%d]\n",
+ edid->width_cm, edid->height_cm);
+ drm_connector_update_edid_property(connector, edid);
+ cec_notifier_set_phys_addr_from_edid(hdmi->cec_notifier, edid);
+ ret = drm_add_edid_modes(connector, edid);
+ if (hdmi->plat_data->get_yuv422_format)
+ hdmi->plat_data->get_yuv422_format(connector, edid);
+ dw_hdmi_update_hdr_property(connector);
+ kfree(edid);
+ } else {
+ hdmi->support_hdmi = true;
+ hdmi->sink_has_audio = true;
+ for (i = 0; i < ARRAY_SIZE(dw_hdmi_default_modes); i++) {
+ const struct drm_display_mode *ptr =
+ &dw_hdmi_default_modes[i];
+
+ mode = drm_mode_duplicate(connector->dev, ptr);
+ if (mode) {
+ if (!i) {
+ mode->type = DRM_MODE_TYPE_PREFERRED;
+ mode->picture_aspect_ratio =
+ HDMI_PICTURE_ASPECT_NONE;
+ }
+ drm_mode_probed_add(connector, mode);
+ ret++;
+ }
+ }
+ info->edid_hdmi_dc_modes = 0;
+ info->hdmi.y420_dc_modes = 0;
+ info->color_formats = 0;
+
+ dev_info(hdmi->dev, "failed to get edid\n");
+ }
+ dw_hdmi_check_output_type_changed(hdmi);
+
+ return ret;
+}
+
+static struct drm_encoder *
+dw_hdmi_connector_best_encoder(struct drm_connector *connector)
+{
+ struct dw_hdmi *hdmi = container_of(connector, struct dw_hdmi,
+ connector);
+
+ return hdmi->bridge.encoder;
+}
+
+static bool dw_hdmi_color_changed(struct drm_connector *connector)
+{
+ struct dw_hdmi *hdmi = container_of(connector, struct dw_hdmi,
+ connector);
+ void *data = hdmi->plat_data->phy_data;
+ bool ret = false;
- drm_connector_update_edid_property(connector, edid);
- cec_notifier_set_phys_addr_from_edid(hdmi->cec_notifier, edid);
- ret = drm_add_edid_modes(connector, edid);
- kfree(edid);
+ if (hdmi->plat_data->get_color_changed)
+ ret = hdmi->plat_data->get_color_changed(data);
 return ret;
 }
@@ -2427,11 +2984,54 @@ static int dw_hdmi_connector_atomic_check(struct drm_connector *connector,
 drm_atomic_get_new_connector_state(state, connector);
 struct drm_crtc *crtc = new_state->crtc;
 struct drm_crtc_state *crtc_state;
+ struct dw_hdmi *hdmi = container_of(connector, struct dw_hdmi,
+ connector);
+ struct drm_display_mode *mode = NULL;
+ void *data = hdmi->plat_data->phy_data;
+ struct hdmi_vmode *vmode = &hdmi->hdmi_data.video_mode;
+ unsigned int in_bus_format = hdmi->hdmi_data.enc_in_bus_format;
+ unsigned int out_bus_format = hdmi->hdmi_data.enc_out_bus_format;
+ bool color_changed = false;
 if (!crtc)
 return 0;
- if (!hdr_metadata_equal(old_state, new_state)) {
+ /*
+ * If HDMI was enabled in uboot, we need to record the
+ * drm_display_mode and set the phy status to enabled.
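+ * The recorded pixel/TMDS clocks also let dw_hdmi_setup() skip the
+ * redundant PHY re-init when the bootloader-programmed mode is reused.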
+ */ + if (!vmode->mpixelclock) { + crtc_state = drm_atomic_get_crtc_state(state, crtc); + if (hdmi->plat_data->get_enc_in_encoding) + hdmi->hdmi_data.enc_in_encoding = + hdmi->plat_data->get_enc_in_encoding(data); + if (hdmi->plat_data->get_enc_out_encoding) + hdmi->hdmi_data.enc_out_encoding = + hdmi->plat_data->get_enc_out_encoding(data); + if (hdmi->plat_data->get_input_bus_format) + hdmi->hdmi_data.enc_in_bus_format = + hdmi->plat_data->get_input_bus_format(data); + if (hdmi->plat_data->get_output_bus_format) + hdmi->hdmi_data.enc_out_bus_format = + hdmi->plat_data->get_output_bus_format(data); + + mode = &crtc_state->mode; + memcpy(&hdmi->previous_mode, mode, sizeof(hdmi->previous_mode)); + vmode->mpixelclock = mode->crtc_clock * 1000; + vmode->previous_pixelclock = mode->clock; + vmode->previous_tmdsclock = mode->clock; + vmode->mtmdsclock = hdmi_get_tmdsclock(hdmi, + vmode->mpixelclock); + if (hdmi_bus_fmt_is_yuv420(hdmi->hdmi_data.enc_out_bus_format)) + vmode->mtmdsclock /= 2; + + if (in_bus_format != hdmi->hdmi_data.enc_in_bus_format || + out_bus_format != hdmi->hdmi_data.enc_out_bus_format) + color_changed = true; + } + + if (!hdr_metadata_equal(old_state, new_state) || + dw_hdmi_color_changed(connector) || color_changed) { crtc_state = drm_atomic_get_crtc_state(state, crtc); if (IS_ERR(crtc_state)) return PTR_ERR(crtc_state); @@ -2442,15 +3042,108 @@ static int dw_hdmi_connector_atomic_check(struct drm_connector *connector, return 0; } -static void dw_hdmi_connector_force(struct drm_connector *connector) +static int +dw_hdmi_atomic_connector_set_property(struct drm_connector *connector, + struct drm_connector_state *state, + struct drm_property *property, + uint64_t val) { struct dw_hdmi *hdmi = container_of(connector, struct dw_hdmi, connector); + const struct dw_hdmi_property_ops *ops = + hdmi->plat_data->property_ops; - mutex_lock(&hdmi->mutex); - hdmi->force = connector->force; - dw_hdmi_update_power(hdmi); - dw_hdmi_update_phy_mask(hdmi); + if (ops && ops->set_property) + return ops->set_property(connector, state, property, + val, hdmi->plat_data->phy_data); + else + return -EINVAL; +} + +static int +dw_hdmi_atomic_connector_get_property(struct drm_connector *connector, + const struct drm_connector_state *state, + struct drm_property *property, + uint64_t *val) +{ + struct dw_hdmi *hdmi = container_of(connector, struct dw_hdmi, + connector); + const struct dw_hdmi_property_ops *ops = + hdmi->plat_data->property_ops; + + if (ops && ops->get_property) + return ops->get_property(connector, state, property, + val, hdmi->plat_data->phy_data); + else + return -EINVAL; +} + +static int +dw_hdmi_connector_set_property(struct drm_connector *connector, + struct drm_property *property, uint64_t val) +{ + return dw_hdmi_atomic_connector_set_property(connector, NULL, + property, val); +} + +void dw_hdmi_set_quant_range(struct dw_hdmi *hdmi) +{ + if (!hdmi->bridge_is_on) + return; + + hdmi_writeb(hdmi, HDMI_FC_GCP_SET_AVMUTE, HDMI_FC_GCP); + dw_hdmi_setup(hdmi, hdmi->curr_conn, &hdmi->previous_mode); + hdmi_writeb(hdmi, HDMI_FC_GCP_CLEAR_AVMUTE, HDMI_FC_GCP); +} +EXPORT_SYMBOL_GPL(dw_hdmi_set_quant_range); + +void dw_hdmi_set_output_type(struct dw_hdmi *hdmi, u64 val) +{ + hdmi->force_output = val; + + if (!dw_hdmi_check_output_type_changed(hdmi)) + return; + + if (!hdmi->bridge_is_on) + return; + + hdmi_writeb(hdmi, HDMI_FC_GCP_SET_AVMUTE, HDMI_FC_GCP); + dw_hdmi_setup(hdmi, hdmi->curr_conn, &hdmi->previous_mode); + hdmi_writeb(hdmi, HDMI_FC_GCP_CLEAR_AVMUTE, HDMI_FC_GCP); +} 
+EXPORT_SYMBOL_GPL(dw_hdmi_set_output_type);
+
+bool dw_hdmi_get_output_whether_hdmi(struct dw_hdmi *hdmi)
+{
+ return hdmi->sink_is_hdmi;
+}
+EXPORT_SYMBOL_GPL(dw_hdmi_get_output_whether_hdmi);
+
+int dw_hdmi_get_output_type_cap(struct dw_hdmi *hdmi)
+{
+ return hdmi->support_hdmi;
+}
+EXPORT_SYMBOL_GPL(dw_hdmi_get_output_type_cap);
+
+static void dw_hdmi_connector_force(struct drm_connector *connector)
+{
+ struct dw_hdmi *hdmi = container_of(connector, struct dw_hdmi,
+ connector);
+
+ mutex_lock(&hdmi->mutex);
+
+ if (hdmi->force != connector->force) {
+ if (!hdmi->disabled && connector->force == DRM_FORCE_OFF)
+ extcon_set_state_sync(hdmi->extcon, EXTCON_DISP_HDMI,
+ false);
+ else if (hdmi->disabled && connector->force == DRM_FORCE_ON)
+ extcon_set_state_sync(hdmi->extcon, EXTCON_DISP_HDMI,
+ true);
+ }
+
+ hdmi->force = connector->force;
+ dw_hdmi_update_power(hdmi);
+ dw_hdmi_update_phy_mask(hdmi);
 mutex_unlock(&hdmi->mutex);
 }
@@ -2460,15 +3153,98 @@ static const struct drm_connector_funcs dw_hdmi_connector_funcs = {
 .destroy = drm_connector_cleanup,
 .force = dw_hdmi_connector_force,
 .reset = drm_atomic_helper_connector_reset,
+ .set_property = dw_hdmi_connector_set_property,
 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .atomic_set_property = dw_hdmi_atomic_connector_set_property,
+ .atomic_get_property = dw_hdmi_atomic_connector_get_property,
 };
 static const struct drm_connector_helper_funcs dw_hdmi_connector_helper_funcs = {
 .get_modes = dw_hdmi_connector_get_modes,
+ .best_encoder = dw_hdmi_connector_best_encoder,
 .atomic_check = dw_hdmi_connector_atomic_check,
 };
+static void dw_hdmi_attach_properties(struct dw_hdmi *hdmi)
+{
+ unsigned int color = MEDIA_BUS_FMT_RGB888_1X24;
+ int video_mapping, colorspace;
+ enum drm_connector_status connect_status =
+ hdmi->phy.ops->read_hpd(hdmi, hdmi->phy.data);
+ const struct dw_hdmi_property_ops *ops =
+ hdmi->plat_data->property_ops;
+
+ if (connect_status == connector_status_connected) {
+ video_mapping = (hdmi_readb(hdmi, HDMI_TX_INVID0) &
+ HDMI_TX_INVID0_VIDEO_MAPPING_MASK);
+ colorspace = (hdmi_readb(hdmi, HDMI_FC_AVICONF0) &
+ HDMI_FC_AVICONF0_PIX_FMT_MASK);
+ switch (video_mapping) {
+ case 0x01:
+ color = MEDIA_BUS_FMT_RGB888_1X24;
+ break;
+ case 0x03:
+ color = MEDIA_BUS_FMT_RGB101010_1X30;
+ break;
+ case 0x09:
+ if (colorspace == HDMI_COLORSPACE_YUV420)
+ color = MEDIA_BUS_FMT_UYYVYY8_0_5X24;
+ else if (colorspace == HDMI_COLORSPACE_YUV422)
+ color = MEDIA_BUS_FMT_UYVY8_1X16;
+ else
+ color = MEDIA_BUS_FMT_YUV8_1X24;
+ break;
+ case 0x0b:
+ if (colorspace == HDMI_COLORSPACE_YUV420)
+ color = MEDIA_BUS_FMT_UYYVYY10_0_5X30;
+ else if (colorspace == HDMI_COLORSPACE_YUV422)
+ color = MEDIA_BUS_FMT_UYVY10_1X20;
+ else
+ color = MEDIA_BUS_FMT_YUV10_1X30;
+ break;
+ case 0x14:
+ color = MEDIA_BUS_FMT_UYVY10_1X20;
+ break;
+ case 0x16:
+ color = MEDIA_BUS_FMT_UYVY8_1X16;
+ break;
+ default:
+ color = MEDIA_BUS_FMT_RGB888_1X24;
+ dev_err(hdmi->dev, "unexpected mapping: 0x%x\n",
+ video_mapping);
+ }
+
+ hdmi->hdmi_data.enc_in_bus_format = color;
+ hdmi->hdmi_data.enc_out_bus_format = color;
+ /*
+ * input format will be set as yuv444 when output
+ * format is yuv422
+ */
+ if (color == MEDIA_BUS_FMT_UYVY10_1X20)
+ hdmi->hdmi_data.enc_in_bus_format =
+ MEDIA_BUS_FMT_YUV10_1X30;
+ else if (color == MEDIA_BUS_FMT_UYVY8_1X16)
+ hdmi->hdmi_data.enc_in_bus_format =
+ MEDIA_BUS_FMT_YUV8_1X24;
+ }
+
+ if (ops && ops->attach_properties)
+ return
ops->attach_properties(&hdmi->connector, + color, hdmi->version, + hdmi->plat_data->phy_data); +} + +static void dw_hdmi_destroy_properties(struct dw_hdmi *hdmi) +{ + const struct dw_hdmi_property_ops *ops = + hdmi->plat_data->property_ops; + + if (ops && ops->destroy_properties) + return ops->destroy_properties(&hdmi->connector, + hdmi->plat_data->phy_data); +} + static int dw_hdmi_connector_create(struct dw_hdmi *hdmi) { struct drm_connector *connector = &hdmi->connector; @@ -2505,6 +3281,8 @@ static int dw_hdmi_connector_create(struct dw_hdmi *hdmi) drm_connector_attach_encoder(connector, hdmi->bridge.encoder); + dw_hdmi_attach_properties(hdmi); + cec_fill_conn_info_from_drm(&conn_info, connector); notifier = cec_notifier_conn_register(hdmi->dev, NULL, &conn_info); @@ -2780,16 +3558,36 @@ static int dw_hdmi_bridge_atomic_check(struct drm_bridge *bridge, struct drm_connector_state *conn_state) { struct dw_hdmi *hdmi = bridge->driver_private; + void *data = hdmi->plat_data->phy_data; - hdmi->hdmi_data.enc_out_bus_format = - bridge_state->output_bus_cfg.format; + if (bridge_state->output_bus_cfg.format == MEDIA_BUS_FMT_FIXED) { + if (hdmi->plat_data->get_output_bus_format) + hdmi->hdmi_data.enc_out_bus_format = + hdmi->plat_data->get_output_bus_format(data); + else + hdmi->hdmi_data.enc_out_bus_format = + MEDIA_BUS_FMT_RGB888_1X24; + + if (hdmi->plat_data->get_input_bus_format) + hdmi->hdmi_data.enc_in_bus_format = + hdmi->plat_data->get_input_bus_format(data); + else if (hdmi->plat_data->input_bus_format) + hdmi->hdmi_data.enc_in_bus_format = + hdmi->plat_data->input_bus_format; + else + hdmi->hdmi_data.enc_in_bus_format = + MEDIA_BUS_FMT_RGB888_1X24; + } else { + hdmi->hdmi_data.enc_out_bus_format = + bridge_state->output_bus_cfg.format; - hdmi->hdmi_data.enc_in_bus_format = - bridge_state->input_bus_cfg.format; + hdmi->hdmi_data.enc_in_bus_format = + bridge_state->input_bus_cfg.format; - dev_dbg(hdmi->dev, "input format 0x%04x, output format 0x%04x\n", - bridge_state->input_bus_cfg.format, - bridge_state->output_bus_cfg.format); + dev_dbg(hdmi->dev, "input format 0x%04x, output format 0x%04x\n", + bridge_state->input_bus_cfg.format, + bridge_state->output_bus_cfg.format); + } return 0; } @@ -2798,10 +3596,22 @@ static int dw_hdmi_bridge_attach(struct drm_bridge *bridge, enum drm_bridge_attach_flags flags) { struct dw_hdmi *hdmi = bridge->driver_private; + int ret; if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) return 0; + if (hdmi->next_bridge) { + hdmi->next_bridge->encoder = bridge->encoder; + ret = drm_bridge_attach(bridge->encoder, hdmi->next_bridge, bridge, flags); + if (ret) { + DRM_ERROR("Failed to attach bridge with dw-hdmi\n"); + return ret; + } + + return 0; + } + return dw_hdmi_connector_create(hdmi); } @@ -2821,17 +3631,16 @@ dw_hdmi_bridge_mode_valid(struct drm_bridge *bridge, const struct drm_display_mode *mode) { struct dw_hdmi *hdmi = bridge->driver_private; + struct drm_connector *connector = &hdmi->connector; const struct dw_hdmi_plat_data *pdata = hdmi->plat_data; enum drm_mode_status mode_status = MODE_OK; - /* We don't support double-clocked modes */ - if (mode->flags & DRM_MODE_FLAG_DBLCLK) - return MODE_BAD; + if (hdmi->next_bridge) + return MODE_OK; if (pdata->mode_valid) - mode_status = pdata->mode_valid(hdmi, pdata->priv_data, info, - mode); - + mode_status = pdata->mode_valid(connector, pdata->priv_data, + info, mode); return mode_status; } @@ -2912,6 +3721,12 @@ static const struct drm_bridge_funcs dw_hdmi_bridge_funcs = { .get_edid = dw_hdmi_bridge_get_edid, }; 
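+/*
+ * Hands the CEC adapter created by dw-hdmi-cec back to the core (see the
+ * dw_hdmi_cec_probe() hunk above), so repo_hpd_event() can queue pin HPD
+ * events on plug/unplug.
+ */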
+void dw_hdmi_set_cec_adap(struct dw_hdmi *hdmi, struct cec_adapter *adap) +{ + hdmi->cec_adap = adap; +} +EXPORT_SYMBOL_GPL(dw_hdmi_set_cec_adap); + /* ----------------------------------------------------------------------------- * IRQ Handling */ @@ -2937,7 +3752,7 @@ static irqreturn_t dw_hdmi_i2c_irq(struct dw_hdmi *hdmi) static irqreturn_t dw_hdmi_hardirq(int irq, void *dev_id) { struct dw_hdmi *hdmi = dev_id; - u8 intr_stat; + u8 intr_stat, hdcp_stat; irqreturn_t ret = IRQ_NONE; if (hdmi->i2c) @@ -2949,6 +3764,13 @@ static irqreturn_t dw_hdmi_hardirq(int irq, void *dev_id) return IRQ_WAKE_THREAD; } + hdcp_stat = hdmi_readb(hdmi, HDMI_A_APIINTSTAT); + if (hdcp_stat) { + dev_dbg(hdmi->dev, "HDCP irq %#x\n", hdcp_stat); + hdmi_writeb(hdmi, 0xff, HDMI_A_APIINTMSK); + return IRQ_WAKE_THREAD; + } + return ret; } @@ -2956,7 +3778,7 @@ void dw_hdmi_setup_rx_sense(struct dw_hdmi *hdmi, bool hpd, bool rx_sense) { mutex_lock(&hdmi->mutex); - if (!hdmi->force) { + if (!hdmi->force && !hdmi->force_logo) { /* * If the RX sense status indicates we're disconnected, * clear the software rxsense status. @@ -2983,8 +3805,7 @@ EXPORT_SYMBOL_GPL(dw_hdmi_setup_rx_sense); static irqreturn_t dw_hdmi_irq(int irq, void *dev_id) { struct dw_hdmi *hdmi = dev_id; - u8 intr_stat, phy_int_pol, phy_pol_mask, phy_stat; - enum drm_connector_status status = connector_status_unknown; + u8 intr_stat, phy_int_pol, phy_pol_mask, phy_stat, hdcp_stat; intr_stat = hdmi_readb(hdmi, HDMI_IH_PHY_STAT0); phy_int_pol = hdmi_readb(hdmi, HDMI_PHY_POL0); @@ -3023,29 +3844,23 @@ static irqreturn_t dw_hdmi_irq(int irq, void *dev_id) cec_notifier_phys_addr_invalidate(hdmi->cec_notifier); mutex_unlock(&hdmi->cec_notifier_mutex); } - - if (phy_stat & HDMI_PHY_HPD) - status = connector_status_connected; - - if (!(phy_stat & (HDMI_PHY_HPD | HDMI_PHY_RX_SENSE))) - status = connector_status_disconnected; } - if (status != connector_status_unknown) { - dev_dbg(hdmi->dev, "EVENT=%s\n", - status == connector_status_connected ? - "plugin" : "plugout"); - - if (hdmi->bridge.dev) { - drm_helper_hpd_irq_event(hdmi->bridge.dev); - drm_bridge_hpd_notify(&hdmi->bridge, status); - } - } + check_hdmi_irq(hdmi, intr_stat, phy_int_pol); hdmi_writeb(hdmi, intr_stat, HDMI_IH_PHY_STAT0); - hdmi_writeb(hdmi, ~(HDMI_IH_PHY_STAT0_HPD | HDMI_IH_PHY_STAT0_RX_SENSE), - HDMI_IH_MUTE_PHY_STAT0); - + if (!hdmi->next_bridge) + hdmi_writeb(hdmi, ~(HDMI_IH_PHY_STAT0_HPD | + HDMI_IH_PHY_STAT0_RX_SENSE), + HDMI_IH_MUTE_PHY_STAT0); + + hdcp_stat = hdmi_readb(hdmi, HDMI_A_APIINTSTAT); + if (hdcp_stat) { + if (hdmi->hdcp) + hdmi->hdcp->hdcp_isr(hdmi->hdcp, hdcp_stat); + hdmi_writeb(hdmi, hdcp_stat, HDMI_A_APIINTCLR); + hdmi_writeb(hdmi, 0x00, HDMI_A_APIINTMSK); + } return IRQ_HANDLED; } @@ -3179,12 +3994,363 @@ static void dw_hdmi_init_hw(struct dw_hdmi *hdmi) * Even if we are using a separate i2c adapter doing this doesn't * hurt. 
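+ * (The init call below is now guarded: dw_hdmi_i2c_set_divs() dereferences
+ * hdmi->i2c, which stays NULL when a separate i2c adapter is in use.)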
*/ - dw_hdmi_i2c_init(hdmi); + if (hdmi->i2c) + dw_hdmi_i2c_init(hdmi); if (hdmi->phy.ops->setup_hpd) hdmi->phy.ops->setup_hpd(hdmi, hdmi->phy.data); } +static int dw_hdmi_status_show(struct seq_file *s, void *v) +{ + struct dw_hdmi *hdmi = s->private; + u32 val; + + seq_puts(s, "PHY: "); + if (!hdmi->phy.enabled) { + seq_puts(s, "disabled\n"); + return 0; + } + seq_puts(s, "enabled\t\t\tMode: "); + if (hdmi->sink_is_hdmi) + seq_puts(s, "HDMI\n"); + else + seq_puts(s, "DVI\n"); + if (hdmi->hdmi_data.video_mode.mtmdsclock > 340000000) + val = hdmi->hdmi_data.video_mode.mtmdsclock / 4; + else + val = hdmi->hdmi_data.video_mode.mtmdsclock; + seq_printf(s, "Pixel Clk: %uHz\t\tTMDS Clk: %uHz\n", + hdmi->hdmi_data.video_mode.mpixelclock, val); + seq_puts(s, "Color Format: "); + if (hdmi_bus_fmt_is_rgb(hdmi->hdmi_data.enc_out_bus_format)) + seq_puts(s, "RGB"); + else if (hdmi_bus_fmt_is_yuv444(hdmi->hdmi_data.enc_out_bus_format)) + seq_puts(s, "YUV444"); + else if (hdmi_bus_fmt_is_yuv422(hdmi->hdmi_data.enc_out_bus_format)) + seq_puts(s, "YUV422"); + else if (hdmi_bus_fmt_is_yuv420(hdmi->hdmi_data.enc_out_bus_format)) + seq_puts(s, "YUV420"); + else + seq_puts(s, "UNKNOWN"); + val = hdmi_bus_fmt_color_depth(hdmi->hdmi_data.enc_out_bus_format); + seq_printf(s, "\t\tColor Depth: %d bit\n", val); + seq_puts(s, "Colorimetry: "); + switch (hdmi->hdmi_data.enc_out_encoding) { + case V4L2_YCBCR_ENC_601: + seq_puts(s, "ITU.BT601"); + break; + case V4L2_YCBCR_ENC_709: + seq_puts(s, "ITU.BT709"); + break; + case V4L2_YCBCR_ENC_BT2020: + seq_puts(s, "ITU.BT2020"); + break; + default: /* Carries no data */ + seq_puts(s, "ITU.BT601"); + break; + } + + seq_puts(s, "\t\tEOTF: "); + + if (hdmi->version < 0x211a) { + seq_puts(s, "Unsupported\n"); + return 0; + } + + val = hdmi_readb(hdmi, HDMI_FC_PACKET_TX_EN); + if (!(val & HDMI_FC_PACKET_TX_EN_DRM_MASK)) { + seq_puts(s, "Off\n"); + return 0; + } + + switch (hdmi_readb(hdmi, HDMI_FC_DRM_PB0)) { + case HDMI_EOTF_TRADITIONAL_GAMMA_SDR: + seq_puts(s, "SDR"); + break; + case HDMI_EOTF_TRADITIONAL_GAMMA_HDR: + seq_puts(s, "HDR"); + break; + case HDMI_EOTF_SMPTE_ST2084: + seq_puts(s, "ST2084"); + break; + case HDMI_EOTF_BT_2100_HLG: + seq_puts(s, "HLG"); + break; + default: + seq_puts(s, "Not Defined\n"); + return 0; + } + + val = hdmi_readb(hdmi, HDMI_FC_DRM_PB3) << 8; + val |= hdmi_readb(hdmi, HDMI_FC_DRM_PB2); + seq_printf(s, "\nx0: %d", val); + val = hdmi_readb(hdmi, HDMI_FC_DRM_PB5) << 8; + val |= hdmi_readb(hdmi, HDMI_FC_DRM_PB4); + seq_printf(s, "\t\t\t\ty0: %d\n", val); + val = hdmi_readb(hdmi, HDMI_FC_DRM_PB7) << 8; + val |= hdmi_readb(hdmi, HDMI_FC_DRM_PB6); + seq_printf(s, "x1: %d", val); + val = hdmi_readb(hdmi, HDMI_FC_DRM_PB9) << 8; + val |= hdmi_readb(hdmi, HDMI_FC_DRM_PB8); + seq_printf(s, "\t\t\t\ty1: %d\n", val); + val = hdmi_readb(hdmi, HDMI_FC_DRM_PB11) << 8; + val |= hdmi_readb(hdmi, HDMI_FC_DRM_PB10); + seq_printf(s, "x2: %d", val); + val = hdmi_readb(hdmi, HDMI_FC_DRM_PB13) << 8; + val |= hdmi_readb(hdmi, HDMI_FC_DRM_PB12); + seq_printf(s, "\t\t\t\ty2: %d\n", val); + val = hdmi_readb(hdmi, HDMI_FC_DRM_PB15) << 8; + val |= hdmi_readb(hdmi, HDMI_FC_DRM_PB14); + seq_printf(s, "white x: %d", val); + val = hdmi_readb(hdmi, HDMI_FC_DRM_PB17) << 8; + val |= hdmi_readb(hdmi, HDMI_FC_DRM_PB16); + seq_printf(s, "\t\t\twhite y: %d\n", val); + val = hdmi_readb(hdmi, HDMI_FC_DRM_PB19) << 8; + val |= hdmi_readb(hdmi, HDMI_FC_DRM_PB18); + seq_printf(s, "max lum: %d", val); + val = hdmi_readb(hdmi, HDMI_FC_DRM_PB21) << 8; + val |= hdmi_readb(hdmi, 
HDMI_FC_DRM_PB20);
+	seq_printf(s, "\t\t\tmin lum: %d\n", val);
+	val = hdmi_readb(hdmi, HDMI_FC_DRM_PB23) << 8;
+	val |= hdmi_readb(hdmi, HDMI_FC_DRM_PB22);
+	seq_printf(s, "max cll: %d", val);
+	val = hdmi_readb(hdmi, HDMI_FC_DRM_PB25) << 8;
+	val |= hdmi_readb(hdmi, HDMI_FC_DRM_PB24);
+	seq_printf(s, "\t\t\tmax fall: %d\n", val);
+	return 0;
+}
+
+static int dw_hdmi_status_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, dw_hdmi_status_show, inode->i_private);
+}
+
+static const struct file_operations dw_hdmi_status_fops = {
+	.owner = THIS_MODULE,
+	.open = dw_hdmi_status_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/uaccess.h>
+
+struct dw_hdmi_reg_table {
+	int reg_base;
+	int reg_end;
+};
+
+static const struct dw_hdmi_reg_table hdmi_reg_table[] = {
+	{HDMI_DESIGN_ID, HDMI_CONFIG3_ID},
+	{HDMI_IH_FC_STAT0, HDMI_IH_MUTE},
+	{HDMI_TX_INVID0, HDMI_TX_BCBDATA1},
+	{HDMI_VP_STATUS, HDMI_VP_POL},
+	{HDMI_FC_INVIDCONF, HDMI_FC_DBGTMDS2},
+	{HDMI_PHY_CONF0, HDMI_PHY_POL0},
+	{HDMI_PHY_I2CM_SLAVE_ADDR, HDMI_PHY_I2CM_FS_SCL_LCNT_0_ADDR},
+	{HDMI_AUD_CONF0, 0x3624},
+	{HDMI_MC_SFRDIV, HDMI_MC_HEACPHY_RST},
+	{HDMI_CSC_CFG, HDMI_CSC_COEF_C4_LSB},
+	{HDMI_A_HDCPCFG0, 0x52bb},
+	{0x7800, 0x7818},
+	{0x7900, 0x790e},
+	{HDMI_CEC_CTRL, HDMI_CEC_WKUPCTRL},
+	{HDMI_I2CM_SLAVE, 0x7e31},
+};
+
+static int dw_hdmi_ctrl_show(struct seq_file *s, void *v)
+{
+	struct dw_hdmi *hdmi = s->private;
+	u32 i = 0, j = 0, val = 0;
+
+	seq_puts(s, "\n>>>hdmi_ctl reg ");
+	for (i = 0; i < 16; i++)
+		seq_printf(s, " %2x", i);
+	seq_puts(s, "\n---------------------------------------------------");
+
+	for (i = 0; i < ARRAY_SIZE(hdmi_reg_table); i++) {
+		for (j = hdmi_reg_table[i].reg_base;
+		     j <= hdmi_reg_table[i].reg_end; j++) {
+			val = hdmi_readb(hdmi, j);
+			if ((j - hdmi_reg_table[i].reg_base) % 16 == 0)
+				seq_printf(s, "\n>>>hdmi_ctl %04x:", j);
+			seq_printf(s, " %02x", val);
+		}
+	}
+	seq_puts(s, "\n---------------------------------------------------\n");
+
+	return 0;
+}
+
+static int dw_hdmi_ctrl_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, dw_hdmi_ctrl_show, inode->i_private);
+}
+
+static ssize_t
+dw_hdmi_ctrl_write(struct file *file, const char __user *buf,
+		   size_t count, loff_t *ppos)
+{
+	struct dw_hdmi *hdmi =
+		((struct seq_file *)file->private_data)->private;
+	u32 reg, val;
+	char kbuf[25];
+
+	if (copy_from_user(kbuf, buf, count))
+		return -EFAULT;
+	if (sscanf(kbuf, "%x%x", &reg, &val) != 2)
+		return -EFAULT;
+	if (reg > HDMI_I2CM_FS_SCL_LCNT_0_ADDR) {
+		dev_err(hdmi->dev, "it is not a hdmi register\n");
+		return count;
+	}
+	dev_info(hdmi->dev, "/**********hdmi register config******/");
+	dev_info(hdmi->dev, "\n reg=%x val=%x\n", reg, val);
+	hdmi_writeb(hdmi, val, reg);
+	return count;
+}
+
+static const struct file_operations dw_hdmi_ctrl_fops = {
+	.owner = THIS_MODULE,
+	.open = dw_hdmi_ctrl_open,
+	.read = seq_read,
+	.write = dw_hdmi_ctrl_write,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static int dw_hdmi_phy_show(struct seq_file *s, void *v)
+{
+	struct dw_hdmi *hdmi = s->private;
+	u32 i;
+
+	seq_puts(s, "\n>>>hdmi_phy reg ");
+	for (i = 0; i < 0x28; i++)
+		seq_printf(s, "regs %02x val %04x\n",
+			   i, hdmi_phy_i2c_read(hdmi, i));
+	return 0;
+}
+
+static int dw_hdmi_phy_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, dw_hdmi_phy_show, inode->i_private);
+}
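For reference, the ctrl and phy files parse a "<reg> <val>" pair of hex numbers via sscanf("%x%x") as shown above, and they are created with mode 0400 by dw_hdmi_register_debugfs() below, so only root (via CAP_DAC_OVERRIDE) can exercise the write path. Note also that the handlers copy into a fixed 25-byte buffer, so writes must stay short. A minimal userspace sketch; the debugfs mount point and the register offset are assumptions for illustration only:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* Assumes debugfs is mounted at /sys/kernel/debug. */
	const char *path = "/sys/kernel/debug/dw-hdmi/ctrl";
	/* "<reg> <val>" in hex, matching sscanf(kbuf, "%x%x", ...);
	 * 0x1000 is an illustrative offset, not a recommendation. */
	const char *cmd = "0x1000 0xff\n";
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror(path);
		return 1;
	}
	if (write(fd, cmd, strlen(cmd)) != (ssize_t)strlen(cmd))
		perror("write");
	close(fd);
	return 0;
}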
+static ssize_t
+dw_hdmi_phy_write(struct file *file, const char __user *buf,
+		  size_t count, loff_t *ppos)
+{
+	struct dw_hdmi *hdmi =
+		((struct seq_file *)file->private_data)->private;
+	u32 reg, val;
+	char kbuf[25];
+
+	if (copy_from_user(kbuf, buf, count))
+		return -EFAULT;
+	if (sscanf(kbuf, "%x%x", &reg, &val) != 2)
+		return -EFAULT;
+	if (reg > 0x28) {
+		dev_err(hdmi->dev, "it is not a hdmi phy register\n");
+		return count;
+	}
+	dev_info(hdmi->dev, "/*******hdmi phy register config******/");
+	dev_info(hdmi->dev, "\n reg=%x val=%x\n", reg, val);
+	dw_hdmi_phy_i2c_write(hdmi, val, reg);
+	return count;
+}
+
+static const struct file_operations dw_hdmi_phy_fops = {
+	.owner = THIS_MODULE,
+	.open = dw_hdmi_phy_open,
+	.read = seq_read,
+	.write = dw_hdmi_phy_write,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static void dw_hdmi_register_debugfs(struct device *dev, struct dw_hdmi *hdmi)
+{
+	hdmi->debugfs_dir = debugfs_create_dir("dw-hdmi", NULL);
+	if (IS_ERR(hdmi->debugfs_dir)) {
+		dev_err(dev, "failed to create debugfs dir!\n");
+		return;
+	}
+	debugfs_create_file("status", 0400, hdmi->debugfs_dir,
+			    hdmi, &dw_hdmi_status_fops);
+	debugfs_create_file("ctrl", 0400, hdmi->debugfs_dir,
+			    hdmi, &dw_hdmi_ctrl_fops);
+	debugfs_create_file("phy", 0400, hdmi->debugfs_dir,
+			    hdmi, &dw_hdmi_phy_fops);
+}
+
+static void dw_hdmi_register_hdcp(struct device *dev, struct dw_hdmi *hdmi,
+				  u32 val, bool hdcp1x_enable)
+{
+	struct dw_hdcp hdmi_hdcp = {
+		.hdmi = hdmi,
+		.write = hdmi_writeb,
+		.read = hdmi_readb,
+		.regs = hdmi->regs,
+		.reg_io_width = val,
+		.enable = hdcp1x_enable,
+	};
+	struct platform_device_info hdcp_device_info = {
+		.parent = dev,
+		.id = PLATFORM_DEVID_AUTO,
+		.res = NULL,
+		.num_res = 0,
+		.name = DW_HDCP_DRIVER_NAME,
+		.data = &hdmi_hdcp,
+		.size_data = sizeof(hdmi_hdcp),
+		.dma_mask = DMA_BIT_MASK(32),
+	};
+
+	hdmi->hdcp_dev = platform_device_register_full(&hdcp_device_info);
+	if (IS_ERR(hdmi->hdcp_dev))
+		dev_err(dev, "failed to register hdcp!\n");
+	else
+		hdmi->hdcp = hdmi->hdcp_dev->dev.platform_data;
+}
+
+static int get_force_logo_property(struct dw_hdmi *hdmi)
+{
+	struct device_node *dss;
+	struct device_node *route;
+	struct device_node *route_hdmi;
+
+	dss = of_find_node_by_name(NULL, "display-subsystem");
+	if (!dss) {
+		dev_err(hdmi->dev, "can't find display-subsystem\n");
+		return -ENODEV;
+	}
+
+	route = of_find_node_by_name(dss, "route");
+	if (!route) {
+		dev_err(hdmi->dev, "can't find route\n");
+		of_node_put(dss);
+		return -ENODEV;
+	}
+	of_node_put(dss);
+
+	route_hdmi = of_find_node_by_name(route, "route-hdmi");
+	if (!route_hdmi) {
+		dev_err(hdmi->dev, "can't find route-hdmi\n");
+		of_node_put(route);
+		return -ENODEV;
+	}
+	of_node_put(route);
+
+	hdmi->force_logo =
+		of_property_read_bool(route_hdmi, "force-output");
+
+	of_node_put(route_hdmi);
+
+	return 0;
+}
+
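dw_hdmi_register_hdcp() above hands the HDCP block to a separate child driver by packing the register accessors into a struct dw_hdcp and passing it as platform_data of a DW_HDCP_DRIVER_NAME platform device. A hedged sketch of the consumer side; the probe body and field usage are inferred from the initializer above, not taken from the actual vendor HDCP driver:

/* Hypothetical consumer: the child driver bound to DW_HDCP_DRIVER_NAME. */
static int dw_hdcp_probe(struct platform_device *pdev)
{
	/* The struct dw_hdcp filled in by dw_hdmi_register_hdcp() above. */
	struct dw_hdcp *hdcp = pdev->dev.platform_data;

	if (!hdcp || !hdcp->read || !hdcp->write)
		return -EINVAL;

	/* Registers are reached only through the accessors supplied by
	 * dw-hdmi; assuming read() has the hdmi_readb() signature: */
	dev_info(&pdev->dev, "HDCPCFG0 = %#x\n",
		 hdcp->read(hdcp->hdmi, HDMI_A_HDCPCFG0));
	return 0;
}

static struct platform_driver dw_hdcp_driver = {
	.probe = dw_hdcp_probe,
	.driver = {
		.name = DW_HDCP_DRIVER_NAME,
	},
};
module_platform_driver(dw_hdcp_driver);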
 /* -----------------------------------------------------------------------------
  * Probe/remove API, used from platforms based on the DRM bridge API.
  */
@@ -3193,6 +4359,7 @@ struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev,
 {
 	struct device *dev = &pdev->dev;
 	struct device_node *np = dev->of_node;
+	struct device_node *endpoint;
 	struct platform_device_info pdevinfo;
 	struct device_node *ddc_node;
 	struct dw_hdmi_cec_data cec;
@@ -3205,11 +4372,13 @@ struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev,
 	u8 prod_id1;
 	u8 config0;
 	u8 config3;
+	bool hdcp1x_enable = false;
 
 	hdmi = devm_kzalloc(dev, sizeof(*hdmi), GFP_KERNEL);
 	if (!hdmi)
 		return ERR_PTR(-ENOMEM);
 
+	hdmi->connector.stereo_allowed = true;
 	hdmi->plat_data = plat_data;
 	hdmi->dev = dev;
 	hdmi->sample_rate = 48000;
@@ -3340,7 +4509,24 @@ struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev,
 		 prod_id1 & HDMI_PRODUCT_ID1_HDCP ? "with" : "without",
 		 hdmi->phy.name);
 
-	dw_hdmi_init_hw(hdmi);
+	ret = get_force_logo_property(hdmi);
+	if (ret)
+		goto err_iahb;
+
+	hdmi->initialized = false;
+	ret = hdmi_readb(hdmi, HDMI_PHY_STAT0);
+	if (((ret & HDMI_PHY_TX_PHY_LOCK) && (ret & HDMI_PHY_HPD) &&
+	     hdmi_readb(hdmi, HDMI_FC_EXCTRLDUR)) || hdmi->force_logo) {
+		hdmi->mc_clkdis = hdmi_readb(hdmi, HDMI_MC_CLKDIS);
+		hdmi->disabled = false;
+		hdmi->bridge_is_on = true;
+		hdmi->phy.enabled = true;
+		hdmi->initialized = true;
+	} else if (ret & HDMI_PHY_TX_PHY_LOCK) {
+		hdmi->phy.ops->disable(hdmi, hdmi->phy.data);
+	}
+
+	init_hpd_work(hdmi);
 
 	irq = platform_get_irq(pdev, 0);
 	if (irq < 0) {
@@ -3348,6 +4534,7 @@ struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev,
 		goto err_iahb;
 	}
 
+	hdmi->irq = irq;
 	ret = devm_request_threaded_irq(dev, irq, dw_hdmi_hardirq,
 					dw_hdmi_irq, IRQF_SHARED,
 					dev_name(dev), hdmi);
@@ -3383,8 +4570,20 @@ struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev,
 		hdmi->ddc = dw_hdmi_i2c_adapter(hdmi);
 		if (IS_ERR(hdmi->ddc))
 			hdmi->ddc = NULL;
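init_hpd_work(), called above, is defined elsewhere in this patch. A plausible minimal sketch of what it sets up, inferred from how hdmi->workqueue and hdmi->work are driven by mod_delayed_work() in the suspend/resume paths later in this file; the handler body is an assumption, not the vendor code:

static void hpd_work_func(struct work_struct *work)
{
	struct dw_hdmi *hdmi = container_of(to_delayed_work(work),
					    struct dw_hdmi, work);

	/* Re-read HPD and let the DRM core re-probe the connector state. */
	if (hdmi->bridge.dev)
		drm_helper_hpd_irq_event(hdmi->bridge.dev);
}

static void init_hpd_work(struct dw_hdmi *hdmi)
{
	hdmi->workqueue = create_singlethread_workqueue("hpd_queue");
	INIT_DELAYED_WORK(&hdmi->work, hpd_work_func);
}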
+		/*
+		 * Read the SCL high and low times from the device tree. If
+		 * they are not available, use the default timing, which
+		 * gives an SCL clock rate of about 99.6 kHz.
+		 */
+		if (of_property_read_u32(np, "ddc-i2c-scl-high-time-ns",
+					 &hdmi->i2c->scl_high_ns))
+			hdmi->i2c->scl_high_ns = 4708;
+		if (of_property_read_u32(np, "ddc-i2c-scl-low-time-ns",
+					 &hdmi->i2c->scl_low_ns))
+			hdmi->i2c->scl_low_ns = 4916;
 	}
 
+	dw_hdmi_init_hw(hdmi);
+
 	hdmi->bridge.driver_private = hdmi;
 	hdmi->bridge.funcs = &dw_hdmi_bridge_funcs;
 	hdmi->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID
@@ -3393,6 +4592,30 @@
 	hdmi->bridge.of_node = pdev->dev.of_node;
 #endif
 
+	endpoint = of_graph_get_endpoint_by_regs(hdmi->dev->of_node, 1, -1);
+	if (endpoint && of_device_is_available(endpoint)) {
+		struct device_node *remote;
+
+		remote = of_graph_get_remote_port_parent(endpoint);
+		of_node_put(endpoint);
+		if (!remote || !of_device_is_available(remote)) {
+			of_node_put(remote);
+			ret = -ENODEV;
+			goto err_iahb;
+		}
+
+		hdmi->next_bridge = of_drm_find_bridge(remote);
+		of_node_put(remote);
+		if (!hdmi->next_bridge) {
+			dev_err(hdmi->dev, "can't find next bridge\n");
+			ret = -EPROBE_DEFER;
+			goto err_iahb;
+		}
+
+		hdmi->sink_is_hdmi = true;
+		hdmi->sink_has_audio = true;
+	}
+
 	memset(&pdevinfo, 0, sizeof(pdevinfo));
 	pdevinfo.parent = dev;
 	pdevinfo.id = PLATFORM_DEVID_AUTO;
@@ -3407,7 +4630,7 @@ struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev,
 		audio.base = hdmi->regs;
 		audio.irq = irq;
 		audio.hdmi = hdmi;
-		audio.get_eld = hdmi_audio_get_eld;
+		audio.eld = hdmi->connector.eld;
 		hdmi->enable_audio = dw_hdmi_ahb_audio_enable;
 		hdmi->disable_audio = dw_hdmi_ahb_audio_disable;
@@ -3420,7 +4643,7 @@ struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev,
 		struct dw_hdmi_i2s_audio_data audio;
 
 		audio.hdmi = hdmi;
-		audio.get_eld = hdmi_audio_get_eld;
+		audio.eld = hdmi->connector.eld;
 		audio.write = hdmi_writeb;
 		audio.read = hdmi_readb;
 		hdmi->enable_audio = dw_hdmi_i2s_audio_enable;
@@ -3446,8 +4669,40 @@ struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev,
 		hdmi->cec = platform_device_register_full(&pdevinfo);
 	}
 
+	hdmi->extcon = devm_extcon_dev_allocate(hdmi->dev, dw_hdmi_cable);
+	if (IS_ERR(hdmi->extcon)) {
+		ret = PTR_ERR(hdmi->extcon);
+		dev_err(hdmi->dev, "allocate extcon failed: %d\n", ret);
+		goto err_iahb;
+	}
+
+	ret = devm_extcon_dev_register(hdmi->dev, hdmi->extcon);
+	if (ret) {
+		dev_err(hdmi->dev, "failed to register extcon: %d\n",
+			ret);
+		goto err_iahb;
+	}
+
+	ret = extcon_set_property_capability(hdmi->extcon, EXTCON_DISP_HDMI,
+					     EXTCON_PROP_DISP_HPD);
+	if (ret) {
+		dev_err(hdmi->dev,
+			"failed to set HDMI property capability: %d\n",
+			ret);
+		goto err_iahb;
+	}
+
 	drm_bridge_add(&hdmi->bridge);
 
+	dw_hdmi_register_debugfs(dev, hdmi);
+
+	if (of_property_read_bool(np, "scramble-low-rates"))
+		hdmi->scramble_low_rates = true;
+
+	if (of_property_read_bool(np, "hdcp1x-enable"))
+		hdcp1x_enable = true;
+	dw_hdmi_register_hdcp(dev, hdmi, val, hdcp1x_enable);
+
 	return hdmi;
 
 err_iahb:
@@ -3457,7 +4712,10 @@ struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev,
 err_isfr:
 	clk_disable_unprepare(hdmi->isfr_clk);
 err_res:
-	i2c_put_adapter(hdmi->ddc);
+	if (hdmi->i2c)
+		i2c_del_adapter(&hdmi->i2c->adap);
+	else
+		i2c_put_adapter(hdmi->ddc);
 
 	return ERR_PTR(ret);
 }
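The probe path above publishes cable state through an extcon device; the dw_hdmi_cable table is defined elsewhere in this patch and presumably lists EXTCON_DISP_HDMI. A hedged sketch of how another driver could observe it using the standard extcon notifier API; the phandle index and the handler body are illustrative:

static int hdmi_cable_evt(struct notifier_block *nb,
			  unsigned long state, void *ptr)
{
	/* state is non-zero while EXTCON_DISP_HDMI reports attached. */
	pr_info("HDMI cable %s\n", state ? "attached" : "detached");
	return NOTIFY_OK;
}

static struct notifier_block hdmi_cable_nb = {
	.notifier_call = hdmi_cable_evt,
};

static int observe_hdmi_cable(struct device *dev)
{
	/* Assumes an "extcon" phandle in the consumer's DT node. */
	struct extcon_dev *edev = extcon_get_edev_by_phandle(dev, 0);

	if (IS_ERR(edev))
		return PTR_ERR(edev);

	return devm_extcon_register_notifier(dev, edev, EXTCON_DISP_HDMI,
					     &hdmi_cable_nb);
}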
@@ -3465,16 +4723,35 @@ EXPORT_SYMBOL_GPL(dw_hdmi_probe);
 
 void dw_hdmi_remove(struct dw_hdmi *hdmi)
 {
+	if (hdmi->irq)
+		disable_irq(hdmi->irq);
+
+	cancel_delayed_work(&hdmi->work);
+	flush_workqueue(hdmi->workqueue);
+	destroy_workqueue(hdmi->workqueue);
+
+	debugfs_remove_recursive(hdmi->debugfs_dir);
+
 	drm_bridge_remove(&hdmi->bridge);
 
 	if (hdmi->audio && !IS_ERR(hdmi->audio))
 		platform_device_unregister(hdmi->audio);
+	if (hdmi->hdcp_dev && !IS_ERR(hdmi->hdcp_dev))
+		platform_device_unregister(hdmi->hdcp_dev);
 	if (!IS_ERR(hdmi->cec))
 		platform_device_unregister(hdmi->cec);
 
 	/* Disable all interrupts */
 	hdmi_writeb(hdmi, ~0, HDMI_IH_MUTE_PHY_STAT0);
 
+	if (!hdmi->next_bridge) {
+		dw_hdmi_destroy_properties(hdmi);
+		hdmi->connector.funcs->destroy(&hdmi->connector);
+	}
+
+	if (hdmi->bridge.encoder)
+		hdmi->bridge.encoder->funcs->destroy(hdmi->bridge.encoder);
+
 	clk_disable_unprepare(hdmi->iahb_clk);
 	clk_disable_unprepare(hdmi->isfr_clk);
 	if (hdmi->cec_clk)
@@ -3492,7 +4769,7 @@ EXPORT_SYMBOL_GPL(dw_hdmi_remove);
  */
 struct dw_hdmi *dw_hdmi_bind(struct platform_device *pdev,
 			     struct drm_encoder *encoder,
-			     const struct dw_hdmi_plat_data *plat_data)
+			     struct dw_hdmi_plat_data *plat_data)
 {
 	struct dw_hdmi *hdmi;
 	int ret;
@@ -3508,6 +4785,9 @@ struct dw_hdmi *dw_hdmi_bind(struct platform_device *pdev,
 		return ERR_PTR(ret);
 	}
 
+	if (!hdmi->next_bridge)
+		plat_data->connector = &hdmi->connector;
+
 	return hdmi;
 }
 EXPORT_SYMBOL_GPL(dw_hdmi_bind);
@@ -3518,9 +4798,87 @@ void dw_hdmi_unbind(struct dw_hdmi *hdmi)
 {
 }
 EXPORT_SYMBOL_GPL(dw_hdmi_unbind);
 
+static void dw_hdmi_reg_initial(struct dw_hdmi *hdmi)
+{
+	if (hdmi_readb(hdmi, HDMI_IH_MUTE)) {
+		initialize_hdmi_ih_mutes(hdmi);
+		/* unmute cec irq */
+		hdmi_writeb(hdmi, 0x68, HDMI_IH_MUTE_CEC_STAT0);
+
+		hdmi_writeb(hdmi, HDMI_PHY_I2CM_INT_ADDR_DONE_POL,
+			    HDMI_PHY_I2CM_INT_ADDR);
+
+		hdmi_writeb(hdmi, HDMI_PHY_I2CM_CTLINT_ADDR_NAC_POL |
+			    HDMI_PHY_I2CM_CTLINT_ADDR_ARBITRATION_POL,
+			    HDMI_PHY_I2CM_CTLINT_ADDR);
+
+		if (!hdmi->next_bridge) {
+			hdmi_writeb(hdmi, HDMI_PHY_HPD | HDMI_PHY_RX_SENSE,
+				    HDMI_PHY_POL0);
+			hdmi_writeb(hdmi, hdmi->phy_mask, HDMI_PHY_MASK0);
+			hdmi_writeb(hdmi, ~(HDMI_IH_PHY_STAT0_HPD |
+				    HDMI_IH_PHY_STAT0_RX_SENSE),
+				    HDMI_IH_MUTE_PHY_STAT0);
+		}
+	}
+}
+
+void dw_hdmi_suspend(struct dw_hdmi *hdmi)
+{
+	if (!hdmi)
+		return;
+
+	mutex_lock(&hdmi->mutex);
+
+	/*
+	 * On system shutdown, hdmi must be disabled here.
+	 * On system suspend, dw_hdmi_bridge_disable has already disabled
+	 * hdmi. To prevent disabling it twice, check whether hdmi has
+	 * already been disabled.
+	 */
+	if (!hdmi->disabled) {
+		hdmi->disabled = true;
+		dw_hdmi_update_power(hdmi);
+		dw_hdmi_update_phy_mask(hdmi);
+	}
+	mutex_unlock(&hdmi->mutex);
+
+	if (hdmi->irq)
+		disable_irq(hdmi->irq);
+	cancel_delayed_work(&hdmi->work);
+	flush_workqueue(hdmi->workqueue);
+	pinctrl_pm_select_sleep_state(hdmi->dev);
+}
+EXPORT_SYMBOL_GPL(dw_hdmi_suspend);
+
 void dw_hdmi_resume(struct dw_hdmi *hdmi)
 {
-	dw_hdmi_init_hw(hdmi);
+	if (!hdmi)
+		return;
+
+	pinctrl_pm_select_default_state(hdmi->dev);
+	mutex_lock(&hdmi->mutex);
+	dw_hdmi_reg_initial(hdmi);
+	if (hdmi->i2c)
+		dw_hdmi_i2c_init(hdmi);
+	if (hdmi->irq)
+		enable_irq(hdmi->irq);
+	/*
+	 * The HDMI status may be incorrect after the following sequence:
+	 * HDMI plug in -> system sleep -> HDMI plug out -> system wake up.
+	 * In that case cat /sys/class/drm/card0-HDMI-A-1/status still
+	 * reports connected; no hpd interrupt was raised because the HDMI
+	 * block was powered down during suspend, so we need to check the
+	 * current HDMI status here.
+	 */
+	if (hdmi->connector.status == connector_status_connected) {
+		if (hdmi->phy.ops->read_hpd(hdmi, hdmi->phy.data) ==
+		    connector_status_disconnected) {
+			hdmi->hpd_state = false;
+			mod_delayed_work(hdmi->workqueue, &hdmi->work,
+					 msecs_to_jiffies(20));
+		}
+	}
+	mutex_unlock(&hdmi->mutex);
 }
 EXPORT_SYMBOL_GPL(dw_hdmi_resume);
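dw_hdmi_suspend()/dw_hdmi_resume() are exported for the platform glue driver to call from its PM callbacks. A minimal sketch of that wiring, assuming the glue stores the struct dw_hdmi pointer as driver data; the function and variable names here are illustrative, not the actual Rockchip glue:

static int __maybe_unused dw_hdmi_glue_suspend(struct device *dev)
{
	struct dw_hdmi *hdmi = dev_get_drvdata(dev);

	dw_hdmi_suspend(hdmi);
	return 0;
}

static int __maybe_unused dw_hdmi_glue_resume(struct device *dev)
{
	struct dw_hdmi *hdmi = dev_get_drvdata(dev);

	dw_hdmi_resume(hdmi);
	return 0;
}

static const struct dev_pm_ops dw_hdmi_glue_pm = {
	SET_SYSTEM_SLEEP_PM_OPS(dw_hdmi_glue_suspend, dw_hdmi_glue_resume)
};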
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.h b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.h
index 1999db05b..509732800 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.h
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.h
@@ -509,6 +509,51 @@
 #define HDMI_A_PRESETUP			0x501A
 #define HDMI_A_SRM_BASE			0x5020
 
+/* CEC Engine Registers */
+#define HDMI_CEC_CTRL			0x7D00
+#define HDMI_CEC_STAT			0x7D01
+#define HDMI_CEC_MASK			0x7D02
+#define HDMI_CEC_POLARITY		0x7D03
+#define HDMI_CEC_INT			0x7D04
+#define HDMI_CEC_ADDR_L			0x7D05
+#define HDMI_CEC_ADDR_H			0x7D06
+#define HDMI_CEC_TX_CNT			0x7D07
+#define HDMI_CEC_RX_CNT			0x7D08
+#define HDMI_CEC_TX_DATA0		0x7D10
+#define HDMI_CEC_TX_DATA1		0x7D11
+#define HDMI_CEC_TX_DATA2		0x7D12
+#define HDMI_CEC_TX_DATA3		0x7D13
+#define HDMI_CEC_TX_DATA4		0x7D14
+#define HDMI_CEC_TX_DATA5		0x7D15
+#define HDMI_CEC_TX_DATA6		0x7D16
+#define HDMI_CEC_TX_DATA7		0x7D17
+#define HDMI_CEC_TX_DATA8		0x7D18
+#define HDMI_CEC_TX_DATA9		0x7D19
+#define HDMI_CEC_TX_DATA10		0x7D1a
+#define HDMI_CEC_TX_DATA11		0x7D1b
+#define HDMI_CEC_TX_DATA12		0x7D1c
+#define HDMI_CEC_TX_DATA13		0x7D1d
+#define HDMI_CEC_TX_DATA14		0x7D1e
+#define HDMI_CEC_TX_DATA15		0x7D1f
+#define HDMI_CEC_RX_DATA0		0x7D20
+#define HDMI_CEC_RX_DATA1		0x7D21
+#define HDMI_CEC_RX_DATA2		0x7D22
+#define HDMI_CEC_RX_DATA3		0x7D23
+#define HDMI_CEC_RX_DATA4		0x7D24
+#define HDMI_CEC_RX_DATA5		0x7D25
+#define HDMI_CEC_RX_DATA6		0x7D26
+#define HDMI_CEC_RX_DATA7		0x7D27
+#define HDMI_CEC_RX_DATA8		0x7D28
+#define HDMI_CEC_RX_DATA9		0x7D29
+#define HDMI_CEC_RX_DATA10		0x7D2a
+#define HDMI_CEC_RX_DATA11		0x7D2b
+#define HDMI_CEC_RX_DATA12		0x7D2c
+#define HDMI_CEC_RX_DATA13		0x7D2d
+#define HDMI_CEC_RX_DATA14		0x7D2e
+#define HDMI_CEC_RX_DATA15		0x7D2f
+#define HDMI_CEC_LOCK			0x7D30
+#define HDMI_CEC_WKUPCTRL		0x7D31
+
 /* I2C Master Registers (E-DDC) */
 #define HDMI_I2CM_SLAVE			0x7E00
 #define HDMI_I2CM_ADDRESS		0x7E01
@@ -529,6 +574,7 @@
 #define HDMI_I2CM_FS_SCL_HCNT_0_ADDR	0x7E10
 #define HDMI_I2CM_FS_SCL_LCNT_1_ADDR	0x7E11
 #define HDMI_I2CM_FS_SCL_LCNT_0_ADDR	0x7E12
+#define HDMI_I2CM_SDA_HOLD		0x7E13
 
 enum {
 /* PRODUCT_ID0 field values */
@@ -842,6 +888,10 @@ enum {
 	HDMI_FC_AVICONF3_QUANT_RANGE_LIMITED = 0x00,
 	HDMI_FC_AVICONF3_QUANT_RANGE_FULL = 0x04,
 
+/* HDMI_FC_GCP */
+	HDMI_FC_GCP_SET_AVMUTE = 0x2,
+	HDMI_FC_GCP_CLEAR_AVMUTE = 0x1,
+
 /* FC_DBGFORCE field values */
 	HDMI_FC_DBGFORCE_FORCEAUDIO = 0x10,
 	HDMI_FC_DBGFORCE_FORCEVIDEO = 0x1,
@@ -1085,6 +1135,11 @@ enum {
 	HDMI_I2CM_CTLINT_NAC_MASK = 0x40,
 	HDMI_I2CM_CTLINT_ARB_POL = 0x8,
 	HDMI_I2CM_CTLINT_ARB_MASK = 0x4,
+
+/* I2CM_DIV field values */
+	HDMI_I2CM_DIV_FAST_STD_MODE = 0x8,
+	HDMI_I2CM_DIV_FAST_MODE = 0x8,
+	HDMI_I2CM_DIV_STD_MODE = 0,
 };
 
 /*
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
index 376fa6eb4..163dcc03b 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
@@ -244,7 +244,7 @@ struct dw_mipi_dsi {
 	struct device *dev;
 	void __iomem *base;
 
-	struct clk *pclk;
+	struct reset_control *apb_rst;
 
 	unsigned int lane_mbps; /* per lane */
 	u32 channel;
@@ -316,15 +316,10 @@ static int dw_mipi_dsi_host_attach(struct mipi_dsi_host *host,
 	const struct
dw_mipi_dsi_plat_data *pdata = dsi->plat_data; struct drm_bridge *bridge; struct drm_panel *panel; + int max_data_lanes = dsi->plat_data->max_data_lanes; int ret; - if (device->lanes > dsi->plat_data->max_data_lanes) { - dev_err(dsi->dev, "the number of data lanes(%u) is too many\n", - device->lanes); - return -EINVAL; - } - - dsi->lanes = device->lanes; + dsi->lanes = (device->lanes > max_data_lanes) ? device->lanes / 2 : device->lanes; dsi->channel = device->channel; dsi->format = device->format; dsi->mode_flags = device->mode_flags; @@ -599,8 +594,14 @@ static void dw_mipi_dsi_set_mode(struct dw_mipi_dsi *dsi, static void dw_mipi_dsi_disable(struct dw_mipi_dsi *dsi) { + const struct dw_mipi_dsi_phy_ops *phy_ops = dsi->plat_data->phy_ops; + + if (phy_ops->power_off) + phy_ops->power_off(dsi->plat_data->priv_data); + dsi_write(dsi, DSI_PWR_UP, RESET); dsi_write(dsi, DSI_PHY_RSTZ, PHY_RSTZ); + pm_runtime_put(dsi->dev); } static void dw_mipi_dsi_init(struct dw_mipi_dsi *dsi) @@ -715,16 +716,16 @@ static u32 dw_mipi_dsi_get_hcomponent_lbcc(struct dw_mipi_dsi *dsi, const struct drm_display_mode *mode, u32 hcomponent) { - u32 frac, lbcc; + u32 lbcc; lbcc = hcomponent * dsi->lane_mbps * MSEC_PER_SEC / 8; - frac = lbcc % mode->clock; - lbcc = lbcc / mode->clock; - if (frac) - lbcc++; + if (mode->clock == 0) { + DRM_ERROR("dsi mode clock is 0!\n"); + return 0; + } - return lbcc; + return DIV_ROUND_CLOSEST_ULL(lbcc, mode->clock); } static void dw_mipi_dsi_line_timer_config(struct dw_mipi_dsi *dsi, @@ -837,13 +838,13 @@ static void dw_mipi_dsi_dphy_enable(struct dw_mipi_dsi *dsi) ret = readl_poll_timeout(dsi->base + DSI_PHY_STATUS, val, val & PHY_LOCK, 1000, PHY_STATUS_TIMEOUT_US); if (ret) - DRM_DEBUG_DRIVER("failed to wait phy lock state\n"); + DRM_ERROR("failed to wait phy lock state\n"); ret = readl_poll_timeout(dsi->base + DSI_PHY_STATUS, val, val & PHY_STOP_STATE_CLK_LANE, 1000, PHY_STATUS_TIMEOUT_US); if (ret) - DRM_DEBUG_DRIVER("failed to wait phy clk lane stop state\n"); + DRM_ERROR("failed to wait phy clk lane stop state\n"); } static void dw_mipi_dsi_clear_err(struct dw_mipi_dsi *dsi) @@ -857,7 +858,6 @@ static void dw_mipi_dsi_clear_err(struct dw_mipi_dsi *dsi) static void dw_mipi_dsi_bridge_post_disable(struct drm_bridge *bridge) { struct dw_mipi_dsi *dsi = bridge_to_dsi(bridge); - const struct dw_mipi_dsi_phy_ops *phy_ops = dsi->plat_data->phy_ops; /* * Switch to command mode before panel-bridge post_disable & @@ -866,6 +866,8 @@ static void dw_mipi_dsi_bridge_post_disable(struct drm_bridge *bridge) * before by the drm framework. 
*/ dw_mipi_dsi_set_mode(dsi, 0); + if (dsi->slave) + dw_mipi_dsi_set_mode(dsi->slave, 0); /* * TODO Only way found to call panel-bridge post_disable & @@ -876,18 +878,10 @@ static void dw_mipi_dsi_bridge_post_disable(struct drm_bridge *bridge) if (dsi->panel_bridge->funcs->post_disable) dsi->panel_bridge->funcs->post_disable(dsi->panel_bridge); - if (phy_ops->power_off) - phy_ops->power_off(dsi->plat_data->priv_data); - - if (dsi->slave) { + if (dsi->slave) dw_mipi_dsi_disable(dsi->slave); - clk_disable_unprepare(dsi->slave->pclk); - pm_runtime_put(dsi->slave->dev); - } - dw_mipi_dsi_disable(dsi); - clk_disable_unprepare(dsi->pclk); - pm_runtime_put(dsi->dev); + dw_mipi_dsi_disable(dsi); } static unsigned int dw_mipi_dsi_get_lanes(struct dw_mipi_dsi *dsi) @@ -912,7 +906,11 @@ static void dw_mipi_dsi_mode_set(struct dw_mipi_dsi *dsi, int ret; u32 lanes = dw_mipi_dsi_get_lanes(dsi); - clk_prepare_enable(dsi->pclk); + if (dsi->apb_rst) { + reset_control_assert(dsi->apb_rst); + usleep_range(10, 20); + reset_control_deassert(dsi->apb_rst); + } ret = phy_ops->get_lane_mbps(priv_data, adjusted_mode, dsi->mode_flags, lanes, dsi->format, &dsi->lane_mbps); @@ -939,15 +937,15 @@ static void dw_mipi_dsi_mode_set(struct dw_mipi_dsi *dsi, if (ret) DRM_DEBUG_DRIVER("Phy init() failed\n"); + if (phy_ops->power_on) + phy_ops->power_on(dsi->plat_data->priv_data); + dw_mipi_dsi_dphy_enable(dsi); dw_mipi_dsi_wait_for_two_frames(adjusted_mode); /* Switch to cmd mode for panel-bridge pre_enable & panel prepare */ dw_mipi_dsi_set_mode(dsi, 0); - - if (phy_ops->power_on) - phy_ops->power_on(dsi->plat_data->priv_data); } static void dw_mipi_dsi_bridge_mode_set(struct drm_bridge *bridge, @@ -959,16 +957,25 @@ static void dw_mipi_dsi_bridge_mode_set(struct drm_bridge *bridge, dw_mipi_dsi_mode_set(dsi, adjusted_mode); if (dsi->slave) dw_mipi_dsi_mode_set(dsi->slave, adjusted_mode); + + DRM_DEV_INFO(dsi->dev, "final DSI-Link bandwidth: %u x %d Mbps\n", + dsi->lane_mbps, dsi->slave ? dsi->lanes * 2 : dsi->lanes); } static void dw_mipi_dsi_bridge_enable(struct drm_bridge *bridge) { struct dw_mipi_dsi *dsi = bridge_to_dsi(bridge); - /* Switch to video mode for panel-bridge enable & panel enable */ - dw_mipi_dsi_set_mode(dsi, MIPI_DSI_MODE_VIDEO); - if (dsi->slave) - dw_mipi_dsi_set_mode(dsi->slave, MIPI_DSI_MODE_VIDEO); + /* Switch to video/cmd mode for panel-bridge enable & panel enable */ + if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO) { + dw_mipi_dsi_set_mode(dsi, MIPI_DSI_MODE_VIDEO); + if (dsi->slave) + dw_mipi_dsi_set_mode(dsi->slave, MIPI_DSI_MODE_VIDEO); + } else { + dw_mipi_dsi_set_mode(dsi, 0); + if (dsi->slave) + dw_mipi_dsi_set_mode(dsi->slave, 0); + } } static enum drm_mode_status @@ -1103,7 +1110,6 @@ __dw_mipi_dsi_probe(struct platform_device *pdev, const struct dw_mipi_dsi_plat_data *plat_data) { struct device *dev = &pdev->dev; - struct reset_control *apb_rst; struct dw_mipi_dsi *dsi; int ret; @@ -1129,20 +1135,13 @@ __dw_mipi_dsi_probe(struct platform_device *pdev, dsi->base = plat_data->base; } - dsi->pclk = devm_clk_get(dev, "pclk"); - if (IS_ERR(dsi->pclk)) { - ret = PTR_ERR(dsi->pclk); - dev_err(dev, "Unable to get pclk: %d\n", ret); - return ERR_PTR(ret); - } - /* * Note that the reset was not defined in the initial device tree, so * we have to be prepared for it not being found. 
*/ - apb_rst = devm_reset_control_get_optional_exclusive(dev, "apb"); - if (IS_ERR(apb_rst)) { - ret = PTR_ERR(apb_rst); + dsi->apb_rst = devm_reset_control_get_optional_exclusive(dev, "apb"); + if (IS_ERR(dsi->apb_rst)) { + ret = PTR_ERR(dsi->apb_rst); if (ret != -EPROBE_DEFER) dev_err(dev, "Unable to get reset control: %d\n", ret); @@ -1150,20 +1149,6 @@ __dw_mipi_dsi_probe(struct platform_device *pdev, return ERR_PTR(ret); } - if (apb_rst) { - ret = clk_prepare_enable(dsi->pclk); - if (ret) { - dev_err(dev, "%s: Failed to enable pclk\n", __func__); - return ERR_PTR(ret); - } - - reset_control_assert(apb_rst); - usleep_range(10, 20); - reset_control_deassert(apb_rst); - - clk_disable_unprepare(dsi->pclk); - } - dw_mipi_dsi_debugfs_init(dsi); pm_runtime_enable(dev); @@ -1247,6 +1232,12 @@ void dw_mipi_dsi_unbind(struct dw_mipi_dsi *dsi) } EXPORT_SYMBOL_GPL(dw_mipi_dsi_unbind); +struct drm_connector *dw_mipi_dsi_get_connector(struct dw_mipi_dsi *dsi) +{ + return drm_panel_bridge_connector(dsi->panel_bridge); +} +EXPORT_SYMBOL_GPL(dw_mipi_dsi_get_connector); + MODULE_AUTHOR("Chris Zhong "); MODULE_AUTHOR("Philippe Cornu "); MODULE_DESCRIPTION("DW MIPI DSI host controller driver"); diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 7fc8e7000..4108c7265 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -296,12 +296,14 @@ update_connector_routing(struct drm_atomic_state *state, if (old_connector_state->crtc != new_connector_state->crtc) { if (old_connector_state->crtc) { crtc_state = drm_atomic_get_new_crtc_state(state, old_connector_state->crtc); - crtc_state->connectors_changed = true; + if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) + crtc_state->connectors_changed = true; } if (new_connector_state->crtc) { crtc_state = drm_atomic_get_new_crtc_state(state, new_connector_state->crtc); - crtc_state->connectors_changed = true; + if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) + crtc_state->connectors_changed = true; } } @@ -386,7 +388,8 @@ update_connector_routing(struct drm_atomic_state *state, set_best_encoder(state, new_connector_state, new_encoder); - crtc_state->connectors_changed = true; + if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) + crtc_state->connectors_changed = true; DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] using [ENCODER:%d:%s] on [CRTC:%d:%s]\n", connector->base.id, @@ -3554,6 +3557,9 @@ int drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc, replaced = drm_property_replace_blob(&crtc_state->degamma_lut, NULL); replaced |= drm_property_replace_blob(&crtc_state->ctm, NULL); replaced |= drm_property_replace_blob(&crtc_state->gamma_lut, blob); +#if defined(CONFIG_ROCKCHIP_DRM_CUBIC_LUT) + replaced |= drm_property_replace_blob(&crtc_state->cubic_lut, NULL); +#endif crtc_state->color_mgmt_changed |= replaced; ret = drm_atomic_commit(state); diff --git a/drivers/gpu/drm/drm_atomic_state_helper.c b/drivers/gpu/drm/drm_atomic_state_helper.c index 9ad740451..c29183d2a 100644 --- a/drivers/gpu/drm/drm_atomic_state_helper.c +++ b/drivers/gpu/drm/drm_atomic_state_helper.c @@ -141,6 +141,10 @@ void __drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc, drm_property_blob_get(state->ctm); if (state->gamma_lut) drm_property_blob_get(state->gamma_lut); +#if defined(CONFIG_ROCKCHIP_DRM_CUBIC_LUT) + if (state->cubic_lut) + drm_property_blob_get(state->cubic_lut); +#endif state->mode_changed = false; state->active_changed = false; state->planes_changed = 
false; @@ -213,6 +217,9 @@ void __drm_atomic_helper_crtc_destroy_state(struct drm_crtc_state *state) drm_property_blob_put(state->degamma_lut); drm_property_blob_put(state->ctm); drm_property_blob_put(state->gamma_lut); +#if defined(CONFIG_ROCKCHIP_DRM_CUBIC_LUT) + drm_property_blob_put(state->cubic_lut); +#endif } EXPORT_SYMBOL(__drm_atomic_helper_crtc_destroy_state); diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c index 25c269bc4..975ece7e0 100644 --- a/drivers/gpu/drm/drm_atomic_uapi.c +++ b/drivers/gpu/drm/drm_atomic_uapi.c @@ -459,6 +459,16 @@ static int drm_atomic_crtc_set_property(struct drm_crtc *crtc, &replaced); state->color_mgmt_changed |= replaced; return ret; +#if defined(CONFIG_ROCKCHIP_DRM_CUBIC_LUT) + } else if (property == config->cubic_lut_property) { + ret = drm_atomic_replace_property_blob_from_id(dev, + &state->cubic_lut, + val, + -1, sizeof(struct drm_color_lut), + &replaced); + state->color_mgmt_changed |= replaced; + return ret; +#endif } else if (property == config->prop_out_fence_ptr) { s32 __user *fence_ptr = u64_to_user_ptr(val); @@ -501,6 +511,10 @@ drm_atomic_crtc_get_property(struct drm_crtc *crtc, *val = (state->ctm) ? state->ctm->base.id : 0; else if (property == config->gamma_lut_property) *val = (state->gamma_lut) ? state->gamma_lut->base.id : 0; +#if defined(CONFIG_ROCKCHIP_DRM_CUBIC_LUT) + else if (property == config->cubic_lut_property) + *val = (state->cubic_lut) ? state->cubic_lut->base.id : 0; +#endif else if (property == config->prop_out_fence_ptr) *val = 0; else if (crtc->funcs->atomic_get_property) diff --git a/drivers/gpu/drm/drm_auth.c b/drivers/gpu/drm/drm_auth.c index c7adbeaf1..232abbba3 100644 --- a/drivers/gpu/drm/drm_auth.c +++ b/drivers/gpu/drm/drm_auth.c @@ -135,18 +135,16 @@ static void drm_set_master(struct drm_device *dev, struct drm_file *fpriv, static int drm_new_set_master(struct drm_device *dev, struct drm_file *fpriv) { struct drm_master *old_master; - struct drm_master *new_master; lockdep_assert_held_once(&dev->master_mutex); WARN_ON(fpriv->is_master); old_master = fpriv->master; - new_master = drm_master_create(dev); - if (!new_master) + fpriv->master = drm_master_create(dev); + if (!fpriv->master) { + fpriv->master = old_master; return -ENOMEM; - spin_lock(&fpriv->master_lookup_lock); - fpriv->master = new_master; - spin_unlock(&fpriv->master_lookup_lock); + } fpriv->is_master = 1; fpriv->authenticated = 1; @@ -304,13 +302,10 @@ int drm_master_open(struct drm_file *file_priv) /* if there is no current master make this fd it, but do not create * any master object for render clients */ mutex_lock(&dev->master_mutex); - if (!dev->master) { + if (!dev->master) ret = drm_new_set_master(dev, file_priv); - } else { - spin_lock(&file_priv->master_lookup_lock); + else file_priv->master = drm_master_get(dev->master); - spin_unlock(&file_priv->master_lookup_lock); - } mutex_unlock(&dev->master_mutex); return ret; @@ -376,31 +371,6 @@ struct drm_master *drm_master_get(struct drm_master *master) } EXPORT_SYMBOL(drm_master_get); -/** - * drm_file_get_master - reference &drm_file.master of @file_priv - * @file_priv: DRM file private - * - * Increments the reference count of @file_priv's &drm_file.master and returns - * the &drm_file.master. If @file_priv has no &drm_file.master, returns NULL. - * - * Master pointers returned from this function should be unreferenced using - * drm_master_put(). 
- */ -struct drm_master *drm_file_get_master(struct drm_file *file_priv) -{ - struct drm_master *master = NULL; - - spin_lock(&file_priv->master_lookup_lock); - if (!file_priv->master) - goto unlock; - master = drm_master_get(file_priv->master); - -unlock: - spin_unlock(&file_priv->master_lookup_lock); - return master; -} -EXPORT_SYMBOL(drm_file_get_master); - static void drm_master_destroy(struct kref *kref) { struct drm_master *master = container_of(kref, struct drm_master, refcount); diff --git a/drivers/gpu/drm/drm_color_mgmt.c b/drivers/gpu/drm/drm_color_mgmt.c index 138ff34b3..7b270b68a 100644 --- a/drivers/gpu/drm/drm_color_mgmt.c +++ b/drivers/gpu/drm/drm_color_mgmt.c @@ -33,7 +33,7 @@ /** * DOC: overview * - * Color management or color space adjustments is supported through a set of 5 + * Color management or color space adjustments is supported through a set of 7 * properties on the &drm_crtc object. They are set up by calling * drm_crtc_enable_color_mgmt(). * @@ -60,7 +60,7 @@ * “CTM”: * Blob property to set the current transformation matrix (CTM) apply to * pixel data after the lookup through the degamma LUT and before the - * lookup through the gamma LUT. The data is interpreted as a struct + * lookup through the cubic LUT. The data is interpreted as a struct * &drm_color_ctm. * * Setting this to NULL (blob property value set to 0) means a @@ -68,13 +68,40 @@ * boot-up state too. Drivers can access the blob for the color conversion * matrix through &drm_crtc_state.ctm. * + * ”CUBIC_LUT”: + * Blob property to set the cubic (3D) lookup table performing color + * mapping after the transformation matrix and before the lookup through + * the gamma LUT. Unlike the degamma and gamma LUTs that map color + * components independently, the 3D LUT converts an input color to an + * output color by indexing into the 3D table using the color components + * as a 3D coordinate. The LUT is subsampled as 8-bit (or more) precision + * would require too much storage space in the hardware, so the precision + * of the color components is reduced before the look up, and the low + * order bits may be used to interpolate between the nearest points in 3D + * space. + * + * The data is interpreted as an array of &struct drm_color_lut elements. + * Hardware might choose not to use the full precision of the LUT + * elements. + * + * Setting this to NULL (blob property value set to 0) means the output + * color is identical to the input color. This is generally the driver + * boot-up state too. Drivers can access this blob through + * &drm_crtc_state.cubic_lut. + * + * ”CUBIC_LUT_SIZE”: + * Unsigned range property to give the size of the lookup table to be set + * on the CUBIC_LUT property (the size depends on the underlying hardware). + * If drivers support multiple LUT sizes then they should publish the + * largest size, and sub-sample smaller sized LUTs appropriately. + * * “GAMMA_LUT”: * Blob property to set the gamma lookup table (LUT) mapping pixel data - * after the transformation matrix to data sent to the connector. The - * data is interpreted as an array of &struct drm_color_lut elements. - * Hardware might choose not to use the full precision of the LUT elements - * nor use all the elements of the LUT (for example the hardware might - * choose to interpolate between LUT[0] and LUT[4]). + * after the cubic LUT to data sent to the connector. The data is + * interpreted as an array of &struct drm_color_lut elements. 
Hardware + * might choose not to use the full precision of the LUT elements nor use + * all the elements of the LUT (for example the hardware might choose to + * interpolate between LUT[0] and LUT[4]). * * Setting this to NULL (blob property value set to 0) means a * linear/pass-thru gamma table should be used. This is generally the diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c index b0a826489..3d7182001 100644 --- a/drivers/gpu/drm/drm_debugfs.c +++ b/drivers/gpu/drm/drm_debugfs.c @@ -91,7 +91,6 @@ static int drm_clients_info(struct seq_file *m, void *data) mutex_lock(&dev->filelist_mutex); list_for_each_entry_reverse(priv, &dev->filelist, lhead) { struct task_struct *task; - bool is_current_master = drm_is_current_master(priv); rcu_read_lock(); /* locks pid_task()->comm */ task = pid_task(priv->pid, PIDTYPE_PID); @@ -100,7 +99,7 @@ static int drm_clients_info(struct seq_file *m, void *data) task ? task->comm : "", pid_vnr(priv->pid), priv->minor->index, - is_current_master ? 'y' : 'n', + drm_is_current_master(priv) ? 'y' : 'n', priv->authenticated ? 'y' : 'n', from_kuid_munged(seq_user_ns(m), uid), priv->magic); diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 4334e466b..ab52f7fed 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -1835,20 +1835,11 @@ static void connector_bad_edid(struct drm_connector *connector, u8 *edid, int num_blocks) { int i; - u8 last_block; - - /* - * 0x7e in the EDID is the number of extension blocks. The EDID - * is 1 (base block) + num_ext_blocks big. That means we can think - * of 0x7e in the EDID of the _index_ of the last block in the - * combined chunk of memory. - */ - last_block = edid[0x7e]; + u8 num_of_ext = edid[0x7e]; /* Calculate real checksum for the last edid extension block data */ - if (last_block < num_blocks) - connector->real_edid_checksum = - drm_edid_block_checksum(edid + last_block * EDID_LENGTH); + connector->real_edid_checksum = + drm_edid_block_checksum(edid + num_of_ext * EDID_LENGTH); if (connector->bad_edid_counter++ && !drm_debug_enabled(DRM_UT_KMS)) return; @@ -4861,6 +4852,43 @@ static void drm_parse_vcdb(struct drm_connector *connector, const u8 *db) info->rgb_quant_range_selectable = true; } +#ifdef CONFIG_NO_GKI +static +void drm_get_max_frl_rate(int max_frl_rate, u8 *max_lanes, u8 *max_rate_per_lane) +{ + switch (max_frl_rate) { + case 1: + *max_lanes = 3; + *max_rate_per_lane = 3; + break; + case 2: + *max_lanes = 3; + *max_rate_per_lane = 6; + break; + case 3: + *max_lanes = 4; + *max_rate_per_lane = 6; + break; + case 4: + *max_lanes = 4; + *max_rate_per_lane = 8; + break; + case 5: + *max_lanes = 4; + *max_rate_per_lane = 10; + break; + case 6: + *max_lanes = 4; + *max_rate_per_lane = 12; + break; + case 0: + default: + *max_lanes = 0; + *max_rate_per_lane = 0; + } +} +#endif + static void drm_parse_ycbcr420_deep_color_info(struct drm_connector *connector, const u8 *db) { @@ -4914,6 +4942,76 @@ static void drm_parse_hdmi_forum_vsdb(struct drm_connector *connector, } } +#ifdef CONFIG_NO_GKI + if (hf_vsdb[7]) { + u8 max_frl_rate; + u8 dsc_max_frl_rate; + u8 dsc_max_slices; + struct drm_hdmi_dsc_cap *hdmi_dsc = &hdmi->dsc_cap; + + DRM_DEBUG_KMS("hdmi_21 sink detected. 
parsing edid\n"); + max_frl_rate = (hf_vsdb[7] & DRM_EDID_MAX_FRL_RATE_MASK) >> 4; + drm_get_max_frl_rate(max_frl_rate, &hdmi->max_lanes, + &hdmi->max_frl_rate_per_lane); + hdmi_dsc->v_1p2 = hf_vsdb[11] & DRM_EDID_DSC_1P2; + + if (hdmi_dsc->v_1p2) { + hdmi_dsc->native_420 = hf_vsdb[11] & DRM_EDID_DSC_NATIVE_420; + hdmi_dsc->all_bpp = hf_vsdb[11] & DRM_EDID_DSC_ALL_BPP; + + if (hf_vsdb[11] & DRM_EDID_DSC_16BPC) + hdmi_dsc->bpc_supported = 16; + else if (hf_vsdb[11] & DRM_EDID_DSC_12BPC) + hdmi_dsc->bpc_supported = 12; + else if (hf_vsdb[11] & DRM_EDID_DSC_10BPC) + hdmi_dsc->bpc_supported = 10; + else + hdmi_dsc->bpc_supported = 0; + + dsc_max_frl_rate = (hf_vsdb[12] & DRM_EDID_DSC_MAX_FRL_RATE_MASK) >> 4; + drm_get_max_frl_rate(dsc_max_frl_rate, &hdmi_dsc->max_lanes, + &hdmi_dsc->max_frl_rate_per_lane); + hdmi_dsc->total_chunk_kbytes = hf_vsdb[13] & DRM_EDID_DSC_TOTAL_CHUNK_KBYTES; + + dsc_max_slices = hf_vsdb[12] & DRM_EDID_DSC_MAX_SLICES; + switch (dsc_max_slices) { + case 1: + hdmi_dsc->max_slices = 1; + hdmi_dsc->clk_per_slice = 340; + break; + case 2: + hdmi_dsc->max_slices = 2; + hdmi_dsc->clk_per_slice = 340; + break; + case 3: + hdmi_dsc->max_slices = 4; + hdmi_dsc->clk_per_slice = 340; + break; + case 4: + hdmi_dsc->max_slices = 8; + hdmi_dsc->clk_per_slice = 340; + break; + case 5: + hdmi_dsc->max_slices = 8; + hdmi_dsc->clk_per_slice = 400; + break; + case 6: + hdmi_dsc->max_slices = 12; + hdmi_dsc->clk_per_slice = 400; + break; + case 7: + hdmi_dsc->max_slices = 16; + hdmi_dsc->clk_per_slice = 400; + break; + case 0: + default: + hdmi_dsc->max_slices = 0; + hdmi_dsc->clk_per_slice = 0; + } + } + } +#endif + drm_parse_ycbcr420_deep_color_info(connector, hf_vsdb); } diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c index 537e7de8e..01670305d 100644 --- a/drivers/gpu/drm/drm_file.c +++ b/drivers/gpu/drm/drm_file.c @@ -177,7 +177,6 @@ struct drm_file *drm_file_alloc(struct drm_minor *minor) init_waitqueue_head(&file->event_wait); file->event_space = 4096; /* set aside 4k for event buffer */ - spin_lock_init(&file->master_lookup_lock); mutex_init(&file->event_read_lock); if (drm_core_check_feature(dev, DRIVER_GEM)) @@ -776,20 +775,19 @@ void drm_event_cancel_free(struct drm_device *dev, EXPORT_SYMBOL(drm_event_cancel_free); /** - * drm_send_event_locked - send DRM event to file descriptor + * drm_send_event_helper - send DRM event to file descriptor * @dev: DRM device * @e: DRM event to deliver + * @timestamp: timestamp to set for the fence event in kernel's CLOCK_MONOTONIC + * time domain * - * This function sends the event @e, initialized with drm_event_reserve_init(), - * to its associated userspace DRM file. Callers must already hold - * &drm_device.event_lock, see drm_send_event() for the unlocked version. - * - * Note that the core will take care of unlinking and disarming events when the - * corresponding DRM file is closed. Drivers need not worry about whether the - * DRM file for this event still exists and can call this function upon - * completion of the asynchronous work unconditionally. + * This helper function sends the event @e, initialized with + * drm_event_reserve_init(), to its associated userspace DRM file. + * The timestamp variant of dma_fence_signal is used when the caller + * sends a valid timestamp. 
*/ -void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e) +void drm_send_event_helper(struct drm_device *dev, + struct drm_pending_event *e, ktime_t timestamp) { assert_spin_locked(&dev->event_lock); @@ -800,7 +798,10 @@ void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e) } if (e->fence) { - dma_fence_signal(e->fence); + if (timestamp) + dma_fence_signal_timestamp(e->fence, timestamp); + else + dma_fence_signal(e->fence); dma_fence_put(e->fence); } @@ -815,6 +816,48 @@ void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e) wake_up_interruptible_poll(&e->file_priv->event_wait, EPOLLIN | EPOLLRDNORM); } + +/** + * drm_send_event_timestamp_locked - send DRM event to file descriptor + * @dev: DRM device + * @e: DRM event to deliver + * @timestamp: timestamp to set for the fence event in kernel's CLOCK_MONOTONIC + * time domain + * + * This function sends the event @e, initialized with drm_event_reserve_init(), + * to its associated userspace DRM file. Callers must already hold + * &drm_device.event_lock. + * + * Note that the core will take care of unlinking and disarming events when the + * corresponding DRM file is closed. Drivers need not worry about whether the + * DRM file for this event still exists and can call this function upon + * completion of the asynchronous work unconditionally. + */ +void drm_send_event_timestamp_locked(struct drm_device *dev, + struct drm_pending_event *e, ktime_t timestamp) +{ + drm_send_event_helper(dev, e, timestamp); +} +EXPORT_SYMBOL(drm_send_event_timestamp_locked); + +/** + * drm_send_event_locked - send DRM event to file descriptor + * @dev: DRM device + * @e: DRM event to deliver + * + * This function sends the event @e, initialized with drm_event_reserve_init(), + * to its associated userspace DRM file. Callers must already hold + * &drm_device.event_lock, see drm_send_event() for the unlocked version. + * + * Note that the core will take care of unlinking and disarming events when the + * corresponding DRM file is closed. Drivers need not worry about whether the + * DRM file for this event still exists and can call this function upon + * completion of the asynchronous work unconditionally. 
+ */ +void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e) +{ + drm_send_event_helper(dev, e, 0); +} EXPORT_SYMBOL(drm_send_event_locked); /** @@ -837,7 +880,7 @@ void drm_send_event(struct drm_device *dev, struct drm_pending_event *e) unsigned long irqflags; spin_lock_irqsave(&dev->event_lock, irqflags); - drm_send_event_locked(dev, e); + drm_send_event_helper(dev, e, 0); spin_unlock_irqrestore(&dev->event_lock, irqflags); } EXPORT_SYMBOL(drm_send_event); diff --git a/drivers/gpu/drm/drm_fourcc.c b/drivers/gpu/drm/drm_fourcc.c index 8d1064061..e4c8aa361 100644 --- a/drivers/gpu/drm/drm_fourcc.c +++ b/drivers/gpu/drm/drm_fourcc.c @@ -282,6 +282,16 @@ const struct drm_format_info *__drm_format_info(u32 format) .num_planes = 2, .char_per_block = { 5, 5, 0 }, .block_w = { 4, 2, 0 }, .block_h = { 1, 1, 0 }, .hsub = 2, .vsub = 2, .is_yuv = true }, +#ifdef CONFIG_NO_GKI + { .format = DRM_FORMAT_NV20, .depth = 0, + .num_planes = 2, .char_per_block = { 5, 5, 0 }, + .block_w = { 4, 2, 0 }, .block_h = { 1, 1, 0 }, .hsub = 2, + .vsub = 1, .is_yuv = true }, + { .format = DRM_FORMAT_NV30, .depth = 0, + .num_planes = 2, .char_per_block = { 5, 5, 0 }, + .block_w = { 4, 2, 0 }, .block_h = { 1, 1, 0 }, .hsub = 1, + .vsub = 1, .is_yuv = true }, +#endif { .format = DRM_FORMAT_Q410, .depth = 0, .num_planes = 3, .char_per_block = { 2, 2, 2 }, .block_w = { 1, 1, 1 }, .block_h = { 1, 1, 1 }, .hsub = 1, diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c index c160a45a4..73818ffa0 100644 --- a/drivers/gpu/drm/drm_ioctl.c +++ b/drivers/gpu/drm/drm_ioctl.c @@ -543,6 +543,7 @@ int drm_version(struct drm_device *dev, void *data, */ int drm_ioctl_permit(u32 flags, struct drm_file *file_priv) { +#ifndef CONFIG_DRM_IGNORE_IOTCL_PERMIT /* ROOT_ONLY is only for CAP_SYS_ADMIN */ if (unlikely((flags & DRM_ROOT_ONLY) && !capable(CAP_SYS_ADMIN))) return -EACCES; @@ -561,6 +562,7 @@ int drm_ioctl_permit(u32 flags, struct drm_file *file_priv) if (unlikely(!(flags & DRM_RENDER_ALLOW) && drm_is_render_client(file_priv))) return -EACCES; +#endif return 0; } @@ -684,9 +686,9 @@ static const struct drm_ioctl_desc drm_ioctls[] = { DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb_ioctl, 0), DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER), DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, 0), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_MAP_DUMB, drm_mode_mmap_dumb_ioctl, 0), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, 0), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_MAP_DUMB, drm_mode_mmap_dumb_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, DRM_RENDER_ALLOW), DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_GETPROPERTIES, drm_mode_obj_get_properties_ioctl, 0), DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_SETPROPERTY, drm_mode_obj_set_property_ioctl, DRM_MASTER), DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR2, drm_mode_cursor2_ioctl, DRM_MASTER), diff --git a/drivers/gpu/drm/drm_lease.c b/drivers/gpu/drm/drm_lease.c index aef226340..da4f085fc 100644 --- a/drivers/gpu/drm/drm_lease.c +++ b/drivers/gpu/drm/drm_lease.c @@ -107,19 +107,10 @@ static bool _drm_has_leased(struct drm_master *master, int id) */ bool _drm_lease_held(struct drm_file *file_priv, int id) { - bool ret; - struct drm_master *master; - - if (!file_priv) + if (!file_priv || !file_priv->master) 
return true; - master = drm_file_get_master(file_priv); - if (!master) - return true; - ret = _drm_lease_held_master(master, id); - drm_master_put(&master); - - return ret; + return _drm_lease_held_master(file_priv->master, id); } /** @@ -138,22 +129,13 @@ bool drm_lease_held(struct drm_file *file_priv, int id) struct drm_master *master; bool ret; - if (!file_priv) + if (!file_priv || !file_priv->master || !file_priv->master->lessor) return true; - master = drm_file_get_master(file_priv); - if (!master) - return true; - if (!master->lessor) { - ret = true; - goto out; - } + master = file_priv->master; mutex_lock(&master->dev->mode_config.idr_mutex); ret = _drm_lease_held_master(master, id); mutex_unlock(&master->dev->mode_config.idr_mutex); - -out: - drm_master_put(&master); return ret; } @@ -173,16 +155,10 @@ uint32_t drm_lease_filter_crtcs(struct drm_file *file_priv, uint32_t crtcs_in) int count_in, count_out; uint32_t crtcs_out = 0; - if (!file_priv) + if (!file_priv || !file_priv->master || !file_priv->master->lessor) return crtcs_in; - master = drm_file_get_master(file_priv); - if (!master) - return crtcs_in; - if (!master->lessor) { - crtcs_out = crtcs_in; - goto out; - } + master = file_priv->master; dev = master->dev; count_in = count_out = 0; @@ -201,9 +177,6 @@ uint32_t drm_lease_filter_crtcs(struct drm_file *file_priv, uint32_t crtcs_in) count_in++; } mutex_unlock(&master->dev->mode_config.idr_mutex); - -out: - drm_master_put(&master); return crtcs_out; } @@ -517,7 +490,7 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev, size_t object_count; int ret = 0; struct idr leases; - struct drm_master *lessor; + struct drm_master *lessor = lessor_priv->master; struct drm_master *lessee = NULL; struct file *lessee_file = NULL; struct file *lessor_file = lessor_priv->filp; @@ -529,6 +502,12 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev, if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EOPNOTSUPP; + /* Do not allow sub-leases */ + if (lessor->lessor) { + DRM_DEBUG_LEASE("recursive leasing not allowed\n"); + return -EINVAL; + } + /* need some objects */ if (cl->object_count == 0) { DRM_DEBUG_LEASE("no objects in lease\n"); @@ -540,22 +519,12 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev, return -EINVAL; } - lessor = drm_file_get_master(lessor_priv); - /* Do not allow sub-leases */ - if (lessor->lessor) { - DRM_DEBUG_LEASE("recursive leasing not allowed\n"); - ret = -EINVAL; - goto out_lessor; - } - object_count = cl->object_count; object_ids = memdup_user(u64_to_user_ptr(cl->object_ids), array_size(object_count, sizeof(__u32))); - if (IS_ERR(object_ids)) { - ret = PTR_ERR(object_ids); - goto out_lessor; - } + if (IS_ERR(object_ids)) + return PTR_ERR(object_ids); idr_init(&leases); @@ -566,15 +535,14 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev, if (ret) { DRM_DEBUG_LEASE("lease object lookup failed: %i\n", ret); idr_destroy(&leases); - goto out_lessor; + return ret; } /* Allocate a file descriptor for the lease */ fd = get_unused_fd_flags(cl->flags & (O_CLOEXEC | O_NONBLOCK)); if (fd < 0) { idr_destroy(&leases); - ret = fd; - goto out_lessor; + return fd; } DRM_DEBUG_LEASE("Creating lease\n"); @@ -610,7 +578,6 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev, /* Hook up the fd */ fd_install(fd, lessee_file); - drm_master_put(&lessor); DRM_DEBUG_LEASE("drm_mode_create_lease_ioctl succeeded\n"); return 0; @@ -620,8 +587,6 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev, out_leases: put_unused_fd(fd); -out_lessor: - 
drm_master_put(&lessor); DRM_DEBUG_LEASE("drm_mode_create_lease_ioctl failed: %d\n", ret); return ret; } @@ -644,7 +609,7 @@ int drm_mode_list_lessees_ioctl(struct drm_device *dev, struct drm_mode_list_lessees *arg = data; __u32 __user *lessee_ids = (__u32 __user *) (uintptr_t) (arg->lessees_ptr); __u32 count_lessees = arg->count_lessees; - struct drm_master *lessor, *lessee; + struct drm_master *lessor = lessor_priv->master, *lessee; int count; int ret = 0; @@ -655,7 +620,6 @@ int drm_mode_list_lessees_ioctl(struct drm_device *dev, if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EOPNOTSUPP; - lessor = drm_file_get_master(lessor_priv); DRM_DEBUG_LEASE("List lessees for %d\n", lessor->lessee_id); mutex_lock(&dev->mode_config.idr_mutex); @@ -679,7 +643,6 @@ int drm_mode_list_lessees_ioctl(struct drm_device *dev, arg->count_lessees = count; mutex_unlock(&dev->mode_config.idr_mutex); - drm_master_put(&lessor); return ret; } @@ -699,7 +662,7 @@ int drm_mode_get_lease_ioctl(struct drm_device *dev, struct drm_mode_get_lease *arg = data; __u32 __user *object_ids = (__u32 __user *) (uintptr_t) (arg->objects_ptr); __u32 count_objects = arg->count_objects; - struct drm_master *lessee; + struct drm_master *lessee = lessee_priv->master; struct idr *object_idr; int count; void *entry; @@ -713,7 +676,6 @@ int drm_mode_get_lease_ioctl(struct drm_device *dev, if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EOPNOTSUPP; - lessee = drm_file_get_master(lessee_priv); DRM_DEBUG_LEASE("get lease for %d\n", lessee->lessee_id); mutex_lock(&dev->mode_config.idr_mutex); @@ -741,7 +703,6 @@ int drm_mode_get_lease_ioctl(struct drm_device *dev, arg->count_objects = count; mutex_unlock(&dev->mode_config.idr_mutex); - drm_master_put(&lessee); return ret; } @@ -760,7 +721,7 @@ int drm_mode_revoke_lease_ioctl(struct drm_device *dev, void *data, struct drm_file *lessor_priv) { struct drm_mode_revoke_lease *arg = data; - struct drm_master *lessor; + struct drm_master *lessor = lessor_priv->master; struct drm_master *lessee; int ret = 0; @@ -770,7 +731,6 @@ int drm_mode_revoke_lease_ioctl(struct drm_device *dev, if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EOPNOTSUPP; - lessor = drm_file_get_master(lessor_priv); mutex_lock(&dev->mode_config.idr_mutex); lessee = _drm_find_lessee(lessor, arg->lessee_id); @@ -791,7 +751,6 @@ int drm_mode_revoke_lease_ioctl(struct drm_device *dev, fail: mutex_unlock(&dev->mode_config.idr_mutex); - drm_master_put(&lessor); return ret; } diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c index 19fb1d93a..3caf9ff34 100644 --- a/drivers/gpu/drm/drm_mipi_dsi.c +++ b/drivers/gpu/drm/drm_mipi_dsi.c @@ -356,6 +356,7 @@ static ssize_t mipi_dsi_device_transfer(struct mipi_dsi_device *dsi, if (dsi->mode_flags & MIPI_DSI_MODE_LPM) msg->flags |= MIPI_DSI_MSG_USE_LPM; + msg->flags |= MIPI_DSI_MSG_LASTCOMMAND; return ops->transfer(dsi->host, msg); } diff --git a/drivers/gpu/drm/drm_mode_config.c b/drivers/gpu/drm/drm_mode_config.c index fad2c1181..58050d4ae 100644 --- a/drivers/gpu/drm/drm_mode_config.c +++ b/drivers/gpu/drm/drm_mode_config.c @@ -364,6 +364,22 @@ static int drm_mode_create_standard_properties(struct drm_device *dev) return -ENOMEM; dev->mode_config.gamma_lut_size_property = prop; +#if defined(CONFIG_ROCKCHIP_DRM_CUBIC_LUT) + prop = drm_property_create(dev, + DRM_MODE_PROP_BLOB, + "CUBIC_LUT", 0); + if (!prop) + return -ENOMEM; + dev->mode_config.cubic_lut_property = prop; + + prop = drm_property_create_range(dev, + 
DRM_MODE_PROP_IMMUTABLE, + "CUBIC_LUT_SIZE", 0, UINT_MAX); + if (!prop) + return -ENOMEM; + dev->mode_config.cubic_lut_size_property = prop; +#endif + prop = drm_property_create(dev, DRM_MODE_PROP_IMMUTABLE | DRM_MODE_PROP_BLOB, "IN_FORMATS", 0); diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c index 0f99e5453..d42c7310b 100644 --- a/drivers/gpu/drm/drm_modes.c +++ b/drivers/gpu/drm/drm_modes.c @@ -1940,6 +1940,7 @@ void drm_mode_convert_to_umode(struct drm_mode_modeinfo *out, strncpy(out->name, in->name, DRM_DISPLAY_MODE_LEN); out->name[DRM_DISPLAY_MODE_LEN-1] = 0; } +EXPORT_SYMBOL_GPL(drm_mode_convert_to_umode); /** * drm_crtc_convert_umode - convert a modeinfo into a drm_display_mode @@ -2016,6 +2017,7 @@ int drm_mode_convert_umode(struct drm_device *dev, return 0; } +EXPORT_SYMBOL_GPL(drm_mode_convert_umode); /** * drm_mode_is_420_only - if a given videomode can be only supported in YCBCR420 diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c index 825499ea3..272e5cdd6 100644 --- a/drivers/gpu/drm/drm_prime.c +++ b/drivers/gpu/drm/drm_prime.c @@ -784,6 +784,28 @@ int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma) } EXPORT_SYMBOL(drm_gem_dmabuf_mmap); +/** + * drm_gem_dmabuf_get_uuid - dma_buf get_uuid implementation for GEM + * @dma_buf: buffer to query + * @uuid: uuid outparam + * + * Queries the buffer's virtio UUID. This can be used as the + * &dma_buf_ops.get_uuid callback. Calls into &drm_driver.gem_prime_get_uuid. + * + * Returns 0 on success or a negative error code on failure. + */ +int drm_gem_dmabuf_get_uuid(struct dma_buf *dma_buf, uuid_t *uuid) +{ + struct drm_gem_object *obj = dma_buf->priv; + struct drm_device *dev = obj->dev; + + if (!dev->driver->gem_prime_get_uuid) + return -ENODEV; + + return dev->driver->gem_prime_get_uuid(obj, uuid); +} +EXPORT_SYMBOL(drm_gem_dmabuf_get_uuid); + static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = { .cache_sgt_mapping = true, .attach = drm_gem_map_attach, @@ -794,6 +816,7 @@ static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = { .mmap = drm_gem_dmabuf_mmap, .vmap = drm_gem_dmabuf_vmap, .vunmap = drm_gem_dmabuf_vunmap, + .get_uuid = drm_gem_dmabuf_get_uuid, }; /** diff --git a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c index f135b7959..286edbe1b 100644 --- a/drivers/gpu/drm/drm_vblank.c +++ b/drivers/gpu/drm/drm_vblank.c @@ -1000,7 +1000,14 @@ static void send_vblank_event(struct drm_device *dev, break; } trace_drm_vblank_event_delivered(e->base.file_priv, e->pipe, seq); - drm_send_event_locked(dev, &e->base); + /* + * Use the same timestamp for any associated fence signal to avoid + * mismatch in timestamps for vsync & fence events triggered by the + * same HW event. Frameworks like SurfaceFlinger in Android expects the + * retire-fence timestamp to match exactly with HW vsync as it uses it + * for its software vsync modeling. + */ + drm_send_event_timestamp_locked(dev, &e->base, now); } /** diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c index 1a87cc445..7c7d10225 100644 --- a/drivers/gpu/drm/panel/panel-simple.c +++ b/drivers/gpu/drm/panel/panel-simple.c @@ -30,6 +30,7 @@ #include #include