1/*
2 * SPDX-License-Identifier: GPL-2.0
3 * Copyright (c) 2018, The Linux Foundation
4 */
5
6#include <linux/clk.h>
7#include <linux/delay.h>
8#include <linux/interconnect.h>
9#include <linux/irq.h>
10#include <linux/irqchip.h>
11#include <linux/irqdesc.h>
12#include <linux/irqchip/chained_irq.h>
13#include <linux/of_platform.h>
14#include <linux/platform_device.h>
15#include <linux/pm_runtime.h>
16#include <linux/reset.h>
17
18#include "msm_mdss.h"
19#include "msm_kms.h"
20
21#define HW_REV				0x0
22#define HW_INTR_STATUS			0x0010
23
24#define UBWC_DEC_HW_VERSION		0x58
25#define UBWC_STATIC			0x144
26#define UBWC_CTRL_2			0x150
27#define UBWC_PREDICTION_MODE		0x154
28
29#define MIN_IB_BW	400000000UL /* Min ib vote 400MB */
30
/*
 * Per-device state for the MDSS top-level wrapper block.
 */
struct msm_mdss {
	struct device *dev;

	void __iomem *mmio;		/* mapped "mdss" (or "mdss_phys") register region */
	struct clk_bulk_data *clocks;	/* bulk clock handles, num_clocks entries */
	size_t num_clocks;
	bool is_mdp5;			/* legacy MDP5 wrapper ("qcom,mdss") vs DPU */
	struct {
		/* software bookkeeping of which hwirqs are unmasked */
		unsigned long enabled_mask;
		/* linear domain for the 32 bits of HW_INTR_STATUS */
		struct irq_domain *domain;
	} irq_controller;
	/* per-SoC UBWC configuration; NULL for MDP5 / unmatched compatibles */
	const struct msm_mdss_data *mdss_data;
	struct icc_path *path[2];	/* interconnect paths to memory */
	u32 num_paths;			/* number of valid entries in path[] */
};
46
47static int msm_mdss_parse_data_bus_icc_path(struct device *dev,
48					    struct msm_mdss *msm_mdss)
49{
50	struct icc_path *path0;
51	struct icc_path *path1;
52
53	path0 = of_icc_get(dev, "mdp0-mem");
54	if (IS_ERR_OR_NULL(path0))
55		return PTR_ERR_OR_ZERO(path0);
56
57	msm_mdss->path[0] = path0;
58	msm_mdss->num_paths = 1;
59
60	path1 = of_icc_get(dev, "mdp1-mem");
61	if (!IS_ERR_OR_NULL(path1)) {
62		msm_mdss->path[1] = path1;
63		msm_mdss->num_paths++;
64	}
65
66	return 0;
67}
68
69static void msm_mdss_put_icc_path(void *data)
70{
71	struct msm_mdss *msm_mdss = data;
72	int i;
73
74	for (i = 0; i < msm_mdss->num_paths; i++)
75		icc_put(msm_mdss->path[i]);
76}
77
78static void msm_mdss_icc_request_bw(struct msm_mdss *msm_mdss, unsigned long bw)
79{
80	int i;
81
82	for (i = 0; i < msm_mdss->num_paths; i++)
83		icc_set_bw(msm_mdss->path[i], 0, Bps_to_icc(bw));
84}
85
86static void msm_mdss_irq(struct irq_desc *desc)
87{
88	struct msm_mdss *msm_mdss = irq_desc_get_handler_data(desc);
89	struct irq_chip *chip = irq_desc_get_chip(desc);
90	u32 interrupts;
91
92	chained_irq_enter(chip, desc);
93
94	interrupts = readl_relaxed(msm_mdss->mmio + HW_INTR_STATUS);
95
96	while (interrupts) {
97		irq_hw_number_t hwirq = fls(interrupts) - 1;
98		int rc;
99
100		rc = generic_handle_domain_irq(msm_mdss->irq_controller.domain,
101					       hwirq);
102		if (rc < 0) {
103			dev_err(msm_mdss->dev, "handle irq fail: irq=%lu rc=%d\n",
104				  hwirq, rc);
105			break;
106		}
107
108		interrupts &= ~(1 << hwirq);
109	}
110
111	chained_irq_exit(chip, desc);
112}
113
/*
 * irq_chip .irq_mask callback: clear the hwirq's bit in the software
 * enabled_mask. No hardware mask register is written here; only the
 * bookkeeping in irq_controller is updated.
 */
static void msm_mdss_irq_mask(struct irq_data *irqd)
{
	struct msm_mdss *msm_mdss = irq_data_get_irq_chip_data(irqd);

	/* order prior accesses before the relaxed atomic bitop */
	smp_mb__before_atomic();
	clear_bit(irqd->hwirq, &msm_mdss->irq_controller.enabled_mask);
	/* make the cleared bit visible before subsequent accesses */
	smp_mb__after_atomic();
}
124
/*
 * irq_chip .irq_unmask callback: mirror of msm_mdss_irq_mask() — set the
 * hwirq's bit in the software enabled_mask, with barriers pairing the
 * relaxed atomic bitop.
 */
static void msm_mdss_irq_unmask(struct irq_data *irqd)
{
	struct msm_mdss *msm_mdss = irq_data_get_irq_chip_data(irqd);

	/* order prior accesses before the relaxed atomic bitop */
	smp_mb__before_atomic();
	set_bit(irqd->hwirq, &msm_mdss->irq_controller.enabled_mask);
	/* make the set bit visible before subsequent accesses */
	smp_mb__after_atomic();
}
135
/* irq_chip backing the interrupts mapped through our linear domain. */
static struct irq_chip msm_mdss_irq_chip = {
	.name = "msm_mdss",
	.irq_mask = msm_mdss_irq_mask,
	.irq_unmask = msm_mdss_irq_unmask,
};
141
/* Distinct lockdep classes for irqs mapped through this domain. */
static struct lock_class_key msm_mdss_lock_key, msm_mdss_request_key;
143
/*
 * irq_domain .map callback: wire a freshly-mapped virq to our irq_chip,
 * level-type flow handler, and per-domain lockdep classes.
 */
static int msm_mdss_irqdomain_map(struct irq_domain *domain,
		unsigned int irq, irq_hw_number_t hwirq)
{
	struct msm_mdss *msm_mdss = domain->host_data;

	irq_set_lockdep_class(irq, &msm_mdss_lock_key, &msm_mdss_request_key);
	irq_set_chip_and_handler(irq, &msm_mdss_irq_chip, handle_level_irq);

	/* chip_data lets the mask/unmask callbacks recover msm_mdss */
	return irq_set_chip_data(irq, msm_mdss);
}
154
/* One-cell DT interrupt specifiers map directly to hwirq numbers. */
static const struct irq_domain_ops msm_mdss_irqdomain_ops = {
	.map = msm_mdss_irqdomain_map,
	.xlate = irq_domain_xlate_onecell,
};
159
160static int _msm_mdss_irq_domain_add(struct msm_mdss *msm_mdss)
161{
162	struct device *dev;
163	struct irq_domain *domain;
164
165	dev = msm_mdss->dev;
166
167	domain = irq_domain_add_linear(dev->of_node, 32,
168			&msm_mdss_irqdomain_ops, msm_mdss);
169	if (!domain) {
170		dev_err(dev, "failed to add irq_domain\n");
171		return -EINVAL;
172	}
173
174	msm_mdss->irq_controller.enabled_mask = 0;
175	msm_mdss->irq_controller.domain = domain;
176
177	return 0;
178}
179
/*
 * Program the UBWC 2.x decoder: the whole configuration is a single
 * pre-computed UBWC_STATIC value from the per-SoC data.
 */
static void msm_mdss_setup_ubwc_dec_20(struct msm_mdss *msm_mdss)
{
	const struct msm_mdss_data *data = msm_mdss->mdss_data;

	writel_relaxed(data->ubwc_static, msm_mdss->mmio + UBWC_STATIC);
}
186
/*
 * Program the UBWC 3.x decoder's UBWC_STATIC register. Field layout as
 * written here: swizzle in bit 0, highest bank bit in bits 5:4, macrotile
 * mode in bit 12, plus a per-encoder-version flag (bit 10 for UBWC 3.0
 * encoders, bit 8 for UBWC 1.0).
 */
static void msm_mdss_setup_ubwc_dec_30(struct msm_mdss *msm_mdss)
{
	const struct msm_mdss_data *data = msm_mdss->mdss_data;
	u32 value = (data->ubwc_swizzle & 0x1) |
		    (data->highest_bank_bit & 0x3) << 4 |
		    (data->macrotile_mode & 0x1) << 12;

	if (data->ubwc_enc_version == UBWC_3_0)
		value |= BIT(10);

	if (data->ubwc_enc_version == UBWC_1_0)
		value |= BIT(8);

	writel_relaxed(value, msm_mdss->mmio + UBWC_STATIC);
}
202
/*
 * Program the UBWC 4.x decoder. UBWC_STATIC layout as written here:
 * swizzle in bits 2:0, ubwc_static flag in bit 3, highest bank bit in
 * bits 6:4, macrotile mode in bit 12. UBWC_CTRL_2 / UBWC_PREDICTION_MODE
 * are then set according to the encoder/decoder version pairing.
 */
static void msm_mdss_setup_ubwc_dec_40(struct msm_mdss *msm_mdss)
{
	const struct msm_mdss_data *data = msm_mdss->mdss_data;
	u32 value = (data->ubwc_swizzle & 0x7) |
		    (data->ubwc_static & 0x1) << 3 |
		    (data->highest_bank_bit & 0x7) << 4 |
		    (data->macrotile_mode & 0x1) << 12;

	writel_relaxed(value, msm_mdss->mmio + UBWC_STATIC);

	if (data->ubwc_enc_version == UBWC_3_0) {
		/* UBWC 3.0 encoder feeding a 4.x decoder */
		writel_relaxed(1, msm_mdss->mmio + UBWC_CTRL_2);
		writel_relaxed(0, msm_mdss->mmio + UBWC_PREDICTION_MODE);
	} else {
		if (data->ubwc_dec_version == UBWC_4_3)
			writel_relaxed(3, msm_mdss->mmio + UBWC_CTRL_2);
		else
			writel_relaxed(2, msm_mdss->mmio + UBWC_CTRL_2);
		writel_relaxed(1, msm_mdss->mmio + UBWC_PREDICTION_MODE);
	}
}
224
225const struct msm_mdss_data *msm_mdss_get_mdss_data(struct device *dev)
226{
227	struct msm_mdss *mdss;
228
229	if (!dev)
230		return ERR_PTR(-EINVAL);
231
232	mdss = dev_get_drvdata(dev);
233
234	return mdss->mdss_data;
235}
236
/*
 * Power up the MDSS wrapper: vote a minimum interconnect bandwidth,
 * enable the bulk clocks, and program the UBWC decoder registers for this
 * SoC. Runtime-PM resume path. Returns 0 on success or the clock enable
 * error.
 */
static int msm_mdss_enable(struct msm_mdss *msm_mdss)
{
	int ret;

	/*
	 * Several components have AXI clocks that can only be turned on if
	 * the interconnect is enabled (non-zero bandwidth). Let's make sure
	 * that the interconnects are at least at a minimum amount.
	 */
	msm_mdss_icc_request_bw(msm_mdss, MIN_IB_BW);

	ret = clk_bulk_prepare_enable(msm_mdss->num_clocks, msm_mdss->clocks);
	if (ret) {
		dev_err(msm_mdss->dev, "clock enable failed, ret:%d\n", ret);
		return ret;
	}

	/*
	 * Register access requires MDSS_MDP_CLK, which is not enabled by the
	 * mdss on mdp5 hardware. Skip it for now.
	 */
	if (msm_mdss->is_mdp5 || !msm_mdss->mdss_data)
		return 0;

	/*
	 * ubwc config is part of the "mdss" region which is not accessible
	 * from the rest of the driver. hardcode known configurations here
	 *
	 * Decoder version can be read from the UBWC_DEC_HW_VERSION reg,
	 * UBWC_n and the rest of params comes from hw data.
	 */
	switch (msm_mdss->mdss_data->ubwc_dec_version) {
	case 0: /* no UBWC */
	case UBWC_1_0:
		/* do nothing */
		break;
	case UBWC_2_0:
		msm_mdss_setup_ubwc_dec_20(msm_mdss);
		break;
	case UBWC_3_0:
		msm_mdss_setup_ubwc_dec_30(msm_mdss);
		break;
	case UBWC_4_0:
	case UBWC_4_3:
		msm_mdss_setup_ubwc_dec_40(msm_mdss);
		break;
	default:
		/* unknown version: log diagnostics but do not fail enable */
		dev_err(msm_mdss->dev, "Unsupported UBWC decoder version %x\n",
			msm_mdss->mdss_data->ubwc_dec_version);
		dev_err(msm_mdss->dev, "HW_REV: 0x%x\n",
			readl_relaxed(msm_mdss->mmio + HW_REV));
		dev_err(msm_mdss->dev, "UBWC_DEC_HW_VERSION: 0x%x\n",
			readl_relaxed(msm_mdss->mmio + UBWC_DEC_HW_VERSION));
		break;
	}

	/* ret is 0 here: clock enable succeeded and nothing below can fail */
	return ret;
}
295
/*
 * Counterpart of msm_mdss_enable(): gate the clocks, then drop the
 * interconnect vote (clocks first — see the AXI-clock note in enable).
 */
static int msm_mdss_disable(struct msm_mdss *msm_mdss)
{
	clk_bulk_disable_unprepare(msm_mdss->num_clocks, msm_mdss->clocks);
	msm_mdss_icc_request_bw(msm_mdss, 0);

	return 0;
}
303
/*
 * Undo the non-devm parts of msm_mdss_init(): suspend and disable runtime
 * PM, tear down the irq domain, and detach the chained handler from the
 * wrapper interrupt line.
 */
static void msm_mdss_destroy(struct msm_mdss *msm_mdss)
{
	struct platform_device *pdev = to_platform_device(msm_mdss->dev);
	int irq;

	pm_runtime_suspend(msm_mdss->dev);
	pm_runtime_disable(msm_mdss->dev);
	irq_domain_remove(msm_mdss->irq_controller.domain);
	msm_mdss->irq_controller.domain = NULL;
	irq = platform_get_irq(pdev, 0);
	irq_set_chained_handler_and_data(irq, NULL, NULL);
}
316
317static int msm_mdss_reset(struct device *dev)
318{
319	struct reset_control *reset;
320
321	reset = reset_control_get_optional_exclusive(dev, NULL);
322	if (!reset) {
323		/* Optional reset not specified */
324		return 0;
325	} else if (IS_ERR(reset)) {
326		return dev_err_probe(dev, PTR_ERR(reset),
327				     "failed to acquire mdss reset\n");
328	}
329
330	reset_control_assert(reset);
331	/*
332	 * Tests indicate that reset has to be held for some period of time,
333	 * make it one frame in a typical system
334	 */
335	msleep(20);
336	reset_control_deassert(reset);
337
338	reset_control_put(reset);
339
340	return 0;
341}
342
343/*
344 * MDP5 MDSS uses at most three specified clocks.
345 */
346#define MDP5_MDSS_NUM_CLOCKS 3
347static int mdp5_mdss_parse_clock(struct platform_device *pdev, struct clk_bulk_data **clocks)
348{
349	struct clk_bulk_data *bulk;
350	int num_clocks = 0;
351	int ret;
352
353	if (!pdev)
354		return -EINVAL;
355
356	bulk = devm_kcalloc(&pdev->dev, MDP5_MDSS_NUM_CLOCKS, sizeof(struct clk_bulk_data), GFP_KERNEL);
357	if (!bulk)
358		return -ENOMEM;
359
360	bulk[num_clocks++].id = "iface";
361	bulk[num_clocks++].id = "bus";
362	bulk[num_clocks++].id = "vsync";
363
364	ret = devm_clk_bulk_get_optional(&pdev->dev, num_clocks, bulk);
365	if (ret)
366		return ret;
367
368	*clocks = bulk;
369
370	return num_clocks;
371}
372
/*
 * Shared init for MDP5 and DPU wrappers: reset the block, map the mdss
 * register region, acquire interconnect paths and clocks, install the
 * chained interrupt handler on a fresh linear irq domain, and enable
 * runtime PM. Allocations are devm-managed; irq/pm teardown happens in
 * msm_mdss_destroy(). Returns the new msm_mdss or an ERR_PTR.
 */
static struct msm_mdss *msm_mdss_init(struct platform_device *pdev, bool is_mdp5)
{
	struct msm_mdss *msm_mdss;
	int ret;
	int irq;

	ret = msm_mdss_reset(&pdev->dev);
	if (ret)
		return ERR_PTR(ret);

	msm_mdss = devm_kzalloc(&pdev->dev, sizeof(*msm_mdss), GFP_KERNEL);
	if (!msm_mdss)
		return ERR_PTR(-ENOMEM);

	/* MDP5 DT names the region "mdss_phys"; DPU names it "mdss" */
	msm_mdss->mmio = devm_platform_ioremap_resource_byname(pdev, is_mdp5 ? "mdss_phys" : "mdss");
	if (IS_ERR(msm_mdss->mmio))
		return ERR_CAST(msm_mdss->mmio);

	dev_dbg(&pdev->dev, "mapped mdss address space @%pK\n", msm_mdss->mmio);

	ret = msm_mdss_parse_data_bus_icc_path(&pdev->dev, msm_mdss);
	if (ret)
		return ERR_PTR(ret);
	ret = devm_add_action_or_reset(&pdev->dev, msm_mdss_put_icc_path, msm_mdss);
	if (ret)
		return ERR_PTR(ret);

	if (is_mdp5)
		ret = mdp5_mdss_parse_clock(pdev, &msm_mdss->clocks);
	else
		ret = devm_clk_bulk_get_all(&pdev->dev, &msm_mdss->clocks);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to parse clocks, ret=%d\n", ret);
		return ERR_PTR(ret);
	}
	/* both parse paths return the number of clocks on success */
	msm_mdss->num_clocks = ret;
	msm_mdss->is_mdp5 = is_mdp5;

	msm_mdss->dev = &pdev->dev;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return ERR_PTR(irq);

	ret = _msm_mdss_irq_domain_add(msm_mdss);
	if (ret)
		return ERR_PTR(ret);

	irq_set_chained_handler_and_data(irq, msm_mdss_irq,
					 msm_mdss);

	pm_runtime_enable(&pdev->dev);

	return msm_mdss;
}
428
429static int __maybe_unused mdss_runtime_suspend(struct device *dev)
430{
431	struct msm_mdss *mdss = dev_get_drvdata(dev);
432
433	DBG("");
434
435	return msm_mdss_disable(mdss);
436}
437
438static int __maybe_unused mdss_runtime_resume(struct device *dev)
439{
440	struct msm_mdss *mdss = dev_get_drvdata(dev);
441
442	DBG("");
443
444	return msm_mdss_enable(mdss);
445}
446
447static int __maybe_unused mdss_pm_suspend(struct device *dev)
448{
449
450	if (pm_runtime_suspended(dev))
451		return 0;
452
453	return mdss_runtime_suspend(dev);
454}
455
456static int __maybe_unused mdss_pm_resume(struct device *dev)
457{
458	if (pm_runtime_suspended(dev))
459		return 0;
460
461	return mdss_runtime_resume(dev);
462}
463
/* System sleep and runtime PM callbacks for the wrapper device. */
static const struct dev_pm_ops mdss_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(mdss_pm_suspend, mdss_pm_resume)
	SET_RUNTIME_PM_OPS(mdss_runtime_suspend, mdss_runtime_resume, NULL)
};
468
/*
 * Platform probe: set up the wrapper, attach the per-SoC match data (NULL
 * for the bare "qcom,mdss" MDP5 compatible) and populate the child
 * display devices from DT.
 */
static int mdss_probe(struct platform_device *pdev)
{
	struct msm_mdss *mdss;
	bool is_mdp5 = of_device_is_compatible(pdev->dev.of_node, "qcom,mdss");
	struct device *dev = &pdev->dev;
	int ret;

	mdss = msm_mdss_init(pdev, is_mdp5);
	if (IS_ERR(mdss))
		return PTR_ERR(mdss);

	mdss->mdss_data = of_device_get_match_data(&pdev->dev);

	platform_set_drvdata(pdev, mdss);

	/*
	 * MDP5/DPU based devices don't have a flat hierarchy. There is a top
	 * level parent: MDSS, and children: MDP5/DPU, DSI, HDMI, eDP etc.
	 * Populate the children devices, find the MDP5/DPU node, and then add
	 * the interfaces to our components list.
	 */
	ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
	if (ret) {
		DRM_DEV_ERROR(dev, "failed to populate children devices\n");
		msm_mdss_destroy(mdss);
		return ret;
	}

	return 0;
}
499
500static int mdss_remove(struct platform_device *pdev)
501{
502	struct msm_mdss *mdss = platform_get_drvdata(pdev);
503
504	of_platform_depopulate(&pdev->dev);
505
506	msm_mdss_destroy(mdss);
507
508	return 0;
509}
510
/*
 * Per-SoC UBWC encoder/decoder configuration, matched via mdss_dt_match
 * below. Field semantics follow the UBWC_STATIC / UBWC_CTRL_2 programming
 * in msm_mdss_setup_ubwc_dec_*().
 */
static const struct msm_mdss_data msm8998_data = {
	.ubwc_enc_version = UBWC_1_0,
	.ubwc_dec_version = UBWC_1_0,
	.highest_bank_bit = 2,
};

static const struct msm_mdss_data qcm2290_data = {
	/* no UBWC */
	.highest_bank_bit = 0x2,
};

static const struct msm_mdss_data sc7180_data = {
	.ubwc_enc_version = UBWC_2_0,
	.ubwc_dec_version = UBWC_2_0,
	.ubwc_static = 0x1e,
	.highest_bank_bit = 0x3,
};

static const struct msm_mdss_data sc7280_data = {
	.ubwc_enc_version = UBWC_3_0,
	.ubwc_dec_version = UBWC_4_0,
	.ubwc_swizzle = 6,
	.ubwc_static = 1,
	.highest_bank_bit = 1,
	.macrotile_mode = 1,
};

static const struct msm_mdss_data sc8180x_data = {
	.ubwc_enc_version = UBWC_3_0,
	.ubwc_dec_version = UBWC_3_0,
	.highest_bank_bit = 3,
	.macrotile_mode = 1,
};

static const struct msm_mdss_data sc8280xp_data = {
	.ubwc_enc_version = UBWC_4_0,
	.ubwc_dec_version = UBWC_4_0,
	.ubwc_swizzle = 6,
	.ubwc_static = 1,
	.highest_bank_bit = 2,
	.macrotile_mode = 1,
};

static const struct msm_mdss_data sdm845_data = {
	.ubwc_enc_version = UBWC_2_0,
	.ubwc_dec_version = UBWC_2_0,
	.highest_bank_bit = 2,
};

static const struct msm_mdss_data sm6350_data = {
	.ubwc_enc_version = UBWC_2_0,
	.ubwc_dec_version = UBWC_2_0,
	.ubwc_swizzle = 6,
	.ubwc_static = 0x1e,
	.highest_bank_bit = 1,
};

static const struct msm_mdss_data sm8150_data = {
	.ubwc_enc_version = UBWC_3_0,
	.ubwc_dec_version = UBWC_3_0,
	.highest_bank_bit = 2,
};

static const struct msm_mdss_data sm6115_data = {
	.ubwc_enc_version = UBWC_1_0,
	.ubwc_dec_version = UBWC_2_0,
	.ubwc_swizzle = 7,
	.ubwc_static = 0x11f,
	.highest_bank_bit = 0x1,
};

static const struct msm_mdss_data sm6125_data = {
	.ubwc_enc_version = UBWC_1_0,
	.ubwc_dec_version = UBWC_3_0,
	.ubwc_swizzle = 1,
	.highest_bank_bit = 1,
};

static const struct msm_mdss_data sm8250_data = {
	.ubwc_enc_version = UBWC_4_0,
	.ubwc_dec_version = UBWC_4_0,
	.ubwc_swizzle = 6,
	.ubwc_static = 1,
	/* TODO: highest_bank_bit = 2 for LP_DDR4 */
	.highest_bank_bit = 3,
	.macrotile_mode = 1,
};

static const struct msm_mdss_data sm8550_data = {
	.ubwc_enc_version = UBWC_4_0,
	.ubwc_dec_version = UBWC_4_3,
	.ubwc_swizzle = 6,
	.ubwc_static = 1,
	/* TODO: highest_bank_bit = 2 for LP_DDR4 */
	.highest_bank_bit = 3,
	.macrotile_mode = 1,
};
608static const struct of_device_id mdss_dt_match[] = {
609	{ .compatible = "qcom,mdss" },
610	{ .compatible = "qcom,msm8998-mdss", .data = &msm8998_data },
611	{ .compatible = "qcom,qcm2290-mdss", .data = &qcm2290_data },
612	{ .compatible = "qcom,sdm845-mdss", .data = &sdm845_data },
613	{ .compatible = "qcom,sc7180-mdss", .data = &sc7180_data },
614	{ .compatible = "qcom,sc7280-mdss", .data = &sc7280_data },
615	{ .compatible = "qcom,sc8180x-mdss", .data = &sc8180x_data },
616	{ .compatible = "qcom,sc8280xp-mdss", .data = &sc8280xp_data },
617	{ .compatible = "qcom,sm6115-mdss", .data = &sm6115_data },
618	{ .compatible = "qcom,sm6125-mdss", .data = &sm6125_data },
619	{ .compatible = "qcom,sm6350-mdss", .data = &sm6350_data },
620	{ .compatible = "qcom,sm6375-mdss", .data = &sm6350_data },
621	{ .compatible = "qcom,sm8150-mdss", .data = &sm8150_data },
622	{ .compatible = "qcom,sm8250-mdss", .data = &sm8250_data },
623	{ .compatible = "qcom,sm8350-mdss", .data = &sm8250_data },
624	{ .compatible = "qcom,sm8450-mdss", .data = &sm8250_data },
625	{ .compatible = "qcom,sm8550-mdss", .data = &sm8550_data },
626	{}
627};
628MODULE_DEVICE_TABLE(of, mdss_dt_match);
629
/* Platform driver for the MDSS wrapper device. */
static struct platform_driver mdss_platform_driver = {
	.probe      = mdss_probe,
	.remove     = mdss_remove,
	.driver     = {
		.name   = "msm-mdss",
		.of_match_table = mdss_dt_match,
		.pm     = &mdss_pm_ops,
	},
};
639
/*
 * Register the mdss platform driver (called from the msm driver init).
 * NOTE(review): the return value of platform_driver_register() is
 * discarded here — confirm callers do not need registration failures
 * propagated.
 */
void __init msm_mdss_register(void)
{
	platform_driver_register(&mdss_platform_driver);
}
644
/* Unregister the mdss platform driver (called from the msm driver exit). */
void __exit msm_mdss_unregister(void)
{
	platform_driver_unregister(&mdss_platform_driver);
}
649