// SPDX-License-Identifier: GPL-2.0-only
/*
 * (C) COPYRIGHT 2016 ARM Limited. All rights reserved.
 * Author: Liviu Dudau <Liviu.Dudau@arm.com>
 *
 * ARM Mali DP plane manipulation routines.
 */

#include <linux/iommu.h>
#include <linux/platform_device.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_print.h>

#include "malidp_hw.h"
#include "malidp_drv.h"

/* Layer specific register offsets */
#define MALIDP_LAYER_FORMAT		0x000
#define   LAYER_FORMAT_MASK		0x3f
#define MALIDP_LAYER_CONTROL		0x004
#define   LAYER_ENABLE			(1 << 0)
#define   LAYER_FLOWCFG_MASK		7
#define   LAYER_FLOWCFG(x)		(((x) & LAYER_FLOWCFG_MASK) << 1)
#define     LAYER_FLOWCFG_SCALE_SE	3
#define   LAYER_ROT_OFFSET		8
#define   LAYER_H_FLIP			(1 << 10)
#define   LAYER_V_FLIP			(1 << 11)
#define   LAYER_ROT_MASK		(0xf << 8)
#define   LAYER_COMP_MASK		(0x3 << 12)
#define   LAYER_COMP_PIXEL		(0x3 << 12)
#define   LAYER_COMP_PLANE		(0x2 << 12)
#define   LAYER_PMUL_ENABLE		(0x1 << 14)
#define   LAYER_ALPHA_OFFSET		(16)
#define   LAYER_ALPHA_MASK		(0xff)
#define   LAYER_ALPHA(x)		(((x) & LAYER_ALPHA_MASK) << LAYER_ALPHA_OFFSET)
#define MALIDP_LAYER_COMPOSE		0x008
#define MALIDP_LAYER_SIZE		0x00c
#define   LAYER_H_VAL(x)		(((x) & 0x1fff) << 0)
#define   LAYER_V_VAL(x)		(((x) & 0x1fff) << 16)
#define MALIDP_LAYER_COMP_SIZE		0x010
#define MALIDP_LAYER_OFFSET		0x014
#define MALIDP550_LS_ENABLE		0x01c
#define MALIDP550_LS_R1_IN_SIZE		0x020

#define MODIFIERS_COUNT_MAX		15

/*
 * This 4-entry look-up-table is used to determine the full 8-bit alpha value
 * for formats with 1- or 2-bit alpha channels.
 * We set it to give 100%/0% opacity for 1-bit formats and 100%/66%/33%/0%
 * opacity for 2-bit formats.
 */
#define MALIDP_ALPHA_LUT 0xffaa5500
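/*
 * Worked example, reading the value above byte by byte: entry N
 * (bits [8*N+7:8*N]) presumably supplies the expanded alpha for a raw 2-bit
 * value of N, so 0xffaa5500 decodes to 0x00 (0%), 0x55 (~33%), 0xaa (~66%)
 * and 0xff (100%) for values 0..3.
 */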

/* page sizes the MMU prefetcher can support */
#define MALIDP_MMU_PREFETCH_PARTIAL_PGSIZES	(SZ_4K | SZ_64K)
#define MALIDP_MMU_PREFETCH_FULL_PGSIZES	(SZ_1M | SZ_2M)

/* readahead for partial-frame prefetch */
#define MALIDP_MMU_PREFETCH_READAHEAD		8

/*
 * Replicate what the default ->reset hook does: free the state pointer and
 * allocate a new empty object. We just need enough space to store
 * a malidp_plane_state instead of a drm_plane_state.
 */
static void malidp_plane_reset(struct drm_plane *plane)
{
	struct malidp_plane_state *state = to_malidp_plane_state(plane->state);

	if (state)
		__drm_atomic_helper_plane_destroy_state(&state->base);
	kfree(state);
	plane->state = NULL;
	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (state)
		__drm_atomic_helper_plane_reset(plane, &state->base);
}

static struct
drm_plane_state *malidp_duplicate_plane_state(struct drm_plane *plane)
{
	struct malidp_plane_state *state, *m_state;

	if (!plane->state)
		return NULL;

	state = kmalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	m_state = to_malidp_plane_state(plane->state);
	__drm_atomic_helper_plane_duplicate_state(plane, &state->base);
	state->rotmem_size = m_state->rotmem_size;
	state->format = m_state->format;
	state->n_planes = m_state->n_planes;

	state->mmu_prefetch_mode = m_state->mmu_prefetch_mode;
	state->mmu_prefetch_pgsize = m_state->mmu_prefetch_pgsize;

	return &state->base;
}

static void malidp_destroy_plane_state(struct drm_plane *plane,
				       struct drm_plane_state *state)
{
	struct malidp_plane_state *m_state = to_malidp_plane_state(state);

	__drm_atomic_helper_plane_destroy_state(state);
	kfree(m_state);
}

static const char * const prefetch_mode_names[] = {
	[MALIDP_PREFETCH_MODE_NONE] = "MMU_PREFETCH_NONE",
	[MALIDP_PREFETCH_MODE_PARTIAL] = "MMU_PREFETCH_PARTIAL",
	[MALIDP_PREFETCH_MODE_FULL] = "MMU_PREFETCH_FULL",
};

static void malidp_plane_atomic_print_state(struct drm_printer *p,
					    const struct drm_plane_state *state)
{
	struct malidp_plane_state *ms = to_malidp_plane_state(state);

	drm_printf(p, "\trotmem_size=%u\n", ms->rotmem_size);
	drm_printf(p, "\tformat_id=%u\n", ms->format);
	drm_printf(p, "\tn_planes=%u\n", ms->n_planes);
	drm_printf(p, "\tmmu_prefetch_mode=%s\n",
		   prefetch_mode_names[ms->mmu_prefetch_mode]);
	drm_printf(p, "\tmmu_prefetch_pgsize=%d\n", ms->mmu_prefetch_pgsize);
}

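/*
 * Validate a format/modifier pair against the device capabilities: linear
 * layouts are accepted unless the format is AFBC-only, while AFBC modifiers
 * are checked for vendor, allowed feature bits (SPLIT, SPARSE, CBR, YTR) and
 * the per-format restrictions below.
 */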
bool malidp_format_mod_supported(struct drm_device *drm,
				 u32 format, u64 modifier)
{
	const struct drm_format_info *info;
	const u64 *modifiers;
	struct malidp_drm *malidp = drm_to_malidp(drm);
	const struct malidp_hw_regmap *map = &malidp->dev->hw->map;

	if (WARN_ON(modifier == DRM_FORMAT_MOD_INVALID))
		return false;

	/* A linear layout is accepted for any format that is not AFBC-only */
	if (modifier == DRM_FORMAT_MOD_LINEAR)
		return !malidp_hw_format_is_afbc_only(format);

	if (!fourcc_mod_is_vendor(modifier, ARM)) {
		DRM_ERROR("Unknown modifier (not Arm)\n");
		return false;
	}

	if (modifier &
	    ~DRM_FORMAT_MOD_ARM_AFBC(AFBC_MOD_VALID_BITS)) {
		DRM_DEBUG_KMS("Unsupported modifiers\n");
		return false;
	}

	modifiers = malidp_format_modifiers;

	/* SPLIT buffers must use SPARSE layout */
	if (WARN_ON_ONCE((modifier & AFBC_SPLIT) && !(modifier & AFBC_SPARSE)))
		return false;

	/* CBR only applies to YUV formats, where YTR should always be 0 */
	if (WARN_ON_ONCE((modifier & AFBC_CBR) && (modifier & AFBC_YTR)))
		return false;

	while (*modifiers != DRM_FORMAT_MOD_INVALID) {
		if (*modifiers == modifier)
			break;

		modifiers++;
	}

	/* return false if the modifier was not found */
	if (*modifiers == DRM_FORMAT_MOD_INVALID) {
		DRM_DEBUG_KMS("Unsupported modifier\n");
		return false;
	}

	info = drm_format_info(format);

	if (info->num_planes != 1) {
		DRM_DEBUG_KMS("AFBC buffers expect one plane\n");
		return false;
	}

	if (malidp_hw_format_is_linear_only(format)) {
		DRM_DEBUG_KMS("Given format (0x%x) is supported in linear mode only\n",
			      format);
		return false;
	}

	/*
	 * RGB formats need to provide YTR modifier and YUV formats should not
	 * provide YTR modifier.
	 */
	if (!(info->is_yuv) != !!(modifier & AFBC_FORMAT_MOD_YTR)) {
		DRM_DEBUG_KMS("AFBC_FORMAT_MOD_YTR is %s for %s formats\n",
			      info->is_yuv ? "disallowed" : "mandatory",
			      info->is_yuv ? "YUV" : "RGB");
		return false;
	}

	if (modifier & AFBC_SPLIT) {
		if (!info->is_yuv) {
			if (info->cpp[0] <= 2) {
				DRM_DEBUG_KMS("RGB formats <= 16bpp are not supported with SPLIT\n");
				return false;
			}
		}

		if ((info->hsub != 1) || (info->vsub != 1)) {
			if (!(format == DRM_FORMAT_YUV420_10BIT &&
			      (map->features & MALIDP_DEVICE_AFBC_YUV_420_10_SUPPORT_SPLIT))) {
				DRM_DEBUG_KMS("Formats which are sub-sampled should never be split\n");
				return false;
			}
		}
	}

	if (modifier & AFBC_CBR) {
		if ((info->hsub == 1) || (info->vsub == 1)) {
			DRM_DEBUG_KMS("Formats which are not sub-sampled should not have CBR set\n");
			return false;
		}
	}

	return true;
}

static bool malidp_format_mod_supported_per_plane(struct drm_plane *plane,
						  u32 format, u64 modifier)
{
	return malidp_format_mod_supported(plane->dev, format, modifier);
}

static const struct drm_plane_funcs malidp_de_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.reset = malidp_plane_reset,
	.atomic_duplicate_state = malidp_duplicate_plane_state,
	.atomic_destroy_state = malidp_destroy_plane_state,
	.atomic_print_state = malidp_plane_atomic_print_state,
	.format_mod_supported = malidp_format_mod_supported_per_plane,
};

static int malidp_se_check_scaling(struct malidp_plane *mp,
				   struct drm_plane_state *state)
{
	struct drm_crtc_state *crtc_state =
		drm_atomic_get_existing_crtc_state(state->state, state->crtc);
	struct malidp_crtc_state *mc;
	u32 src_w, src_h;
	int ret;

	if (!crtc_state)
		return -EINVAL;

	mc = to_malidp_crtc_state(crtc_state);

	ret = drm_atomic_helper_check_plane_state(state, crtc_state,
						  0, INT_MAX, true, true);
	if (ret)
		return ret;

	if (state->rotation & MALIDP_ROTATED_MASK) {
		src_w = state->src_h >> 16;
		src_h = state->src_w >> 16;
	} else {
		src_w = state->src_w >> 16;
		src_h = state->src_h >> 16;
	}

	if ((state->crtc_w == src_w) && (state->crtc_h == src_h)) {
		/* Scaling not necessary for this plane. */
		mc->scaled_planes_mask &= ~(mp->layer->id);
		return 0;
	}

	if (mp->layer->id & (DE_SMART | DE_GRAPHICS2))
		return -EINVAL;

	mc->scaled_planes_mask |= mp->layer->id;
	/* Defer scaling requirements calculation to the crtc check. */
	return 0;
}

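/*
 * Return the page-size bitmap of the IOMMU domain attached to the display
 * device, or 0 if there is no such domain.
 */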
static u32 malidp_get_pgsize_bitmap(struct malidp_plane *mp)
{
	struct iommu_domain *mmu_dom;

	mmu_dom = iommu_get_domain_for_dev(mp->base.dev->dev);
	if (mmu_dom)
		return mmu_dom->pgsize_bitmap;

	return 0;
}

/*
 * Check if the framebuffer is entirely made up of pages at least pgsize in
 * size. Only a heuristic: assumes that each scatterlist entry has been aligned
 * to the largest page size smaller than its length and that the MMU maps to
 * the largest page size possible.
 */
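/*
 * Illustrative example of the heuristic above: a framebuffer whose scatterlist
 * entries are all at least 2 MiB long passes a SZ_2M check, while a single
 * 4 KiB tail entry makes the whole framebuffer fail it.
 */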
static bool malidp_check_pages_threshold(struct malidp_plane_state *ms,
					 u32 pgsize)
{
	int i;

	for (i = 0; i < ms->n_planes; i++) {
		struct drm_gem_object *obj;
		struct drm_gem_dma_object *dma_obj;
		struct sg_table *sgt;
		struct scatterlist *sgl;

		obj = drm_gem_fb_get_obj(ms->base.fb, i);
		dma_obj = to_drm_gem_dma_obj(obj);

		if (dma_obj->sgt)
			sgt = dma_obj->sgt;
		else
			sgt = obj->funcs->get_sg_table(obj);

		if (IS_ERR(sgt))
			return false;

		sgl = sgt->sgl;

		while (sgl) {
			if (sgl->length < pgsize) {
				if (!dma_obj->sgt)
					kfree(sgt);
				return false;
			}

			sgl = sg_next(sgl);
		}
		if (!dma_obj->sgt)
			kfree(sgt);
	}

	return true;
}

/*
 * Check if it is possible to enable partial-frame MMU prefetch given the
 * current format, AFBC state and rotation.
 */
static bool malidp_partial_prefetch_supported(u32 format, u64 modifier,
					      unsigned int rotation)
{
	bool afbc, sparse;

	/* rotation and horizontal flip not supported for partial prefetch */
	if (rotation & (DRM_MODE_ROTATE_90 | DRM_MODE_ROTATE_180 |
			DRM_MODE_ROTATE_270 | DRM_MODE_REFLECT_X))
		return false;

	afbc = modifier & DRM_FORMAT_MOD_ARM_AFBC(0);
	sparse = modifier & AFBC_FORMAT_MOD_SPARSE;

	switch (format) {
	case DRM_FORMAT_ARGB2101010:
	case DRM_FORMAT_RGBA1010102:
	case DRM_FORMAT_BGRA1010102:
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_RGBA8888:
	case DRM_FORMAT_BGRA8888:
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_RGBX8888:
	case DRM_FORMAT_BGRX8888:
	case DRM_FORMAT_RGB888:
	case DRM_FORMAT_RGBA5551:
	case DRM_FORMAT_RGB565:
		/* always supported */
		return true;

	case DRM_FORMAT_ABGR2101010:
	case DRM_FORMAT_ABGR8888:
	case DRM_FORMAT_ABGR1555:
	case DRM_FORMAT_BGR565:
		/* supported, but if AFBC then must be sparse mode */
		return (!afbc) || (afbc && sparse);

	case DRM_FORMAT_BGR888:
		/* supported, but not for AFBC */
		return !afbc;

	case DRM_FORMAT_YUYV:
	case DRM_FORMAT_UYVY:
	case DRM_FORMAT_NV12:
	case DRM_FORMAT_YUV420:
		/* not supported */
		return false;

	default:
		return false;
	}
}

/*
 * Select the preferred MMU prefetch mode. Full-frame prefetch is preferred as
 * long as the framebuffer is all large pages. Otherwise partial-frame prefetch
 * is selected as long as it is supported for the current format. The selected
 * page size for prefetch is returned in pgsize_bitmap.
 */
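/*
 * For example, with an IOMMU pgsize_bitmap of SZ_4K | SZ_2M the loop below
 * first tries SZ_2M full-frame prefetch and, failing that, falls back to
 * SZ_4K partial-frame prefetch (format permitting).
 */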
static enum mmu_prefetch_mode malidp_mmu_prefetch_select_mode
		(struct malidp_plane_state *ms, u32 *pgsize_bitmap)
{
	u32 pgsizes;

	/* get the full-frame prefetch page size(s) supported by the MMU */
	pgsizes = *pgsize_bitmap & MALIDP_MMU_PREFETCH_FULL_PGSIZES;

	while (pgsizes) {
		u32 largest_pgsize = 1 << __fls(pgsizes);

		if (malidp_check_pages_threshold(ms, largest_pgsize)) {
			*pgsize_bitmap = largest_pgsize;
			return MALIDP_PREFETCH_MODE_FULL;
		}

		pgsizes -= largest_pgsize;
	}

	/* get the partial-frame prefetch page size(s) supported by the MMU */
	pgsizes = *pgsize_bitmap & MALIDP_MMU_PREFETCH_PARTIAL_PGSIZES;

	if (malidp_partial_prefetch_supported(ms->base.fb->format->format,
					      ms->base.fb->modifier,
					      ms->base.rotation)) {
		/* partial prefetch using the smallest page size */
		*pgsize_bitmap = 1 << __ffs(pgsizes);
		return MALIDP_PREFETCH_MODE_PARTIAL;
	}
	*pgsize_bitmap = 0;
	return MALIDP_PREFETCH_MODE_NONE;
}

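/*
 * Build the layer's MMU control register value: enable prefetch, select
 * partial mode with the given readahead when requested, and flag large
 * (64K/2M) pages for every plane of the framebuffer.
 */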
static u32 malidp_calc_mmu_control_value(enum mmu_prefetch_mode mode,
					 u8 readahead, u8 n_planes, u32 pgsize)
{
	u32 mmu_ctrl = 0;

	if (mode != MALIDP_PREFETCH_MODE_NONE) {
		mmu_ctrl |= MALIDP_MMU_CTRL_EN;

		if (mode == MALIDP_PREFETCH_MODE_PARTIAL) {
			mmu_ctrl |= MALIDP_MMU_CTRL_MODE;
			mmu_ctrl |= MALIDP_MMU_CTRL_PP_NUM_REQ(readahead);
		}

		if (pgsize == SZ_64K || pgsize == SZ_2M) {
			int i;

			for (i = 0; i < n_planes; i++)
				mmu_ctrl |= MALIDP_MMU_CTRL_PX_PS(i);
		}
	}

	return mmu_ctrl;
}

static void malidp_de_prefetch_settings(struct malidp_plane *mp,
					struct malidp_plane_state *ms)
{
	if (!mp->layer->mmu_ctrl_offset)
		return;

	/* get the page sizes supported by the MMU */
	ms->mmu_prefetch_pgsize = malidp_get_pgsize_bitmap(mp);
	ms->mmu_prefetch_mode =
		malidp_mmu_prefetch_select_mode(ms, &ms->mmu_prefetch_pgsize);
}

static int malidp_de_plane_check(struct drm_plane *plane,
				 struct drm_atomic_state *state)
{
	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
										 plane);
	struct malidp_plane *mp = to_malidp_plane(plane);
	struct malidp_plane_state *ms = to_malidp_plane_state(new_plane_state);
	bool rotated = new_plane_state->rotation & MALIDP_ROTATED_MASK;
	struct drm_framebuffer *fb;
	u16 pixel_alpha = new_plane_state->pixel_blend_mode;
	int i, ret;
	unsigned int block_w, block_h;

	if (!new_plane_state->crtc || WARN_ON(!new_plane_state->fb))
		return 0;

	fb = new_plane_state->fb;

	ms->format = malidp_hw_get_format_id(&mp->hwdev->hw->map,
					     mp->layer->id, fb->format->format,
					     !!fb->modifier);
	if (ms->format == MALIDP_INVALID_FORMAT_ID)
		return -EINVAL;

	ms->n_planes = fb->format->num_planes;
	for (i = 0; i < ms->n_planes; i++) {
		u8 alignment = malidp_hw_get_pitch_align(mp->hwdev, rotated);

		if (((fb->pitches[i] * drm_format_info_block_height(fb->format, i))
				& (alignment - 1)) && !(fb->modifier)) {
			DRM_DEBUG_KMS("Invalid pitch %u for plane %d\n",
				      fb->pitches[i], i);
			return -EINVAL;
		}
	}

	block_w = drm_format_info_block_width(fb->format, 0);
	block_h = drm_format_info_block_height(fb->format, 0);
	if (fb->width % block_w || fb->height % block_h) {
		DRM_DEBUG_KMS("Buffer width/height needs to be a multiple of tile sizes\n");
		return -EINVAL;
	}
	if ((new_plane_state->src_x >> 16) % block_w || (new_plane_state->src_y >> 16) % block_h) {
		DRM_DEBUG_KMS("Plane src_x/src_y needs to be a multiple of tile sizes\n");
		return -EINVAL;
	}

	if ((new_plane_state->crtc_w > mp->hwdev->max_line_size) ||
	    (new_plane_state->crtc_h > mp->hwdev->max_line_size) ||
	    (new_plane_state->crtc_w < mp->hwdev->min_line_size) ||
	    (new_plane_state->crtc_h < mp->hwdev->min_line_size))
		return -EINVAL;

	/*
	 * DP550/650 video layers can accept 3 plane formats only if
	 * fb->pitches[1] == fb->pitches[2] since they don't have a
	 * third plane stride register.
	 */
	if (ms->n_planes == 3 &&
	    !(mp->hwdev->hw->features & MALIDP_DEVICE_LV_HAS_3_STRIDES) &&
	    (new_plane_state->fb->pitches[1] != new_plane_state->fb->pitches[2]))
		return -EINVAL;

	ret = malidp_se_check_scaling(mp, new_plane_state);
	if (ret)
		return ret;

	/* validate the rotation constraints for each layer */
	if (new_plane_state->rotation != DRM_MODE_ROTATE_0) {
		if (mp->layer->rot == ROTATE_NONE)
			return -EINVAL;
		if ((mp->layer->rot == ROTATE_COMPRESSED) && !(fb->modifier))
			return -EINVAL;
		/*
		 * packed RGB888 / BGR888 can't be rotated or flipped
		 * unless they are stored in a compressed way
		 */
		if ((fb->format->format == DRM_FORMAT_RGB888 ||
		     fb->format->format == DRM_FORMAT_BGR888) && !(fb->modifier))
			return -EINVAL;
	}

	/* SMART layer does not support AFBC */
	if (mp->layer->id == DE_SMART && fb->modifier) {
		DRM_ERROR("AFBC framebuffer not supported in SMART layer\n");
		return -EINVAL;
	}

	ms->rotmem_size = 0;
	if (new_plane_state->rotation & MALIDP_ROTATED_MASK) {
		int val;

		val = mp->hwdev->hw->rotmem_required(mp->hwdev, new_plane_state->crtc_w,
						     new_plane_state->crtc_h,
						     fb->format->format,
						     !!(fb->modifier));
		if (val < 0)
			return val;

		ms->rotmem_size = val;
	}

	/* HW can't support plane + pixel blending */
	if ((new_plane_state->alpha != DRM_BLEND_ALPHA_OPAQUE) &&
	    (pixel_alpha != DRM_MODE_BLEND_PIXEL_NONE) &&
	    fb->format->has_alpha)
		return -EINVAL;

	malidp_de_prefetch_settings(mp, ms);

	return 0;
}

static void malidp_de_set_plane_pitches(struct malidp_plane *mp,
					int num_planes, unsigned int pitches[3])
{
	int i;
	int num_strides = num_planes;

	if (!mp->layer->stride_offset)
		return;

	if (num_planes == 3)
		num_strides = (mp->hwdev->hw->features &
			       MALIDP_DEVICE_LV_HAS_3_STRIDES) ? 3 : 2;

	/*
	 * The drm convention for pitch is that it needs to cover width * cpp,
	 * but our hardware wants the pitch/stride to cover all rows included
	 * in a tile.
	 */
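	/*
	 * For linear formats block_height is 1 and the pitch is programmed
	 * unchanged; for 2x2 tiled formats such as DRM_FORMAT_X0L2 the value
	 * written ends up being twice the DRM pitch.
	 */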
	for (i = 0; i < num_strides; ++i) {
		unsigned int block_h = drm_format_info_block_height(mp->base.state->fb->format, i);

		malidp_hw_write(mp->hwdev, pitches[i] * block_h,
				mp->layer->base +
				mp->layer->stride_offset + i * 4);
	}
}

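/*
 * YUV->RGB conversion coefficients in signed fixed point, with 1024
 * representing 1.0 (e.g. 1192/1024 ~= 1.164, the BT.601 limited-range luma
 * scale factor); the last row of each entry appears to hold the Y/Cb/Cr
 * input offsets.
 */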
static const s16
malidp_yuv2rgb_coeffs[][DRM_COLOR_RANGE_MAX][MALIDP_COLORADJ_NUM_COEFFS] = {
	[DRM_COLOR_YCBCR_BT601][DRM_COLOR_YCBCR_LIMITED_RANGE] = {
		1192,    0, 1634,
		1192, -401, -832,
		1192, 2066,    0,
		  64,  512,  512
	},
	[DRM_COLOR_YCBCR_BT601][DRM_COLOR_YCBCR_FULL_RANGE] = {
		1024,    0, 1436,
		1024, -352, -731,
		1024, 1815,    0,
		   0,  512,  512
	},
	[DRM_COLOR_YCBCR_BT709][DRM_COLOR_YCBCR_LIMITED_RANGE] = {
		1192,    0, 1836,
		1192, -218, -546,
		1192, 2163,    0,
		  64,  512,  512
	},
	[DRM_COLOR_YCBCR_BT709][DRM_COLOR_YCBCR_FULL_RANGE] = {
		1024,    0, 1613,
		1024, -192, -479,
		1024, 1900,    0,
		   0,  512,  512
	},
	[DRM_COLOR_YCBCR_BT2020][DRM_COLOR_YCBCR_LIMITED_RANGE] = {
		1024,    0, 1476,
		1024, -165, -572,
		1024, 1884,    0,
		   0,  512,  512
	},
	[DRM_COLOR_YCBCR_BT2020][DRM_COLOR_YCBCR_FULL_RANGE] = {
		1024,    0, 1510,
		1024, -168, -585,
		1024, 1927,    0,
		   0,  512,  512
	}
};

static void malidp_de_set_color_encoding(struct malidp_plane *plane,
					 enum drm_color_encoding enc,
					 enum drm_color_range range)
{
	unsigned int i;

	for (i = 0; i < MALIDP_COLORADJ_NUM_COEFFS; i++) {
		/* coefficients are signed, two's complement values */
		malidp_hw_write(plane->hwdev, malidp_yuv2rgb_coeffs[enc][range][i],
				plane->layer->base + plane->layer->yuv2rgb_offset +
				i * 4);
	}
}

static void malidp_de_set_mmu_control(struct malidp_plane *mp,
				      struct malidp_plane_state *ms)
{
	u32 mmu_ctrl;

	/* check hardware supports MMU prefetch */
	if (!mp->layer->mmu_ctrl_offset)
		return;

	mmu_ctrl = malidp_calc_mmu_control_value(ms->mmu_prefetch_mode,
						 MALIDP_MMU_PREFETCH_READAHEAD,
						 ms->n_planes,
						 ms->mmu_prefetch_pgsize);

	malidp_hw_write(mp->hwdev, mmu_ctrl,
			mp->layer->base + mp->layer->mmu_ctrl_offset);
}

static void malidp_set_plane_base_addr(struct drm_framebuffer *fb,
				       struct malidp_plane *mp,
				       int plane_index)
{
	dma_addr_t dma_addr;
	u16 ptr;
	struct drm_plane *plane = &mp->base;
	bool afbc = fb->modifier ? true : false;

	ptr = mp->layer->ptr + (plane_index << 4);

	/*
	 * drm_fb_dma_get_gem_addr() alters the physical base address of the
	 * framebuffer as per the plane's src_x, src_y co-ordinates (ie to
	 * take care of source cropping).
	 * For AFBC, this is not needed as the cropping is handled by _AD_CROP_H
	 * and _AD_CROP_V registers.
	 */
	if (!afbc) {
		dma_addr = drm_fb_dma_get_gem_addr(fb, plane->state,
						   plane_index);
	} else {
		struct drm_gem_dma_object *obj;

		obj = drm_fb_dma_get_gem_obj(fb, plane_index);

		if (WARN_ON(!obj))
			return;
		dma_addr = obj->dma_addr;
	}

	malidp_hw_write(mp->hwdev, lower_32_bits(dma_addr), ptr);
	malidp_hw_write(mp->hwdev, upper_32_bits(dma_addr), ptr + 4);
}

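/*
 * Program the AFBC decoder: crop the AFBC buffer down to the plane's source
 * rectangle and enable block-split/YTR handling to match the modifier.
 * For example, a 1920x1080 AFBC framebuffer displayed with src_x/src_y = 0
 * and src_w/src_h = 1280x720 gets a right crop of 640 and a bottom crop of
 * 360 programmed below.
 */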
static void malidp_de_set_plane_afbc(struct drm_plane *plane)
{
	struct malidp_plane *mp;
	u32 src_w, src_h, val = 0, src_x, src_y;
	struct drm_framebuffer *fb = plane->state->fb;

	mp = to_malidp_plane(plane);

	/* no afbc_decoder_offset means AFBC is not supported on this plane */
	if (!mp->layer->afbc_decoder_offset)
		return;

	if (!fb->modifier) {
		malidp_hw_write(mp->hwdev, 0, mp->layer->afbc_decoder_offset);
		return;
	}

	/* convert src values from Q16 fixed point to integer */
	src_w = plane->state->src_w >> 16;
	src_h = plane->state->src_h >> 16;
	src_x = plane->state->src_x >> 16;
	src_y = plane->state->src_y >> 16;

	val = ((fb->width - (src_x + src_w)) << MALIDP_AD_CROP_RIGHT_OFFSET) |
		   src_x;
	malidp_hw_write(mp->hwdev, val,
			mp->layer->afbc_decoder_offset + MALIDP_AD_CROP_H);

	val = ((fb->height - (src_y + src_h)) << MALIDP_AD_CROP_BOTTOM_OFFSET) |
		   src_y;
	malidp_hw_write(mp->hwdev, val,
			mp->layer->afbc_decoder_offset + MALIDP_AD_CROP_V);

	val = MALIDP_AD_EN;
	if (fb->modifier & AFBC_FORMAT_MOD_SPLIT)
		val |= MALIDP_AD_BS;
	if (fb->modifier & AFBC_FORMAT_MOD_YTR)
		val |= MALIDP_AD_YTR;

	malidp_hw_write(mp->hwdev, val, mp->layer->afbc_decoder_offset);
}

static void malidp_de_plane_update(struct drm_plane *plane,
				   struct drm_atomic_state *state)
{
	struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
									   plane);
	struct malidp_plane *mp;
	struct malidp_plane_state *ms = to_malidp_plane_state(plane->state);
	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
									   plane);
	u16 pixel_alpha = new_state->pixel_blend_mode;
	u8 plane_alpha = new_state->alpha >> 8;
	u32 src_w, src_h, dest_w, dest_h, val;
	int i;
	struct drm_framebuffer *fb = plane->state->fb;

	mp = to_malidp_plane(plane);

	/*
	 * For an AFBC framebuffer, use the framebuffer width and height when
	 * configuring the layer input size register.
	 */
	if (fb->modifier) {
		src_w = fb->width;
		src_h = fb->height;
	} else {
		/* convert src values from Q16 fixed point to integer */
		src_w = new_state->src_w >> 16;
		src_h = new_state->src_h >> 16;
	}

	dest_w = new_state->crtc_w;
	dest_h = new_state->crtc_h;

	val = malidp_hw_read(mp->hwdev, mp->layer->base);
	val = (val & ~LAYER_FORMAT_MASK) | ms->format;
	malidp_hw_write(mp->hwdev, val, mp->layer->base);

	for (i = 0; i < ms->n_planes; i++)
		malidp_set_plane_base_addr(fb, mp, i);

	malidp_de_set_mmu_control(mp, ms);

	malidp_de_set_plane_pitches(mp, ms->n_planes,
				    new_state->fb->pitches);

	if ((plane->state->color_encoding != old_state->color_encoding) ||
	    (plane->state->color_range != old_state->color_range))
		malidp_de_set_color_encoding(mp, plane->state->color_encoding,
					     plane->state->color_range);

	malidp_hw_write(mp->hwdev, LAYER_H_VAL(src_w) | LAYER_V_VAL(src_h),
			mp->layer->base + MALIDP_LAYER_SIZE);

	malidp_hw_write(mp->hwdev, LAYER_H_VAL(dest_w) | LAYER_V_VAL(dest_h),
			mp->layer->base + MALIDP_LAYER_COMP_SIZE);

	malidp_hw_write(mp->hwdev, LAYER_H_VAL(new_state->crtc_x) |
			LAYER_V_VAL(new_state->crtc_y),
			mp->layer->base + MALIDP_LAYER_OFFSET);

	if (mp->layer->id == DE_SMART) {
		/*
		 * Enable the first rectangle in the SMART layer to be
		 * able to use it as a drm plane.
		 */
		malidp_hw_write(mp->hwdev, 1,
				mp->layer->base + MALIDP550_LS_ENABLE);
		malidp_hw_write(mp->hwdev,
				LAYER_H_VAL(src_w) | LAYER_V_VAL(src_h),
				mp->layer->base + MALIDP550_LS_R1_IN_SIZE);
	}

	malidp_de_set_plane_afbc(plane);

	/* first clear the rotation bits */
	val = malidp_hw_read(mp->hwdev, mp->layer->base + MALIDP_LAYER_CONTROL);
	val &= ~LAYER_ROT_MASK;

	/* setup the rotation and axis flip bits */
	if (new_state->rotation & DRM_MODE_ROTATE_MASK)
		val |= ilog2(plane->state->rotation & DRM_MODE_ROTATE_MASK) <<
		       LAYER_ROT_OFFSET;
	if (new_state->rotation & DRM_MODE_REFLECT_X)
		val |= LAYER_H_FLIP;
	if (new_state->rotation & DRM_MODE_REFLECT_Y)
		val |= LAYER_V_FLIP;

	val &= ~(LAYER_COMP_MASK | LAYER_PMUL_ENABLE | LAYER_ALPHA(0xff));

	if (new_state->alpha != DRM_BLEND_ALPHA_OPAQUE) {
		val |= LAYER_COMP_PLANE;
	} else if (new_state->fb->format->has_alpha) {
		/* We only care about blend mode if the format has alpha */
		switch (pixel_alpha) {
		case DRM_MODE_BLEND_PREMULTI:
			val |= LAYER_COMP_PIXEL | LAYER_PMUL_ENABLE;
			break;
		case DRM_MODE_BLEND_COVERAGE:
			val |= LAYER_COMP_PIXEL;
			break;
		}
	}
	val |= LAYER_ALPHA(plane_alpha);

	val &= ~LAYER_FLOWCFG(LAYER_FLOWCFG_MASK);
	if (new_state->crtc) {
		struct malidp_crtc_state *m =
			to_malidp_crtc_state(new_state->crtc->state);

		if (m->scaler_config.scale_enable &&
		    m->scaler_config.plane_src_id == mp->layer->id)
			val |= LAYER_FLOWCFG(LAYER_FLOWCFG_SCALE_SE);
	}

	/* set the 'enable layer' bit */
	val |= LAYER_ENABLE;

	malidp_hw_write(mp->hwdev, val,
			mp->layer->base + MALIDP_LAYER_CONTROL);
}

static void malidp_de_plane_disable(struct drm_plane *plane,
				    struct drm_atomic_state *state)
{
	struct malidp_plane *mp = to_malidp_plane(plane);

	malidp_hw_clearbits(mp->hwdev,
			    LAYER_ENABLE | LAYER_FLOWCFG(LAYER_FLOWCFG_MASK),
			    mp->layer->base + MALIDP_LAYER_CONTROL);
}

static const struct drm_plane_helper_funcs malidp_de_plane_helper_funcs = {
	.atomic_check = malidp_de_plane_check,
	.atomic_update = malidp_de_plane_update,
	.atomic_disable = malidp_de_plane_disable,
};

static const uint64_t linear_only_modifiers[] = {
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID
};

int malidp_de_planes_init(struct drm_device *drm)
{
	struct malidp_drm *malidp = drm_to_malidp(drm);
	const struct malidp_hw_regmap *map = &malidp->dev->hw->map;
	struct malidp_plane *plane = NULL;
	enum drm_plane_type plane_type;
	unsigned long crtcs = BIT(drm->mode_config.num_crtc);
	unsigned long flags = DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 | DRM_MODE_ROTATE_180 |
			      DRM_MODE_ROTATE_270 | DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y;
	unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
				  BIT(DRM_MODE_BLEND_PREMULTI)   |
				  BIT(DRM_MODE_BLEND_COVERAGE);
	u32 *formats;
	int ret, i = 0, j = 0, n;
	u64 supported_modifiers[MODIFIERS_COUNT_MAX];
	const u64 *modifiers;

	modifiers = malidp_format_modifiers;

	if (!(map->features & MALIDP_DEVICE_AFBC_SUPPORT_SPLIT)) {
		/*
		 * The hardware does not support SPLIT, so build the list of
		 * supported modifiers excluding the SPLIT ones.
		 */
		while (*modifiers != DRM_FORMAT_MOD_INVALID) {
			if (!(*modifiers & AFBC_SPLIT))
				supported_modifiers[j++] = *modifiers;

			modifiers++;
		}
		supported_modifiers[j++] = DRM_FORMAT_MOD_INVALID;
		modifiers = supported_modifiers;
	}

	formats = kcalloc(map->n_pixel_formats, sizeof(*formats), GFP_KERNEL);
	if (!formats) {
		ret = -ENOMEM;
		goto cleanup;
	}

	for (i = 0; i < map->n_layers; i++) {
		u8 id = map->layers[i].id;

		/* build the list of DRM supported formats based on the map */
		for (n = 0, j = 0; j < map->n_pixel_formats; j++) {
			if ((map->pixel_formats[j].layer & id) == id)
				formats[n++] = map->pixel_formats[j].format;
		}

		plane_type = (i == 0) ? DRM_PLANE_TYPE_PRIMARY :
					DRM_PLANE_TYPE_OVERLAY;

		/* All layers except the SMART layer support AFBC modifiers. */
		plane = drmm_universal_plane_alloc(drm, struct malidp_plane, base,
						   crtcs, &malidp_de_plane_funcs, formats, n,
						   (id == DE_SMART) ? linear_only_modifiers :
						   modifiers, plane_type, NULL);
		if (IS_ERR(plane)) {
			ret = PTR_ERR(plane);
			goto cleanup;
		}

		drm_plane_helper_add(&plane->base,
				     &malidp_de_plane_helper_funcs);
		plane->hwdev = malidp->dev;
		plane->layer = &map->layers[i];

		drm_plane_create_alpha_property(&plane->base);
		drm_plane_create_blend_mode_property(&plane->base, blend_caps);

		if (id == DE_SMART) {
			/* Skip the features which the SMART layer doesn't have. */
			continue;
		}

		drm_plane_create_rotation_property(&plane->base, DRM_MODE_ROTATE_0, flags);
		malidp_hw_write(malidp->dev, MALIDP_ALPHA_LUT,
				plane->layer->base + MALIDP_LAYER_COMPOSE);

		/* Attach the YUV->RGB property only to video layers */
		if (id & (DE_VIDEO1 | DE_VIDEO2)) {
			/* default encoding for YUV->RGB is BT601 NARROW */
			enum drm_color_encoding enc = DRM_COLOR_YCBCR_BT601;
			enum drm_color_range range = DRM_COLOR_YCBCR_LIMITED_RANGE;

			ret = drm_plane_create_color_properties(&plane->base,
					BIT(DRM_COLOR_YCBCR_BT601) |
					BIT(DRM_COLOR_YCBCR_BT709) |
					BIT(DRM_COLOR_YCBCR_BT2020),
					BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
					BIT(DRM_COLOR_YCBCR_FULL_RANGE),
					enc, range);
			if (!ret)
				/* program the HW registers */
				malidp_de_set_color_encoding(plane, enc, range);
			else
				DRM_WARN("Failed to create video layer %d color properties\n", id);
		}
	}

	kfree(formats);

	return 0;

cleanup:
	kfree(formats);

	return ret;
}