1/*
2 * Copyright © 2006-2007 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 *	Eric Anholt <eric@anholt.net>
25 */
26
27#include <linux/i2c.h>
28#include <linux/input.h>
29#include <linux/intel-iommu.h>
30#include <linux/kernel.h>
31#include <linux/module.h>
32#include <linux/dma-resv.h>
33#include <linux/slab.h>
34
35#include <drm/drm_atomic.h>
36#include <drm/drm_atomic_helper.h>
37#include <drm/drm_atomic_uapi.h>
38#include <drm/drm_damage_helper.h>
39#include <drm/drm_dp_helper.h>
40#include <drm/drm_edid.h>
41#include <drm/drm_fourcc.h>
42#include <drm/drm_plane_helper.h>
43#include <drm/drm_probe_helper.h>
44#include <drm/drm_rect.h>
45
46#include "display/intel_crt.h"
47#include "display/intel_ddi.h"
48#include "display/intel_dp.h"
49#include "display/intel_dp_mst.h"
50#include "display/intel_dpll_mgr.h"
51#include "display/intel_dsi.h"
52#include "display/intel_dvo.h"
53#include "display/intel_gmbus.h"
54#include "display/intel_hdmi.h"
55#include "display/intel_lvds.h"
56#include "display/intel_sdvo.h"
57#include "display/intel_tv.h"
58#include "display/intel_vdsc.h"
59
60#include "gt/intel_rps.h"
61
62#include "i915_drv.h"
63#include "i915_trace.h"
64#include "intel_acpi.h"
65#include "intel_atomic.h"
66#include "intel_atomic_plane.h"
67#include "intel_bw.h"
68#include "intel_cdclk.h"
69#include "intel_color.h"
70#include "intel_csr.h"
71#include "intel_display_types.h"
72#include "intel_dp_link_training.h"
73#include "intel_fbc.h"
74#include "intel_fbdev.h"
75#include "intel_fifo_underrun.h"
76#include "intel_frontbuffer.h"
77#include "intel_hdcp.h"
78#include "intel_hotplug.h"
79#include "intel_overlay.h"
80#include "intel_pipe_crc.h"
81#include "intel_pm.h"
82#include "intel_psr.h"
83#include "intel_quirks.h"
84#include "intel_sideband.h"
85#include "intel_sprite.h"
86#include "intel_tc.h"
87#include "intel_vga.h"
88
/*
 * Primary plane formats for gen <= 3.
 * fourcc codes advertised for the primary plane on these platforms.
 */
static const u32 i8xx_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_XRGB1555,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
};
96
/*
 * Primary plane formats for ivb (no fp16 due to hw issue).
 * Same as i965 but without DRM_FORMAT_XBGR16161616F.
 */
static const u32 ivb_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
};
106
/* Primary plane formats for gen >= 4, except ivb (see ivb_primary_formats) */
static const u32 i965_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
	DRM_FORMAT_XBGR16161616F,
};
117
/*
 * Primary plane formats for vlv/chv.
 * Superset of i965: additionally exposes the alpha-carrying variants.
 */
static const u32 vlv_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_ABGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
	DRM_FORMAT_ARGB2101010,
	DRM_FORMAT_ABGR2101010,
	DRM_FORMAT_XBGR16161616F,
};
132
/* Framebuffer modifiers supported on i9xx; DRM_FORMAT_MOD_INVALID terminates the list. */
static const u64 i9xx_format_modifiers[] = {
	I915_FORMAT_MOD_X_TILED,
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID
};
138
/* Cursor formats: the cursor plane only takes 32bpp ARGB */
static const u32 intel_cursor_formats[] = {
	DRM_FORMAT_ARGB8888,
};
143
/* Cursor plane modifiers: linear only; DRM_FORMAT_MOD_INVALID terminates the list. */
static const u64 cursor_format_modifiers[] = {
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID
};
148
149static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
150				struct intel_crtc_state *pipe_config);
151static void ilk_pch_clock_get(struct intel_crtc *crtc,
152			      struct intel_crtc_state *pipe_config);
153
154static int intel_framebuffer_init(struct intel_framebuffer *ifb,
155				  struct drm_i915_gem_object *obj,
156				  struct drm_mode_fb_cmd2 *mode_cmd);
157static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state);
158static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
159static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
160					 const struct intel_link_m_n *m_n,
161					 const struct intel_link_m_n *m2_n2);
162static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state);
163static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state);
164static void hsw_set_pipeconf(const struct intel_crtc_state *crtc_state);
165static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state);
166static void vlv_prepare_pll(struct intel_crtc *crtc,
167			    const struct intel_crtc_state *pipe_config);
168static void chv_prepare_pll(struct intel_crtc *crtc,
169			    const struct intel_crtc_state *pipe_config);
170static void skl_pfit_enable(const struct intel_crtc_state *crtc_state);
171static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state);
172static void intel_modeset_setup_hw_state(struct drm_device *dev,
173					 struct drm_modeset_acquire_ctx *ctx);
174static struct intel_crtc_state *intel_crtc_state_alloc(struct intel_crtc *crtc);
175
/*
 * Per-platform legal ranges for the DPLL divider values.
 * All [min, max] ranges are inclusive (see intel_pll_is_valid()).
 */
struct intel_limit {
	struct {
		int min, max;
	} dot, vco, n, m, m1, m2, p, p1;

	/*
	 * p2 divider selection: p2_slow is used below dot_limit (kHz),
	 * p2_fast at or above it; for LVDS, dual-link picks p2_fast and
	 * single-link p2_slow instead (see i9xx_select_p2_div()).
	 */
	struct {
		int dot_limit;
		int p2_slow, p2_fast;
	} p2;
};
186
187/* returns HPLL frequency in kHz */
188int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
189{
190	int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
191
192	/* Obtain SKU information */
193	hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
194		CCK_FUSE_HPLL_FREQ_MASK;
195
196	return vco_freq[hpll_freq] * 1000;
197}
198
199int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
200		      const char *name, u32 reg, int ref_freq)
201{
202	u32 val;
203	int divider;
204
205	val = vlv_cck_read(dev_priv, reg);
206	divider = val & CCK_FREQUENCY_VALUES;
207
208	drm_WARN(&dev_priv->drm, (val & CCK_FREQUENCY_STATUS) !=
209		 (divider << CCK_FREQUENCY_STATUS_SHIFT),
210		 "%s change in progress\n", name);
211
212	return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
213}
214
215int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
216			   const char *name, u32 reg)
217{
218	int hpll;
219
220	vlv_cck_get(dev_priv);
221
222	if (dev_priv->hpll_freq == 0)
223		dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);
224
225	hpll = vlv_get_cck_clock(dev_priv, name, reg, dev_priv->hpll_freq);
226
227	vlv_cck_put(dev_priv);
228
229	return hpll;
230}
231
232static void intel_update_czclk(struct drm_i915_private *dev_priv)
233{
234	if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
235		return;
236
237	dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
238						      CCK_CZ_CLOCK_CONTROL);
239
240	drm_dbg(&dev_priv->drm, "CZ clock rate: %d kHz\n",
241		dev_priv->czclk_freq);
242}
243
244/* units of 100MHz */
245static u32 intel_fdi_link_freq(struct drm_i915_private *dev_priv,
246			       const struct intel_crtc_state *pipe_config)
247{
248	if (HAS_DDI(dev_priv))
249		return pipe_config->port_clock; /* SPLL */
250	else
251		return dev_priv->fdi_pll_freq;
252}
253
/* i8xx DPLL limits: DAC output */
static const struct intel_limit intel_limits_i8xx_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 2 },
};
266
/* i8xx DPLL limits: DVO output (p2 is always 4, unlike the DAC limits) */
static const struct intel_limit intel_limits_i8xx_dvo = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 4 },
};
279
/* i8xx DPLL limits: LVDS output */
static const struct intel_limit intel_limits_i8xx_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 1, .max = 6 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 14, .p2_fast = 7 },
};
292
/* i9xx DPLL limits: SDVO output */
static const struct intel_limit intel_limits_i9xx_sdvo = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};
305
/* i9xx DPLL limits: LVDS output */
static const struct intel_limit intel_limits_i9xx_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 7, .max = 98 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 7 },
};
318
319
/* g4x DPLL limits: SDVO output */
static const struct intel_limit intel_limits_g4x_sdvo = {
	.dot = { .min = 25000, .max = 270000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 10, .max = 30 },
	.p1 = { .min = 1, .max = 3},
	.p2 = { .dot_limit = 270000,
		.p2_slow = 10,
		.p2_fast = 10
	},
};
334
/* g4x DPLL limits: HDMI output */
static const struct intel_limit intel_limits_g4x_hdmi = {
	.dot = { .min = 22000, .max = 400000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 16, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8},
	.p2 = { .dot_limit = 165000,
		.p2_slow = 10, .p2_fast = 5 },
};
347
/* g4x DPLL limits: single-channel LVDS (p2 fixed at 14, dot_limit unused) */
static const struct intel_limit intel_limits_g4x_single_channel_lvds = {
	.dot = { .min = 20000, .max = 115000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 14, .p2_fast = 14
	},
};
361
/* g4x DPLL limits: dual-channel LVDS (p2 fixed at 7, dot_limit unused) */
static const struct intel_limit intel_limits_g4x_dual_channel_lvds = {
	.dot = { .min = 80000, .max = 224000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 7, .p2_fast = 7
	},
};
375
/* Pineview DPLL limits: SDVO output */
static const struct intel_limit pnv_limits_sdvo = {
	.dot = { .min = 20000, .max = 400000},
	.vco = { .min = 1700000, .max = 3500000 },
	/* Pineview's Ncounter is a ring counter */
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	/* Pineview only has one combined m divider, which we treat as m2. */
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};
390
/* Pineview DPLL limits: LVDS output (m1 reserved as 0, m handled as m2) */
static const struct intel_limit pnv_limits_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1700000, .max = 3500000 },
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 7, .max = 112 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 14 },
};
403
/* Ironlake / Sandybridge
 *
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
 * the range value for them is (actual_value - 2).
 */
/* ILK/SNB DPLL limits: DAC output */
static const struct intel_limit ilk_limits_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 5 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 10, .p2_fast = 5 },
};
421
/* ILK/SNB DPLL limits: single-channel LVDS */
static const struct intel_limit ilk_limits_single_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 118 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};
434
/* ILK/SNB DPLL limits: dual-channel LVDS */
static const struct intel_limit ilk_limits_dual_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 56 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};
447
/* LVDS 100mhz refclk limits. */
/* ILK/SNB DPLL limits: single-channel LVDS with 100MHz refclk */
static const struct intel_limit ilk_limits_single_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 2 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};
461
/* ILK/SNB DPLL limits: dual-channel LVDS with 100MHz refclk */
static const struct intel_limit ilk_limits_dual_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};
474
static const struct intel_limit intel_limits_vlv = {
	 /*
	  * These are the data rate limits (measured in fast clocks)
	  * since those are the strictest limits we have. The fast
	  * clock and actual rate limits are more relaxed, so checking
	  * them would make no difference.
	  */
	.dot = { .min = 25000 * 5, .max = 270000 * 5 },	/* 5x: fast clock units */
	.vco = { .min = 4000000, .max = 6000000 },
	.n = { .min = 1, .max = 7 },
	.m1 = { .min = 2, .max = 3 },
	.m2 = { .min = 11, .max = 156 },
	.p1 = { .min = 2, .max = 3 },
	.p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
};
490
static const struct intel_limit intel_limits_chv = {
	/*
	 * These are the data rate limits (measured in fast clocks)
	 * since those are the strictest limits we have.  The fast
	 * clock and actual rate limits are more relaxed, so checking
	 * them would make no difference.
	 */
	.dot = { .min = 25000 * 5, .max = 540000 * 5},	/* 5x: fast clock units */
	.vco = { .min = 4800000, .max = 6480000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	/* m2 carries 22 fractional bits on CHV (see chv_calc_dpll_params()) */
	.m2 = { .min = 24 << 22, .max = 175 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = {	.p2_slow = 1, .p2_fast = 14 },
};
506
/* BXT DPLL limits; shares the CHV fixed n=1/m1=2 scheme (see bxt_find_best_dpll()) */
static const struct intel_limit intel_limits_bxt = {
	/* FIXME: find real dot limits */
	.dot = { .min = 0, .max = INT_MAX },
	.vco = { .min = 4800000, .max = 6700000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	/* FIXME: find real m2 limits */
	.m2 = { .min = 2 << 22, .max = 255 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = { .p2_slow = 1, .p2_fast = 20 },
};
518
519/* WA Display #0827: Gen9:all */
520static void
521skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable)
522{
523	if (enable)
524		intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
525		               intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) | DUPS1_GATING_DIS | DUPS2_GATING_DIS);
526	else
527		intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
528		               intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS));
529}
530
531/* Wa_2006604312:icl,ehl */
532static void
533icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
534		       bool enable)
535{
536	if (enable)
537		intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
538		               intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) | DPFR_GATING_DIS);
539	else
540		intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
541		               intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~DPFR_GATING_DIS);
542}
543
/* Thin wrapper: ask the drm atomic helper whether this crtc state needs a modeset. */
static bool
needs_modeset(const struct intel_crtc_state *state)
{
	return drm_atomic_crtc_needs_modeset(&state->uapi);
}
549
/* A transcoder port sync slave has a valid master transcoder assigned. */
static bool
is_trans_port_sync_slave(const struct intel_crtc_state *crtc_state)
{
	return crtc_state->master_transcoder != INVALID_TRANSCODER;
}
555
/* A transcoder port sync master drives at least one slave (non-empty mask). */
static bool
is_trans_port_sync_master(const struct intel_crtc_state *crtc_state)
{
	return crtc_state->sync_mode_slaves_mask != 0;
}
561
562bool
563is_trans_port_sync_mode(const struct intel_crtc_state *crtc_state)
564{
565	return is_trans_port_sync_master(crtc_state) ||
566		is_trans_port_sync_slave(crtc_state);
567}
568
569/*
570 * Platform specific helpers to calculate the port PLL loopback- (clock.m),
571 * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
572 * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic.
573 * The helpers' return value is the rate of the clock that is fed to the
574 * display engine's pipe which can be the above fast dot clock rate or a
575 * divided-down version of it.
576 */
577/* m1 is reserved as 0 in Pineview, n is a ring counter */
578static int pnv_calc_dpll_params(int refclk, struct dpll *clock)
579{
580	clock->m = clock->m2 + 2;
581	clock->p = clock->p1 * clock->p2;
582	if (WARN_ON(clock->n == 0 || clock->p == 0))
583		return 0;
584	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
585	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
586
587	return clock->dot;
588}
589
590static u32 i9xx_dpll_compute_m(struct dpll *dpll)
591{
592	return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
593}
594
595static int i9xx_calc_dpll_params(int refclk, struct dpll *clock)
596{
597	clock->m = i9xx_dpll_compute_m(clock);
598	clock->p = clock->p1 * clock->p2;
599	if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
600		return 0;
601	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
602	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
603
604	return clock->dot;
605}
606
607static int vlv_calc_dpll_params(int refclk, struct dpll *clock)
608{
609	clock->m = clock->m1 * clock->m2;
610	clock->p = clock->p1 * clock->p2;
611	if (WARN_ON(clock->n == 0 || clock->p == 0))
612		return 0;
613	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
614	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
615
616	return clock->dot / 5;
617}
618
619int chv_calc_dpll_params(int refclk, struct dpll *clock)
620{
621	clock->m = clock->m1 * clock->m2;
622	clock->p = clock->p1 * clock->p2;
623	if (WARN_ON(clock->n == 0 || clock->p == 0))
624		return 0;
625	clock->vco = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(refclk, clock->m),
626					   clock->n << 22);
627	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
628
629	return clock->dot / 5;
630}
631
632/*
633 * Returns whether the given set of divisors are valid for a given refclk with
634 * the given connectors.
635 */
636static bool intel_pll_is_valid(struct drm_i915_private *dev_priv,
637			       const struct intel_limit *limit,
638			       const struct dpll *clock)
639{
640	if (clock->n < limit->n.min || limit->n.max < clock->n)
641		return false;
642	if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
643		return false;
644	if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
645		return false;
646	if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
647		return false;
648
649	if (!IS_PINEVIEW(dev_priv) && !IS_VALLEYVIEW(dev_priv) &&
650	    !IS_CHERRYVIEW(dev_priv) && !IS_GEN9_LP(dev_priv))
651		if (clock->m1 <= clock->m2)
652			return false;
653
654	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
655	    !IS_GEN9_LP(dev_priv)) {
656		if (clock->p < limit->p.min || limit->p.max < clock->p)
657			return false;
658		if (clock->m < limit->m.min || limit->m.max < clock->m)
659			return false;
660	}
661
662	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
663		return false;
664	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
665	 * connector, etc., rather than just a single range.
666	 */
667	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
668		return false;
669
670	return true;
671}
672
673static int
674i9xx_select_p2_div(const struct intel_limit *limit,
675		   const struct intel_crtc_state *crtc_state,
676		   int target)
677{
678	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
679
680	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
681		/*
682		 * For LVDS just rely on its current settings for dual-channel.
683		 * We haven't figured out how to reliably set up different
684		 * single/dual channel state, if we even can.
685		 */
686		if (intel_is_dual_link_lvds(dev_priv))
687			return limit->p2.p2_fast;
688		else
689			return limit->p2.p2_slow;
690	} else {
691		if (target < limit->p2.dot_limit)
692			return limit->p2.p2_slow;
693		else
694			return limit->p2.p2_fast;
695	}
696}
697
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
i9xx_find_best_dpll(const struct intel_limit *limit,
		    struct intel_crtc_state *crtc_state,
		    int target, int refclk, struct dpll *match_clock,
		    struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->uapi.crtc->dev;
	struct dpll clock;
	/* best error so far; starts at target so any valid hit improves it */
	int err = target;

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	/* Exhaustively walk the m1/m2/n/p1 divider space. */
	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			/* m1 must stay above m2 (also enforced in intel_pll_is_valid()) */
			if (clock.m2 >= clock.m1)
				break;
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
					clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					i9xx_calc_dpll_params(refclk, &clock);
					if (!intel_pll_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					/* keep the candidate closest to the target dot clock */
					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return (err != target);
}
755
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
pnv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->uapi.crtc->dev;
	struct dpll clock;
	/* best error so far; starts at target so any valid hit improves it */
	int err = target;

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	/*
	 * Exhaustively walk the m1/m2/n/p1 divider space. Unlike the i9xx
	 * version there is no m1 > m2 restriction here (Pineview's m1 is
	 * reserved as 0, see pnv_limits_*).
	 */
	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
					clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					pnv_calc_dpll_params(refclk, &clock);
					if (!intel_pll_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					/* keep the candidate closest to the target dot clock */
					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return (err != target);
}
811
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
g4x_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->uapi.crtc->dev;
	struct dpll clock;
	int max_n;
	bool found = false;
	/* approximately equals target * 0.00585 */
	int err_most = (target >> 8) + (target >> 9);

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	max_n = limit->n.max;
	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		/* based on hardware requirement, prefere larger m1,m2 */
		for (clock.m1 = limit->m1.max;
		     clock.m1 >= limit->m1.min; clock.m1--) {
			for (clock.m2 = limit->m2.max;
			     clock.m2 >= limit->m2.min; clock.m2--) {
				for (clock.p1 = limit->p1.max;
				     clock.p1 >= limit->p1.min; clock.p1--) {
					int this_err;

					i9xx_calc_dpll_params(refclk, &clock);
					if (!intel_pll_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err_most) {
						*best_clock = clock;
						err_most = this_err;
						/*
						 * Shrink the n search space: once a hit is
						 * found, never accept a larger n than it used.
						 */
						max_n = clock.n;
						found = true;
					}
				}
			}
		}
	}
	return found;
}
870
/*
 * Check if the calculated PLL configuration is more optimal compared to the
 * best configuration and error found so far. Return the calculated error.
 *
 * Returns true if @calculated_clock should replace the current best; the
 * candidate's error in ppm is always returned via @error_ppm.
 */
static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
			       const struct dpll *calculated_clock,
			       const struct dpll *best_clock,
			       unsigned int best_error_ppm,
			       unsigned int *error_ppm)
{
	/*
	 * For CHV ignore the error and consider only the P value.
	 * Prefer a bigger P value based on HW requirements.
	 */
	if (IS_CHERRYVIEW(to_i915(dev))) {
		*error_ppm = 0;

		return calculated_clock->p > best_clock->p;
	}

	if (drm_WARN_ON_ONCE(dev, !target_freq))
		return false;

	/* error in parts per million of the target frequency */
	*error_ppm = div_u64(1000000ULL *
				abs(target_freq - calculated_clock->dot),
			     target_freq);
	/*
	 * Prefer a better P value over a better (smaller) error if the error
	 * is small. Ensure this preference for future configurations too by
	 * setting the error to 0.
	 */
	if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
		*error_ppm = 0;

		return true;
	}

	/* otherwise require a meaningfully (10 ppm) better error */
	return *error_ppm + 10 < best_error_ppm;
}
910
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 */
static bool
vlv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct dpll clock;
	/* best candidate error so far, in ppm; starts at 100% */
	unsigned int bestppm = 1000000;
	/* min update 19.2 MHz */
	int max_n = min(limit->n.max, refclk / 19200);
	bool found = false;

	target *= 5; /* fast clock */

	memset(best_clock, 0, sizeof(*best_clock));

	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
			for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
			     clock.p2 -= clock.p2 > 10 ? 2 : 1) {
				clock.p = clock.p1 * clock.p2;
				/* based on hardware requirement, prefer bigger m1,m2 values */
				for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
					unsigned int ppm;

					/* solve for the m2 that hits the target exactly */
					clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
								     refclk * clock.m1);

					vlv_calc_dpll_params(refclk, &clock);

					if (!intel_pll_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;

					/* vlv_PLL_is_optimal() decides candidate vs best */
					if (!vlv_PLL_is_optimal(dev, target,
								&clock,
								best_clock,
								bestppm, &ppm))
						continue;

					*best_clock = clock;
					bestppm = ppm;
					found = true;
				}
			}
		}
	}

	return found;
}
970
971/*
972 * Returns a set of divisors for the desired target clock with the given
973 * refclk, or FALSE.  The returned values represent the clock equation:
974 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
975 */
static bool
chv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_device *dev = crtc->base.dev;
	unsigned int best_error_ppm;
	struct dpll clock;
	u64 m2;
	int found = false;

	/* match_clock is unused here; start with no result and worst error. */
	memset(best_clock, 0, sizeof(*best_clock));
	best_error_ppm = 1000000;

	/*
	 * Based on hardware doc, the n always set to 1, and m1 always
	 * set to 2.  If requires to support 200Mhz refclk, we need to
	 * revisit this because n may not 1 anymore.
	 */
	clock.n = 1, clock.m1 = 2;
	target *= 5;	/* fast clock */

	/* Scan the post dividers from largest to smallest. */
	for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
		for (clock.p2 = limit->p2.p2_fast;
				clock.p2 >= limit->p2.p2_slow;
				clock.p2 -= clock.p2 > 10 ? 2 : 1) {
			unsigned int error_ppm;

			clock.p = clock.p1 * clock.p2;

			/* m2 is computed in .22 fixed point (note the << 22). */
			m2 = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(target, clock.p * clock.n) << 22,
						   refclk * clock.m1);

			/* Skip candidates whose m2 would not fit an int when scaled by m1. */
			if (m2 > INT_MAX/clock.m1)
				continue;

			clock.m2 = m2;

			chv_calc_dpll_params(refclk, &clock);

			if (!intel_pll_is_valid(to_i915(dev), limit, &clock))
				continue;

			/* Only accept candidates that beat the current best error. */
			if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
						best_error_ppm, &error_ppm))
				continue;

			*best_clock = clock;
			best_error_ppm = error_ppm;
			found = true;
		}
	}

	return found;
}
1033
1034bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
1035			struct dpll *best_clock)
1036{
1037	int refclk = 100000;
1038	const struct intel_limit *limit = &intel_limits_bxt;
1039
1040	return chv_find_best_dpll(limit, crtc_state,
1041				  crtc_state->port_clock, refclk,
1042				  NULL, best_clock);
1043}
1044
1045static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
1046				    enum pipe pipe)
1047{
1048	i915_reg_t reg = PIPEDSL(pipe);
1049	u32 line1, line2;
1050	u32 line_mask;
1051
1052	if (IS_GEN(dev_priv, 2))
1053		line_mask = DSL_LINEMASK_GEN2;
1054	else
1055		line_mask = DSL_LINEMASK_GEN3;
1056
1057	line1 = intel_de_read(dev_priv, reg) & line_mask;
1058	msleep(5);
1059	line2 = intel_de_read(dev_priv, reg) & line_mask;
1060
1061	return line1 != line2;
1062}
1063
/*
 * Poll until the pipe's scanline counter is moving (state == true) or
 * stopped (state == false), warning on a 100 ms timeout.
 */
static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Wait for the display line to settle/start moving */
	if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100))
		drm_err(&dev_priv->drm,
			"pipe %c scanline %s wait timed out\n",
			pipe_name(pipe), onoff(state));
}
1075
/* Convenience wrapper: wait until the pipe's scanline counter has stopped. */
static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, false);
}
1080
/* Convenience wrapper: wait until the pipe's scanline counter starts moving. */
static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, true);
}
1085
/*
 * Wait for the pipe to fully shut down after disabling it.  Gen4+ exposes
 * a PIPECONF state bit; older hardware is checked indirectly via the
 * scanline counter.
 */
static void
intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (INTEL_GEN(dev_priv) >= 4) {
		enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
		i915_reg_t reg = PIPECONF(cpu_transcoder);

		/* Wait for the Pipe State to go off */
		if (intel_de_wait_for_clear(dev_priv, reg,
					    I965_PIPECONF_ACTIVE, 100))
			drm_WARN(&dev_priv->drm, 1,
				 "pipe_off wait timed out\n");
	} else {
		/* No status bit pre-gen4: rely on the scanline counter. */
		intel_wait_for_pipe_scanline_stopped(crtc);
	}
}
1105
1106/* Only for pre-ILK configs */
1107void assert_pll(struct drm_i915_private *dev_priv,
1108		enum pipe pipe, bool state)
1109{
1110	u32 val;
1111	bool cur_state;
1112
1113	val = intel_de_read(dev_priv, DPLL(pipe));
1114	cur_state = !!(val & DPLL_VCO_ENABLE);
1115	I915_STATE_WARN(cur_state != state,
1116	     "PLL state assertion failure (expected %s, current %s)\n",
1117			onoff(state), onoff(cur_state));
1118}
1119
1120/* XXX: the dsi pll is shared between MIPI DSI ports */
/* XXX: the dsi pll is shared between MIPI DSI ports */
void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
{
	u32 val;
	bool cur_state;

	/* The DSI PLL lives behind the CCK sideband; take/release the lock. */
	vlv_cck_get(dev_priv);
	val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
	vlv_cck_put(dev_priv);

	cur_state = val & DSI_PLL_VCO_EN;
	I915_STATE_WARN(cur_state != state,
	     "DSI PLL state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
1135
/*
 * Assert the FDI transmitter enable state for @pipe.  On DDI platforms
 * there is no FDI_TX register, so the transcoder DDI function enable bit
 * stands in for it.
 */
static void assert_fdi_tx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	bool cur_state;

	if (HAS_DDI(dev_priv)) {
		/*
		 * DDI does not have a specific FDI_TX register.
		 *
		 * FDI is never fed from EDP transcoder
		 * so pipe->transcoder cast is fine here.
		 */
		enum transcoder cpu_transcoder = (enum transcoder)pipe;
		u32 val = intel_de_read(dev_priv,
					TRANS_DDI_FUNC_CTL(cpu_transcoder));
		cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
	} else {
		u32 val = intel_de_read(dev_priv, FDI_TX_CTL(pipe));
		cur_state = !!(val & FDI_TX_ENABLE);
	}
	I915_STATE_WARN(cur_state != state,
	     "FDI TX state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
1160#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
1161#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
1162
1163static void assert_fdi_rx(struct drm_i915_private *dev_priv,
1164			  enum pipe pipe, bool state)
1165{
1166	u32 val;
1167	bool cur_state;
1168
1169	val = intel_de_read(dev_priv, FDI_RX_CTL(pipe));
1170	cur_state = !!(val & FDI_RX_ENABLE);
1171	I915_STATE_WARN(cur_state != state,
1172	     "FDI RX state assertion failure (expected %s, current %s)\n",
1173			onoff(state), onoff(cur_state));
1174}
1175#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
1176#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
1177
/*
 * Assert that the FDI TX PLL is running for @pipe.  Skipped where the
 * check does not apply (ILK's PLL is always on; on HSW the DDI ports
 * own the FDI PLL setup).
 */
static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	u32 val;

	/* ILK FDI PLL is always enabled */
	if (IS_GEN(dev_priv, 5))
		return;

	/* On Haswell, DDI ports are responsible for the FDI PLL setup */
	if (HAS_DDI(dev_priv))
		return;

	val = intel_de_read(dev_priv, FDI_TX_CTL(pipe));
	I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
}
1194
1195void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
1196		       enum pipe pipe, bool state)
1197{
1198	u32 val;
1199	bool cur_state;
1200
1201	val = intel_de_read(dev_priv, FDI_RX_CTL(pipe));
1202	cur_state = !!(val & FDI_RX_PLL_ENABLE);
1203	I915_STATE_WARN(cur_state != state,
1204	     "FDI RX PLL assertion failure (expected %s, current %s)\n",
1205			onoff(state), onoff(cur_state));
1206}
1207
/*
 * Warn if the panel power sequencer registers driving @pipe are still
 * write-locked.  The PPS register layout and panel-to-pipe mapping differ
 * per platform, hence the three branches below.  Not applicable on DDI
 * platforms.
 */
void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	i915_reg_t pp_reg;
	u32 val;
	enum pipe panel_pipe = INVALID_PIPE;
	bool locked = true;

	if (drm_WARN_ON(&dev_priv->drm, HAS_DDI(dev_priv)))
		return;

	if (HAS_PCH_SPLIT(dev_priv)) {
		u32 port_sel;

		/* PCH: a single PPS; find which port (and thus pipe) it drives. */
		pp_reg = PP_CONTROL(0);
		port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

		switch (port_sel) {
		case PANEL_PORT_SELECT_LVDS:
			intel_lvds_port_enabled(dev_priv, PCH_LVDS, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPA:
			intel_dp_port_enabled(dev_priv, DP_A, PORT_A, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPC:
			intel_dp_port_enabled(dev_priv, PCH_DP_C, PORT_C, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPD:
			intel_dp_port_enabled(dev_priv, PCH_DP_D, PORT_D, &panel_pipe);
			break;
		default:
			MISSING_CASE(port_sel);
			break;
		}
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		/* presumably write lock depends on pipe, not port select */
		pp_reg = PP_CONTROL(pipe);
		panel_pipe = pipe;
	} else {
		u32 port_sel;

		/* Pre-PCH-split: only LVDS can be the panel port. */
		pp_reg = PP_CONTROL(0);
		port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

		drm_WARN_ON(&dev_priv->drm,
			    port_sel != PANEL_PORT_SELECT_LVDS);
		intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe);
	}

	/* Unlocked means panel power off, or the unlock magic written. */
	val = intel_de_read(dev_priv, pp_reg);
	if (!(val & PANEL_POWER_ON) ||
	    ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
		locked = false;

	I915_STATE_WARN(panel_pipe == pipe && locked,
	     "panel assertion failure, pipe %c regs locked\n",
	     pipe_name(pipe));
}
1265
/*
 * Assert the enable state of @cpu_transcoder's pipe.  If the transcoder's
 * power well is down, PIPECONF cannot be read and the pipe is reported as
 * disabled.
 */
void assert_pipe(struct drm_i915_private *dev_priv,
		 enum transcoder cpu_transcoder, bool state)
{
	bool cur_state;
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		state = true;

	power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (wakeref) {
		u32 val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder));
		cur_state = !!(val & PIPECONF_ENABLE);

		intel_display_power_put(dev_priv, power_domain, wakeref);
	} else {
		/* Power well off implies the pipe is off. */
		cur_state = false;
	}

	I915_STATE_WARN(cur_state != state,
			"transcoder %s assertion failure (expected %s, current %s)\n",
			transcoder_name(cpu_transcoder),
			onoff(state), onoff(cur_state));
}
1293
1294static void assert_plane(struct intel_plane *plane, bool state)
1295{
1296	enum pipe pipe;
1297	bool cur_state;
1298
1299	cur_state = plane->get_hw_state(plane, &pipe);
1300
1301	I915_STATE_WARN(cur_state != state,
1302			"%s assertion failure (expected %s, current %s)\n",
1303			plane->base.name, onoff(state), onoff(cur_state));
1304}
1305
1306#define assert_plane_enabled(p) assert_plane(p, true)
1307#define assert_plane_disabled(p) assert_plane(p, false)
1308
/* Assert that every plane attached to @crtc is disabled in hardware. */
static void assert_planes_disabled(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_plane *plane;

	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
		assert_plane_disabled(plane);
}
1317
/*
 * Warn if vblank interrupts are still enabled on @crtc.  A successful
 * drm_crtc_vblank_get() (returns 0) indicates vblanks were on, which is
 * the failure case here; the matching put drops the reference we took.
 */
static void assert_vblank_disabled(struct drm_crtc *crtc)
{
	if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
		drm_crtc_vblank_put(crtc);
}
1323
1324void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
1325				    enum pipe pipe)
1326{
1327	u32 val;
1328	bool enabled;
1329
1330	val = intel_de_read(dev_priv, PCH_TRANSCONF(pipe));
1331	enabled = !!(val & TRANS_ENABLE);
1332	I915_STATE_WARN(enabled,
1333	     "transcoder assertion failed, should be off on pipe %c but is still active\n",
1334	     pipe_name(pipe));
1335}
1336
/*
 * Warn if the PCH DP port @port is enabled on transcoder @pipe, and flag
 * the IBX quirk where a disabled port may still be latched to transcoder B.
 */
static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
				   enum pipe pipe, enum port port,
				   i915_reg_t dp_reg)
{
	enum pipe port_pipe;
	bool state;

	state = intel_dp_port_enabled(dev_priv, dp_reg, port, &port_pipe);

	I915_STATE_WARN(state && port_pipe == pipe,
			"PCH DP %c enabled on transcoder %c, should be disabled\n",
			port_name(port), pipe_name(pipe));

	I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
			"IBX PCH DP %c still using transcoder B\n",
			port_name(port));
}
1354
/*
 * Warn if the PCH HDMI port @port is enabled on transcoder @pipe, and flag
 * the IBX quirk where a disabled port may still be latched to transcoder B.
 */
static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
				     enum pipe pipe, enum port port,
				     i915_reg_t hdmi_reg)
{
	enum pipe port_pipe;
	bool state;

	state = intel_sdvo_port_enabled(dev_priv, hdmi_reg, &port_pipe);

	I915_STATE_WARN(state && port_pipe == pipe,
			"PCH HDMI %c enabled on transcoder %c, should be disabled\n",
			port_name(port), pipe_name(pipe));

	I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
			"IBX PCH HDMI %c still using transcoder B\n",
			port_name(port));
}
1372
/* Warn if any PCH port (DP, VGA, LVDS, HDMI/SDVO) is enabled on @pipe. */
static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	enum pipe port_pipe;

	assert_pch_dp_disabled(dev_priv, pipe, PORT_B, PCH_DP_B);
	assert_pch_dp_disabled(dev_priv, pipe, PORT_C, PCH_DP_C);
	assert_pch_dp_disabled(dev_priv, pipe, PORT_D, PCH_DP_D);

	I915_STATE_WARN(intel_crt_port_enabled(dev_priv, PCH_ADPA, &port_pipe) &&
			port_pipe == pipe,
			"PCH VGA enabled on transcoder %c, should be disabled\n",
			pipe_name(pipe));

	I915_STATE_WARN(intel_lvds_port_enabled(dev_priv, PCH_LVDS, &port_pipe) &&
			port_pipe == pipe,
			"PCH LVDS enabled on transcoder %c, should be disabled\n",
			pipe_name(pipe));

	/* PCH SDVOB multiplex with HDMIB */
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB);
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC);
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID);
}
1397
/*
 * Write the precomputed DPLL value for VLV and wait for the PLL to lock.
 * Callers must have verified the pipe is disabled and the PPS unlocked.
 */
static void _vlv_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	intel_de_write(dev_priv, DPLL(pipe), pipe_config->dpll_hw_state.dpll);
	intel_de_posting_read(dev_priv, DPLL(pipe));
	udelay(150);

	if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1))
		drm_err(&dev_priv->drm, "DPLL %d failed to lock\n", pipe);
}
1411
/*
 * Enable the DPLL for a VLV pipe.  Skips the actual PLL enable when the
 * precomputed state does not request the VCO, but always programs DPLL_MD.
 */
static void vlv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder);

	/* PLL is protected by panel, make sure we can write it */
	assert_panel_unlocked(dev_priv, pipe);

	if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
		_vlv_enable_pll(crtc, pipe_config);

	intel_de_write(dev_priv, DPLL_MD(pipe),
		       pipe_config->dpll_hw_state.dpll_md);
	intel_de_posting_read(dev_priv, DPLL_MD(pipe));
}
1430
1431
/*
 * Enable the CHV DPLL: first re-enable the 10-bit clock via the DPIO
 * sideband, then enable the PLL and wait for lock.  The ordering and the
 * >100ns delay between the two steps are hardware requirements.
 */
static void _chv_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 tmp;

	vlv_dpio_get(dev_priv);

	/* Enable back the 10bit clock to display controller */
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	tmp |= DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);

	vlv_dpio_put(dev_priv);

	/*
	 * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
	 */
	udelay(1);

	/* Enable PLL */
	intel_de_write(dev_priv, DPLL(pipe), pipe_config->dpll_hw_state.dpll);

	/* Check PLL is locked */
	if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1))
		drm_err(&dev_priv->drm, "PLL %d failed to lock\n", pipe);
}
1461
/*
 * Enable the DPLL for a CHV pipe.  Pipes B/C need the
 * WaPixelRepeatModeFixForC0 workaround, routing the DPLL_MD value through
 * DPLLBMD via chicken bits instead of a direct DPLL_MD write.
 */
static void chv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder);

	/* PLL is protected by panel, make sure we can write it */
	assert_panel_unlocked(dev_priv, pipe);

	if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
		_chv_enable_pll(crtc, pipe_config);

	if (pipe != PIPE_A) {
		/*
		 * WaPixelRepeatModeFixForC0:chv
		 *
		 * DPLLCMD is AWOL. Use chicken bits to propagate
		 * the value from DPLLBMD to either pipe B or C.
		 */
		intel_de_write(dev_priv, CBR4_VLV, CBR_DPLLBMD_PIPE(pipe));
		intel_de_write(dev_priv, DPLL_MD(PIPE_B),
			       pipe_config->dpll_hw_state.dpll_md);
		intel_de_write(dev_priv, CBR4_VLV, 0);
		/* Remember the value for state readback/verification. */
		dev_priv->chv_dpll_md[pipe] = pipe_config->dpll_hw_state.dpll_md;

		/*
		 * DPLLB VGA mode also seems to cause problems.
		 * We should always have it disabled.
		 */
		drm_WARN_ON(&dev_priv->drm,
			    (intel_de_read(dev_priv, DPLL(PIPE_B)) &
			     DPLL_VGA_MODE_DIS) == 0);
	} else {
		intel_de_write(dev_priv, DPLL_MD(pipe),
			       pipe_config->dpll_hw_state.dpll_md);
		intel_de_posting_read(dev_priv, DPLL_MD(pipe));
	}
}
1502
1503static bool i9xx_has_pps(struct drm_i915_private *dev_priv)
1504{
1505	if (IS_I830(dev_priv))
1506		return false;
1507
1508	return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
1509}
1510
/*
 * Enable the DPLL on gen2-4.  The VGA-mode-first write, the double write
 * for the pixel multiplier, and the triple "for luck" writes all follow
 * the documented/observed programming sequence for this hardware.
 */
static void i9xx_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	i915_reg_t reg = DPLL(crtc->pipe);
	u32 dpll = crtc_state->dpll_hw_state.dpll;
	int i;

	assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder);

	/* PLL is protected by panel, make sure we can write it */
	if (i9xx_has_pps(dev_priv))
		assert_panel_unlocked(dev_priv, crtc->pipe);

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	intel_de_write(dev_priv, reg, dpll & ~DPLL_VGA_MODE_DIS);
	intel_de_write(dev_priv, reg, dpll);

	/* Wait for the clocks to stabilize. */
	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	if (INTEL_GEN(dev_priv) >= 4) {
		intel_de_write(dev_priv, DPLL_MD(crtc->pipe),
			       crtc_state->dpll_hw_state.dpll_md);
	} else {
		/* The pixel multiplier can only be updated once the
		 * DPLL is enabled and the clocks are stable.
		 *
		 * So write it again.
		 */
		intel_de_write(dev_priv, reg, dpll);
	}

	/* We do this three times for luck */
	for (i = 0; i < 3; i++) {
		intel_de_write(dev_priv, reg, dpll);
		intel_de_posting_read(dev_priv, reg);
		udelay(150); /* wait for warmup */
	}
}
1556
/*
 * Disable the DPLL on gen2-4, leaving only VGA mode disable set.
 * 830 keeps its pipes (and thus PLLs) always enabled, so it is skipped.
 */
static void i9xx_disable_pll(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Don't disable pipe or pipe PLLs if needed */
	if (IS_I830(dev_priv))
		return;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder);

	intel_de_write(dev_priv, DPLL(pipe), DPLL_VGA_MODE_DIS);
	intel_de_posting_read(dev_priv, DPLL(pipe));
}
1573
/*
 * Disable the VLV DPLL for @pipe, leaving the reference clock running.
 * Pipes other than A keep the CRI clock on (needed by the DPIO PHY).
 */
static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, (enum transcoder)pipe);

	val = DPLL_INTEGRATED_REF_CLK_VLV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	if (pipe != PIPE_A)
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;

	intel_de_write(dev_priv, DPLL(pipe), val);
	intel_de_posting_read(dev_priv, DPLL(pipe));
}
1589
/*
 * Disable the CHV DPLL for @pipe, then turn off the 10-bit clock to the
 * display controller via the DPIO sideband (the reverse of _chv_enable_pll).
 */
static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, (enum transcoder)pipe);

	val = DPLL_SSC_REF_CLK_CHV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	if (pipe != PIPE_A)
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;

	intel_de_write(dev_priv, DPLL(pipe), val);
	intel_de_posting_read(dev_priv, DPLL(pipe));

	vlv_dpio_get(dev_priv);

	/* Disable 10bit clock to display controller */
	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	val &= ~DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);

	vlv_dpio_put(dev_priv);
}
1615
/*
 * Wait (up to 1 ms) for a VLV/CHV digital port to report ready in its
 * status register.  Ports B/C report through DPLL(0) (port C's field is
 * shifted by 4 bits); port D reports through DPIO_PHY_STATUS.
 */
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
			 struct intel_digital_port *dig_port,
			 unsigned int expected_mask)
{
	u32 port_mask;
	i915_reg_t dpll_reg;

	switch (dig_port->base.port) {
	case PORT_B:
		port_mask = DPLL_PORTB_READY_MASK;
		dpll_reg = DPLL(0);
		break;
	case PORT_C:
		port_mask = DPLL_PORTC_READY_MASK;
		dpll_reg = DPLL(0);
		/* Port C's ready bits live 4 bits above port B's. */
		expected_mask <<= 4;
		break;
	case PORT_D:
		port_mask = DPLL_PORTD_READY_MASK;
		dpll_reg = DPIO_PHY_STATUS;
		break;
	default:
		BUG();
	}

	if (intel_de_wait_for_register(dev_priv, dpll_reg,
				       port_mask, expected_mask, 1000))
		drm_WARN(&dev_priv->drm, 1,
			 "timed out waiting for [ENCODER:%d:%s] port ready: got 0x%x, expected 0x%x\n",
			 dig_port->base.base.base.id, dig_port->base.base.name,
			 intel_de_read(dev_priv, dpll_reg) & port_mask,
			 expected_mask);
}
1649
/*
 * Enable the PCH transcoder for an ILK-class pipe: verify its clock and
 * FDI feeds are up, apply the CPT timing-override workaround, mirror the
 * pipe's BPC/interlace configuration, then enable and wait for the
 * transcoder state bit.
 */
static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val, pipeconf_val;

	/* Make sure PCH DPLL is enabled */
	assert_shared_dpll_enabled(dev_priv, crtc_state->shared_dpll);

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, pipe);
	assert_fdi_rx_enabled(dev_priv, pipe);

	if (HAS_PCH_CPT(dev_priv)) {
		reg = TRANS_CHICKEN2(pipe);
		val = intel_de_read(dev_priv, reg);
		/*
		 * Workaround: Set the timing override bit
		 * before enabling the pch transcoder.
		 */
		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
		/* Configure frame start delay to match the CPU */
		val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
		val |= TRANS_CHICKEN2_FRAME_START_DELAY(0);
		intel_de_write(dev_priv, reg, val);
	}

	reg = PCH_TRANSCONF(pipe);
	val = intel_de_read(dev_priv, reg);
	pipeconf_val = intel_de_read(dev_priv, PIPECONF(pipe));

	if (HAS_PCH_IBX(dev_priv)) {
		/* Configure frame start delay to match the CPU */
		val &= ~TRANS_FRAME_START_DELAY_MASK;
		val |= TRANS_FRAME_START_DELAY(0);

		/*
		 * Make the BPC in transcoder be consistent with
		 * that in pipeconf reg. For HDMI we must use 8bpc
		 * here for both 8bpc and 12bpc.
		 */
		val &= ~PIPECONF_BPC_MASK;
		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
			val |= PIPECONF_8BPC;
		else
			val |= pipeconf_val & PIPECONF_BPC_MASK;
	}

	/* Copy the pipe's interlace mode, with the IBX+SDVO legacy quirk. */
	val &= ~TRANS_INTERLACE_MASK;
	if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) {
		if (HAS_PCH_IBX(dev_priv) &&
		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			val |= TRANS_LEGACY_INTERLACED_ILK;
		else
			val |= TRANS_INTERLACED;
	} else {
		val |= TRANS_PROGRESSIVE;
	}

	intel_de_write(dev_priv, reg, val | TRANS_ENABLE);
	if (intel_de_wait_for_set(dev_priv, reg, TRANS_STATE_ENABLE, 100))
		drm_err(&dev_priv->drm, "failed to enable transcoder %c\n",
			pipe_name(pipe));
}
1716
/*
 * Enable the single LPT PCH transcoder.  LPT's transcoder is hardwired to
 * PIPE_A on the FDI RX side; the chicken-register workaround and interlace
 * mirroring parallel the ILK path.
 */
static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
				      enum transcoder cpu_transcoder)
{
	u32 val, pipeconf_val;

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
	assert_fdi_rx_enabled(dev_priv, PIPE_A);

	val = intel_de_read(dev_priv, TRANS_CHICKEN2(PIPE_A));
	/* Workaround: set timing override bit. */
	val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
	/* Configure frame start delay to match the CPU */
	val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
	val |= TRANS_CHICKEN2_FRAME_START_DELAY(0);
	intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val);

	val = TRANS_ENABLE;
	pipeconf_val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder));

	if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
	    PIPECONF_INTERLACED_ILK)
		val |= TRANS_INTERLACED;
	else
		val |= TRANS_PROGRESSIVE;

	intel_de_write(dev_priv, LPT_TRANSCONF, val);
	if (intel_de_wait_for_set(dev_priv, LPT_TRANSCONF,
				  TRANS_STATE_ENABLE, 100))
		drm_err(&dev_priv->drm, "Failed to enable PCH transcoder\n");
}
1748
/*
 * Disable the PCH transcoder for @pipe after verifying nothing (FDI, PCH
 * ports) still depends on it, then undo the CPT timing-override workaround.
 */
static void ilk_disable_pch_transcoder(struct drm_i915_private *dev_priv,
				       enum pipe pipe)
{
	i915_reg_t reg;
	u32 val;

	/* FDI relies on the transcoder */
	assert_fdi_tx_disabled(dev_priv, pipe);
	assert_fdi_rx_disabled(dev_priv, pipe);

	/* Ports must be off as well */
	assert_pch_ports_disabled(dev_priv, pipe);

	reg = PCH_TRANSCONF(pipe);
	val = intel_de_read(dev_priv, reg);
	val &= ~TRANS_ENABLE;
	intel_de_write(dev_priv, reg, val);
	/* wait for PCH transcoder off, transcoder state */
	if (intel_de_wait_for_clear(dev_priv, reg, TRANS_STATE_ENABLE, 50))
		drm_err(&dev_priv->drm, "failed to disable transcoder %c\n",
			pipe_name(pipe));

	if (HAS_PCH_CPT(dev_priv)) {
		/* Workaround: Clear the timing override chicken bit again. */
		reg = TRANS_CHICKEN2(pipe);
		val = intel_de_read(dev_priv, reg);
		val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
		intel_de_write(dev_priv, reg, val);
	}
}
1779
/* Disable the single LPT PCH transcoder and clear its timing override. */
void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = intel_de_read(dev_priv, LPT_TRANSCONF);
	val &= ~TRANS_ENABLE;
	intel_de_write(dev_priv, LPT_TRANSCONF, val);
	/* wait for PCH transcoder off, transcoder state */
	if (intel_de_wait_for_clear(dev_priv, LPT_TRANSCONF,
				    TRANS_STATE_ENABLE, 50))
		drm_err(&dev_priv->drm, "Failed to disable PCH transcoder\n");

	/* Workaround: clear timing override bit. */
	val = intel_de_read(dev_priv, TRANS_CHICKEN2(PIPE_A));
	val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
	intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val);
}
1797
1798enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
1799{
1800	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1801
1802	if (HAS_PCH_LPT(dev_priv))
1803		return PIPE_A;
1804	else
1805		return crtc->pipe;
1806}
1807
/*
 * Maximum value of the hardware frame counter for this crtc configuration,
 * or 0 when no usable hardware counter exists (gen2, or i965gm with the TV
 * encoder active).
 */
static u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);

	/*
	 * On i965gm the hardware frame counter reads
	 * zero when the TV encoder is enabled :(
	 */
	if (IS_I965GM(dev_priv) &&
	    (crtc_state->output_types & BIT(INTEL_OUTPUT_TVOUT)))
		return 0;

	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
		return 0xffffffff; /* full 32 bit counter */
	else if (INTEL_GEN(dev_priv) >= 3)
		return 0xffffff; /* only 24 bits of frame count */
	else
		return 0; /* Gen2 doesn't have a hardware frame counter */
}
1827
/*
 * Arm vblank handling for the crtc: program the hardware counter wrap
 * value for this configuration, then enable vblank interrupts.
 */
void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	assert_vblank_disabled(&crtc->base);
	drm_crtc_set_max_vblank_count(&crtc->base,
				      intel_crtc_max_vblank_count(crtc_state));
	drm_crtc_vblank_on(&crtc->base);
}
1837
/* Shut down vblank handling for the crtc and verify it is really off. */
void intel_crtc_vblank_off(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	drm_crtc_vblank_off(&crtc->base);
	assert_vblank_disabled(&crtc->base);
}
1845
/*
 * Enable the pipe for the given crtc state: sanity-check the required
 * clock sources, set PIPECONF_ENABLE, and on platforms without a usable
 * frame counter wait until the scanline actually starts moving so vblank
 * timestamps are valid from the first frame.
 */
void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	drm_dbg_kms(&dev_priv->drm, "enabling pipe %c\n", pipe_name(pipe));

	assert_planes_disabled(crtc);

	/*
	 * A pipe without a PLL won't actually be able to drive bits from
	 * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
	 * need the check.
	 */
	if (HAS_GMCH(dev_priv)) {
		if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
			assert_dsi_pll_enabled(dev_priv);
		else
			assert_pll_enabled(dev_priv, pipe);
	} else {
		if (new_crtc_state->has_pch_encoder) {
			/* if driving the PCH, we need FDI enabled */
			assert_fdi_rx_pll_enabled(dev_priv,
						  intel_crtc_pch_transcoder(crtc));
			assert_fdi_tx_pll_enabled(dev_priv,
						  (enum pipe) cpu_transcoder);
		}
		/* FIXME: assert CPU port conditions for SNB+ */
	}

	trace_intel_pipe_enable(crtc);

	reg = PIPECONF(cpu_transcoder);
	val = intel_de_read(dev_priv, reg);
	if (val & PIPECONF_ENABLE) {
		/* we keep both pipes enabled on 830 */
		drm_WARN_ON(&dev_priv->drm, !IS_I830(dev_priv));
		return;
	}

	intel_de_write(dev_priv, reg, val | PIPECONF_ENABLE);
	intel_de_posting_read(dev_priv, reg);

	/*
	 * Until the pipe starts PIPEDSL reads will return a stale value,
	 * which causes an apparent vblank timestamp jump when PIPEDSL
	 * resets to its proper value. That also messes up the frame count
	 * when it's derived from the timestamps. So let's wait for the
	 * pipe to start properly before we call drm_crtc_vblank_on()
	 */
	if (intel_crtc_max_vblank_count(new_crtc_state) == 0)
		intel_wait_for_pipe_scanline_moving(crtc);
}
1903
/*
 * Disable the pipe for the given (old) crtc state.  Planes must already be
 * off.  On 830 the pipe stays enabled (only double-wide may be cleared);
 * otherwise we clear PIPECONF_ENABLE and wait for the pipe to stop.
 */
void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	drm_dbg_kms(&dev_priv->drm, "disabling pipe %c\n", pipe_name(pipe));

	/*
	 * Make sure planes won't keep trying to pump pixels to us,
	 * or we might hang the display.
	 */
	assert_planes_disabled(crtc);

	trace_intel_pipe_disable(crtc);

	reg = PIPECONF(cpu_transcoder);
	val = intel_de_read(dev_priv, reg);
	if ((val & PIPECONF_ENABLE) == 0)
		return;

	/*
	 * Double wide has implications for planes
	 * so best keep it disabled when not needed.
	 */
	if (old_crtc_state->double_wide)
		val &= ~PIPECONF_DOUBLE_WIDE;

	/* Don't disable pipe or pipe PLLs if needed */
	if (!IS_I830(dev_priv))
		val &= ~PIPECONF_ENABLE;

	intel_de_write(dev_priv, reg, val);
	if ((val & PIPECONF_ENABLE) == 0)
		intel_wait_for_pipe_off(old_crtc_state);
}
1943
/* Tile size in bytes: gen2 uses 2 KiB tiles, everything newer 4 KiB. */
static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv)
{
	if (IS_GEN(dev_priv, 2))
		return 2048;

	return 4096;
}
1948
1949static bool is_ccs_plane(const struct drm_framebuffer *fb, int plane)
1950{
1951	if (!is_ccs_modifier(fb->modifier))
1952		return false;
1953
1954	return plane >= fb->format->num_planes / 2;
1955}
1956
1957static bool is_gen12_ccs_modifier(u64 modifier)
1958{
1959	return modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS ||
1960	       modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS;
1961
1962}
1963
1964static bool is_gen12_ccs_plane(const struct drm_framebuffer *fb, int plane)
1965{
1966	return is_gen12_ccs_modifier(fb->modifier) && is_ccs_plane(fb, plane);
1967}
1968
1969static bool is_aux_plane(const struct drm_framebuffer *fb, int plane)
1970{
1971	if (is_ccs_modifier(fb->modifier))
1972		return is_ccs_plane(fb, plane);
1973
1974	return plane == 1;
1975}
1976
1977static int main_to_ccs_plane(const struct drm_framebuffer *fb, int main_plane)
1978{
1979	drm_WARN_ON(fb->dev, !is_ccs_modifier(fb->modifier) ||
1980		    (main_plane && main_plane >= fb->format->num_planes / 2));
1981
1982	return fb->format->num_planes / 2 + main_plane;
1983}
1984
1985static int ccs_to_main_plane(const struct drm_framebuffer *fb, int ccs_plane)
1986{
1987	drm_WARN_ON(fb->dev, !is_ccs_modifier(fb->modifier) ||
1988		    ccs_plane < fb->format->num_planes / 2);
1989
1990	return ccs_plane - fb->format->num_planes / 2;
1991}
1992
1993/* Return either the main plane's CCS or - if not a CCS FB - UV plane */
1994int intel_main_to_aux_plane(const struct drm_framebuffer *fb, int main_plane)
1995{
1996	if (is_ccs_modifier(fb->modifier))
1997		return main_to_ccs_plane(fb, main_plane);
1998
1999	return 1;
2000}
2001
2002bool
2003intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info,
2004				    uint64_t modifier)
2005{
2006	return info->is_yuv &&
2007	       info->num_planes == (is_ccs_modifier(modifier) ? 4 : 2);
2008}
2009
2010static bool is_semiplanar_uv_plane(const struct drm_framebuffer *fb,
2011				   int color_plane)
2012{
2013	return intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier) &&
2014	       color_plane == 1;
2015}
2016
/*
 * Return the width in bytes of one tile on the given FB color plane.
 * Linear surfaces report the full GTT page size.
 */
static unsigned int
intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);
	unsigned int cpp = fb->format->cpp[color_plane];

	switch (fb->modifier) {
	case DRM_FORMAT_MOD_LINEAR:
		return intel_tile_size(dev_priv);
	case I915_FORMAT_MOD_X_TILED:
		if (IS_GEN(dev_priv, 2))
			return 128;
		else
			return 512;
	case I915_FORMAT_MOD_Y_TILED_CCS:
		/* Skl+ CCS planes use a regular 128 byte wide Y-tile. */
		if (is_ccs_plane(fb, color_plane))
			return 128;
		fallthrough;
	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
	case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
		/* Gen12 CCS planes are laid out in 64 byte cachelines. */
		if (is_ccs_plane(fb, color_plane))
			return 64;
		fallthrough;
	case I915_FORMAT_MOD_Y_TILED:
		if (IS_GEN(dev_priv, 2) || HAS_128_BYTE_Y_TILING(dev_priv))
			return 128;
		else
			return 512;
	case I915_FORMAT_MOD_Yf_TILED_CCS:
		if (is_ccs_plane(fb, color_plane))
			return 128;
		fallthrough;
	case I915_FORMAT_MOD_Yf_TILED:
		/* Yf tile width depends on the pixel size. */
		switch (cpp) {
		case 1:
			return 64;
		case 2:
		case 4:
			return 128;
		case 8:
		case 16:
			return 256;
		default:
			MISSING_CASE(cpp);
			return cpp;
		}
		break;
	default:
		MISSING_CASE(fb->modifier);
		return cpp;
	}
}
2069
2070static unsigned int
2071intel_tile_height(const struct drm_framebuffer *fb, int color_plane)
2072{
2073	if (is_gen12_ccs_plane(fb, color_plane))
2074		return 1;
2075
2076	return intel_tile_size(to_i915(fb->dev)) /
2077		intel_tile_width_bytes(fb, color_plane);
2078}
2079
2080/* Return the tile dimensions in pixel units */
2081static void intel_tile_dims(const struct drm_framebuffer *fb, int color_plane,
2082			    unsigned int *tile_width,
2083			    unsigned int *tile_height)
2084{
2085	unsigned int tile_width_bytes = intel_tile_width_bytes(fb, color_plane);
2086	unsigned int cpp = fb->format->cpp[color_plane];
2087
2088	*tile_width = tile_width_bytes / cpp;
2089	*tile_height = intel_tile_height(fb, color_plane);
2090}
2091
2092static unsigned int intel_tile_row_size(const struct drm_framebuffer *fb,
2093					int color_plane)
2094{
2095	unsigned int tile_width, tile_height;
2096
2097	intel_tile_dims(fb, color_plane, &tile_width, &tile_height);
2098
2099	return fb->pitches[color_plane] * tile_height;
2100}
2101
/* Round @height up to a whole number of tile rows on @color_plane. */
unsigned int
intel_fb_align_height(const struct drm_framebuffer *fb,
		      int color_plane, unsigned int height)
{
	return ALIGN(height, intel_tile_height(fb, color_plane));
}
2110
2111unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
2112{
2113	unsigned int size = 0;
2114	int i;
2115
2116	for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
2117		size += rot_info->plane[i].width * rot_info->plane[i].height;
2118
2119	return size;
2120}
2121
2122unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info)
2123{
2124	unsigned int size = 0;
2125	int i;
2126
2127	for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++)
2128		size += rem_info->plane[i].width * rem_info->plane[i].height;
2129
2130	return size;
2131}
2132
2133static void
2134intel_fill_fb_ggtt_view(struct i915_ggtt_view *view,
2135			const struct drm_framebuffer *fb,
2136			unsigned int rotation)
2137{
2138	view->type = I915_GGTT_VIEW_NORMAL;
2139	if (drm_rotation_90_or_270(rotation)) {
2140		view->type = I915_GGTT_VIEW_ROTATED;
2141		view->rotated = to_intel_framebuffer(fb)->rot_info;
2142	}
2143}
2144
/* Required GGTT alignment for cursor surfaces on the given platform. */
static unsigned int intel_cursor_alignment(const struct drm_i915_private *dev_priv)
{
	if (IS_I830(dev_priv))
		return 16 * 1024;

	if (IS_I85X(dev_priv))
		return 256;

	if (IS_I845G(dev_priv) || IS_I865G(dev_priv))
		return 32;

	return 4 * 1024;
}
2156
/* Required GGTT alignment for linear scanout surfaces. */
static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) >= 9)
		return 256 * 1024;

	if (IS_I965G(dev_priv) || IS_I965GM(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		return 128 * 1024;

	if (INTEL_GEN(dev_priv) >= 4)
		return 4 * 1024;

	return 0;
}
2169
/*
 * Return the required GGTT alignment (in bytes) for the surface base
 * address of @color_plane of @fb.
 */
static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
					 int color_plane)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);

	/* AUX_DIST needs only 4K alignment */
	if ((INTEL_GEN(dev_priv) < 12 && is_aux_plane(fb, color_plane)) ||
	    is_ccs_plane(fb, color_plane))
		return 4096;

	switch (fb->modifier) {
	case DRM_FORMAT_MOD_LINEAR:
		return intel_linear_alignment(dev_priv);
	case I915_FORMAT_MOD_X_TILED:
		if (INTEL_GEN(dev_priv) >= 9)
			return 256 * 1024;
		return 0;
	case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
		/* Semiplanar UV planes must start on a tile row boundary. */
		if (is_semiplanar_uv_plane(fb, color_plane))
			return intel_tile_row_size(fb, color_plane);
		fallthrough;
	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
		return 16 * 1024;
	case I915_FORMAT_MOD_Y_TILED_CCS:
	case I915_FORMAT_MOD_Yf_TILED_CCS:
	case I915_FORMAT_MOD_Y_TILED:
		/* Same tile-row rule for gen12+ semiplanar UV planes. */
		if (INTEL_GEN(dev_priv) >= 12 &&
		    is_semiplanar_uv_plane(fb, color_plane))
			return intel_tile_row_size(fb, color_plane);
		fallthrough;
	case I915_FORMAT_MOD_Yf_TILED:
		return 1 * 1024 * 1024;
	default:
		MISSING_CASE(fb->modifier);
		return 0;
	}
}
2207
2208static bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
2209{
2210	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
2211	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
2212
2213	return INTEL_GEN(dev_priv) < 4 ||
2214		(plane->has_fbc &&
2215		 plane_state->view.type == I915_GGTT_VIEW_NORMAL);
2216}
2217
/*
 * Pin @fb's backing object into the GGTT for scanout using @view.
 * When @uses_fence is set and the vma is map-and-fenceable, also try to
 * pin a fence; PLANE_HAS_FENCE is set in @out_flags on success.
 *
 * Returns a referenced, pinned vma or an ERR_PTR. Release with
 * intel_unpin_fb_vma().
 */
struct i915_vma *
intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
			   const struct i915_ggtt_view *view,
			   bool uses_fence,
			   unsigned long *out_flags)
{
	struct drm_device *dev = fb->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	intel_wakeref_t wakeref;
	struct i915_vma *vma;
	unsigned int pinctl;
	u32 alignment;

	if (drm_WARN_ON(dev, !i915_gem_object_is_framebuffer(obj)))
		return ERR_PTR(-EINVAL);

	alignment = intel_surf_alignment(fb, 0);
	if (drm_WARN_ON(dev, alignment && !is_power_of_2(alignment)))
		return ERR_PTR(-EINVAL);

	/* Note that the w/a also requires 64 PTE of padding following the
	 * bo. We currently fill all unused PTE with the shadow page and so
	 * we should always have valid PTE following the scanout preventing
	 * the VT-d warning.
	 */
	if (intel_scanout_needs_vtd_wa(dev_priv) && alignment < 256 * 1024)
		alignment = 256 * 1024;

	/*
	 * Global gtt pte registers are special registers which actually forward
	 * writes to a chunk of system memory. Which means that there is no risk
	 * that the register values disappear as soon as we call
	 * intel_runtime_pm_put(), so it is correct to wrap only the
	 * pin/unpin/fence and not more.
	 */
	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	atomic_inc(&dev_priv->gpu_error.pending_fb_pin);

	/*
	 * Valleyview is definitely limited to scanning out the first
	 * 512MiB. Lets presume this behaviour was inherited from the
	 * g4x display engine and that all earlier gen are similarly
	 * limited. Testing suggests that it is a little more
	 * complicated than this. For example, Cherryview appears quite
	 * happy to scanout from anywhere within its global aperture.
	 */
	pinctl = 0;
	if (HAS_GMCH(dev_priv))
		pinctl |= PIN_MAPPABLE;

	vma = i915_gem_object_pin_to_display_plane(obj,
						   alignment, view, pinctl);
	if (IS_ERR(vma))
		goto err;

	if (uses_fence && i915_vma_is_map_and_fenceable(vma)) {
		int ret;

		/*
		 * Install a fence for tiled scan-out. Pre-i965 always needs a
		 * fence, whereas 965+ only requires a fence if using
		 * framebuffer compression.  For simplicity, we always, when
		 * possible, install a fence as the cost is not that onerous.
		 *
		 * If we fail to fence the tiled scanout, then either the
		 * modeset will reject the change (which is highly unlikely as
		 * the affected systems, all but one, do not have unmappable
		 * space) or we will not be able to enable full powersaving
		 * techniques (also likely not to apply due to various limits
		 * FBC and the like impose on the size of the buffer, which
		 * presumably we violated anyway with this unmappable buffer).
		 * Anyway, it is presumably better to stumble onwards with
		 * something and try to run the system in a "less than optimal"
		 * mode that matches the user configuration.
		 */
		ret = i915_vma_pin_fence(vma);
		if (ret != 0 && INTEL_GEN(dev_priv) < 4) {
			/* Pre-gen4 cannot scan out without the fence - bail. */
			i915_vma_unpin(vma);
			vma = ERR_PTR(ret);
			goto err;
		}

		if (ret == 0 && vma->fence)
			*out_flags |= PLANE_HAS_FENCE;
	}

	/* Extra reference for the caller; dropped in intel_unpin_fb_vma(). */
	i915_vma_get(vma);
err:
	atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
	return vma;
}
2312
/*
 * Undo intel_pin_and_fence_fb_obj(): drop the fence (if PLANE_HAS_FENCE
 * is set in @flags), the scanout pin and the vma reference.
 */
void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
{
	if (flags & PLANE_HAS_FENCE)
		i915_vma_unpin_fence(vma);
	i915_vma_unpin(vma);
	i915_vma_put(vma);
}
2320
2321static int intel_fb_pitch(const struct drm_framebuffer *fb, int color_plane,
2322			  unsigned int rotation)
2323{
2324	if (drm_rotation_90_or_270(rotation))
2325		return to_intel_framebuffer(fb)->rotated[color_plane].pitch;
2326	else
2327		return fb->pitches[color_plane];
2328}
2329
2330/*
2331 * Convert the x/y offsets into a linear offset.
2332 * Only valid with 0/180 degree rotation, which is fine since linear
2333 * offset is only used with linear buffers on pre-hsw and tiled buffers
2334 * with gen2/3, and 90/270 degree rotations isn't supported on any of them.
2335 */
2336u32 intel_fb_xy_to_linear(int x, int y,
2337			  const struct intel_plane_state *state,
2338			  int color_plane)
2339{
2340	const struct drm_framebuffer *fb = state->hw.fb;
2341	unsigned int cpp = fb->format->cpp[color_plane];
2342	unsigned int pitch = state->color_plane[color_plane].stride;
2343
2344	return y * pitch + x * cpp;
2345}
2346
2347/*
2348 * Add the x/y offsets derived from fb->offsets[] to the user
2349 * specified plane src x/y offsets. The resulting x/y offsets
2350 * specify the start of scanout from the beginning of the gtt mapping.
2351 */
2352void intel_add_fb_offsets(int *x, int *y,
2353			  const struct intel_plane_state *state,
2354			  int color_plane)
2355
2356{
2357	*x += state->color_plane[color_plane].x;
2358	*y += state->color_plane[color_plane].y;
2359}
2360
2361static u32 intel_adjust_tile_offset(int *x, int *y,
2362				    unsigned int tile_width,
2363				    unsigned int tile_height,
2364				    unsigned int tile_size,
2365				    unsigned int pitch_tiles,
2366				    u32 old_offset,
2367				    u32 new_offset)
2368{
2369	unsigned int pitch_pixels = pitch_tiles * tile_width;
2370	unsigned int tiles;
2371
2372	WARN_ON(old_offset & (tile_size - 1));
2373	WARN_ON(new_offset & (tile_size - 1));
2374	WARN_ON(new_offset > old_offset);
2375
2376	tiles = (old_offset - new_offset) / tile_size;
2377
2378	*y += tiles / pitch_tiles * tile_height;
2379	*x += tiles % pitch_tiles * tile_width;
2380
2381	/* minimize x in case it got needlessly big */
2382	*y += *x / pitch_pixels * tile_height;
2383	*x %= pitch_pixels;
2384
2385	return new_offset;
2386}
2387
2388static bool is_surface_linear(const struct drm_framebuffer *fb, int color_plane)
2389{
2390	return fb->modifier == DRM_FORMAT_MOD_LINEAR ||
2391	       is_gen12_ccs_plane(fb, color_plane);
2392}
2393
/*
 * Move the surface base from @old_offset to @new_offset (<= @old_offset)
 * and compensate by growing the x/y offsets accordingly. Handles both
 * tiled and linear layouts. Returns @new_offset.
 */
static u32 intel_adjust_aligned_offset(int *x, int *y,
				       const struct drm_framebuffer *fb,
				       int color_plane,
				       unsigned int rotation,
				       unsigned int pitch,
				       u32 old_offset, u32 new_offset)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);
	unsigned int cpp = fb->format->cpp[color_plane];

	drm_WARN_ON(&dev_priv->drm, new_offset > old_offset);

	if (!is_surface_linear(fb, color_plane)) {
		unsigned int tile_size, tile_width, tile_height;
		unsigned int pitch_tiles;

		tile_size = intel_tile_size(dev_priv);
		intel_tile_dims(fb, color_plane, &tile_width, &tile_height);

		if (drm_rotation_90_or_270(rotation)) {
			/* In the rotated view the pitch is in tile rows. */
			pitch_tiles = pitch / tile_height;
			swap(tile_width, tile_height);
		} else {
			pitch_tiles = pitch / (tile_width * cpp);
		}

		intel_adjust_tile_offset(x, y, tile_width, tile_height,
					 tile_size, pitch_tiles,
					 old_offset, new_offset);
	} else {
		/* Linear: fold the byte delta directly back into x/y. */
		old_offset += *y * pitch + *x * cpp;

		*y = (old_offset - new_offset) / pitch;
		*x = ((old_offset - new_offset) - *y * pitch) / cpp;
	}

	return new_offset;
}
2432
2433/*
2434 * Adjust the tile offset by moving the difference into
2435 * the x/y offsets.
2436 */
2437static u32 intel_plane_adjust_aligned_offset(int *x, int *y,
2438					     const struct intel_plane_state *state,
2439					     int color_plane,
2440					     u32 old_offset, u32 new_offset)
2441{
2442	return intel_adjust_aligned_offset(x, y, state->hw.fb, color_plane,
2443					   state->hw.rotation,
2444					   state->color_plane[color_plane].stride,
2445					   old_offset, new_offset);
2446}
2447
2448/*
2449 * Computes the aligned offset to the base tile and adjusts
2450 * x, y. bytes per pixel is assumed to be a power-of-two.
2451 *
2452 * In the 90/270 rotated case, x and y are assumed
2453 * to be already rotated to match the rotated GTT view, and
2454 * pitch is the tile_height aligned framebuffer height.
2455 *
2456 * This function is used when computing the derived information
2457 * under intel_framebuffer, so using any of that information
2458 * here is not allowed. Anything under drm_framebuffer can be
2459 * used. This is why the user has to pass in the pitch since it
2460 * is specified in the rotated orientation.
2461 */
static u32 intel_compute_aligned_offset(struct drm_i915_private *dev_priv,
					int *x, int *y,
					const struct drm_framebuffer *fb,
					int color_plane,
					unsigned int pitch,
					unsigned int rotation,
					u32 alignment)
{
	unsigned int cpp = fb->format->cpp[color_plane];
	u32 offset, offset_aligned;

	if (!is_surface_linear(fb, color_plane)) {
		unsigned int tile_size, tile_width, tile_height;
		unsigned int tile_rows, tiles, pitch_tiles;

		tile_size = intel_tile_size(dev_priv);
		intel_tile_dims(fb, color_plane, &tile_width, &tile_height);

		if (drm_rotation_90_or_270(rotation)) {
			/* In the rotated view the pitch is in tile rows. */
			pitch_tiles = pitch / tile_height;
			swap(tile_width, tile_height);
		} else {
			pitch_tiles = pitch / (tile_width * cpp);
		}

		/* Split x/y into a whole-tile part and an intra-tile rest. */
		tile_rows = *y / tile_height;
		*y %= tile_height;

		tiles = *x / tile_width;
		*x %= tile_width;

		offset = (tile_rows * pitch_tiles + tiles) * tile_size;

		/* Round down to @alignment ... */
		offset_aligned = offset;
		if (alignment)
			offset_aligned = rounddown(offset_aligned, alignment);

		/* ... and fold the remainder back into x/y. */
		intel_adjust_tile_offset(x, y, tile_width, tile_height,
					 tile_size, pitch_tiles,
					 offset, offset_aligned);
	} else {
		offset = *y * pitch + *x * cpp;
		offset_aligned = offset;
		if (alignment) {
			offset_aligned = rounddown(offset_aligned, alignment);
			*y = (offset % alignment) / pitch;
			*x = ((offset % alignment) - *y * pitch) / cpp;
		} else {
			*y = *x = 0;
		}
	}

	return offset_aligned;
}
2516
2517static u32 intel_plane_compute_aligned_offset(int *x, int *y,
2518					      const struct intel_plane_state *state,
2519					      int color_plane)
2520{
2521	struct intel_plane *intel_plane = to_intel_plane(state->uapi.plane);
2522	struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
2523	const struct drm_framebuffer *fb = state->hw.fb;
2524	unsigned int rotation = state->hw.rotation;
2525	int pitch = state->color_plane[color_plane].stride;
2526	u32 alignment;
2527
2528	if (intel_plane->id == PLANE_CURSOR)
2529		alignment = intel_cursor_alignment(dev_priv);
2530	else
2531		alignment = intel_surf_alignment(fb, color_plane);
2532
2533	return intel_compute_aligned_offset(dev_priv, x, y, fb, color_plane,
2534					    pitch, rotation, alignment);
2535}
2536
/*
 * Convert fb->offsets[] of @color_plane into x/y offsets (in pixels)
 * from the start of the GTT mapping. Validates offset alignment and
 * catches offset/pitch overflows. Returns 0 or a negative error code.
 */
static int intel_fb_offset_to_xy(int *x, int *y,
				 const struct drm_framebuffer *fb,
				 int color_plane)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);
	unsigned int height;
	u32 alignment;

	/* Gen12+ semiplanar UV planes must start on a tile row boundary. */
	if (INTEL_GEN(dev_priv) >= 12 &&
	    is_semiplanar_uv_plane(fb, color_plane))
		alignment = intel_tile_row_size(fb, color_plane);
	else if (fb->modifier != DRM_FORMAT_MOD_LINEAR)
		alignment = intel_tile_size(dev_priv);
	else
		alignment = 0;

	if (alignment != 0 && fb->offsets[color_plane] % alignment) {
		drm_dbg_kms(&dev_priv->drm,
			    "Misaligned offset 0x%08x for color plane %d\n",
			    fb->offsets[color_plane], color_plane);
		return -EINVAL;
	}

	height = drm_framebuffer_plane_height(fb->height, fb, color_plane);
	height = ALIGN(height, intel_tile_height(fb, color_plane));

	/* Catch potential overflows early */
	if (add_overflows_t(u32, mul_u32_u32(height, fb->pitches[color_plane]),
			    fb->offsets[color_plane])) {
		drm_dbg_kms(&dev_priv->drm,
			    "Bad offset 0x%08x or pitch %d for color plane %d\n",
			    fb->offsets[color_plane], fb->pitches[color_plane],
			    color_plane);
		return -ERANGE;
	}

	*x = 0;
	*y = 0;

	/* Fold the byte offset into x/y relative to offset 0. */
	intel_adjust_aligned_offset(x, y,
				    fb, color_plane, DRM_MODE_ROTATE_0,
				    fb->pitches[color_plane],
				    fb->offsets[color_plane], 0);

	return 0;
}
2584
2585static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier)
2586{
2587	switch (fb_modifier) {
2588	case I915_FORMAT_MOD_X_TILED:
2589		return I915_TILING_X;
2590	case I915_FORMAT_MOD_Y_TILED:
2591	case I915_FORMAT_MOD_Y_TILED_CCS:
2592	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
2593	case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
2594		return I915_TILING_Y;
2595	default:
2596		return I915_TILING_NONE;
2597	}
2598}
2599
2600/*
2601 * From the Sky Lake PRM:
2602 * "The Color Control Surface (CCS) contains the compression status of
2603 *  the cache-line pairs. The compression state of the cache-line pair
2604 *  is specified by 2 bits in the CCS. Each CCS cache-line represents
2605 *  an area on the main surface of 16 x16 sets of 128 byte Y-tiled
2606 *  cache-line-pairs. CCS is always Y tiled."
2607 *
2608 * Since cache line pairs refers to horizontally adjacent cache lines,
2609 * each cache line in the CCS corresponds to an area of 32x16 cache
2610 * lines on the main surface. Since each pixel is 4 bytes, this gives
2611 * us a ratio of one byte in the CCS for each 8x16 pixels in the
2612 * main surface.
2613 */
/* Formats supported with the SKL+ CCS modifiers: one CCS plane per format. */
static const struct drm_format_info skl_ccs_formats[] = {
	{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
	{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
	{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
	{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
};
2624
2625/*
2626 * Gen-12 compression uses 4 bits of CCS data for each cache line pair in the
2627 * main surface. And each 64B CCS cache line represents an area of 4x1 Y-tiles
2628 * in the main surface. With 4 byte pixels and each Y-tile having dimensions of
2629 * 32x32 pixels, the ratio turns out to 1B in the CCS for every 2x32 pixels in
2630 * the main surface.
2631 */
/*
 * Formats supported with the gen12 CCS modifiers. RGB/packed-YUV formats
 * carry one CCS plane; semiplanar YUV formats carry two main planes plus
 * two CCS planes (num_planes == 4).
 */
static const struct drm_format_info gen12_ccs_formats[] = {
	{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
	  .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
	  .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
	  .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 1, .vsub = 1, .has_alpha = true },
	{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
	  .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 1, .vsub = 1, .has_alpha = true },
	{ .format = DRM_FORMAT_YUYV, .num_planes = 2,
	  .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 2, .vsub = 1, .is_yuv = true },
	{ .format = DRM_FORMAT_YVYU, .num_planes = 2,
	  .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 2, .vsub = 1, .is_yuv = true },
	{ .format = DRM_FORMAT_UYVY, .num_planes = 2,
	  .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 2, .vsub = 1, .is_yuv = true },
	{ .format = DRM_FORMAT_VYUY, .num_planes = 2,
	  .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 2, .vsub = 1, .is_yuv = true },
	{ .format = DRM_FORMAT_NV12, .num_planes = 4,
	  .char_per_block = { 1, 2, 1, 1 }, .block_w = { 1, 1, 4, 4 }, .block_h = { 1, 1, 1, 1 },
	  .hsub = 2, .vsub = 2, .is_yuv = true },
	{ .format = DRM_FORMAT_P010, .num_planes = 4,
	  .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
	  .hsub = 2, .vsub = 2, .is_yuv = true },
	{ .format = DRM_FORMAT_P012, .num_planes = 4,
	  .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
	  .hsub = 2, .vsub = 2, .is_yuv = true },
	{ .format = DRM_FORMAT_P016, .num_planes = 4,
	  .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
	  .hsub = 2, .vsub = 2, .is_yuv = true },
};
2670
2671static const struct drm_format_info *
2672lookup_format_info(const struct drm_format_info formats[],
2673		   int num_formats, u32 format)
2674{
2675	int i;
2676
2677	for (i = 0; i < num_formats; i++) {
2678		if (formats[i].format == format)
2679			return &formats[i];
2680	}
2681
2682	return NULL;
2683}
2684
2685static const struct drm_format_info *
2686intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
2687{
2688	switch (cmd->modifier[0]) {
2689	case I915_FORMAT_MOD_Y_TILED_CCS:
2690	case I915_FORMAT_MOD_Yf_TILED_CCS:
2691		return lookup_format_info(skl_ccs_formats,
2692					  ARRAY_SIZE(skl_ccs_formats),
2693					  cmd->pixel_format);
2694	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
2695	case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
2696		return lookup_format_info(gen12_ccs_formats,
2697					  ARRAY_SIZE(gen12_ccs_formats),
2698					  cmd->pixel_format);
2699	default:
2700		return NULL;
2701	}
2702}
2703
2704bool is_ccs_modifier(u64 modifier)
2705{
2706	return modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS ||
2707	       modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS ||
2708	       modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
2709	       modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
2710}
2711
2712static int gen12_ccs_aux_stride(struct drm_framebuffer *fb, int ccs_plane)
2713{
2714	return DIV_ROUND_UP(fb->pitches[ccs_to_main_plane(fb, ccs_plane)],
2715			    512) * 64;
2716}
2717
2718u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
2719			      u32 pixel_format, u64 modifier)
2720{
2721	struct intel_crtc *crtc;
2722	struct intel_plane *plane;
2723
2724	/*
2725	 * We assume the primary plane for pipe A has
2726	 * the highest stride limits of them all,
2727	 * if in case pipe A is disabled, use the first pipe from pipe_mask.
2728	 */
2729	crtc = intel_get_first_crtc(dev_priv);
2730	if (!crtc)
2731		return 0;
2732
2733	plane = to_intel_plane(crtc->base.primary);
2734
2735	return plane->max_stride(plane, pixel_format, modifier,
2736				 DRM_MODE_ROTATE_0);
2737}
2738
2739static
2740u32 intel_fb_max_stride(struct drm_i915_private *dev_priv,
2741			u32 pixel_format, u64 modifier)
2742{
2743	/*
2744	 * Arbitrary limit for gen4+ chosen to match the
2745	 * render engine max stride.
2746	 *
2747	 * The new CCS hash mode makes remapping impossible
2748	 */
2749	if (!is_ccs_modifier(modifier)) {
2750		if (INTEL_GEN(dev_priv) >= 7)
2751			return 256*1024;
2752		else if (INTEL_GEN(dev_priv) >= 4)
2753			return 128*1024;
2754	}
2755
2756	return intel_plane_fb_max_stride(dev_priv, pixel_format, modifier);
2757}
2758
/* Return the required stride alignment (bytes) of @color_plane of @fb. */
static u32
intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);
	u32 tile_width;

	if (is_surface_linear(fb, color_plane)) {
		u32 max_stride = intel_plane_fb_max_stride(dev_priv,
							   fb->format->format,
							   fb->modifier);

		/*
		 * To make remapping with linear generally feasible
		 * we need the stride to be page aligned.
		 */
		if (fb->pitches[color_plane] > max_stride &&
		    !is_ccs_modifier(fb->modifier))
			return intel_tile_size(dev_priv);
		else
			return 64;
	}

	tile_width = intel_tile_width_bytes(fb, color_plane);
	if (is_ccs_modifier(fb->modifier)) {
		/*
		 * Display WA #0531: skl,bxt,kbl,glk
		 *
		 * Render decompression and plane width > 3840
		 * combined with horizontal panning requires the
		 * plane stride to be a multiple of 4. We'll just
		 * require the entire fb to accommodate that to avoid
		 * potential runtime errors at plane configuration time.
		 */
		if (IS_GEN(dev_priv, 9) && color_plane == 0 && fb->width > 3840)
			tile_width *= 4;
		/*
		 * The main surface pitch must be padded to a multiple of four
		 * tile widths.
		 */
		else if (INTEL_GEN(dev_priv) >= 12)
			tile_width *= 4;
	}
	return tile_width;
}
2803
2804bool intel_plane_can_remap(const struct intel_plane_state *plane_state)
2805{
2806	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
2807	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
2808	const struct drm_framebuffer *fb = plane_state->hw.fb;
2809	int i;
2810
2811	/* We don't want to deal with remapping with cursors */
2812	if (plane->id == PLANE_CURSOR)
2813		return false;
2814
2815	/*
2816	 * The display engine limits already match/exceed the
2817	 * render engine limits, so not much point in remapping.
2818	 * Would also need to deal with the fence POT alignment
2819	 * and gen2 2KiB GTT tile size.
2820	 */
2821	if (INTEL_GEN(dev_priv) < 4)
2822		return false;
2823
2824	/*
2825	 * The new CCS hash mode isn't compatible with remapping as
2826	 * the virtual address of the pages affects the compressed data.
2827	 */
2828	if (is_ccs_modifier(fb->modifier))
2829		return false;
2830
2831	/* Linear needs a page aligned stride for remapping */
2832	if (fb->modifier == DRM_FORMAT_MOD_LINEAR) {
2833		unsigned int alignment = intel_tile_size(dev_priv) - 1;
2834
2835		for (i = 0; i < fb->format->num_planes; i++) {
2836			if (fb->pitches[i] & alignment)
2837				return false;
2838		}
2839	}
2840
2841	return true;
2842}
2843
2844static bool intel_plane_needs_remap(const struct intel_plane_state *plane_state)
2845{
2846	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
2847	const struct drm_framebuffer *fb = plane_state->hw.fb;
2848	unsigned int rotation = plane_state->hw.rotation;
2849	u32 stride, max_stride;
2850
2851	/*
2852	 * No remapping for invisible planes since we don't have
2853	 * an actual source viewport to remap.
2854	 */
2855	if (!plane_state->uapi.visible)
2856		return false;
2857
2858	if (!intel_plane_can_remap(plane_state))
2859		return false;
2860
2861	/*
2862	 * FIXME: aux plane limits on gen9+ are
2863	 * unclear in Bspec, for now no checking.
2864	 */
2865	stride = intel_fb_pitch(fb, 0, rotation);
2866	max_stride = plane->max_stride(plane, fb->format->format,
2867				       fb->modifier, rotation);
2868
2869	return stride > max_stride;
2870}
2871
/*
 * Return in @hsub/@vsub the horizontal/vertical subsampling factors of
 * @color_plane. For gen12 CCS planes they are derived from the block
 * sizes relative to the main plane they describe.
 */
static void
intel_fb_plane_get_subsampling(int *hsub, int *vsub,
			       const struct drm_framebuffer *fb,
			       int color_plane)
{
	int main_plane;

	/* The first plane is never subsampled. */
	if (color_plane == 0) {
		*hsub = 1;
		*vsub = 1;

		return;
	}

	/*
	 * TODO: Deduct the subsampling from the char block for all CCS
	 * formats and planes.
	 */
	if (!is_gen12_ccs_plane(fb, color_plane)) {
		*hsub = fb->format->hsub;
		*vsub = fb->format->vsub;

		return;
	}

	main_plane = ccs_to_main_plane(fb, color_plane);
	*hsub = drm_format_info_block_width(fb->format, color_plane) /
		drm_format_info_block_width(fb->format, main_plane);

	/*
	 * The min stride check in the core framebuffer_check() function
	 * assumes that format->hsub applies to every plane except for the
	 * first plane. That's incorrect for the CCS AUX plane of the first
	 * plane, but for the above check to pass we must define the block
	 * width with that subsampling applied to it. Adjust the width here
	 * accordingly, so we can calculate the actual subsampling factor.
	 */
	if (main_plane == 0)
		*hsub *= fb->format->hsub;

	*vsub = 32;
}
/*
 * Validate that the intra-tile x/y offsets of a CCS plane match those of
 * its main plane. Returns 0 on success (or for non-CCS planes), -EINVAL
 * on a mismatch.
 */
static int
intel_fb_check_ccs_xy(struct drm_framebuffer *fb, int ccs_plane, int x, int y)
{
	struct drm_i915_private *i915 = to_i915(fb->dev);
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	int main_plane;
	int hsub, vsub;
	int tile_width, tile_height;
	int ccs_x, ccs_y;
	int main_x, main_y;

	if (!is_ccs_plane(fb, ccs_plane))
		return 0;

	intel_tile_dims(fb, ccs_plane, &tile_width, &tile_height);
	intel_fb_plane_get_subsampling(&hsub, &vsub, fb, ccs_plane);

	/* Scale the CCS tile dims up to main surface pixel units. */
	tile_width *= hsub;
	tile_height *= vsub;

	ccs_x = (x * hsub) % tile_width;
	ccs_y = (y * vsub) % tile_height;

	main_plane = ccs_to_main_plane(fb, ccs_plane);
	main_x = intel_fb->normal[main_plane].x % tile_width;
	main_y = intel_fb->normal[main_plane].y % tile_height;

	/*
	 * CCS doesn't have its own x/y offset register, so the intra CCS tile
	 * x/y offsets must match between CCS and the main surface.
	 */
	if (main_x != ccs_x || main_y != ccs_y) {
		drm_dbg_kms(&i915->drm,
			      "Bad CCS x/y (main %d,%d ccs %d,%d) full (main %d,%d ccs %d,%d)\n",
			      main_x, main_y,
			      ccs_x, ccs_y,
			      intel_fb->normal[main_plane].x,
			      intel_fb->normal[main_plane].y,
			      x, y);
		return -EINVAL;
	}

	return 0;
}
2958
2959static void
2960intel_fb_plane_dims(int *w, int *h, struct drm_framebuffer *fb, int color_plane)
2961{
2962	int main_plane = is_ccs_plane(fb, color_plane) ?
2963			 ccs_to_main_plane(fb, color_plane) : 0;
2964	int main_hsub, main_vsub;
2965	int hsub, vsub;
2966
2967	intel_fb_plane_get_subsampling(&main_hsub, &main_vsub, fb, main_plane);
2968	intel_fb_plane_get_subsampling(&hsub, &vsub, fb, color_plane);
2969	*w = fb->width / main_hsub / hsub;
2970	*h = fb->height / main_vsub / vsub;
2971}
2972
2973/*
2974 * Setup the rotated view for an FB plane and return the size the GTT mapping
2975 * requires for this view.
2976 */
/*
 * Setup the rotated view for an FB plane and return the size the GTT mapping
 * requires for this view.
 */
static u32
setup_fb_rotation(int plane, const struct intel_remapped_plane_info *plane_info,
		  u32 gtt_offset_rotated, int x, int y,
		  unsigned int width, unsigned int height,
		  unsigned int tile_size,
		  unsigned int tile_width, unsigned int tile_height,
		  struct drm_framebuffer *fb)
{
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct intel_rotation_info *rot_info = &intel_fb->rot_info;
	unsigned int pitch_tiles;
	struct drm_rect r;

	/* Y or Yf modifiers required for 90/270 rotation */
	if (fb->modifier != I915_FORMAT_MOD_Y_TILED &&
	    fb->modifier != I915_FORMAT_MOD_Yf_TILED)
		return 0;

	/* Guard against indexing past the rot_info plane array. */
	if (drm_WARN_ON(fb->dev, plane >= ARRAY_SIZE(rot_info->plane)))
		return 0;

	rot_info->plane[plane] = *plane_info;

	/* Rotated pitch is the plane height expressed in bytes of tiles. */
	intel_fb->rotated[plane].pitch = plane_info->height * tile_height;

	/* rotate the x/y offsets to match the GTT view */
	drm_rect_init(&r, x, y, width, height);
	drm_rect_rotate(&r,
			plane_info->width * tile_width,
			plane_info->height * tile_height,
			DRM_MODE_ROTATE_270);
	x = r.x1;
	y = r.y1;

	/* rotate the tile dimensions to match the GTT view */
	pitch_tiles = intel_fb->rotated[plane].pitch / tile_height;
	swap(tile_width, tile_height);

	/*
	 * We only keep the x/y offsets, so push all of the
	 * gtt offset into the x/y offsets.
	 */
	intel_adjust_tile_offset(&x, &y,
				 tile_width, tile_height,
				 tile_size, pitch_tiles,
				 gtt_offset_rotated * tile_size, 0);

	/*
	 * First pixel of the framebuffer from
	 * the start of the rotated gtt mapping.
	 */
	intel_fb->rotated[plane].x = x;
	intel_fb->rotated[plane].y = y;

	/* Tiles consumed by this plane in the rotated mapping. */
	return plane_info->width * plane_info->height;
}
3033
/*
 * intel_fill_fb_info - precompute per-plane layout info for an FB
 * @dev_priv: i915 device
 * @fb: framebuffer to fill in
 *
 * For each color plane: decode and validate the x/y offset, record the
 * normal-view x/y, and (for tiled surfaces) set up the rotated view.
 * Finally verify the whole layout fits inside the backing object.
 *
 * Returns 0 on success, a negative error code on a bad offset or an
 * undersized object.
 */
static int
intel_fill_fb_info(struct drm_i915_private *dev_priv,
		   struct drm_framebuffer *fb)
{
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	u32 gtt_offset_rotated = 0;
	unsigned int max_size = 0;
	int i, num_planes = fb->format->num_planes;
	unsigned int tile_size = intel_tile_size(dev_priv);

	for (i = 0; i < num_planes; i++) {
		unsigned int width, height;
		unsigned int cpp, size;
		u32 offset;
		int x, y;
		int ret;

		cpp = fb->format->cpp[i];
		intel_fb_plane_dims(&width, &height, fb, i);

		/* Convert the byte offset in fb->offsets[i] into x/y. */
		ret = intel_fb_offset_to_xy(&x, &y, fb, i);
		if (ret) {
			drm_dbg_kms(&dev_priv->drm,
				    "bad fb plane %d offset: 0x%x\n",
				    i, fb->offsets[i]);
			return ret;
		}

		/* CCS AUX planes must agree with their main plane's x/y. */
		ret = intel_fb_check_ccs_xy(fb, i, x, y);
		if (ret)
			return ret;

		/*
		 * The fence (if used) is aligned to the start of the object
		 * so having the framebuffer wrap around across the edge of the
		 * fenced region doesn't really work. We have no API to configure
		 * the fence start offset within the object (nor could we probably
		 * on gen2/3). So it's just easier if we just require that the
		 * fb layout agrees with the fence layout. We already check that the
		 * fb stride matches the fence stride elsewhere.
		 */
		if (i == 0 && i915_gem_object_is_tiled(obj) &&
		    (x + width) * cpp > fb->pitches[i]) {
			drm_dbg_kms(&dev_priv->drm,
				    "bad fb plane %d offset: 0x%x\n",
				     i, fb->offsets[i]);
			return -EINVAL;
		}

		/*
		 * First pixel of the framebuffer from
		 * the start of the normal gtt mapping.
		 */
		intel_fb->normal[i].x = x;
		intel_fb->normal[i].y = y;

		/* Tile-align the offset; remainder moves into x/y. */
		offset = intel_compute_aligned_offset(dev_priv, &x, &y, fb, i,
						      fb->pitches[i],
						      DRM_MODE_ROTATE_0,
						      tile_size);
		offset /= tile_size;

		if (!is_surface_linear(fb, i)) {
			struct intel_remapped_plane_info plane_info;
			unsigned int tile_width, tile_height;

			intel_tile_dims(fb, i, &tile_width, &tile_height);

			plane_info.offset = offset;
			plane_info.stride = DIV_ROUND_UP(fb->pitches[i],
							 tile_width * cpp);
			plane_info.width = DIV_ROUND_UP(x + width, tile_width);
			plane_info.height = DIV_ROUND_UP(y + height,
							 tile_height);

			/* how many tiles does this plane need */
			size = plane_info.stride * plane_info.height;
			/*
			 * If the plane isn't horizontally tile aligned,
			 * we need one more tile.
			 */
			if (x != 0)
				size++;

			gtt_offset_rotated +=
				setup_fb_rotation(i, &plane_info,
						  gtt_offset_rotated,
						  x, y, width, height,
						  tile_size,
						  tile_width, tile_height,
						  fb);
		} else {
			/* Linear: round the byte extent up to whole tiles. */
			size = DIV_ROUND_UP((y + height) * fb->pitches[i] +
					    x * cpp, tile_size);
		}

		/* how many tiles in total needed in the bo */
		max_size = max(max_size, offset + size);
	}

	/* Reject FBs whose layout exceeds the backing object. */
	if (mul_u32_u32(max_size, tile_size) > obj->base.size) {
		drm_dbg_kms(&dev_priv->drm,
			    "fb too big for bo (need %llu bytes, have %zu bytes)\n",
			    mul_u32_u32(max_size, tile_size), obj->base.size);
		return -EINVAL;
	}

	return 0;
}
3144
/*
 * intel_plane_remap_gtt - build a remapped/rotated GTT view for a plane
 * @plane_state: plane state to update
 *
 * Constructs a per-plane GGTT view covering only the visible source
 * viewport, updating plane_state->view and the per color plane
 * stride/offset/x/y. Also rewrites plane_state->uapi.src to be relative
 * to (and possibly rotated into) the new view.
 *
 * CCS modifiers are not supported here (see the drm_WARN_ON below).
 */
static void
intel_plane_remap_gtt(struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->uapi.plane->dev);
	struct drm_framebuffer *fb = plane_state->hw.fb;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	/* Pointer into view; the memset below clears it before use. */
	struct intel_rotation_info *info = &plane_state->view.rotated;
	unsigned int rotation = plane_state->hw.rotation;
	int i, num_planes = fb->format->num_planes;
	unsigned int tile_size = intel_tile_size(dev_priv);
	unsigned int src_x, src_y;
	unsigned int src_w, src_h;
	u32 gtt_offset = 0;

	memset(&plane_state->view, 0, sizeof(plane_state->view));
	plane_state->view.type = drm_rotation_90_or_270(rotation) ?
		I915_GGTT_VIEW_ROTATED : I915_GGTT_VIEW_REMAPPED;

	/* Viewport in whole pixels (src is in 16.16 fixed point). */
	src_x = plane_state->uapi.src.x1 >> 16;
	src_y = plane_state->uapi.src.y1 >> 16;
	src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
	src_h = drm_rect_height(&plane_state->uapi.src) >> 16;

	drm_WARN_ON(&dev_priv->drm, is_ccs_modifier(fb->modifier));

	/* Make src coordinates relative to the viewport */
	drm_rect_translate(&plane_state->uapi.src,
			   -(src_x << 16), -(src_y << 16));

	/* Rotate src coordinates to match rotated GTT view */
	if (drm_rotation_90_or_270(rotation))
		drm_rect_rotate(&plane_state->uapi.src,
				src_w << 16, src_h << 16,
				DRM_MODE_ROTATE_270);

	for (i = 0; i < num_planes; i++) {
		/* Plane 0 is unsubsampled; others use the format factors. */
		unsigned int hsub = i ? fb->format->hsub : 1;
		unsigned int vsub = i ? fb->format->vsub : 1;
		unsigned int cpp = fb->format->cpp[i];
		unsigned int tile_width, tile_height;
		unsigned int width, height;
		unsigned int pitch_tiles;
		unsigned int x, y;
		u32 offset;

		intel_tile_dims(fb, i, &tile_width, &tile_height);

		x = src_x / hsub;
		y = src_y / vsub;
		width = src_w / hsub;
		height = src_h / vsub;

		/*
		 * First pixel of the src viewport from the
		 * start of the normal gtt mapping.
		 */
		x += intel_fb->normal[i].x;
		y += intel_fb->normal[i].y;

		/* Tile-align; remainder folds into x/y. */
		offset = intel_compute_aligned_offset(dev_priv, &x, &y,
						      fb, i, fb->pitches[i],
						      DRM_MODE_ROTATE_0, tile_size);
		offset /= tile_size;

		drm_WARN_ON(&dev_priv->drm, i >= ARRAY_SIZE(info->plane));
		info->plane[i].offset = offset;
		info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i],
						     tile_width * cpp);
		info->plane[i].width = DIV_ROUND_UP(x + width, tile_width);
		info->plane[i].height = DIV_ROUND_UP(y + height, tile_height);

		if (drm_rotation_90_or_270(rotation)) {
			struct drm_rect r;

			/* rotate the x/y offsets to match the GTT view */
			drm_rect_init(&r, x, y, width, height);
			drm_rect_rotate(&r,
					info->plane[i].width * tile_width,
					info->plane[i].height * tile_height,
					DRM_MODE_ROTATE_270);
			x = r.x1;
			y = r.y1;

			pitch_tiles = info->plane[i].height;
			plane_state->color_plane[i].stride = pitch_tiles * tile_height;

			/* rotate the tile dimensions to match the GTT view */
			swap(tile_width, tile_height);
		} else {
			pitch_tiles = info->plane[i].width;
			plane_state->color_plane[i].stride = pitch_tiles * tile_width * cpp;
		}

		/*
		 * We only keep the x/y offsets, so push all of the
		 * gtt offset into the x/y offsets.
		 */
		intel_adjust_tile_offset(&x, &y,
					 tile_width, tile_height,
					 tile_size, pitch_tiles,
					 gtt_offset * tile_size, 0);

		/* Next color plane starts after this one's tiles. */
		gtt_offset += info->plane[i].width * info->plane[i].height;

		plane_state->color_plane[i].offset = 0;
		plane_state->color_plane[i].x = x;
		plane_state->color_plane[i].y = y;
	}
}
3255
/*
 * intel_plane_compute_gtt - choose the GGTT view and per-plane layout
 * @plane_state: plane state to update
 *
 * Either remaps the plane into a viewport-sized GGTT view (when
 * needed), or uses the FB's precomputed normal/rotated coordinates.
 * In both cases the stride limits are re-validated at the end.
 *
 * Returns 0 on success, or the error from intel_plane_check_stride().
 */
static int
intel_plane_compute_gtt(struct intel_plane_state *plane_state)
{
	const struct intel_framebuffer *fb =
		to_intel_framebuffer(plane_state->hw.fb);
	unsigned int rotation = plane_state->hw.rotation;
	int i, num_planes;

	/* No FB, nothing to map. */
	if (!fb)
		return 0;

	num_planes = fb->base.format->num_planes;

	if (intel_plane_needs_remap(plane_state)) {
		intel_plane_remap_gtt(plane_state);

		/*
		 * Sometimes even remapping can't overcome
		 * the stride limitations :( Can happen with
		 * big plane sizes and suitably misaligned
		 * offsets.
		 */
		return intel_plane_check_stride(plane_state);
	}

	intel_fill_fb_ggtt_view(&plane_state->view, &fb->base, rotation);

	/* Use the coordinates precomputed by intel_fill_fb_info(). */
	for (i = 0; i < num_planes; i++) {
		plane_state->color_plane[i].stride = intel_fb_pitch(&fb->base, i, rotation);
		plane_state->color_plane[i].offset = 0;

		if (drm_rotation_90_or_270(rotation)) {
			plane_state->color_plane[i].x = fb->rotated[i].x;
			plane_state->color_plane[i].y = fb->rotated[i].y;
		} else {
			plane_state->color_plane[i].x = fb->normal[i].x;
			plane_state->color_plane[i].y = fb->normal[i].y;
		}
	}

	/* Rotate src coordinates to match rotated GTT view */
	if (drm_rotation_90_or_270(rotation))
		drm_rect_rotate(&plane_state->uapi.src,
				fb->base.width << 16, fb->base.height << 16,
				DRM_MODE_ROTATE_270);

	return intel_plane_check_stride(plane_state);
}
3304
3305static int i9xx_format_to_fourcc(int format)
3306{
3307	switch (format) {
3308	case DISPPLANE_8BPP:
3309		return DRM_FORMAT_C8;
3310	case DISPPLANE_BGRA555:
3311		return DRM_FORMAT_ARGB1555;
3312	case DISPPLANE_BGRX555:
3313		return DRM_FORMAT_XRGB1555;
3314	case DISPPLANE_BGRX565:
3315		return DRM_FORMAT_RGB565;
3316	default:
3317	case DISPPLANE_BGRX888:
3318		return DRM_FORMAT_XRGB8888;
3319	case DISPPLANE_RGBX888:
3320		return DRM_FORMAT_XBGR8888;
3321	case DISPPLANE_BGRA888:
3322		return DRM_FORMAT_ARGB8888;
3323	case DISPPLANE_RGBA888:
3324		return DRM_FORMAT_ABGR8888;
3325	case DISPPLANE_BGRX101010:
3326		return DRM_FORMAT_XRGB2101010;
3327	case DISPPLANE_RGBX101010:
3328		return DRM_FORMAT_XBGR2101010;
3329	case DISPPLANE_BGRA101010:
3330		return DRM_FORMAT_ARGB2101010;
3331	case DISPPLANE_RGBA101010:
3332		return DRM_FORMAT_ABGR2101010;
3333	case DISPPLANE_RGBX161616:
3334		return DRM_FORMAT_XBGR16161616F;
3335	}
3336}
3337
3338int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
3339{
3340	switch (format) {
3341	case PLANE_CTL_FORMAT_RGB_565:
3342		return DRM_FORMAT_RGB565;
3343	case PLANE_CTL_FORMAT_NV12:
3344		return DRM_FORMAT_NV12;
3345	case PLANE_CTL_FORMAT_XYUV:
3346		return DRM_FORMAT_XYUV8888;
3347	case PLANE_CTL_FORMAT_P010:
3348		return DRM_FORMAT_P010;
3349	case PLANE_CTL_FORMAT_P012:
3350		return DRM_FORMAT_P012;
3351	case PLANE_CTL_FORMAT_P016:
3352		return DRM_FORMAT_P016;
3353	case PLANE_CTL_FORMAT_Y210:
3354		return DRM_FORMAT_Y210;
3355	case PLANE_CTL_FORMAT_Y212:
3356		return DRM_FORMAT_Y212;
3357	case PLANE_CTL_FORMAT_Y216:
3358		return DRM_FORMAT_Y216;
3359	case PLANE_CTL_FORMAT_Y410:
3360		return DRM_FORMAT_XVYU2101010;
3361	case PLANE_CTL_FORMAT_Y412:
3362		return DRM_FORMAT_XVYU12_16161616;
3363	case PLANE_CTL_FORMAT_Y416:
3364		return DRM_FORMAT_XVYU16161616;
3365	default:
3366	case PLANE_CTL_FORMAT_XRGB_8888:
3367		if (rgb_order) {
3368			if (alpha)
3369				return DRM_FORMAT_ABGR8888;
3370			else
3371				return DRM_FORMAT_XBGR8888;
3372		} else {
3373			if (alpha)
3374				return DRM_FORMAT_ARGB8888;
3375			else
3376				return DRM_FORMAT_XRGB8888;
3377		}
3378	case PLANE_CTL_FORMAT_XRGB_2101010:
3379		if (rgb_order) {
3380			if (alpha)
3381				return DRM_FORMAT_ABGR2101010;
3382			else
3383				return DRM_FORMAT_XBGR2101010;
3384		} else {
3385			if (alpha)
3386				return DRM_FORMAT_ARGB2101010;
3387			else
3388				return DRM_FORMAT_XRGB2101010;
3389		}
3390	case PLANE_CTL_FORMAT_XRGB_16161616F:
3391		if (rgb_order) {
3392			if (alpha)
3393				return DRM_FORMAT_ABGR16161616F;
3394			else
3395				return DRM_FORMAT_XBGR16161616F;
3396		} else {
3397			if (alpha)
3398				return DRM_FORMAT_ARGB16161616F;
3399			else
3400				return DRM_FORMAT_XRGB16161616F;
3401		}
3402	}
3403}
3404
/*
 * initial_plane_vma - wrap the BIOS framebuffer in a pinned GGTT vma
 * @i915: i915 device
 * @plane_config: takeover state read back from the hardware
 *
 * Creates a GEM object over the preallocated stolen memory range used
 * by the firmware FB and pins it at its current GGTT offset. Returns
 * NULL on any failure; callers treat that as "no reusable BIOS FB".
 */
static struct i915_vma *
initial_plane_vma(struct drm_i915_private *i915,
		  struct intel_initial_plane_config *plane_config)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	u32 base, size;

	if (plane_config->size == 0)
		return NULL;

	/* Expand [base, base + size) to GTT min-alignment boundaries. */
	base = round_down(plane_config->base,
			  I915_GTT_MIN_ALIGNMENT);
	size = round_up(plane_config->base + plane_config->size,
			I915_GTT_MIN_ALIGNMENT);
	size -= base;

	/*
	 * If the FB is too big, just don't use it since fbdev is not very
	 * important and we should probably use that space with FBC or other
	 * features.
	 */
	if (size * 2 > i915->stolen_usable_size)
		return NULL;

	obj = i915_gem_object_create_stolen_for_preallocated(i915, base, size);
	if (IS_ERR(obj))
		return NULL;

	/*
	 * Mark it WT ahead of time to avoid changing the
	 * cache_level during fbdev initialization. The
	 * unbind there would get stuck waiting for rcu.
	 */
	i915_gem_object_set_cache_coherency(obj, HAS_WT(i915) ?
					    I915_CACHE_WT : I915_CACHE_NONE);

	switch (plane_config->tiling) {
	case I915_TILING_NONE:
		break;
	case I915_TILING_X:
	case I915_TILING_Y:
		/* Mirror the firmware's tiling/stride onto the object. */
		obj->tiling_and_stride =
			plane_config->fb->base.pitches[0] |
			plane_config->tiling;
		break;
	default:
		MISSING_CASE(plane_config->tiling);
		goto err_obj;
	}

	vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
	if (IS_ERR(vma))
		goto err_obj;

	/* Pin at the exact GGTT offset the firmware is scanning out from. */
	if (i915_ggtt_pin(vma, NULL, 0, PIN_MAPPABLE | PIN_OFFSET_FIXED | base))
		goto err_obj;

	/* Tiled scanout needs a mappable, fenceable vma. */
	if (i915_gem_object_is_tiled(obj) &&
	    !i915_vma_is_map_and_fenceable(vma))
		goto err_obj;

	return vma;

err_obj:
	i915_gem_object_put(obj);
	return NULL;
}
3473
/*
 * intel_alloc_initial_plane_obj - finish initializing the BIOS FB
 * @crtc: crtc the firmware FB is attached to
 * @plane_config: takeover state; on success plane_config->vma is set
 *
 * Validates the firmware FB's modifier, wraps its stolen memory in a
 * vma via initial_plane_vma() and initializes the intel framebuffer
 * around it. Returns true on success, false if the FB can't be reused.
 */
static bool
intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
			      struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
	struct drm_framebuffer *fb = &plane_config->fb->base;
	struct i915_vma *vma;

	/* Only linear, X and Y tiling are expected from the firmware. */
	switch (fb->modifier) {
	case DRM_FORMAT_MOD_LINEAR:
	case I915_FORMAT_MOD_X_TILED:
	case I915_FORMAT_MOD_Y_TILED:
		break;
	default:
		drm_dbg(&dev_priv->drm,
			"Unsupported modifier for initial FB: 0x%llx\n",
			fb->modifier);
		return false;
	}

	vma = initial_plane_vma(dev_priv, plane_config);
	if (!vma)
		return false;

	/* Rebuild a mode_cmd from the read-back FB parameters. */
	mode_cmd.pixel_format = fb->format->format;
	mode_cmd.width = fb->width;
	mode_cmd.height = fb->height;
	mode_cmd.pitches[0] = fb->pitches[0];
	mode_cmd.modifier[0] = fb->modifier;
	mode_cmd.flags = DRM_MODE_FB_MODIFIERS;

	if (intel_framebuffer_init(to_intel_framebuffer(fb),
				   vma->obj, &mode_cmd)) {
		drm_dbg_kms(&dev_priv->drm, "intel fb init failed\n");
		goto err_vma;
	}

	/* Ownership of the vma transfers to plane_config. */
	plane_config->vma = vma;
	return true;

err_vma:
	i915_vma_put(vma);
	return false;
}
3520
3521static void
3522intel_set_plane_visible(struct intel_crtc_state *crtc_state,
3523			struct intel_plane_state *plane_state,
3524			bool visible)
3525{
3526	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
3527
3528	plane_state->uapi.visible = visible;
3529
3530	if (visible)
3531		crtc_state->uapi.plane_mask |= drm_plane_mask(&plane->base);
3532	else
3533		crtc_state->uapi.plane_mask &= ~drm_plane_mask(&plane->base);
3534}
3535
3536static void fixup_active_planes(struct intel_crtc_state *crtc_state)
3537{
3538	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
3539	struct drm_plane *plane;
3540
3541	/*
3542	 * Active_planes aliases if multiple "primary" or cursor planes
3543	 * have been used on the same (or wrong) pipe. plane_mask uses
3544	 * unique ids, hence we can use that to reconstruct active_planes.
3545	 */
3546	crtc_state->active_planes = 0;
3547
3548	drm_for_each_plane_mask(plane, &dev_priv->drm,
3549				crtc_state->uapi.plane_mask)
3550		crtc_state->active_planes |= BIT(to_intel_plane(plane)->id);
3551}
3552
/*
 * intel_plane_disable_noatomic - force a plane off outside of an atomic commit
 * @crtc: crtc the plane belongs to
 * @plane: plane to disable
 *
 * Used during state takeover/sanitization: marks the plane invisible
 * in the current crtc state, clears its bookkeeping, and disables it
 * on the hardware, observing the self-refresh and gen2-underrun quirks
 * documented below.
 */
static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
					 struct intel_plane *plane)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->base.state);
	struct intel_plane_state *plane_state =
		to_intel_plane_state(plane->base.state);

	drm_dbg_kms(&dev_priv->drm,
		    "Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
		    plane->base.base.id, plane->base.name,
		    crtc->base.base.id, crtc->base.name);

	intel_set_plane_visible(crtc_state, plane_state, false);
	fixup_active_planes(crtc_state);
	/* This plane no longer contributes bandwidth/clock requirements. */
	crtc_state->data_rate[plane->id] = 0;
	crtc_state->min_cdclk[plane->id] = 0;

	/* IPS is tied to the primary plane on HSW/BDW. */
	if (plane->id == PLANE_PRIMARY)
		hsw_disable_ips(crtc_state);

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	if (HAS_GMCH(dev_priv) &&
	    intel_set_memory_cxsr(dev_priv, false))
		intel_wait_for_vblank(dev_priv, crtc->pipe);

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So disable underrun reporting before all the planes get disabled.
	 */
	if (IS_GEN(dev_priv, 2) && !crtc_state->active_planes)
		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);

	intel_disable_plane(plane, crtc_state);
}
3597
3598static struct intel_frontbuffer *
3599to_intel_frontbuffer(struct drm_framebuffer *fb)
3600{
3601	return fb ? to_intel_framebuffer(fb)->frontbuffer : NULL;
3602}
3603
/*
 * intel_find_initial_plane_obj - hook the firmware FB up to the primary plane
 * @intel_crtc: crtc whose primary plane state is being reconstructed
 * @plane_config: takeover state read back from the hardware
 *
 * Tries to wrap the firmware FB (or share one already reconstructed
 * for another crtc at the same GGTT offset) and install it into the
 * primary plane's state. If no FB can be reconstructed, the plane is
 * disabled instead so later state checks stay consistent.
 */
static void
intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
			     struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc *c;
	struct drm_plane *primary = intel_crtc->base.primary;
	struct drm_plane_state *plane_state = primary->state;
	struct intel_plane *intel_plane = to_intel_plane(primary);
	struct intel_plane_state *intel_state =
		to_intel_plane_state(plane_state);
	struct drm_framebuffer *fb;
	struct i915_vma *vma;

	if (!plane_config->fb)
		return;

	if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) {
		fb = &plane_config->fb->base;
		vma = plane_config->vma;
		goto valid_fb;
	}

	/*
	 * Failed to alloc the obj, check to see if we should share
	 * an fb with another CRTC instead
	 */
	for_each_crtc(dev, c) {
		struct intel_plane_state *state;

		if (c == &intel_crtc->base)
			continue;

		if (!to_intel_crtc(c)->active)
			continue;

		state = to_intel_plane_state(c->primary->state);
		if (!state->vma)
			continue;

		/* Same GGTT base => both crtcs scan out the same FB. */
		if (intel_plane_ggtt_offset(state) == plane_config->base) {
			fb = state->hw.fb;
			vma = state->vma;
			goto valid_fb;
		}
	}

	/*
	 * We've failed to reconstruct the BIOS FB.  Current display state
	 * indicates that the primary plane is visible, but has a NULL FB,
	 * which will lead to problems later if we don't fix it up.  The
	 * simplest solution is to just disable the primary plane now and
	 * pretend the BIOS never had it enabled.
	 */
	intel_plane_disable_noatomic(intel_crtc, intel_plane);

	return;

valid_fb:
	intel_state->hw.rotation = plane_config->rotation;
	intel_fill_fb_ggtt_view(&intel_state->view, fb,
				intel_state->hw.rotation);
	intel_state->color_plane[0].stride =
		intel_fb_pitch(fb, 0, intel_state->hw.rotation);

	/* Keep the vma pinned and referenced for this plane state. */
	__i915_vma_pin(vma);
	intel_state->vma = i915_vma_get(vma);
	if (intel_plane_uses_fence(intel_state) && i915_vma_pin_fence(vma) == 0)
		if (vma->fence)
			intel_state->flags |= PLANE_HAS_FENCE;

	/* Full-FB viewport, 16.16 fixed point for src coordinates. */
	plane_state->src_x = 0;
	plane_state->src_y = 0;
	plane_state->src_w = fb->width << 16;
	plane_state->src_h = fb->height << 16;

	plane_state->crtc_x = 0;
	plane_state->crtc_y = 0;
	plane_state->crtc_w = fb->width;
	plane_state->crtc_h = fb->height;

	intel_state->uapi.src = drm_plane_state_src(plane_state);
	intel_state->uapi.dst = drm_plane_state_dest(plane_state);

	if (plane_config->tiling)
		dev_priv->preserve_bios_swizzle = true;

	plane_state->fb = fb;
	drm_framebuffer_get(fb);

	plane_state->crtc = &intel_crtc->base;
	intel_plane_copy_uapi_to_hw_state(intel_state, intel_state);

	intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);

	/* Mark this plane as a frontbuffer user of the FB. */
	atomic_or(to_intel_plane(primary)->frontbuffer_bit,
		  &to_intel_frontbuffer(fb)->bits);
}
3703
3704static int skl_max_plane_width(const struct drm_framebuffer *fb,
3705			       int color_plane,
3706			       unsigned int rotation)
3707{
3708	int cpp = fb->format->cpp[color_plane];
3709
3710	switch (fb->modifier) {
3711	case DRM_FORMAT_MOD_LINEAR:
3712	case I915_FORMAT_MOD_X_TILED:
3713		/*
3714		 * Validated limit is 4k, but has 5k should
3715		 * work apart from the following features:
3716		 * - Ytile (already limited to 4k)
3717		 * - FP16 (already limited to 4k)
3718		 * - render compression (already limited to 4k)
3719		 * - KVMR sprite and cursor (don't care)
3720		 * - horizontal panning (TODO verify this)
3721		 * - pipe and plane scaling (TODO verify this)
3722		 */
3723		if (cpp == 8)
3724			return 4096;
3725		else
3726			return 5120;
3727	case I915_FORMAT_MOD_Y_TILED_CCS:
3728	case I915_FORMAT_MOD_Yf_TILED_CCS:
3729	case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
3730		/* FIXME AUX plane? */
3731	case I915_FORMAT_MOD_Y_TILED:
3732	case I915_FORMAT_MOD_Yf_TILED:
3733		if (cpp == 8)
3734			return 2048;
3735		else
3736			return 4096;
3737	default:
3738		MISSING_CASE(fb->modifier);
3739		return 2048;
3740	}
3741}
3742
3743static int glk_max_plane_width(const struct drm_framebuffer *fb,
3744			       int color_plane,
3745			       unsigned int rotation)
3746{
3747	int cpp = fb->format->cpp[color_plane];
3748
3749	switch (fb->modifier) {
3750	case DRM_FORMAT_MOD_LINEAR:
3751	case I915_FORMAT_MOD_X_TILED:
3752		if (cpp == 8)
3753			return 4096;
3754		else
3755			return 5120;
3756	case I915_FORMAT_MOD_Y_TILED_CCS:
3757	case I915_FORMAT_MOD_Yf_TILED_CCS:
3758		/* FIXME AUX plane? */
3759	case I915_FORMAT_MOD_Y_TILED:
3760	case I915_FORMAT_MOD_Yf_TILED:
3761		if (cpp == 8)
3762			return 2048;
3763		else
3764			return 5120;
3765	default:
3766		MISSING_CASE(fb->modifier);
3767		return 2048;
3768	}
3769}
3770
3771static int icl_min_plane_width(const struct drm_framebuffer *fb)
3772{
3773	/* Wa_14011264657, Wa_14011050563: gen11+ */
3774	switch (fb->format->format) {
3775	case DRM_FORMAT_C8:
3776		return 18;
3777	case DRM_FORMAT_RGB565:
3778		return 10;
3779	case DRM_FORMAT_XRGB8888:
3780	case DRM_FORMAT_XBGR8888:
3781	case DRM_FORMAT_ARGB8888:
3782	case DRM_FORMAT_ABGR8888:
3783	case DRM_FORMAT_XRGB2101010:
3784	case DRM_FORMAT_XBGR2101010:
3785	case DRM_FORMAT_ARGB2101010:
3786	case DRM_FORMAT_ABGR2101010:
3787	case DRM_FORMAT_XVYU2101010:
3788	case DRM_FORMAT_Y212:
3789	case DRM_FORMAT_Y216:
3790		return 6;
3791	case DRM_FORMAT_NV12:
3792		return 20;
3793	case DRM_FORMAT_P010:
3794	case DRM_FORMAT_P012:
3795	case DRM_FORMAT_P016:
3796		return 12;
3797	case DRM_FORMAT_XRGB16161616F:
3798	case DRM_FORMAT_XBGR16161616F:
3799	case DRM_FORMAT_ARGB16161616F:
3800	case DRM_FORMAT_ABGR16161616F:
3801	case DRM_FORMAT_XVYU12_16161616:
3802	case DRM_FORMAT_XVYU16161616:
3803		return 4;
3804	default:
3805		return 1;
3806	}
3807}
3808
/*
 * Maximum source width (pixels) for a gen11+ plane; a flat limit
 * regardless of plane, modifier or rotation.
 */
static int icl_max_plane_width(const struct drm_framebuffer *fb,
			       int color_plane,
			       unsigned int rotation)
{
	return 5120;
}
3815
/* Maximum source height (lines) for a SKL..CNL plane. */
static int skl_max_plane_height(void)
{
	return 4096;
}
3820
/* Maximum source height (lines) for a gen11+ plane. */
static int icl_max_plane_height(void)
{
	return 4320;
}
3825
/*
 * skl_check_main_ccs_coordinates - align the CCS AUX plane to the main plane
 * @plane_state: plane state being checked
 * @main_x: main surface x offset
 * @main_y: main surface y offset
 * @main_offset: main surface aligned offset
 * @ccs_plane: index of the CCS AUX plane
 *
 * Walks the AUX plane's aligned offset backwards (one alignment step at
 * a time) until its x/y offsets match the main surface's, then commits
 * the resulting offset/x/y to the plane state. Returns false if no
 * matching offset could be found.
 */
static bool
skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state,
			       int main_x, int main_y, u32 main_offset,
			       int ccs_plane)
{
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	int aux_x = plane_state->color_plane[ccs_plane].x;
	int aux_y = plane_state->color_plane[ccs_plane].y;
	u32 aux_offset = plane_state->color_plane[ccs_plane].offset;
	u32 alignment = intel_surf_alignment(fb, ccs_plane);
	int hsub;
	int vsub;

	intel_fb_plane_get_subsampling(&hsub, &vsub, fb, ccs_plane);
	/*
	 * Step the AUX offset down while it stays at/above the main
	 * offset and doesn't overshoot vertically.
	 */
	while (aux_offset >= main_offset && aux_y <= main_y) {
		int x, y;

		/* Exact match found. */
		if (aux_x == main_x && aux_y == main_y)
			break;

		/* Can't go below offset 0. */
		if (aux_offset == 0)
			break;

		/* Adjust in AUX plane units, then convert back. */
		x = aux_x / hsub;
		y = aux_y / vsub;
		aux_offset = intel_plane_adjust_aligned_offset(&x, &y,
							       plane_state,
							       ccs_plane,
							       aux_offset,
							       aux_offset -
								alignment);
		/* Preserve the sub-sample remainder of the old x/y. */
		aux_x = x * hsub + aux_x % hsub;
		aux_y = y * vsub + aux_y % vsub;
	}

	if (aux_x != main_x || aux_y != main_y)
		return false;

	plane_state->color_plane[ccs_plane].offset = aux_offset;
	plane_state->color_plane[ccs_plane].x = aux_x;
	plane_state->color_plane[ccs_plane].y = aux_y;

	return true;
}
3870
3871unsigned int
3872intel_plane_fence_y_offset(const struct intel_plane_state *plane_state)
3873{
3874	int x = 0, y = 0;
3875
3876	intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
3877					  plane_state->color_plane[0].offset, 0);
3878
3879	return y;
3880}
3881
/*
 * skl_check_main_surface - validate and finalize the main surface layout
 * @plane_state: plane state being checked
 *
 * Checks the source viewport against the per-platform min/max plane
 * dimensions, computes an aligned surface offset, and massages it so
 * that X-tiling stride limits and CCS AUX offset constraints are
 * satisfied. On success writes the final offset/x/y into
 * color_plane[0] and re-anchors uapi.src. Returns 0 or -EINVAL.
 */
static int skl_check_main_surface(struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane_state->uapi.plane->dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	unsigned int rotation = plane_state->hw.rotation;
	/* Source viewport in whole pixels (src is 16.16 fixed point). */
	int x = plane_state->uapi.src.x1 >> 16;
	int y = plane_state->uapi.src.y1 >> 16;
	int w = drm_rect_width(&plane_state->uapi.src) >> 16;
	int h = drm_rect_height(&plane_state->uapi.src) >> 16;
	int max_width, min_width, max_height;
	u32 alignment, offset;
	/*
	 * NOTE(review): when fb has no AUX plane, aux_plane/aux_offset
	 * presumably refer back to plane 0 here - confirm the clamp
	 * against aux_offset below is a no-op in that case.
	 */
	int aux_plane = intel_main_to_aux_plane(fb, 0);
	u32 aux_offset = plane_state->color_plane[aux_plane].offset;

	/* Per-platform width limits. */
	if (INTEL_GEN(dev_priv) >= 11) {
		max_width = icl_max_plane_width(fb, 0, rotation);
		min_width = icl_min_plane_width(fb);
	} else if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
		max_width = glk_max_plane_width(fb, 0, rotation);
		min_width = 1;
	} else {
		max_width = skl_max_plane_width(fb, 0, rotation);
		min_width = 1;
	}

	if (INTEL_GEN(dev_priv) >= 11)
		max_height = icl_max_plane_height();
	else
		max_height = skl_max_plane_height();

	if (w > max_width || w < min_width || h > max_height) {
		drm_dbg_kms(&dev_priv->drm,
			    "requested Y/RGB source size %dx%d outside limits (min: %dx1 max: %dx%d)\n",
			    w, h, min_width, max_width, max_height);
		return -EINVAL;
	}

	intel_add_fb_offsets(&x, &y, plane_state, 0);
	offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 0);
	alignment = intel_surf_alignment(fb, 0);
	/* The mask math below assumes a power-of-2 alignment. */
	if (drm_WARN_ON(&dev_priv->drm, alignment && !is_power_of_2(alignment)))
		return -EINVAL;

	/*
	 * AUX surface offset is specified as the distance from the
	 * main surface offset, and it must be non-negative. Make
	 * sure that is what we will get.
	 */
	if (offset > aux_offset)
		offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
							   offset, aux_offset & ~(alignment - 1));

	/*
	 * When using an X-tiled surface, the plane blows up
	 * if the x offset + width exceed the stride.
	 *
	 * TODO: linear and Y-tiled seem fine, Yf untested,
	 */
	if (fb->modifier == I915_FORMAT_MOD_X_TILED) {
		int cpp = fb->format->cpp[0];

		/* Walk the offset back until x fits within the stride. */
		while ((x + w) * cpp > plane_state->color_plane[0].stride) {
			if (offset == 0) {
				drm_dbg_kms(&dev_priv->drm,
					    "Unable to find suitable display surface offset due to X-tiling\n");
				return -EINVAL;
			}

			offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
								   offset, offset - alignment);
		}
	}

	/*
	 * CCS AUX surface doesn't have its own x/y offsets, we must make sure
	 * they match with the main surface x/y offsets.
	 */
	if (is_ccs_modifier(fb->modifier)) {
		while (!skl_check_main_ccs_coordinates(plane_state, x, y,
						       offset, aux_plane)) {
			if (offset == 0)
				break;

			offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
								   offset, offset - alignment);
		}

		if (x != plane_state->color_plane[aux_plane].x ||
		    y != plane_state->color_plane[aux_plane].y) {
			drm_dbg_kms(&dev_priv->drm,
				    "Unable to find suitable display surface offset due to CCS\n");
			return -EINVAL;
		}
	}

	/* Commit the final layout. */
	plane_state->color_plane[0].offset = offset;
	plane_state->color_plane[0].x = x;
	plane_state->color_plane[0].y = y;

	/*
	 * Put the final coordinates back so that the src
	 * coordinate checks will see the right values.
	 */
	drm_rect_translate_to(&plane_state->uapi.src,
			      x << 16, y << 16);

	return 0;
}
3990
/*
 * Validate and finalize the layout of the CbCr (UV) surface of a
 * semiplanar YUV framebuffer: check the source size against the plane
 * limits and compute an aligned offset that also satisfies the CCS
 * constraints when a CCS modifier is used.
 *
 * Returns 0 on success, -EINVAL on failure.
 */
static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
{
	struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	unsigned int rotation = plane_state->hw.rotation;
	int uv_plane = 1;
	int max_width = skl_max_plane_width(fb, uv_plane, rotation);
	int max_height = 4096;
	/*
	 * src coordinates are 16.16 fixed point; the extra shift bit
	 * halves them for the 2x2 subsampled CbCr plane.
	 */
	int x = plane_state->uapi.src.x1 >> 17;
	int y = plane_state->uapi.src.y1 >> 17;
	int w = drm_rect_width(&plane_state->uapi.src) >> 17;
	int h = drm_rect_height(&plane_state->uapi.src) >> 17;
	u32 offset;

	intel_add_fb_offsets(&x, &y, plane_state, uv_plane);
	offset = intel_plane_compute_aligned_offset(&x, &y,
						    plane_state, uv_plane);

	/* FIXME not quite sure how/if these apply to the chroma plane */
	if (w > max_width || h > max_height) {
		drm_dbg_kms(&i915->drm,
			    "CbCr source size %dx%d too big (limit %dx%d)\n",
			    w, h, max_width, max_height);
		return -EINVAL;
	}

	if (is_ccs_modifier(fb->modifier)) {
		int ccs_plane = main_to_ccs_plane(fb, uv_plane);
		int aux_offset = plane_state->color_plane[ccs_plane].offset;
		int alignment = intel_surf_alignment(fb, uv_plane);

		/*
		 * As for the main surface, the CCS offset must not end
		 * up below the UV offset; keep the UV offset at or
		 * below it.
		 */
		if (offset > aux_offset)
			offset = intel_plane_adjust_aligned_offset(&x, &y,
								   plane_state,
								   uv_plane,
								   offset,
								   aux_offset & ~(alignment - 1));

		/* Walk the offset back until the CCS x/y coordinates match. */
		while (!skl_check_main_ccs_coordinates(plane_state, x, y,
						       offset, ccs_plane)) {
			if (offset == 0)
				break;

			offset = intel_plane_adjust_aligned_offset(&x, &y,
								   plane_state,
								   uv_plane,
								   offset, offset - alignment);
		}

		if (x != plane_state->color_plane[ccs_plane].x ||
		    y != plane_state->color_plane[ccs_plane].y) {
			drm_dbg_kms(&i915->drm,
				    "Unable to find suitable display surface offset due to CCS\n");
			return -EINVAL;
		}
	}

	plane_state->color_plane[uv_plane].offset = offset;
	plane_state->color_plane[uv_plane].x = x;
	plane_state->color_plane[uv_plane].y = y;

	return 0;
}
4054
4055static int skl_check_ccs_aux_surface(struct intel_plane_state *plane_state)
4056{
4057	const struct drm_framebuffer *fb = plane_state->hw.fb;
4058	int src_x = plane_state->uapi.src.x1 >> 16;
4059	int src_y = plane_state->uapi.src.y1 >> 16;
4060	u32 offset;
4061	int ccs_plane;
4062
4063	for (ccs_plane = 0; ccs_plane < fb->format->num_planes; ccs_plane++) {
4064		int main_hsub, main_vsub;
4065		int hsub, vsub;
4066		int x, y;
4067
4068		if (!is_ccs_plane(fb, ccs_plane))
4069			continue;
4070
4071		intel_fb_plane_get_subsampling(&main_hsub, &main_vsub, fb,
4072					       ccs_to_main_plane(fb, ccs_plane));
4073		intel_fb_plane_get_subsampling(&hsub, &vsub, fb, ccs_plane);
4074
4075		hsub *= main_hsub;
4076		vsub *= main_vsub;
4077		x = src_x / hsub;
4078		y = src_y / vsub;
4079
4080		intel_add_fb_offsets(&x, &y, plane_state, ccs_plane);
4081
4082		offset = intel_plane_compute_aligned_offset(&x, &y,
4083							    plane_state,
4084							    ccs_plane);
4085
4086		plane_state->color_plane[ccs_plane].offset = offset;
4087		plane_state->color_plane[ccs_plane].x = (x * hsub +
4088							 src_x % hsub) /
4089							main_hsub;
4090		plane_state->color_plane[ccs_plane].y = (y * vsub +
4091							 src_y % vsub) /
4092							main_vsub;
4093	}
4094
4095	return 0;
4096}
4097
4098int skl_check_plane_surface(struct intel_plane_state *plane_state)
4099{
4100	const struct drm_framebuffer *fb = plane_state->hw.fb;
4101	int ret, i;
4102
4103	ret = intel_plane_compute_gtt(plane_state);
4104	if (ret)
4105		return ret;
4106
4107	if (!plane_state->uapi.visible)
4108		return 0;
4109
4110	/*
4111	 * Handle the AUX surface first since the main surface setup depends on
4112	 * it.
4113	 */
4114	if (is_ccs_modifier(fb->modifier)) {
4115		ret = skl_check_ccs_aux_surface(plane_state);
4116		if (ret)
4117			return ret;
4118	}
4119
4120	if (intel_format_info_is_yuv_semiplanar(fb->format,
4121						fb->modifier)) {
4122		ret = skl_check_nv12_aux_surface(plane_state);
4123		if (ret)
4124			return ret;
4125	}
4126
4127	for (i = fb->format->num_planes; i < ARRAY_SIZE(plane_state->color_plane); i++) {
4128		plane_state->color_plane[i].offset = ~0xfff;
4129		plane_state->color_plane[i].x = 0;
4130		plane_state->color_plane[i].y = 0;
4131	}
4132
4133	ret = skl_check_main_surface(plane_state);
4134	if (ret)
4135		return ret;
4136
4137	return 0;
4138}
4139
4140static void i9xx_plane_ratio(const struct intel_crtc_state *crtc_state,
4141			     const struct intel_plane_state *plane_state,
4142			     unsigned int *num, unsigned int *den)
4143{
4144	const struct drm_framebuffer *fb = plane_state->hw.fb;
4145	unsigned int cpp = fb->format->cpp[0];
4146
4147	/*
4148	 * g4x bspec says 64bpp pixel rate can't exceed 80%
4149	 * of cdclk when the sprite plane is enabled on the
4150	 * same pipe. ilk/snb bspec says 64bpp pixel rate is
4151	 * never allowed to exceed 80% of cdclk. Let's just go
4152	 * with the ilk/snb limit always.
4153	 */
4154	if (cpp == 8) {
4155		*num = 10;
4156		*den = 8;
4157	} else {
4158		*num = 1;
4159		*den = 1;
4160	}
4161}
4162
4163static int i9xx_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
4164				const struct intel_plane_state *plane_state)
4165{
4166	unsigned int pixel_rate;
4167	unsigned int num, den;
4168
4169	/*
4170	 * Note that crtc_state->pixel_rate accounts for both
4171	 * horizontal and vertical panel fitter downscaling factors.
4172	 * Pre-HSW bspec tells us to only consider the horizontal
4173	 * downscaling factor here. We ignore that and just consider
4174	 * both for simplicity.
4175	 */
4176	pixel_rate = crtc_state->pixel_rate;
4177
4178	i9xx_plane_ratio(crtc_state, plane_state, &num, &den);
4179
4180	/* two pixels per clock with double wide pipe */
4181	if (crtc_state->double_wide)
4182		den *= 2;
4183
4184	return DIV_ROUND_UP(pixel_rate * num, den);
4185}
4186
4187unsigned int
4188i9xx_plane_max_stride(struct intel_plane *plane,
4189		      u32 pixel_format, u64 modifier,
4190		      unsigned int rotation)
4191{
4192	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
4193
4194	if (!HAS_GMCH(dev_priv)) {
4195		return 32*1024;
4196	} else if (INTEL_GEN(dev_priv) >= 4) {
4197		if (modifier == I915_FORMAT_MOD_X_TILED)
4198			return 16*1024;
4199		else
4200			return 32*1024;
4201	} else if (INTEL_GEN(dev_priv) >= 3) {
4202		if (modifier == I915_FORMAT_MOD_X_TILED)
4203			return 8*1024;
4204		else
4205			return 16*1024;
4206	} else {
4207		if (plane->i9xx_plane == PLANE_C)
4208			return 4*1024;
4209		else
4210			return 8*1024;
4211	}
4212}
4213
4214static u32 i9xx_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
4215{
4216	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4217	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4218	u32 dspcntr = 0;
4219
4220	if (crtc_state->gamma_enable)
4221		dspcntr |= DISPPLANE_GAMMA_ENABLE;
4222
4223	if (crtc_state->csc_enable)
4224		dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;
4225
4226	if (INTEL_GEN(dev_priv) < 5)
4227		dspcntr |= DISPPLANE_SEL_PIPE(crtc->pipe);
4228
4229	return dspcntr;
4230}
4231
/*
 * Compute the framebuffer/plane-state dependent bits of the DSPCNTR
 * value: pixel format, tiling, rotation and mirroring. The crtc
 * dependent bits come from i9xx_plane_ctl_crtc(). Returns 0 for an
 * unhandled pixel format (i.e. plane left disabled).
 */
static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state,
			  const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->uapi.plane->dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	unsigned int rotation = plane_state->hw.rotation;
	u32 dspcntr;

	dspcntr = DISPLAY_PLANE_ENABLE;

	if (IS_G4X(dev_priv) || IS_GEN(dev_priv, 5) ||
	    IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;

	/* Map the DRM fourcc to the hardware pixel format field. */
	switch (fb->format->format) {
	case DRM_FORMAT_C8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case DRM_FORMAT_XRGB1555:
		dspcntr |= DISPPLANE_BGRX555;
		break;
	case DRM_FORMAT_ARGB1555:
		dspcntr |= DISPPLANE_BGRA555;
		break;
	case DRM_FORMAT_RGB565:
		dspcntr |= DISPPLANE_BGRX565;
		break;
	case DRM_FORMAT_XRGB8888:
		dspcntr |= DISPPLANE_BGRX888;
		break;
	case DRM_FORMAT_XBGR8888:
		dspcntr |= DISPPLANE_RGBX888;
		break;
	case DRM_FORMAT_ARGB8888:
		dspcntr |= DISPPLANE_BGRA888;
		break;
	case DRM_FORMAT_ABGR8888:
		dspcntr |= DISPPLANE_RGBA888;
		break;
	case DRM_FORMAT_XRGB2101010:
		dspcntr |= DISPPLANE_BGRX101010;
		break;
	case DRM_FORMAT_XBGR2101010:
		dspcntr |= DISPPLANE_RGBX101010;
		break;
	case DRM_FORMAT_ARGB2101010:
		dspcntr |= DISPPLANE_BGRA101010;
		break;
	case DRM_FORMAT_ABGR2101010:
		dspcntr |= DISPPLANE_RGBA101010;
		break;
	case DRM_FORMAT_XBGR16161616F:
		dspcntr |= DISPPLANE_RGBX161616;
		break;
	default:
		MISSING_CASE(fb->format->format);
		return 0;
	}

	if (INTEL_GEN(dev_priv) >= 4 &&
	    fb->modifier == I915_FORMAT_MOD_X_TILED)
		dspcntr |= DISPPLANE_TILED;

	if (rotation & DRM_MODE_ROTATE_180)
		dspcntr |= DISPPLANE_ROTATE_180;

	if (rotation & DRM_MODE_REFLECT_X)
		dspcntr |= DISPPLANE_MIRROR;

	return dspcntr;
}
4304
/*
 * Compute the final surface offset and x/y coordinates for a pre-SKL
 * primary plane, including the rotation/reflection coordinate fixup
 * that HSW/BDW perform in hardware.
 *
 * Returns 0 on success or a negative error code.
 */
int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->uapi.plane->dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	int src_x, src_y, src_w;
	u32 offset;
	int ret;

	ret = intel_plane_compute_gtt(plane_state);
	if (ret)
		return ret;

	/* Nothing to compute if the plane isn't visible. */
	if (!plane_state->uapi.visible)
		return 0;

	/* src coordinates are in 16.16 fixed point */
	src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
	src_x = plane_state->uapi.src.x1 >> 16;
	src_y = plane_state->uapi.src.y1 >> 16;

	/* Undocumented hardware limit on i965/g4x/vlv/chv */
	if (HAS_GMCH(dev_priv) && fb->format->cpp[0] == 8 && src_w > 2048)
		return -EINVAL;

	intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);

	/*
	 * Pre-gen4 keeps offset 0 and programs the full linear offset
	 * instead (see i9xx_update_plane()).
	 */
	if (INTEL_GEN(dev_priv) >= 4)
		offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
							    plane_state, 0);
	else
		offset = 0;

	/*
	 * Put the final coordinates back so that the src
	 * coordinate checks will see the right values.
	 */
	drm_rect_translate_to(&plane_state->uapi.src,
			      src_x << 16, src_y << 16);

	/* HSW/BDW do this automagically in hardware */
	if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)) {
		unsigned int rotation = plane_state->hw.rotation;
		/* deliberately shadows the outer src_w: post-translate size */
		int src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
		int src_h = drm_rect_height(&plane_state->uapi.src) >> 16;

		if (rotation & DRM_MODE_ROTATE_180) {
			src_x += src_w - 1;
			src_y += src_h - 1;
		} else if (rotation & DRM_MODE_REFLECT_X) {
			src_x += src_w - 1;
		}
	}

	plane_state->color_plane[0].offset = offset;
	plane_state->color_plane[0].x = src_x;
	plane_state->color_plane[0].y = src_y;

	return 0;
}
4364
4365static bool i9xx_plane_has_windowing(struct intel_plane *plane)
4366{
4367	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
4368	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
4369
4370	if (IS_CHERRYVIEW(dev_priv))
4371		return i9xx_plane == PLANE_B;
4372	else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
4373		return false;
4374	else if (IS_GEN(dev_priv, 4))
4375		return i9xx_plane == PLANE_C;
4376	else
4377		return i9xx_plane == PLANE_B ||
4378			i9xx_plane == PLANE_C;
4379}
4380
/*
 * Validate the atomic state of a pre-SKL primary plane: rotation
 * restrictions, clipping (no scaling supported), surface layout and
 * source coordinates. On success the DSPCNTR value is precomputed
 * into plane_state->ctl.
 */
static int
i9xx_plane_check(struct intel_crtc_state *crtc_state,
		 struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	int ret;

	ret = chv_plane_check_rotation(plane_state);
	if (ret)
		return ret;

	/* No scaling; windowing only where the hardware has it. */
	ret = drm_atomic_helper_check_plane_state(&plane_state->uapi,
						  &crtc_state->uapi,
						  DRM_PLANE_HELPER_NO_SCALING,
						  DRM_PLANE_HELPER_NO_SCALING,
						  i9xx_plane_has_windowing(plane),
						  true);
	if (ret)
		return ret;

	ret = i9xx_check_plane_surface(plane_state);
	if (ret)
		return ret;

	/* The remaining checks only apply to a visible plane. */
	if (!plane_state->uapi.visible)
		return 0;

	ret = intel_plane_check_src_coordinates(plane_state);
	if (ret)
		return ret;

	plane_state->ctl = i9xx_plane_ctl(crtc_state, plane_state);

	return 0;
}
4416
/*
 * Program a pre-SKL primary plane from the precomputed plane state.
 * All register writes happen under the uncore lock; the surface
 * address is written last since the control register self-arms.
 */
static void i9xx_update_plane(struct intel_plane *plane,
			      const struct intel_crtc_state *crtc_state,
			      const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	u32 linear_offset;
	int x = plane_state->color_plane[0].x;
	int y = plane_state->color_plane[0].y;
	int crtc_x = plane_state->uapi.dst.x1;
	int crtc_y = plane_state->uapi.dst.y1;
	int crtc_w = drm_rect_width(&plane_state->uapi.dst);
	int crtc_h = drm_rect_height(&plane_state->uapi.dst);
	unsigned long irqflags;
	u32 dspaddr_offset;
	u32 dspcntr;

	/* Combine the fb dependent and crtc dependent control bits. */
	dspcntr = plane_state->ctl | i9xx_plane_ctl_crtc(crtc_state);

	linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);

	/* Pre-gen4 has no aligned surface offset; program the linear one. */
	if (INTEL_GEN(dev_priv) >= 4)
		dspaddr_offset = plane_state->color_plane[0].offset;
	else
		dspaddr_offset = linear_offset;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	intel_de_write_fw(dev_priv, DSPSTRIDE(i9xx_plane),
			  plane_state->color_plane[0].stride);

	if (INTEL_GEN(dev_priv) < 4) {
		/*
		 * PLANE_A doesn't actually have a full window
		 * generator but let's assume we still need to
		 * program whatever is there.
		 */
		intel_de_write_fw(dev_priv, DSPPOS(i9xx_plane),
				  (crtc_y << 16) | crtc_x);
		intel_de_write_fw(dev_priv, DSPSIZE(i9xx_plane),
				  ((crtc_h - 1) << 16) | (crtc_w - 1));
	} else if (IS_CHERRYVIEW(dev_priv) && i9xx_plane == PLANE_B) {
		intel_de_write_fw(dev_priv, PRIMPOS(i9xx_plane),
				  (crtc_y << 16) | crtc_x);
		intel_de_write_fw(dev_priv, PRIMSIZE(i9xx_plane),
				  ((crtc_h - 1) << 16) | (crtc_w - 1));
		intel_de_write_fw(dev_priv, PRIMCNSTALPHA(i9xx_plane), 0);
	}

	/* HSW/BDW take x/y via DSPOFFSET; older gen4+ via DSPLINOFF/DSPTILEOFF. */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		intel_de_write_fw(dev_priv, DSPOFFSET(i9xx_plane),
				  (y << 16) | x);
	} else if (INTEL_GEN(dev_priv) >= 4) {
		intel_de_write_fw(dev_priv, DSPLINOFF(i9xx_plane),
				  linear_offset);
		intel_de_write_fw(dev_priv, DSPTILEOFF(i9xx_plane),
				  (y << 16) | x);
	}

	/*
	 * The control register self-arms if the plane was previously
	 * disabled. Try to make the plane enable atomic by writing
	 * the control register just before the surface register.
	 */
	intel_de_write_fw(dev_priv, DSPCNTR(i9xx_plane), dspcntr);
	if (INTEL_GEN(dev_priv) >= 4)
		intel_de_write_fw(dev_priv, DSPSURF(i9xx_plane),
				  intel_plane_ggtt_offset(plane_state) + dspaddr_offset);
	else
		intel_de_write_fw(dev_priv, DSPADDR(i9xx_plane),
				  intel_plane_ggtt_offset(plane_state) + dspaddr_offset);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
4491
4492static void i9xx_disable_plane(struct intel_plane *plane,
4493			       const struct intel_crtc_state *crtc_state)
4494{
4495	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
4496	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
4497	unsigned long irqflags;
4498	u32 dspcntr;
4499
4500	/*
4501	 * DSPCNTR pipe gamma enable on g4x+ and pipe csc
4502	 * enable on ilk+ affect the pipe bottom color as
4503	 * well, so we must configure them even if the plane
4504	 * is disabled.
4505	 *
4506	 * On pre-g4x there is no way to gamma correct the
4507	 * pipe bottom color but we'll keep on doing this
4508	 * anyway so that the crtc state readout works correctly.
4509	 */
4510	dspcntr = i9xx_plane_ctl_crtc(crtc_state);
4511
4512	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
4513
4514	intel_de_write_fw(dev_priv, DSPCNTR(i9xx_plane), dspcntr);
4515	if (INTEL_GEN(dev_priv) >= 4)
4516		intel_de_write_fw(dev_priv, DSPSURF(i9xx_plane), 0);
4517	else
4518		intel_de_write_fw(dev_priv, DSPADDR(i9xx_plane), 0);
4519
4520	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
4521}
4522
/*
 * Read back from hardware whether the plane is enabled and which pipe
 * it is assigned to. Returns false (without a valid *pipe) when the
 * power well backing the registers is down.
 */
static bool i9xx_plane_get_hw_state(struct intel_plane *plane,
				    enum pipe *pipe)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum intel_display_power_domain power_domain;
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	intel_wakeref_t wakeref;
	bool ret;
	u32 val;

	/*
	 * Not 100% correct for planes that can move between pipes,
	 * but that's only the case for gen2-4 which don't have any
	 * display power wells.
	 */
	power_domain = POWER_DOMAIN_PIPE(plane->pipe);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, DSPCNTR(i9xx_plane));

	ret = val & DISPLAY_PLANE_ENABLE;

	/* Pre-ilk the pipe assignment is read from the SEL_PIPE bits. */
	if (INTEL_GEN(dev_priv) >= 5)
		*pipe = plane->pipe;
	else
		*pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
			DISPPLANE_SEL_PIPE_SHIFT;

	intel_display_power_put(dev_priv, power_domain, wakeref);

	return ret;
}
4557
4558static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
4559{
4560	struct drm_device *dev = intel_crtc->base.dev;
4561	struct drm_i915_private *dev_priv = to_i915(dev);
4562	unsigned long irqflags;
4563
4564	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
4565
4566	intel_de_write_fw(dev_priv, SKL_PS_CTRL(intel_crtc->pipe, id), 0);
4567	intel_de_write_fw(dev_priv, SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
4568	intel_de_write_fw(dev_priv, SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);
4569
4570	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
4571}
4572
4573/*
4574 * This function detaches (aka. unbinds) unused scalers in hardware
4575 */
4576static void skl_detach_scalers(const struct intel_crtc_state *crtc_state)
4577{
4578	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
4579	const struct intel_crtc_scaler_state *scaler_state =
4580		&crtc_state->scaler_state;
4581	int i;
4582
4583	/* loop through and disable scalers that aren't in use */
4584	for (i = 0; i < intel_crtc->num_scalers; i++) {
4585		if (!scaler_state->scalers[i].in_use)
4586			skl_detach_scaler(intel_crtc, i);
4587	}
4588}
4589
/*
 * Stride register unit for the given color plane: 64 byte chunks for
 * linear surfaces, otherwise one tile (tile height for 90/270
 * rotation, tile row bytes otherwise).
 */
static unsigned int skl_plane_stride_mult(const struct drm_framebuffer *fb,
					  int color_plane, unsigned int rotation)
{
	if (is_surface_linear(fb, color_plane))
		return 64;

	if (drm_rotation_90_or_270(rotation))
		return intel_tile_height(fb, color_plane);

	return intel_tile_width_bytes(fb, color_plane);
}
4604
4605u32 skl_plane_stride(const struct intel_plane_state *plane_state,
4606		     int color_plane)
4607{
4608	const struct drm_framebuffer *fb = plane_state->hw.fb;
4609	unsigned int rotation = plane_state->hw.rotation;
4610	u32 stride = plane_state->color_plane[color_plane].stride;
4611
4612	if (color_plane >= fb->format->num_planes)
4613		return 0;
4614
4615	return stride / skl_plane_stride_mult(fb, color_plane, rotation);
4616}
4617
/*
 * Translate a DRM fourcc pixel format to the SKL+ PLANE_CTL format
 * field (including the channel order / YUV422 order bits where
 * applicable). Returns 0 for unknown formats.
 */
static u32 skl_plane_ctl_format(u32 pixel_format)
{
	switch (pixel_format) {
	case DRM_FORMAT_C8:
		return PLANE_CTL_FORMAT_INDEXED;
	case DRM_FORMAT_RGB565:
		return PLANE_CTL_FORMAT_RGB_565;
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
		return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		return PLANE_CTL_FORMAT_XRGB_8888;
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_ABGR2101010:
		return PLANE_CTL_FORMAT_XRGB_2101010 | PLANE_CTL_ORDER_RGBX;
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_ARGB2101010:
		return PLANE_CTL_FORMAT_XRGB_2101010;
	case DRM_FORMAT_XBGR16161616F:
	case DRM_FORMAT_ABGR16161616F:
		return PLANE_CTL_FORMAT_XRGB_16161616F | PLANE_CTL_ORDER_RGBX;
	case DRM_FORMAT_XRGB16161616F:
	case DRM_FORMAT_ARGB16161616F:
		return PLANE_CTL_FORMAT_XRGB_16161616F;
	case DRM_FORMAT_XYUV8888:
		return PLANE_CTL_FORMAT_XYUV;
	case DRM_FORMAT_YUYV:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
	case DRM_FORMAT_YVYU:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU;
	case DRM_FORMAT_UYVY:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY;
	case DRM_FORMAT_VYUY:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
	case DRM_FORMAT_NV12:
		return PLANE_CTL_FORMAT_NV12;
	case DRM_FORMAT_P010:
		return PLANE_CTL_FORMAT_P010;
	case DRM_FORMAT_P012:
		return PLANE_CTL_FORMAT_P012;
	case DRM_FORMAT_P016:
		return PLANE_CTL_FORMAT_P016;
	case DRM_FORMAT_Y210:
		return PLANE_CTL_FORMAT_Y210;
	case DRM_FORMAT_Y212:
		return PLANE_CTL_FORMAT_Y212;
	case DRM_FORMAT_Y216:
		return PLANE_CTL_FORMAT_Y216;
	case DRM_FORMAT_XVYU2101010:
		return PLANE_CTL_FORMAT_Y410;
	case DRM_FORMAT_XVYU12_16161616:
		return PLANE_CTL_FORMAT_Y412;
	case DRM_FORMAT_XVYU16161616:
		return PLANE_CTL_FORMAT_Y416;
	default:
		MISSING_CASE(pixel_format);
	}

	return 0;
}
4679
4680static u32 skl_plane_ctl_alpha(const struct intel_plane_state *plane_state)
4681{
4682	if (!plane_state->hw.fb->format->has_alpha)
4683		return PLANE_CTL_ALPHA_DISABLE;
4684
4685	switch (plane_state->hw.pixel_blend_mode) {
4686	case DRM_MODE_BLEND_PIXEL_NONE:
4687		return PLANE_CTL_ALPHA_DISABLE;
4688	case DRM_MODE_BLEND_PREMULTI:
4689		return PLANE_CTL_ALPHA_SW_PREMULTIPLY;
4690	case DRM_MODE_BLEND_COVERAGE:
4691		return PLANE_CTL_ALPHA_HW_PREMULTIPLY;
4692	default:
4693		MISSING_CASE(plane_state->hw.pixel_blend_mode);
4694		return PLANE_CTL_ALPHA_DISABLE;
4695	}
4696}
4697
4698static u32 glk_plane_color_ctl_alpha(const struct intel_plane_state *plane_state)
4699{
4700	if (!plane_state->hw.fb->format->has_alpha)
4701		return PLANE_COLOR_ALPHA_DISABLE;
4702
4703	switch (plane_state->hw.pixel_blend_mode) {
4704	case DRM_MODE_BLEND_PIXEL_NONE:
4705		return PLANE_COLOR_ALPHA_DISABLE;
4706	case DRM_MODE_BLEND_PREMULTI:
4707		return PLANE_COLOR_ALPHA_SW_PREMULTIPLY;
4708	case DRM_MODE_BLEND_COVERAGE:
4709		return PLANE_COLOR_ALPHA_HW_PREMULTIPLY;
4710	default:
4711		MISSING_CASE(plane_state->hw.pixel_blend_mode);
4712		return PLANE_COLOR_ALPHA_DISABLE;
4713	}
4714}
4715
/*
 * Translate a framebuffer modifier to the SKL+ PLANE_CTL tiling and
 * decompression bits. Returns 0 (linear) for DRM_FORMAT_MOD_LINEAR
 * and for unknown modifiers.
 */
static u32 skl_plane_ctl_tiling(u64 fb_modifier)
{
	switch (fb_modifier) {
	case DRM_FORMAT_MOD_LINEAR:
		break;
	case I915_FORMAT_MOD_X_TILED:
		return PLANE_CTL_TILED_X;
	case I915_FORMAT_MOD_Y_TILED:
		return PLANE_CTL_TILED_Y;
	case I915_FORMAT_MOD_Y_TILED_CCS:
		return PLANE_CTL_TILED_Y | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
		/* gen12 render compression without fast clear color */
		return PLANE_CTL_TILED_Y |
		       PLANE_CTL_RENDER_DECOMPRESSION_ENABLE |
		       PLANE_CTL_CLEAR_COLOR_DISABLE;
	case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
		return PLANE_CTL_TILED_Y | PLANE_CTL_MEDIA_DECOMPRESSION_ENABLE;
	case I915_FORMAT_MOD_Yf_TILED:
		return PLANE_CTL_TILED_YF;
	case I915_FORMAT_MOD_Yf_TILED_CCS:
		return PLANE_CTL_TILED_YF | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
	default:
		MISSING_CASE(fb_modifier);
	}

	return 0;
}
4743
4744static u32 skl_plane_ctl_rotate(unsigned int rotate)
4745{
4746	switch (rotate) {
4747	case DRM_MODE_ROTATE_0:
4748		break;
4749	/*
4750	 * DRM_MODE_ROTATE_ is counter clockwise to stay compatible with Xrandr
4751	 * while i915 HW rotation is clockwise, thats why this swapping.
4752	 */
4753	case DRM_MODE_ROTATE_90:
4754		return PLANE_CTL_ROTATE_270;
4755	case DRM_MODE_ROTATE_180:
4756		return PLANE_CTL_ROTATE_180;
4757	case DRM_MODE_ROTATE_270:
4758		return PLANE_CTL_ROTATE_90;
4759	default:
4760		MISSING_CASE(rotate);
4761	}
4762
4763	return 0;
4764}
4765
4766static u32 cnl_plane_ctl_flip(unsigned int reflect)
4767{
4768	switch (reflect) {
4769	case 0:
4770		break;
4771	case DRM_MODE_REFLECT_X:
4772		return PLANE_CTL_FLIP_HORIZONTAL;
4773	case DRM_MODE_REFLECT_Y:
4774	default:
4775		MISSING_CASE(reflect);
4776	}
4777
4778	return 0;
4779}
4780
4781u32 skl_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
4782{
4783	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
4784	u32 plane_ctl = 0;
4785
4786	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
4787		return plane_ctl;
4788
4789	if (crtc_state->gamma_enable)
4790		plane_ctl |= PLANE_CTL_PIPE_GAMMA_ENABLE;
4791
4792	if (crtc_state->csc_enable)
4793		plane_ctl |= PLANE_CTL_PIPE_CSC_ENABLE;
4794
4795	return plane_ctl;
4796}
4797
/*
 * Compute the framebuffer/plane-state dependent bits of PLANE_CTL.
 * The crtc dependent bits come from skl_plane_ctl_crtc().
 */
u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
		  const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->uapi.plane->dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	unsigned int rotation = plane_state->hw.rotation;
	const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
	u32 plane_ctl;

	plane_ctl = PLANE_CTL_ENABLE;

	/*
	 * Pre-glk the alpha mode, plane gamma and YUV->RGB CSC setup
	 * are configured via PLANE_CTL; glk+ uses PLANE_COLOR_CTL for
	 * these (see glk_plane_color_ctl()).
	 */
	if (INTEL_GEN(dev_priv) < 10 && !IS_GEMINILAKE(dev_priv)) {
		plane_ctl |= skl_plane_ctl_alpha(plane_state);
		plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;

		if (plane_state->hw.color_encoding == DRM_COLOR_YCBCR_BT709)
			plane_ctl |= PLANE_CTL_YUV_TO_RGB_CSC_FORMAT_BT709;

		if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
			plane_ctl |= PLANE_CTL_YUV_RANGE_CORRECTION_DISABLE;
	}

	plane_ctl |= skl_plane_ctl_format(fb->format->format);
	plane_ctl |= skl_plane_ctl_tiling(fb->modifier);
	plane_ctl |= skl_plane_ctl_rotate(rotation & DRM_MODE_ROTATE_MASK);

	/* Horizontal flip is only available on cnl+. */
	if (INTEL_GEN(dev_priv) >= 10)
		plane_ctl |= cnl_plane_ctl_flip(rotation &
						DRM_MODE_REFLECT_MASK);

	if (key->flags & I915_SET_COLORKEY_DESTINATION)
		plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION;
	else if (key->flags & I915_SET_COLORKEY_SOURCE)
		plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE;

	return plane_ctl;
}
4836
4837u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state)
4838{
4839	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
4840	u32 plane_color_ctl = 0;
4841
4842	if (INTEL_GEN(dev_priv) >= 11)
4843		return plane_color_ctl;
4844
4845	if (crtc_state->gamma_enable)
4846		plane_color_ctl |= PLANE_COLOR_PIPE_GAMMA_ENABLE;
4847
4848	if (crtc_state->csc_enable)
4849		plane_color_ctl |= PLANE_COLOR_PIPE_CSC_ENABLE;
4850
4851	return plane_color_ctl;
4852}
4853
/*
 * Compute the plane-state dependent bits of the glk+ PLANE_COLOR_CTL
 * register: alpha blend mode, plane gamma and the YUV->RGB CSC mode.
 * The crtc dependent bits come from glk_plane_color_ctl_crtc().
 */
u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
			const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->uapi.plane->dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	u32 plane_color_ctl = 0;

	plane_color_ctl |= PLANE_COLOR_PLANE_GAMMA_DISABLE;
	plane_color_ctl |= glk_plane_color_ctl_alpha(plane_state);

	if (fb->format->is_yuv && !icl_is_hdr_plane(dev_priv, plane->id)) {
		/* Fixed-function CSC mode, selected by color encoding. */
		switch (plane_state->hw.color_encoding) {
		case DRM_COLOR_YCBCR_BT709:
			plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709;
			break;
		case DRM_COLOR_YCBCR_BT2020:
			plane_color_ctl |=
				PLANE_COLOR_CSC_MODE_YUV2020_TO_RGB2020;
			break;
		default:
			plane_color_ctl |=
				PLANE_COLOR_CSC_MODE_YUV601_TO_RGB601;
		}
		if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
			plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE;
	} else if (fb->format->is_yuv) {
		/* HDR planes use the programmable input CSC instead. */
		plane_color_ctl |= PLANE_COLOR_INPUT_CSC_ENABLE;
		if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
			plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE;
	}

	return plane_color_ctl;
}
4889
/*
 * Restore the display after a GPU reset or resume: re-take the
 * hardware state, re-enable VGA if needed, and (if a duplicated
 * atomic state is given) commit it with a forced modeset recompute.
 *
 * Returns 0 or a negative error code; -EDEADLK must not leak out
 * since the caller holds the acquire context.
 */
static int
__intel_display_resume(struct drm_device *dev,
		       struct drm_atomic_state *state,
		       struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	int i, ret;

	intel_modeset_setup_hw_state(dev, ctx);
	intel_vga_redisable(to_i915(dev));

	if (!state)
		return 0;

	/*
	 * We've duplicated the state, pointers to the old state are invalid.
	 *
	 * Don't attempt to use the old state until we commit the duplicated state.
	 */
	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		/*
		 * Force recalculation even if we restore
		 * current state. With fast modeset this may not result
		 * in a modeset when the state is compatible.
		 */
		crtc_state->mode_changed = true;
	}

	/* ignore any reset values/BIOS leftovers in the WM registers */
	if (!HAS_GMCH(to_i915(dev)))
		to_intel_atomic_state(state)->skip_intermediate_wm = true;

	ret = drm_atomic_helper_commit_duplicated_state(state, ctx);

	drm_WARN_ON(dev, ret == -EDEADLK);
	return ret;
}
4928
4929static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
4930{
4931	return (INTEL_INFO(dev_priv)->gpu_reset_clobbers_display &&
4932		intel_has_gpu_reset(&dev_priv->gt));
4933}
4934
/*
 * Quiesce the display before a GPU reset that will clobber it.
 *
 * Takes all modeset locks (held until intel_finish_reset()), duplicates
 * the current atomic state for later restore, and disables all CRTCs.
 * Sets I915_RESET_MODESET so intel_finish_reset() knows it must run and
 * release the locks — including on the early error returns below.
 */
void intel_prepare_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
	struct drm_atomic_state *state;
	int ret;

	/* reset doesn't touch the display */
	if (!dev_priv->params.force_reset_modeset_test &&
	    !gpu_reset_clobbers_display(dev_priv))
		return;

	/* We have a modeset vs reset deadlock, defensively unbreak it. */
	set_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags);
	smp_mb__after_atomic();
	wake_up_bit(&dev_priv->gt.reset.flags, I915_RESET_MODESET);

	if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) {
		drm_dbg_kms(&dev_priv->drm,
			    "Modeset potentially stuck, unbreaking through wedging\n");
		intel_gt_set_wedged(&dev_priv->gt);
	}

	/*
	 * Need mode_config.mutex so that we don't
	 * trample ongoing ->detect() and whatnot.
	 */
	mutex_lock(&dev->mode_config.mutex);
	drm_modeset_acquire_init(ctx, 0);
	/* Retry on lock contention until every modeset lock is held. */
	while (1) {
		ret = drm_modeset_lock_all_ctx(dev, ctx);
		if (ret != -EDEADLK)
			break;

		drm_modeset_backoff(ctx);
	}
	/*
	 * Disabling the crtcs gracefully seems nicer. Also the
	 * g33 docs say we should at least disable all the planes.
	 */
	state = drm_atomic_helper_duplicate_state(dev, ctx);
	if (IS_ERR(state)) {
		ret = PTR_ERR(state);
		drm_err(&dev_priv->drm, "Duplicating state failed with %i\n",
			ret);
		/* Locks stay held; intel_finish_reset() drops them. */
		return;
	}

	ret = drm_atomic_helper_disable_all(dev, ctx);
	if (ret) {
		drm_err(&dev_priv->drm, "Suspending crtc's failed with %i\n",
			ret);
		drm_atomic_state_put(state);
		/* Locks stay held; intel_finish_reset() drops them. */
		return;
	}

	/* Stash the duplicated state for intel_finish_reset() to restore. */
	dev_priv->modeset_restore_state = state;
	state->acquire_ctx = ctx;
}
4994
/*
 * Counterpart of intel_prepare_reset(): restore the display state saved
 * before the GPU reset, then release the modeset locks and clear the
 * I915_RESET_MODESET flag set by intel_prepare_reset().
 */
void intel_finish_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
	struct drm_atomic_state *state;
	int ret;

	/* reset doesn't touch the display */
	if (!test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags))
		return;

	/* Claim the saved state; NULL means prepare bailed out early. */
	state = fetch_and_zero(&dev_priv->modeset_restore_state);
	if (!state)
		goto unlock;

	/* reset doesn't touch the display */
	if (!gpu_reset_clobbers_display(dev_priv)) {
		/* for testing only restore the display */
		ret = __intel_display_resume(dev, state, ctx);
		if (ret)
			drm_err(&dev_priv->drm,
				"Restoring old state failed with %i\n", ret);
	} else {
		/*
		 * The display has been reset as well,
		 * so need a full re-initialization.
		 */
		intel_pps_unlock_regs_wa(dev_priv);
		intel_modeset_init_hw(dev_priv);
		intel_init_clock_gating(dev_priv);

		/* Re-arm hotplug interrupts before restoring the state. */
		spin_lock_irq(&dev_priv->irq_lock);
		if (dev_priv->display.hpd_irq_setup)
			dev_priv->display.hpd_irq_setup(dev_priv);
		spin_unlock_irq(&dev_priv->irq_lock);

		ret = __intel_display_resume(dev, state, ctx);
		if (ret)
			drm_err(&dev_priv->drm,
				"Restoring old state failed with %i\n", ret);

		intel_hpd_init(dev_priv);
	}

	drm_atomic_state_put(state);
unlock:
	/* Release the locks taken in intel_prepare_reset(). */
	drm_modeset_drop_locks(ctx);
	drm_modeset_acquire_fini(ctx);
	mutex_unlock(&dev->mode_config.mutex);

	clear_bit_unlock(I915_RESET_MODESET, &dev_priv->gt.reset.flags);
}
5047
5048static void icl_set_pipe_chicken(struct intel_crtc *crtc)
5049{
5050	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5051	enum pipe pipe = crtc->pipe;
5052	u32 tmp;
5053
5054	tmp = intel_de_read(dev_priv, PIPE_CHICKEN(pipe));
5055
5056	/*
5057	 * Display WA #1153: icl
5058	 * enable hardware to bypass the alpha math
5059	 * and rounding for per-pixel values 00 and 0xff
5060	 */
5061	tmp |= PER_PIXEL_ALPHA_BYPASS_EN;
5062	/*
5063	 * Display WA # 1605353570: icl
5064	 * Set the pixel rounding bit to 1 for allowing
5065	 * passthrough of Frame buffer pixels unmodified
5066	 * across pipe
5067	 */
5068	tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU;
5069	intel_de_write(dev_priv, PIPE_CHICKEN(pipe), tmp);
5070}
5071
/*
 * Switch the FDI TX/RX for @crtc's pipe from a training pattern to the
 * normal (enhanced framing) link state, once link training has completed.
 */
static void intel_fdi_normal_train(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable normal train */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	/* IVB uses a different train-pattern field layout than ILK/SNB. */
	if (IS_IVYBRIDGE(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
	}
	intel_de_write(dev_priv, reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	/* CPT PCH has its own pattern field on the RX side. */
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE;
	}
	intel_de_write(dev_priv, reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);

	/* wait one idle pattern time */
	intel_de_posting_read(dev_priv, reg);
	udelay(1000);

	/* IVB wants error correction enabled */
	if (IS_IVYBRIDGE(dev_priv))
		intel_de_write(dev_priv, reg,
		               intel_de_read(dev_priv, reg) | FDI_FS_ERRC_ENABLE | FDI_FE_ERRC_ENABLE);
}
5112
/*
 * The FDI link training functions for ILK/Ibexpeak.
 *
 * Two-phase training: pattern 1 until bit lock is reported in FDI_RX_IIR,
 * then pattern 2 until symbol lock. Each phase polls up to 5 times;
 * failures are logged but not otherwise propagated.
 */
static void ilk_fdi_link_train(struct intel_crtc *crtc,
			       const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, tries;

	/* FDI needs bits from pipe first */
	assert_pipe_enabled(dev_priv, crtc_state->cpu_transcoder);

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	intel_de_write(dev_priv, reg, temp);
	intel_de_read(dev_priv, reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	/* Ironlake workaround, enable clock pointer after FDI enable*/
	intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
		       FDI_RX_PHASE_SYNC_POINTER_OVR);
	intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
		       FDI_RX_PHASE_SYNC_POINTER_OVR | FDI_RX_PHASE_SYNC_POINTER_EN);

	/* Poll FDI_RX_IIR for bit lock; writing the bit back clears it. */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = intel_de_read(dev_priv, reg);
		drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);

		if ((temp & FDI_RX_BIT_LOCK)) {
			drm_dbg_kms(&dev_priv->drm, "FDI train 1 done.\n");
			intel_de_write(dev_priv, reg, temp | FDI_RX_BIT_LOCK);
			break;
		}
	}
	if (tries == 5)
		drm_err(&dev_priv->drm, "FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	intel_de_write(dev_priv, reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	intel_de_write(dev_priv, reg, temp);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	/* Poll for symbol lock to finish phase 2. */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = intel_de_read(dev_priv, reg);
		drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_SYMBOL_LOCK) {
			intel_de_write(dev_priv, reg,
				       temp | FDI_RX_SYMBOL_LOCK);
			drm_dbg_kms(&dev_priv->drm, "FDI train 2 done.\n");
			break;
		}
	}
	if (tries == 5)
		drm_err(&dev_priv->drm, "FDI train 2 fail!\n");

	drm_dbg_kms(&dev_priv->drm, "FDI train done\n");

}
5208
/*
 * Voltage-swing / pre-emphasis combinations tried (in order) during
 * SNB/IVB FDI link training; indexed by the training loop below.
 */
static const int snb_b_fdi_train_param[] = {
	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
};
5215
/*
 * The FDI link training functions for SNB/Cougarpoint.
 *
 * Like the ILK variant, but each training phase additionally steps
 * through the snb_b_fdi_train_param[] vswing/pre-emphasis table, with
 * up to 5 lock polls per setting.
 */
static void gen6_fdi_link_train(struct intel_crtc *crtc,
				const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, retry;

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	intel_de_write(dev_priv, reg, temp);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
	/* SNB-B */
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);

	intel_de_write(dev_priv, FDI_RX_MISC(pipe),
		       FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	/* CPT PCH uses a dedicated RX train-pattern field. */
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	/* Phase 1: try each vswing/pre-emphasis level until bit lock. */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		intel_de_write(dev_priv, reg, temp);

		intel_de_posting_read(dev_priv, reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = intel_de_read(dev_priv, reg);
			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_BIT_LOCK) {
				/* Write-to-clear the lock bit. */
				intel_de_write(dev_priv, reg,
					       temp | FDI_RX_BIT_LOCK);
				drm_dbg_kms(&dev_priv->drm,
					    "FDI train 1 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		drm_err(&dev_priv->drm, "FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	if (IS_GEN(dev_priv, 6)) {
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* SNB-B */
		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	}
	intel_de_write(dev_priv, reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_2;
	}
	intel_de_write(dev_priv, reg, temp);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	/* Phase 2: same vswing sweep, now waiting for symbol lock. */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		intel_de_write(dev_priv, reg, temp);

		intel_de_posting_read(dev_priv, reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = intel_de_read(dev_priv, reg);
			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_SYMBOL_LOCK) {
				intel_de_write(dev_priv, reg,
					       temp | FDI_RX_SYMBOL_LOCK);
				drm_dbg_kms(&dev_priv->drm,
					    "FDI train 2 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		drm_err(&dev_priv->drm, "FDI train 2 fail!\n");

	drm_dbg_kms(&dev_priv->drm, "FDI train done.\n");
}
5352
/*
 * Manual link training for Ivy Bridge A0 parts.
 *
 * Unlike ILK/SNB, each vswing/pre-emphasis level is attempted twice
 * (j / 2 indexes snb_b_fdi_train_param[]), and the TX/RX links are
 * fully disabled and re-enabled before each attempt.
 */
static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
				      const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, j;

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	intel_de_write(dev_priv, reg, temp);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR before link train 0x%x\n",
		    intel_de_read(dev_priv, FDI_RX_IIR(pipe)));

	/* Try each vswing and preemphasis setting twice before moving on */
	for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
		/* disable first in case we need to retry */
		reg = FDI_TX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
		temp &= ~FDI_TX_ENABLE;
		intel_de_write(dev_priv, reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~FDI_LINK_TRAIN_AUTO;
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp &= ~FDI_RX_ENABLE;
		intel_de_write(dev_priv, reg, temp);

		/* enable CPU FDI TX and PCH FDI RX */
		reg = FDI_TX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~FDI_DP_PORT_WIDTH_MASK;
		temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
		temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* Each table entry is tried twice, hence j/2. */
		temp |= snb_b_fdi_train_param[j/2];
		temp |= FDI_COMPOSITE_SYNC;
		intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);

		intel_de_write(dev_priv, FDI_RX_MISC(pipe),
			       FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

		reg = FDI_RX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
		temp |= FDI_COMPOSITE_SYNC;
		intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);

		intel_de_posting_read(dev_priv, reg);
		udelay(1); /* should be 0.5us */

		/* Poll (up to 4 tries, double-read) for bit lock. */
		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = intel_de_read(dev_priv, reg);
			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_BIT_LOCK ||
			    (intel_de_read(dev_priv, reg) & FDI_RX_BIT_LOCK)) {
				intel_de_write(dev_priv, reg,
					       temp | FDI_RX_BIT_LOCK);
				drm_dbg_kms(&dev_priv->drm,
					    "FDI train 1 done, level %i.\n",
					    i);
				break;
			}
			udelay(1); /* should be 0.5us */
		}
		if (i == 4) {
			drm_dbg_kms(&dev_priv->drm,
				    "FDI train 1 fail on vswing %d\n", j / 2);
			continue;
		}

		/* Train 2 */
		reg = FDI_TX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
		intel_de_write(dev_priv, reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
		intel_de_write(dev_priv, reg, temp);

		intel_de_posting_read(dev_priv, reg);
		udelay(2); /* should be 1.5us */

		/* Poll for symbol lock; success ends training entirely. */
		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = intel_de_read(dev_priv, reg);
			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_SYMBOL_LOCK ||
			    (intel_de_read(dev_priv, reg) & FDI_RX_SYMBOL_LOCK)) {
				intel_de_write(dev_priv, reg,
					       temp | FDI_RX_SYMBOL_LOCK);
				drm_dbg_kms(&dev_priv->drm,
					    "FDI train 2 done, level %i.\n",
					    i);
				goto train_done;
			}
			udelay(2); /* should be 1.5us */
		}
		if (i == 4)
			drm_dbg_kms(&dev_priv->drm,
				    "FDI train 2 fail on vswing %d\n", j / 2);
	}

train_done:
	drm_dbg_kms(&dev_priv->drm, "FDI train done.\n");
}
5478
/*
 * Enable the FDI PLLs for @crtc_state's pipe: first the PCH FDI RX PLL
 * (with lane count and BPC mirrored from PIPECONF), then switch the RX
 * to PCDclk, and finally make sure the CPU FDI TX PLL is running.
 */
static void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
	enum pipe pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	/* Mirror the pipe's BPC field into FDI RX (bits 18:16). */
	temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	intel_de_write(dev_priv, reg, temp | FDI_RX_PLL_ENABLE);

	intel_de_posting_read(dev_priv, reg);
	udelay(200);

	/* Switch from Rawclk to PCDclk */
	temp = intel_de_read(dev_priv, reg);
	intel_de_write(dev_priv, reg, temp | FDI_PCDCLK);

	intel_de_posting_read(dev_priv, reg);
	udelay(200);

	/* Enable CPU FDI TX PLL, always on for Ironlake */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
		intel_de_write(dev_priv, reg, temp | FDI_TX_PLL_ENABLE);

		intel_de_posting_read(dev_priv, reg);
		udelay(100);
	}
}
5515
/*
 * Disable the FDI PLLs for @intel_crtc's pipe, reversing the order used
 * in ilk_fdi_pll_enable(): RX back to Rawclk, TX PLL off, RX PLL off.
 */
static void ilk_fdi_pll_disable(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* Switch from PCDclk to Rawclk */
	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	intel_de_write(dev_priv, reg, temp & ~FDI_PCDCLK);

	/* Disable CPU FDI TX PLL */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	intel_de_write(dev_priv, reg, temp & ~FDI_TX_PLL_ENABLE);

	intel_de_posting_read(dev_priv, reg);
	udelay(100);

	/* Disable the PCH FDI RX PLL last. */
	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	intel_de_write(dev_priv, reg, temp & ~FDI_RX_PLL_ENABLE);

	/* Wait for the clocks to turn off. */
	intel_de_posting_read(dev_priv, reg);
	udelay(100);
}
5545
/*
 * Disable the CPU FDI TX and PCH FDI RX links for @crtc's pipe, leaving
 * them parked in training pattern 1 with BPC matching PIPECONF.
 */
static void ilk_fdi_disable(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* disable CPU FDI tx and PCH FDI rx */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	intel_de_write(dev_priv, reg, temp & ~FDI_TX_ENABLE);
	intel_de_posting_read(dev_priv, reg);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~(0x7 << 16);
	/* Keep the RX BPC field (18:16) in sync with PIPECONF. */
	temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	intel_de_write(dev_priv, reg, temp & ~FDI_RX_ENABLE);

	intel_de_posting_read(dev_priv, reg);
	udelay(100);

	/* Ironlake workaround, disable clock pointer after downing FDI */
	if (HAS_PCH_IBX(dev_priv))
		intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
			       FDI_RX_PHASE_SYNC_POINTER_OVR);

	/* still set train pattern 1 */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	intel_de_write(dev_priv, reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	/* BPC in FDI rx is consistent with that in PIPECONF */
	temp &= ~(0x07 << 16);
	temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	intel_de_write(dev_priv, reg, temp);

	intel_de_posting_read(dev_priv, reg);
	udelay(100);
}
5597
5598bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
5599{
5600	struct drm_crtc *crtc;
5601	bool cleanup_done;
5602
5603	drm_for_each_crtc(crtc, &dev_priv->drm) {
5604		struct drm_crtc_commit *commit;
5605		spin_lock(&crtc->commit_lock);
5606		commit = list_first_entry_or_null(&crtc->commit_list,
5607						  struct drm_crtc_commit, commit_entry);
5608		cleanup_done = commit ?
5609			try_wait_for_completion(&commit->cleanup_done) : true;
5610		spin_unlock(&crtc->commit_lock);
5611
5612		if (cleanup_done)
5613			continue;
5614
5615		drm_crtc_wait_one_vblank(crtc);
5616
5617		return true;
5618	}
5619
5620	return false;
5621}
5622
/*
 * Gate the iCLKIP pixel clock and disable its SSC modulator via the
 * sideband interface (sb_lock serializes sideband access).
 */
void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
{
	u32 temp;

	intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_GATE);

	mutex_lock(&dev_priv->sb_lock);

	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp |= SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}
5637
/* Program iCLKIP clock to the desired frequency */
static void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	int clock = crtc_state->hw.adjusted_mode.crtc_clock;
	u32 divsel, phaseinc, auxdiv, phasedir = 0;
	u32 temp;

	/* Disable SSC first so the dividers can be reprogrammed safely. */
	lpt_disable_iclkip(dev_priv);

	/* The iCLK virtual clock root frequency is in MHz,
	 * but the adjusted_mode->crtc_clock is in KHz. To get the
	 * divisors, it is necessary to divide one by another, so we
	 * convert the virtual clock precision to KHz here for higher
	 * precision.
	 */
	for (auxdiv = 0; auxdiv < 2; auxdiv++) {
		u32 iclk_virtual_root_freq = 172800 * 1000;
		u32 iclk_pi_range = 64;
		u32 desired_divisor;

		/* Split the divisor into integer (divsel) and phase parts. */
		desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
						    clock << auxdiv);
		divsel = (desired_divisor / iclk_pi_range) - 2;
		phaseinc = desired_divisor % iclk_pi_range;

		/*
		 * Near 20MHz is a corner case which is
		 * out of range for the 7-bit divisor
		 */
		if (divsel <= 0x7f)
			break;
	}

	/* This should not happen with any sane values */
	drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
		    ~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
	drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIR(phasedir) &
		    ~SBI_SSCDIVINTPHASE_INCVAL_MASK);

	drm_dbg_kms(&dev_priv->drm,
		    "iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
		    clock, auxdiv, divsel, phasedir, phaseinc);

	/* All SBI accesses are serialized by sb_lock. */
	mutex_lock(&dev_priv->sb_lock);

	/* Program SSCDIVINTPHASE6 */
	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
	temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
	temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
	temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
	temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
	temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);

	/* Program SSCAUXDIV */
	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
	temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
	intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);

	/* Enable modulator and associated divider */
	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp &= ~SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);

	/* Wait for initialization time */
	udelay(24);

	/* Finally ungate the pixel clock. */
	intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_UNGATE);
}
5713
/*
 * Read back the currently programmed iCLKIP frequency (in kHz),
 * inverting the divisor math of lpt_program_iclkip(). Returns 0 when
 * the pixel clock is gated or the SSC modulator is disabled.
 */
int lpt_get_iclkip(struct drm_i915_private *dev_priv)
{
	u32 divsel, phaseinc, auxdiv;
	u32 iclk_virtual_root_freq = 172800 * 1000;
	u32 iclk_pi_range = 64;
	u32 desired_divisor;
	u32 temp;

	if ((intel_de_read(dev_priv, PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
		return 0;

	mutex_lock(&dev_priv->sb_lock);

	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	if (temp & SBI_SSCCTL_DISABLE) {
		mutex_unlock(&dev_priv->sb_lock);
		return 0;
	}

	/* Extract the integer divisor and phase increment fields. */
	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >>
		SBI_SSCDIVINTPHASE_DIVSEL_SHIFT;
	phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >>
		SBI_SSCDIVINTPHASE_INCVAL_SHIFT;

	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >>
		SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT;

	mutex_unlock(&dev_priv->sb_lock);

	/* Reverse of: desired_divisor = divsel * range + phaseinc (+2). */
	desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc;

	return DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
				 desired_divisor << auxdiv);
}
5750
/*
 * Copy the CPU transcoder's timing registers verbatim into the PCH
 * transcoder so both sides of the FDI link agree on the mode timings.
 */
static void ilk_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state,
					   enum pipe pch_transcoder)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	/* Horizontal timings. */
	intel_de_write(dev_priv, PCH_TRANS_HTOTAL(pch_transcoder),
		       intel_de_read(dev_priv, HTOTAL(cpu_transcoder)));
	intel_de_write(dev_priv, PCH_TRANS_HBLANK(pch_transcoder),
		       intel_de_read(dev_priv, HBLANK(cpu_transcoder)));
	intel_de_write(dev_priv, PCH_TRANS_HSYNC(pch_transcoder),
		       intel_de_read(dev_priv, HSYNC(cpu_transcoder)));

	/* Vertical timings. */
	intel_de_write(dev_priv, PCH_TRANS_VTOTAL(pch_transcoder),
		       intel_de_read(dev_priv, VTOTAL(cpu_transcoder)));
	intel_de_write(dev_priv, PCH_TRANS_VBLANK(pch_transcoder),
		       intel_de_read(dev_priv, VBLANK(cpu_transcoder)));
	intel_de_write(dev_priv, PCH_TRANS_VSYNC(pch_transcoder),
		       intel_de_read(dev_priv, VSYNC(cpu_transcoder)));
	intel_de_write(dev_priv, PCH_TRANS_VSYNCSHIFT(pch_transcoder),
		       intel_de_read(dev_priv, VSYNCSHIFT(cpu_transcoder)));
}
5774
/*
 * Toggle the SOUTH_CHICKEN1 FDI B/C bifurcation bit. No-op when the bit
 * already matches @enable. Must not be flipped while FDI RX on pipe B
 * or C is enabled (WARNed on).
 */
static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable)
{
	u32 temp;

	temp = intel_de_read(dev_priv, SOUTH_CHICKEN1);
	if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
		return;

	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, FDI_RX_CTL(PIPE_B)) &
		    FDI_RX_ENABLE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, FDI_RX_CTL(PIPE_C)) &
		    FDI_RX_ENABLE);

	temp &= ~FDI_BC_BIFURCATION_SELECT;
	if (enable)
		temp |= FDI_BC_BIFURCATION_SELECT;

	drm_dbg_kms(&dev_priv->drm, "%sabling fdi C rx\n",
		    enable ? "en" : "dis");
	intel_de_write(dev_priv, SOUTH_CHICKEN1, temp);
	intel_de_posting_read(dev_priv, SOUTH_CHICKEN1);
}
5799
5800static void ivb_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state)
5801{
5802	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5803	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5804
5805	switch (crtc->pipe) {
5806	case PIPE_A:
5807		break;
5808	case PIPE_B:
5809		if (crtc_state->fdi_lanes > 2)
5810			cpt_set_fdi_bc_bifurcation(dev_priv, false);
5811		else
5812			cpt_set_fdi_bc_bifurcation(dev_priv, true);
5813
5814		break;
5815	case PIPE_C:
5816		cpt_set_fdi_bc_bifurcation(dev_priv, true);
5817
5818		break;
5819	default:
5820		BUG();
5821	}
5822}
5823
5824/*
5825 * Finds the encoder associated with the given CRTC. This can only be
5826 * used when we know that the CRTC isn't feeding multiple encoders!
5827 */
5828static struct intel_encoder *
5829intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
5830			   const struct intel_crtc_state *crtc_state)
5831{
5832	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5833	const struct drm_connector_state *connector_state;
5834	const struct drm_connector *connector;
5835	struct intel_encoder *encoder = NULL;
5836	int num_encoders = 0;
5837	int i;
5838
5839	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
5840		if (connector_state->crtc != &crtc->base)
5841			continue;
5842
5843		encoder = to_intel_encoder(connector_state->best_encoder);
5844		num_encoders++;
5845	}
5846
5847	drm_WARN(state->base.dev, num_encoders != 1,
5848		 "%d encoders for pipe %c\n",
5849		 num_encoders, pipe_name(crtc->pipe));
5850
5851	return encoder;
5852}
5853
5854/*
5855 * Enable PCH resources required for PCH ports:
5856 *   - PCH PLLs
5857 *   - FDI training & RX/TX
5858 *   - update transcoder timings
5859 *   - DP transcoding bits
5860 *   - transcoder
5861 */
5862static void ilk_pch_enable(const struct intel_atomic_state *state,
5863			   const struct intel_crtc_state *crtc_state)
5864{
5865	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5866	struct drm_device *dev = crtc->base.dev;
5867	struct drm_i915_private *dev_priv = to_i915(dev);
5868	enum pipe pipe = crtc->pipe;
5869	u32 temp;
5870
5871	assert_pch_transcoder_disabled(dev_priv, pipe);
5872
5873	if (IS_IVYBRIDGE(dev_priv))
5874		ivb_update_fdi_bc_bifurcation(crtc_state);
5875
5876	/* Write the TU size bits before fdi link training, so that error
5877	 * detection works. */
5878	intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe),
5879		       intel_de_read(dev_priv, PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
5880
5881	/* For PCH output, training FDI link */
5882	dev_priv->display.fdi_link_train(crtc, crtc_state);
5883
5884	/* We need to program the right clock selection before writing the pixel
5885	 * mutliplier into the DPLL. */
5886	if (HAS_PCH_CPT(dev_priv)) {
5887		u32 sel;
5888
5889		temp = intel_de_read(dev_priv, PCH_DPLL_SEL);
5890		temp |= TRANS_DPLL_ENABLE(pipe);
5891		sel = TRANS_DPLLB_SEL(pipe);
5892		if (crtc_state->shared_dpll ==
5893		    intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B))
5894			temp |= sel;
5895		else
5896			temp &= ~sel;
5897		intel_de_write(dev_priv, PCH_DPLL_SEL, temp);
5898	}
5899
5900	/* XXX: pch pll's can be enabled any time before we enable the PCH
5901	 * transcoder, and we actually should do this to not upset any PCH
5902	 * transcoder that already use the clock when we share it.
5903	 *
5904	 * Note that enable_shared_dpll tries to do the right thing, but
5905	 * get_shared_dpll unconditionally resets the pll - we need that to have
5906	 * the right LVDS enable sequence. */
5907	intel_enable_shared_dpll(crtc_state);
5908
5909	/* set transcoder timing, panel must allow it */
5910	assert_panel_unlocked(dev_priv, pipe);
5911	ilk_pch_transcoder_set_timings(crtc_state, pipe);
5912
5913	intel_fdi_normal_train(crtc);
5914
5915	/* For PCH DP, enable TRANS_DP_CTL */
5916	if (HAS_PCH_CPT(dev_priv) &&
5917	    intel_crtc_has_dp_encoder(crtc_state)) {
5918		const struct drm_display_mode *adjusted_mode =
5919			&crtc_state->hw.adjusted_mode;
5920		u32 bpc = (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
5921		i915_reg_t reg = TRANS_DP_CTL(pipe);
5922		enum port port;
5923
5924		temp = intel_de_read(dev_priv, reg);
5925		temp &= ~(TRANS_DP_PORT_SEL_MASK |
5926			  TRANS_DP_SYNC_MASK |
5927			  TRANS_DP_BPC_MASK);
5928		temp |= TRANS_DP_OUTPUT_ENABLE;
5929		temp |= bpc << 9; /* same format but at 11:9 */
5930
5931		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
5932			temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
5933		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
5934			temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
5935
5936		port = intel_get_crtc_new_encoder(state, crtc_state)->port;
5937		drm_WARN_ON(dev, port < PORT_B || port > PORT_D);
5938		temp |= TRANS_DP_PORT_SEL(port);
5939
5940		intel_de_write(dev_priv, reg, temp);
5941	}
5942
5943	ilk_enable_pch_transcoder(crtc_state);
5944}
5945
5946void lpt_pch_enable(const struct intel_crtc_state *crtc_state)
5947{
5948	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5949	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5950	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
5951
5952	assert_pch_transcoder_disabled(dev_priv, PIPE_A);
5953
5954	lpt_program_iclkip(crtc_state);
5955
5956	/* Set transcoder timing. */
5957	ilk_pch_transcoder_set_timings(crtc_state, PIPE_A);
5958
5959	lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
5960}
5961
5962static void cpt_verify_modeset(struct drm_i915_private *dev_priv,
5963			       enum pipe pipe)
5964{
5965	i915_reg_t dslreg = PIPEDSL(pipe);
5966	u32 temp;
5967
5968	temp = intel_de_read(dev_priv, dslreg);
5969	udelay(500);
5970	if (wait_for(intel_de_read(dev_priv, dslreg) != temp, 5)) {
5971		if (wait_for(intel_de_read(dev_priv, dslreg) != temp, 5))
5972			drm_err(&dev_priv->drm,
5973				"mode set failed: pipe %c stuck\n",
5974				pipe_name(pipe));
5975	}
5976}
5977
5978/*
5979 * The hardware phase 0.0 refers to the center of the pixel.
5980 * We want to start from the top/left edge which is phase
5981 * -0.5. That matches how the hardware calculates the scaling
5982 * factors (from top-left of the first pixel to bottom-right
5983 * of the last pixel, as opposed to the pixel centers).
5984 *
5985 * For 4:2:0 subsampled chroma planes we obviously have to
5986 * adjust that so that the chroma sample position lands in
5987 * the right spot.
5988 *
5989 * Note that for packed YCbCr 4:2:2 formats there is no way to
5990 * control chroma siting. The hardware simply replicates the
5991 * chroma samples for both of the luma samples, and thus we don't
5992 * actually get the expected MPEG2 chroma siting convention :(
5993 * The same behaviour is observed on pre-SKL platforms as well.
5994 *
5995 * Theory behind the formula (note that we ignore sub-pixel
5996 * source coordinates):
5997 * s = source sample position
5998 * d = destination sample position
5999 *
6000 * Downscaling 4:1:
6001 * -0.5
6002 * | 0.0
6003 * | |     1.5 (initial phase)
6004 * | |     |
6005 * v v     v
6006 * | s | s | s | s |
6007 * |       d       |
6008 *
6009 * Upscaling 1:4:
6010 * -0.5
6011 * | -0.375 (initial phase)
6012 * | |     0.0
6013 * | |     |
6014 * v v     v
6015 * |       s       |
6016 * | d | d | d | d |
6017 */
6018u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_cosited)
6019{
6020	int phase = -0x8000;
6021	u16 trip = 0;
6022
6023	if (chroma_cosited)
6024		phase += (sub - 1) * 0x8000 / sub;
6025
6026	phase += scale / (2 * sub);
6027
6028	/*
6029	 * Hardware initial phase limited to [-0.5:1.5].
6030	 * Since the max hardware scale factor is 3.0, we
6031	 * should never actually excdeed 1.0 here.
6032	 */
6033	WARN_ON(phase < -0x8000 || phase > 0x18000);
6034
6035	if (phase < 0)
6036		phase = 0x10000 + phase;
6037	else
6038		trip = PS_PHASE_TRIP;
6039
6040	return ((phase >> 2) & PS_PHASE_MASK) | trip;
6041}
6042
6043#define SKL_MIN_SRC_W 8
6044#define SKL_MAX_SRC_W 4096
6045#define SKL_MIN_SRC_H 8
6046#define SKL_MAX_SRC_H 4096
6047#define SKL_MIN_DST_W 8
6048#define SKL_MAX_DST_W 4096
6049#define SKL_MIN_DST_H 8
6050#define SKL_MAX_DST_H 4096
6051#define ICL_MAX_SRC_W 5120
6052#define ICL_MAX_SRC_H 4096
6053#define ICL_MAX_DST_W 5120
6054#define ICL_MAX_DST_H 4096
6055#define SKL_MIN_YUV_420_SRC_W 16
6056#define SKL_MIN_YUV_420_SRC_H 16
6057
/*
 * skl_update_scaler - stage allocation/freeing of a scaler in the crtc state
 * @crtc_state: crtc state to update
 * @force_detach: free the scaler regardless of whether scaling is needed
 * @scaler_user: index identifying the user (plane index or SKL_CRTC_INDEX)
 * @scaler_id: in/out id of the scaler currently bound to this user (-1 if none)
 * @src_w: source width
 * @src_h: source height
 * @dst_w: destination width
 * @dst_h: destination height
 * @format: source pixel format (NULL for pipe/panel-fitter scaling)
 * @modifier: framebuffer modifier
 * @need_scaler: caller already determined a scaler is required
 *
 * Only updates the scaler_state bookkeeping in @crtc_state; the actual
 * scaler registers are programmed later during plane/panel-fitter
 * programming. Returns 0 on success, -EINVAL if the requested scaling
 * cannot be supported.
 */
static int
skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
		  unsigned int scaler_user, int *scaler_id,
		  int src_w, int src_h, int dst_w, int dst_h,
		  const struct drm_format_info *format,
		  u64 modifier, bool need_scaler)
{
	struct intel_crtc_scaler_state *scaler_state =
		&crtc_state->scaler_state;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->hw.adjusted_mode;

	/*
	 * Src coordinates are already rotated by 270 degrees for
	 * the 90/270 degree plane rotation cases (to match the
	 * GTT mapping), hence no need to account for rotation here.
	 */
	if (src_w != dst_w || src_h != dst_h)
		need_scaler = true;

	/*
	 * Scaling/fitting not supported in IF-ID mode in GEN9+
	 * TODO: Interlace fetch mode doesn't support YUV420 planar formats.
	 * Once NV12 is enabled, handle it here while allocating scaler
	 * for NV12.
	 */
	if (INTEL_GEN(dev_priv) >= 9 && crtc_state->hw.enable &&
	    need_scaler && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		drm_dbg_kms(&dev_priv->drm,
			    "Pipe/Plane scaling not supported with IF-ID mode\n");
		return -EINVAL;
	}

	/*
	 * if plane is being disabled or scaler is no more required or force detach
	 *  - free scaler binded to this plane/crtc
	 *  - in order to do this, update crtc->scaler_usage
	 *
	 * Here scaler state in crtc_state is set free so that
	 * scaler can be assigned to other user. Actual register
	 * update to free the scaler is done in plane/panel-fit programming.
	 * For this purpose crtc/plane_state->scaler_id isn't reset here.
	 */
	if (force_detach || !need_scaler) {
		if (*scaler_id >= 0) {
			scaler_state->scaler_users &= ~(1 << scaler_user);
			scaler_state->scalers[*scaler_id].in_use = 0;

			drm_dbg_kms(&dev_priv->drm,
				    "scaler_user index %u.%u: "
				    "Staged freeing scaler id %d scaler_users = 0x%x\n",
				    intel_crtc->pipe, scaler_user, *scaler_id,
				    scaler_state->scaler_users);
			*scaler_id = -1;
		}
		return 0;
	}

	/* Planar YUV sources have a larger minimum size than other formats. */
	if (format && intel_format_info_is_yuv_semiplanar(format, modifier) &&
	    (src_h < SKL_MIN_YUV_420_SRC_H || src_w < SKL_MIN_YUV_420_SRC_W)) {
		drm_dbg_kms(&dev_priv->drm,
			    "Planar YUV: src dimensions not met\n");
		return -EINVAL;
	}

	/* range checks */
	if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
	    dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||
	    (INTEL_GEN(dev_priv) >= 11 &&
	     (src_w > ICL_MAX_SRC_W || src_h > ICL_MAX_SRC_H ||
	      dst_w > ICL_MAX_DST_W || dst_h > ICL_MAX_DST_H)) ||
	    (INTEL_GEN(dev_priv) < 11 &&
	     (src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
	      dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H)))	{
		drm_dbg_kms(&dev_priv->drm,
			    "scaler_user index %u.%u: src %ux%u dst %ux%u "
			    "size is out of scaler range\n",
			    intel_crtc->pipe, scaler_user, src_w, src_h,
			    dst_w, dst_h);
		return -EINVAL;
	}

	/* mark this plane as a scaler user in crtc_state */
	scaler_state->scaler_users |= (1 << scaler_user);
	drm_dbg_kms(&dev_priv->drm, "scaler_user index %u.%u: "
		    "staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
		    intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
		    scaler_state->scaler_users);

	return 0;
}
6152
6153static int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state)
6154{
6155	const struct drm_display_mode *adjusted_mode =
6156		&crtc_state->hw.adjusted_mode;
6157	int width, height;
6158
6159	if (crtc_state->pch_pfit.enabled) {
6160		width = drm_rect_width(&crtc_state->pch_pfit.dst);
6161		height = drm_rect_height(&crtc_state->pch_pfit.dst);
6162	} else {
6163		width = adjusted_mode->crtc_hdisplay;
6164		height = adjusted_mode->crtc_vdisplay;
6165	}
6166
6167	return skl_update_scaler(crtc_state, !crtc_state->hw.active,
6168				 SKL_CRTC_INDEX,
6169				 &crtc_state->scaler_state.scaler_id,
6170				 crtc_state->pipe_src_w, crtc_state->pipe_src_h,
6171				 width, height, NULL, 0,
6172				 crtc_state->pch_pfit.enabled);
6173}
6174
6175/**
6176 * skl_update_scaler_plane - Stages update to scaler state for a given plane.
6177 * @crtc_state: crtc's scaler state
6178 * @plane_state: atomic plane state to update
6179 *
6180 * Return
6181 *     0 - scaler_usage updated successfully
6182 *    error - requested scaling cannot be supported or other error condition
6183 */
6184static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
6185				   struct intel_plane_state *plane_state)
6186{
6187	struct intel_plane *intel_plane =
6188		to_intel_plane(plane_state->uapi.plane);
6189	struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
6190	struct drm_framebuffer *fb = plane_state->hw.fb;
6191	int ret;
6192	bool force_detach = !fb || !plane_state->uapi.visible;
6193	bool need_scaler = false;
6194
6195	/* Pre-gen11 and SDR planes always need a scaler for planar formats. */
6196	if (!icl_is_hdr_plane(dev_priv, intel_plane->id) &&
6197	    fb && intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier))
6198		need_scaler = true;
6199
6200	ret = skl_update_scaler(crtc_state, force_detach,
6201				drm_plane_index(&intel_plane->base),
6202				&plane_state->scaler_id,
6203				drm_rect_width(&plane_state->uapi.src) >> 16,
6204				drm_rect_height(&plane_state->uapi.src) >> 16,
6205				drm_rect_width(&plane_state->uapi.dst),
6206				drm_rect_height(&plane_state->uapi.dst),
6207				fb ? fb->format : NULL,
6208				fb ? fb->modifier : 0,
6209				need_scaler);
6210
6211	if (ret || plane_state->scaler_id < 0)
6212		return ret;
6213
6214	/* check colorkey */
6215	if (plane_state->ckey.flags) {
6216		drm_dbg_kms(&dev_priv->drm,
6217			    "[PLANE:%d:%s] scaling with color key not allowed",
6218			    intel_plane->base.base.id,
6219			    intel_plane->base.name);
6220		return -EINVAL;
6221	}
6222
6223	/* Check src format */
6224	switch (fb->format->format) {
6225	case DRM_FORMAT_RGB565:
6226	case DRM_FORMAT_XBGR8888:
6227	case DRM_FORMAT_XRGB8888:
6228	case DRM_FORMAT_ABGR8888:
6229	case DRM_FORMAT_ARGB8888:
6230	case DRM_FORMAT_XRGB2101010:
6231	case DRM_FORMAT_XBGR2101010:
6232	case DRM_FORMAT_ARGB2101010:
6233	case DRM_FORMAT_ABGR2101010:
6234	case DRM_FORMAT_YUYV:
6235	case DRM_FORMAT_YVYU:
6236	case DRM_FORMAT_UYVY:
6237	case DRM_FORMAT_VYUY:
6238	case DRM_FORMAT_NV12:
6239	case DRM_FORMAT_XYUV8888:
6240	case DRM_FORMAT_P010:
6241	case DRM_FORMAT_P012:
6242	case DRM_FORMAT_P016:
6243	case DRM_FORMAT_Y210:
6244	case DRM_FORMAT_Y212:
6245	case DRM_FORMAT_Y216:
6246	case DRM_FORMAT_XVYU2101010:
6247	case DRM_FORMAT_XVYU12_16161616:
6248	case DRM_FORMAT_XVYU16161616:
6249		break;
6250	case DRM_FORMAT_XBGR16161616F:
6251	case DRM_FORMAT_ABGR16161616F:
6252	case DRM_FORMAT_XRGB16161616F:
6253	case DRM_FORMAT_ARGB16161616F:
6254		if (INTEL_GEN(dev_priv) >= 11)
6255			break;
6256		fallthrough;
6257	default:
6258		drm_dbg_kms(&dev_priv->drm,
6259			    "[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n",
6260			    intel_plane->base.base.id, intel_plane->base.name,
6261			    fb->base.id, fb->format->format);
6262		return -EINVAL;
6263	}
6264
6265	return 0;
6266}
6267
6268void skl_scaler_disable(const struct intel_crtc_state *old_crtc_state)
6269{
6270	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
6271	int i;
6272
6273	for (i = 0; i < crtc->num_scalers; i++)
6274		skl_detach_scaler(crtc, i);
6275}
6276
/*
 * Program the SKL+ pipe scaler registers for panel fitting, scaling the
 * full pipe source to the pch_pfit destination window. No-op unless
 * pch_pfit is enabled and a scaler has been assigned to the crtc.
 */
static void skl_pfit_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct intel_crtc_scaler_state *scaler_state =
		&crtc_state->scaler_state;
	/* Source rect in 16.16 fixed point: the whole pipe. */
	struct drm_rect src = {
		.x2 = crtc_state->pipe_src_w << 16,
		.y2 = crtc_state->pipe_src_h << 16,
	};
	const struct drm_rect *dst = &crtc_state->pch_pfit.dst;
	u16 uv_rgb_hphase, uv_rgb_vphase;
	enum pipe pipe = crtc->pipe;
	int width = drm_rect_width(dst);
	int height = drm_rect_height(dst);
	int x = dst->x1;
	int y = dst->y1;
	int hscale, vscale;
	unsigned long irqflags;
	int id;

	if (!crtc_state->pch_pfit.enabled)
		return;

	if (drm_WARN_ON(&dev_priv->drm,
			crtc_state->scaler_state.scaler_id < 0))
		return;

	hscale = drm_rect_calc_hscale(&src, dst, 0, INT_MAX);
	vscale = drm_rect_calc_vscale(&src, dst, 0, INT_MAX);

	/* sub == 1: no chroma subsampling for pipe scaling. */
	uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false);
	uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false);

	id = scaler_state->scaler_id;

	/* _fw register accesses below require holding the uncore lock. */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	intel_de_write_fw(dev_priv, SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
			  PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
	intel_de_write_fw(dev_priv, SKL_PS_VPHASE(pipe, id),
			  PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase));
	intel_de_write_fw(dev_priv, SKL_PS_HPHASE(pipe, id),
			  PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase));
	intel_de_write_fw(dev_priv, SKL_PS_WIN_POS(pipe, id),
			  x << 16 | y);
	intel_de_write_fw(dev_priv, SKL_PS_WIN_SZ(pipe, id),
			  width << 16 | height);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
6328
6329static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state)
6330{
6331	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6332	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6333	const struct drm_rect *dst = &crtc_state->pch_pfit.dst;
6334	enum pipe pipe = crtc->pipe;
6335	int width = drm_rect_width(dst);
6336	int height = drm_rect_height(dst);
6337	int x = dst->x1;
6338	int y = dst->y1;
6339
6340	if (!crtc_state->pch_pfit.enabled)
6341		return;
6342
6343	/* Force use of hard-coded filter coefficients
6344	 * as some pre-programmed values are broken,
6345	 * e.g. x201.
6346	 */
6347	if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
6348		intel_de_write(dev_priv, PF_CTL(pipe), PF_ENABLE |
6349			       PF_FILTER_MED_3x3 | PF_PIPE_SEL_IVB(pipe));
6350	else
6351		intel_de_write(dev_priv, PF_CTL(pipe), PF_ENABLE |
6352			       PF_FILTER_MED_3x3);
6353	intel_de_write(dev_priv, PF_WIN_POS(pipe), x << 16 | y);
6354	intel_de_write(dev_priv, PF_WIN_SZ(pipe), width << 16 | height);
6355}
6356
/*
 * Enable IPS for the given crtc state. On Broadwell this goes through
 * the pcode mailbox; on Haswell IPS_CTL is written directly. Must be
 * called after at least one non-cursor plane is enabled and a vblank
 * has passed (see the WARN below).
 */
void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!crtc_state->ips_enabled)
		return;

	/*
	 * We can only enable IPS after we enable a plane and wait for a vblank
	 * This function is called from post_plane_update, which is run after
	 * a vblank wait.
	 */
	drm_WARN_ON(dev, !(crtc_state->active_planes & ~BIT(PLANE_CURSOR)));

	if (IS_BROADWELL(dev_priv)) {
		drm_WARN_ON(dev, sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL,
							 IPS_ENABLE | IPS_PCODE_CONTROL));
		/* Quoting Art Runyan: "its not safe to expect any particular
		 * value in IPS_CTL bit 31 after enabling IPS through the
		 * mailbox." Moreover, the mailbox may return a bogus state,
		 * so we need to just enable it and continue on.
		 */
	} else {
		intel_de_write(dev_priv, IPS_CTL, IPS_ENABLE);
		/* The bit only becomes 1 in the next vblank, so this wait here
		 * is essentially intel_wait_for_vblank. If we don't have this
		 * and don't wait for vblanks until the end of crtc_enable, then
		 * the HW state readout code will complain that the expected
		 * IPS_CTL value is not the one we read. */
		if (intel_de_wait_for_set(dev_priv, IPS_CTL, IPS_ENABLE, 50))
			drm_err(&dev_priv->drm,
				"Timed out waiting for IPS enable\n");
	}
}
6393
/*
 * Disable IPS for the given crtc state. Broadwell uses the pcode
 * mailbox and polls IPS_CTL for completion; Haswell clears IPS_CTL
 * directly. Always ends with a vblank wait so planes can be safely
 * disabled afterwards.
 */
void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!crtc_state->ips_enabled)
		return;

	if (IS_BROADWELL(dev_priv)) {
		drm_WARN_ON(dev,
			    sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
		/*
		 * Wait for PCODE to finish disabling IPS. The BSpec specified
		 * 42ms timeout value leads to occasional timeouts so use 100ms
		 * instead.
		 */
		if (intel_de_wait_for_clear(dev_priv, IPS_CTL, IPS_ENABLE, 100))
			drm_err(&dev_priv->drm,
				"Timed out waiting for IPS disable\n");
	} else {
		intel_de_write(dev_priv, IPS_CTL, 0);
		intel_de_posting_read(dev_priv, IPS_CTL);
	}

	/* We need to wait for a vblank before we can disable the plane. */
	intel_wait_for_vblank(dev_priv, crtc->pipe);
}
6422
6423static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
6424{
6425	if (intel_crtc->overlay)
6426		(void) intel_overlay_switch_off(intel_crtc->overlay);
6427
6428	/* Let userspace switch the overlay on again. In most cases userspace
6429	 * has to recompute where to put it anyway.
6430	 */
6431}
6432
6433static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_state,
6434				       const struct intel_crtc_state *new_crtc_state)
6435{
6436	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
6437	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6438
6439	if (!old_crtc_state->ips_enabled)
6440		return false;
6441
6442	if (needs_modeset(new_crtc_state))
6443		return true;
6444
6445	/*
6446	 * Workaround : Do not read or write the pipe palette/gamma data while
6447	 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
6448	 *
6449	 * Disable IPS before we program the LUT.
6450	 */
6451	if (IS_HASWELL(dev_priv) &&
6452	    (new_crtc_state->uapi.color_mgmt_changed ||
6453	     new_crtc_state->update_pipe) &&
6454	    new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
6455		return true;
6456
6457	return !new_crtc_state->ips_enabled;
6458}
6459
6460static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_state,
6461				       const struct intel_crtc_state *new_crtc_state)
6462{
6463	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
6464	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6465
6466	if (!new_crtc_state->ips_enabled)
6467		return false;
6468
6469	if (needs_modeset(new_crtc_state))
6470		return true;
6471
6472	/*
6473	 * Workaround : Do not read or write the pipe palette/gamma data while
6474	 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
6475	 *
6476	 * Re-enable IPS after the LUT has been programmed.
6477	 */
6478	if (IS_HASWELL(dev_priv) &&
6479	    (new_crtc_state->uapi.color_mgmt_changed ||
6480	     new_crtc_state->update_pipe) &&
6481	    new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
6482		return true;
6483
6484	/*
6485	 * We can't read out IPS on broadwell, assume the worst and
6486	 * forcibly enable IPS on the first fastset.
6487	 */
6488	if (new_crtc_state->update_pipe && old_crtc_state->inherited)
6489		return true;
6490
6491	return !old_crtc_state->ips_enabled;
6492}
6493
6494static bool needs_nv12_wa(const struct intel_crtc_state *crtc_state)
6495{
6496	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
6497
6498	if (!crtc_state->nv12_planes)
6499		return false;
6500
6501	/* WA Display #0827: Gen9:all */
6502	if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv))
6503		return true;
6504
6505	return false;
6506}
6507
6508static bool needs_scalerclk_wa(const struct intel_crtc_state *crtc_state)
6509{
6510	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
6511
6512	/* Wa_2006604312:icl,ehl */
6513	if (crtc_state->scaler_state.scaler_users > 0 && IS_GEN(dev_priv, 11))
6514		return true;
6515
6516	return false;
6517}
6518
6519static bool planes_enabling(const struct intel_crtc_state *old_crtc_state,
6520			    const struct intel_crtc_state *new_crtc_state)
6521{
6522	return (!old_crtc_state->active_planes || needs_modeset(new_crtc_state)) &&
6523		new_crtc_state->active_planes;
6524}
6525
6526static bool planes_disabling(const struct intel_crtc_state *old_crtc_state,
6527			     const struct intel_crtc_state *new_crtc_state)
6528{
6529	return old_crtc_state->active_planes &&
6530		(!new_crtc_state->active_planes || needs_modeset(new_crtc_state));
6531}
6532
/*
 * Post-plane-update work for a crtc: flush frontbuffer state, program
 * final watermarks, re-enable IPS and tear down workarounds that are no
 * longer needed. Runs after the planes have been (re)programmed and the
 * vblank has passed.
 */
static void intel_post_plane_update(struct intel_atomic_state *state,
				    struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	enum pipe pipe = crtc->pipe;

	intel_frontbuffer_flip(dev_priv, new_crtc_state->fb_bits);

	if (new_crtc_state->update_wm_post && new_crtc_state->hw.active)
		intel_update_watermarks(crtc);

	if (hsw_post_update_enable_ips(old_crtc_state, new_crtc_state))
		hsw_enable_ips(new_crtc_state);

	intel_fbc_post_update(state, crtc);

	/* Display WA #0827: disable once no NV12 planes remain. */
	if (needs_nv12_wa(old_crtc_state) &&
	    !needs_nv12_wa(new_crtc_state))
		skl_wa_827(dev_priv, pipe, false);

	/* Wa_2006604312:icl,ehl: disable once no scalers are in use. */
	if (needs_scalerclk_wa(old_crtc_state) &&
	    !needs_scalerclk_wa(new_crtc_state))
		icl_wa_scalerclkgating(dev_priv, pipe, false);
}
6561
/*
 * Pre-plane-update work for a crtc: disable IPS, arm display
 * workarounds, disable self-refresh/LP watermarks where required, and
 * program intermediate watermarks. Runs before the planes are
 * (re)programmed for the new state; several steps need a vblank wait
 * to take effect before plane programming may proceed.
 */
static void intel_pre_plane_update(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	enum pipe pipe = crtc->pipe;

	if (hsw_pre_update_disable_ips(old_crtc_state, new_crtc_state))
		hsw_disable_ips(old_crtc_state);

	if (intel_fbc_pre_update(state, crtc))
		intel_wait_for_vblank(dev_priv, pipe);

	/* Display WA 827 */
	if (!needs_nv12_wa(old_crtc_state) &&
	    needs_nv12_wa(new_crtc_state))
		skl_wa_827(dev_priv, pipe, true);

	/* Wa_2006604312:icl,ehl */
	if (!needs_scalerclk_wa(old_crtc_state) &&
	    needs_scalerclk_wa(new_crtc_state))
		icl_wa_scalerclkgating(dev_priv, pipe, true);

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	if (HAS_GMCH(dev_priv) && old_crtc_state->hw.active &&
	    new_crtc_state->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
		intel_wait_for_vblank(dev_priv, pipe);

	/*
	 * IVB workaround: must disable low power watermarks for at least
	 * one frame before enabling scaling.  LP watermarks can be re-enabled
	 * when scaling is disabled.
	 *
	 * WaCxSRDisabledForSpriteScaling:ivb
	 */
	if (old_crtc_state->hw.active &&
	    new_crtc_state->disable_lp_wm && ilk_disable_lp_wm(dev_priv))
		intel_wait_for_vblank(dev_priv, pipe);

	/*
	 * If we're doing a modeset we don't need to do any
	 * pre-vblank watermark programming here.
	 */
	if (!needs_modeset(new_crtc_state)) {
		/*
		 * For platforms that support atomic watermarks, program the
		 * 'intermediate' watermarks immediately.  On pre-gen9 platforms, these
		 * will be the intermediate values that are safe for both pre- and
		 * post- vblank; when vblank happens, the 'active' values will be set
		 * to the final 'target' values and we'll do this again to get the
		 * optimal watermarks.  For gen9+ platforms, the values we program here
		 * will be the final target values which will get automatically latched
		 * at vblank time; no further programming will be necessary.
		 *
		 * If a platform hasn't been transitioned to atomic watermarks yet,
		 * we'll continue to update watermarks the old way, if flags tell
		 * us to.
		 */
		if (dev_priv->display.initial_watermarks)
			dev_priv->display.initial_watermarks(state, crtc);
		else if (new_crtc_state->update_wm_pre)
			intel_update_watermarks(crtc);
	}

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So disable underrun reporting before all the planes get disabled.
	 *
	 * We do this after .initial_watermarks() so that we have a
	 * chance of catching underruns with the intermediate watermarks
	 * vs. the old plane configuration.
	 */
	if (IS_GEN(dev_priv, 2) && planes_disabling(old_crtc_state, new_crtc_state))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
}
6648
6649static void intel_crtc_disable_planes(struct intel_atomic_state *state,
6650				      struct intel_crtc *crtc)
6651{
6652	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6653	const struct intel_crtc_state *new_crtc_state =
6654		intel_atomic_get_new_crtc_state(state, crtc);
6655	unsigned int update_mask = new_crtc_state->update_planes;
6656	const struct intel_plane_state *old_plane_state;
6657	struct intel_plane *plane;
6658	unsigned fb_bits = 0;
6659	int i;
6660
6661	intel_crtc_dpms_overlay_disable(crtc);
6662
6663	for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
6664		if (crtc->pipe != plane->pipe ||
6665		    !(update_mask & BIT(plane->id)))
6666			continue;
6667
6668		intel_disable_plane(plane, new_crtc_state);
6669
6670		if (old_plane_state->uapi.visible)
6671			fb_bits |= plane->frontbuffer_bit;
6672	}
6673
6674	intel_frontbuffer_flip(dev_priv, fb_bits);
6675}
6676
6677/*
6678 * intel_connector_primary_encoder - get the primary encoder for a connector
6679 * @connector: connector for which to return the encoder
6680 *
6681 * Returns the primary encoder for a connector. There is a 1:1 mapping from
6682 * all connectors to their encoder, except for DP-MST connectors which have
6683 * both a virtual and a primary encoder. These DP-MST primary encoders can be
6684 * pointed to by as many DP-MST connectors as there are pipes.
6685 */
6686static struct intel_encoder *
6687intel_connector_primary_encoder(struct intel_connector *connector)
6688{
6689	struct intel_encoder *encoder;
6690
6691	if (connector->mst_port)
6692		return &dp_to_dig_port(connector->mst_port)->base;
6693
6694	encoder = intel_attached_encoder(connector);
6695	drm_WARN_ON(connector->base.dev, !encoder);
6696
6697	return encoder;
6698}
6699
6700static void intel_encoders_update_prepare(struct intel_atomic_state *state)
6701{
6702	struct drm_connector_state *new_conn_state;
6703	struct drm_connector *connector;
6704	int i;
6705
6706	for_each_new_connector_in_state(&state->base, connector, new_conn_state,
6707					i) {
6708		struct intel_connector *intel_connector;
6709		struct intel_encoder *encoder;
6710		struct intel_crtc *crtc;
6711
6712		if (!intel_connector_needs_modeset(state, connector))
6713			continue;
6714
6715		intel_connector = to_intel_connector(connector);
6716		encoder = intel_connector_primary_encoder(intel_connector);
6717		if (!encoder->update_prepare)
6718			continue;
6719
6720		crtc = new_conn_state->crtc ?
6721			to_intel_crtc(new_conn_state->crtc) : NULL;
6722		encoder->update_prepare(state, encoder, crtc);
6723	}
6724}
6725
6726static void intel_encoders_update_complete(struct intel_atomic_state *state)
6727{
6728	struct drm_connector_state *new_conn_state;
6729	struct drm_connector *connector;
6730	int i;
6731
6732	for_each_new_connector_in_state(&state->base, connector, new_conn_state,
6733					i) {
6734		struct intel_connector *intel_connector;
6735		struct intel_encoder *encoder;
6736		struct intel_crtc *crtc;
6737
6738		if (!intel_connector_needs_modeset(state, connector))
6739			continue;
6740
6741		intel_connector = to_intel_connector(connector);
6742		encoder = intel_connector_primary_encoder(intel_connector);
6743		if (!encoder->update_complete)
6744			continue;
6745
6746		crtc = new_conn_state->crtc ?
6747			to_intel_crtc(new_conn_state->crtc) : NULL;
6748		encoder->update_complete(state, encoder, crtc);
6749	}
6750}
6751
6752static void intel_encoders_pre_pll_enable(struct intel_atomic_state *state,
6753					  struct intel_crtc *crtc)
6754{
6755	const struct intel_crtc_state *crtc_state =
6756		intel_atomic_get_new_crtc_state(state, crtc);
6757	const struct drm_connector_state *conn_state;
6758	struct drm_connector *conn;
6759	int i;
6760
6761	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
6762		struct intel_encoder *encoder =
6763			to_intel_encoder(conn_state->best_encoder);
6764
6765		if (conn_state->crtc != &crtc->base)
6766			continue;
6767
6768		if (encoder->pre_pll_enable)
6769			encoder->pre_pll_enable(state, encoder,
6770						crtc_state, conn_state);
6771	}
6772}
6773
6774static void intel_encoders_pre_enable(struct intel_atomic_state *state,
6775				      struct intel_crtc *crtc)
6776{
6777	const struct intel_crtc_state *crtc_state =
6778		intel_atomic_get_new_crtc_state(state, crtc);
6779	const struct drm_connector_state *conn_state;
6780	struct drm_connector *conn;
6781	int i;
6782
6783	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
6784		struct intel_encoder *encoder =
6785			to_intel_encoder(conn_state->best_encoder);
6786
6787		if (conn_state->crtc != &crtc->base)
6788			continue;
6789
6790		if (encoder->pre_enable)
6791			encoder->pre_enable(state, encoder,
6792					    crtc_state, conn_state);
6793	}
6794}
6795
6796static void intel_encoders_enable(struct intel_atomic_state *state,
6797				  struct intel_crtc *crtc)
6798{
6799	const struct intel_crtc_state *crtc_state =
6800		intel_atomic_get_new_crtc_state(state, crtc);
6801	const struct drm_connector_state *conn_state;
6802	struct drm_connector *conn;
6803	int i;
6804
6805	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
6806		struct intel_encoder *encoder =
6807			to_intel_encoder(conn_state->best_encoder);
6808
6809		if (conn_state->crtc != &crtc->base)
6810			continue;
6811
6812		if (encoder->enable)
6813			encoder->enable(state, encoder,
6814					crtc_state, conn_state);
6815		intel_opregion_notify_encoder(encoder, true);
6816	}
6817}
6818
6819static void intel_encoders_disable(struct intel_atomic_state *state,
6820				   struct intel_crtc *crtc)
6821{
6822	const struct intel_crtc_state *old_crtc_state =
6823		intel_atomic_get_old_crtc_state(state, crtc);
6824	const struct drm_connector_state *old_conn_state;
6825	struct drm_connector *conn;
6826	int i;
6827
6828	for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
6829		struct intel_encoder *encoder =
6830			to_intel_encoder(old_conn_state->best_encoder);
6831
6832		if (old_conn_state->crtc != &crtc->base)
6833			continue;
6834
6835		intel_opregion_notify_encoder(encoder, false);
6836		if (encoder->disable)
6837			encoder->disable(state, encoder,
6838					 old_crtc_state, old_conn_state);
6839	}
6840}
6841
6842static void intel_encoders_post_disable(struct intel_atomic_state *state,
6843					struct intel_crtc *crtc)
6844{
6845	const struct intel_crtc_state *old_crtc_state =
6846		intel_atomic_get_old_crtc_state(state, crtc);
6847	const struct drm_connector_state *old_conn_state;
6848	struct drm_connector *conn;
6849	int i;
6850
6851	for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
6852		struct intel_encoder *encoder =
6853			to_intel_encoder(old_conn_state->best_encoder);
6854
6855		if (old_conn_state->crtc != &crtc->base)
6856			continue;
6857
6858		if (encoder->post_disable)
6859			encoder->post_disable(state, encoder,
6860					      old_crtc_state, old_conn_state);
6861	}
6862}
6863
6864static void intel_encoders_post_pll_disable(struct intel_atomic_state *state,
6865					    struct intel_crtc *crtc)
6866{
6867	const struct intel_crtc_state *old_crtc_state =
6868		intel_atomic_get_old_crtc_state(state, crtc);
6869	const struct drm_connector_state *old_conn_state;
6870	struct drm_connector *conn;
6871	int i;
6872
6873	for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
6874		struct intel_encoder *encoder =
6875			to_intel_encoder(old_conn_state->best_encoder);
6876
6877		if (old_conn_state->crtc != &crtc->base)
6878			continue;
6879
6880		if (encoder->post_pll_disable)
6881			encoder->post_pll_disable(state, encoder,
6882						  old_crtc_state, old_conn_state);
6883	}
6884}
6885
6886static void intel_encoders_update_pipe(struct intel_atomic_state *state,
6887				       struct intel_crtc *crtc)
6888{
6889	const struct intel_crtc_state *crtc_state =
6890		intel_atomic_get_new_crtc_state(state, crtc);
6891	const struct drm_connector_state *conn_state;
6892	struct drm_connector *conn;
6893	int i;
6894
6895	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
6896		struct intel_encoder *encoder =
6897			to_intel_encoder(conn_state->best_encoder);
6898
6899		if (conn_state->crtc != &crtc->base)
6900			continue;
6901
6902		if (encoder->update_pipe)
6903			encoder->update_pipe(state, encoder,
6904					     crtc_state, conn_state);
6905	}
6906}
6907
6908static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_state)
6909{
6910	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6911	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
6912
6913	plane->disable_plane(plane, crtc_state);
6914}
6915
/*
 * Modeset enable sequence for ILK-style (PCH split) pipes: programs the
 * pipe/transcoder from the new crtc state, runs the encoder hooks, brings
 * up FDI/PCH where a PCH encoder is present, and re-enables FIFO underrun
 * reporting once the pipe has settled.  The statement ordering below is
 * hardware-mandated; do not reorder.
 */
static void ilk_crtc_enable(struct intel_atomic_state *state,
			    struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Enabling an already-active pipe means our state tracking is broken. */
	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
		return;

	/*
	 * Sometimes spurious CPU pipe underruns happen during FDI
	 * training, at least with VGA+HDMI cloning. Suppress them.
	 *
	 * On ILK we get an occasional spurious CPU pipe underruns
	 * between eDP port A enable and vdd enable. Also PCH port
	 * enable seems to result in the occasional CPU pipe underrun.
	 *
	 * Spurious PCH underruns also occur during PCH enabling.
	 */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

	if (new_crtc_state->has_pch_encoder)
		intel_prepare_shared_dpll(new_crtc_state);

	if (intel_crtc_has_dp_encoder(new_crtc_state))
		intel_dp_set_m_n(new_crtc_state, M1_N1);

	intel_set_pipe_timings(new_crtc_state);
	intel_set_pipe_src_size(new_crtc_state);

	if (new_crtc_state->has_pch_encoder)
		intel_cpu_transcoder_set_m_n(new_crtc_state,
					     &new_crtc_state->fdi_m_n, NULL);

	ilk_set_pipeconf(new_crtc_state);

	crtc->active = true;

	intel_encoders_pre_enable(state, crtc);

	if (new_crtc_state->has_pch_encoder) {
		/* Note: FDI PLL enabling _must_ be done before we enable the
		 * cpu pipes, hence this is separate from all the other fdi/pch
		 * enabling. */
		ilk_fdi_pll_enable(new_crtc_state);
	} else {
		assert_fdi_tx_disabled(dev_priv, pipe);
		assert_fdi_rx_disabled(dev_priv, pipe);
	}

	ilk_pfit_enable(new_crtc_state);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_color_load_luts(new_crtc_state);
	intel_color_commit(new_crtc_state);
	/* update DSPCNTR to configure gamma for pipe bottom color */
	intel_disable_primary_plane(new_crtc_state);

	if (dev_priv->display.initial_watermarks)
		dev_priv->display.initial_watermarks(state, crtc);
	intel_enable_pipe(new_crtc_state);

	if (new_crtc_state->has_pch_encoder)
		ilk_pch_enable(state, new_crtc_state);

	intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);

	if (HAS_PCH_CPT(dev_priv))
		cpt_verify_modeset(dev_priv, pipe);

	/*
	 * Must wait for vblank to avoid spurious PCH FIFO underruns.
	 * And a second vblank wait is needed at least on ILK with
	 * some interlaced HDMI modes. Let's do the double wait always
	 * in case there are more corner cases we don't know about.
	 */
	if (new_crtc_state->has_pch_encoder) {
		intel_wait_for_vblank(dev_priv, pipe);
		intel_wait_for_vblank(dev_priv, pipe);
	}
	/* Pipe is up; unmask the underrun interrupts suppressed above. */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
7007
7008/* IPS only exists on ULT machines and is tied to pipe A. */
7009static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
7010{
7011	return HAS_IPS(to_i915(crtc->base.dev)) && crtc->pipe == PIPE_A;
7012}
7013
7014static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv,
7015					    enum pipe pipe, bool apply)
7016{
7017	u32 val = intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe));
7018	u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS;
7019
7020	if (apply)
7021		val |= mask;
7022	else
7023		val &= ~mask;
7024
7025	intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe), val);
7026}
7027
7028static void icl_pipe_mbus_enable(struct intel_crtc *crtc)
7029{
7030	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7031	enum pipe pipe = crtc->pipe;
7032	u32 val;
7033
7034	val = MBUS_DBOX_A_CREDIT(2);
7035
7036	if (INTEL_GEN(dev_priv) >= 12) {
7037		val |= MBUS_DBOX_BW_CREDIT(2);
7038		val |= MBUS_DBOX_B_CREDIT(12);
7039	} else {
7040		val |= MBUS_DBOX_BW_CREDIT(1);
7041		val |= MBUS_DBOX_B_CREDIT(8);
7042	}
7043
7044	intel_de_write(dev_priv, PIPE_MBUS_DBOX_CTL(pipe), val);
7045}
7046
7047static void hsw_set_linetime_wm(const struct intel_crtc_state *crtc_state)
7048{
7049	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
7050	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7051
7052	intel_de_write(dev_priv, WM_LINETIME(crtc->pipe),
7053		       HSW_LINETIME(crtc_state->linetime) |
7054		       HSW_IPS_LINETIME(crtc_state->ips_linetime));
7055}
7056
7057static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state)
7058{
7059	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
7060	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7061	i915_reg_t reg = CHICKEN_TRANS(crtc_state->cpu_transcoder);
7062	u32 val;
7063
7064	val = intel_de_read(dev_priv, reg);
7065	val &= ~HSW_FRAME_START_DELAY_MASK;
7066	val |= HSW_FRAME_START_DELAY(0);
7067	intel_de_write(dev_priv, reg, val);
7068}
7069
/*
 * Modeset enable sequence for HSW-style (DDI) pipes: enables the shared
 * DPLL, programs the pipe/transcoder, applies the GLK/CNL scaler clock
 * gating workaround around encoder enabling, and handles the HSW
 * workaround-pipe double vblank wait at the end.  The ordering below is
 * hardware-mandated; do not reorder.
 */
static void hsw_crtc_enable(struct intel_atomic_state *state,
			    struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe, hsw_workaround_pipe;
	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
	bool psl_clkgate_wa;

	/* Enabling an already-active pipe means our state tracking is broken. */
	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
		return;

	intel_encoders_pre_pll_enable(state, crtc);

	if (new_crtc_state->shared_dpll)
		intel_enable_shared_dpll(new_crtc_state);

	intel_encoders_pre_enable(state, crtc);

	/* DSI transcoders program their own timings via the DSI encoder. */
	if (!transcoder_is_dsi(cpu_transcoder))
		intel_set_pipe_timings(new_crtc_state);

	intel_set_pipe_src_size(new_crtc_state);

	if (cpu_transcoder != TRANSCODER_EDP &&
	    !transcoder_is_dsi(cpu_transcoder))
		intel_de_write(dev_priv, PIPE_MULT(cpu_transcoder),
			       new_crtc_state->pixel_multiplier - 1);

	if (new_crtc_state->has_pch_encoder)
		intel_cpu_transcoder_set_m_n(new_crtc_state,
					     &new_crtc_state->fdi_m_n, NULL);

	if (!transcoder_is_dsi(cpu_transcoder)) {
		hsw_set_frame_start_delay(new_crtc_state);
		hsw_set_pipeconf(new_crtc_state);
	}

	if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
		bdw_set_pipemisc(new_crtc_state);

	crtc->active = true;

	/* Display WA #1180: WaDisableScalarClockGating: glk, cnl */
	psl_clkgate_wa = (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) &&
		new_crtc_state->pch_pfit.enabled;
	if (psl_clkgate_wa)
		glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true);

	if (INTEL_GEN(dev_priv) >= 9)
		skl_pfit_enable(new_crtc_state);
	else
		ilk_pfit_enable(new_crtc_state);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_color_load_luts(new_crtc_state);
	intel_color_commit(new_crtc_state);
	/* update DSPCNTR to configure gamma/csc for pipe bottom color */
	if (INTEL_GEN(dev_priv) < 9)
		intel_disable_primary_plane(new_crtc_state);

	hsw_set_linetime_wm(new_crtc_state);

	if (INTEL_GEN(dev_priv) >= 11)
		icl_set_pipe_chicken(crtc);

	if (dev_priv->display.initial_watermarks)
		dev_priv->display.initial_watermarks(state, crtc);

	if (INTEL_GEN(dev_priv) >= 11)
		icl_pipe_mbus_enable(crtc);

	intel_encoders_enable(state, crtc);

	/* WA #1180: keep gating disabled until one vblank after enable. */
	if (psl_clkgate_wa) {
		intel_wait_for_vblank(dev_priv, pipe);
		glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, false);
	}

	/* If we change the relative order between pipe/planes enabling, we need
	 * to change the workaround. */
	hsw_workaround_pipe = new_crtc_state->hsw_workaround_pipe;
	if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
		intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
		intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
	}
}
7161
/*
 * Disable the ILK-style panel fitter for @old_crtc_state's pipe by
 * clearing its control, window position and window size registers.
 */
void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* To avoid upsetting the power well on haswell only disable the pfit if
	 * it's in use. The hw state code will make sure we get this right. */
	if (!old_crtc_state->pch_pfit.enabled)
		return;

	intel_de_write(dev_priv, PF_CTL(pipe), 0);
	intel_de_write(dev_priv, PF_WIN_POS(pipe), 0);
	intel_de_write(dev_priv, PF_WIN_SZ(pipe), 0);
}
7177
/*
 * Modeset disable sequence for ILK-style (PCH split) pipes: runs the
 * encoder hooks, shuts down the pipe/pfit/FDI, and on PCH-driven pipes
 * tears down the PCH transcoder and its DPLL routing.  The ordering
 * below is hardware-mandated; do not reorder.
 */
static void ilk_crtc_disable(struct intel_atomic_state *state,
			     struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/*
	 * Sometimes spurious CPU pipe underruns happen when the
	 * pipe is already disabled, but FDI RX/TX is still enabled.
	 * Happens at least with VGA+HDMI cloning. Suppress them.
	 */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

	intel_encoders_disable(state, crtc);

	intel_crtc_vblank_off(old_crtc_state);

	intel_disable_pipe(old_crtc_state);

	ilk_pfit_disable(old_crtc_state);

	if (old_crtc_state->has_pch_encoder)
		ilk_fdi_disable(crtc);

	intel_encoders_post_disable(state, crtc);

	if (old_crtc_state->has_pch_encoder) {
		ilk_disable_pch_transcoder(dev_priv, pipe);

		/* On CPT the DP/DPLL transcoder routing must be cleared too. */
		if (HAS_PCH_CPT(dev_priv)) {
			i915_reg_t reg;
			u32 temp;

			/* disable TRANS_DP_CTL */
			reg = TRANS_DP_CTL(pipe);
			temp = intel_de_read(dev_priv, reg);
			temp &= ~(TRANS_DP_OUTPUT_ENABLE |
				  TRANS_DP_PORT_SEL_MASK);
			temp |= TRANS_DP_PORT_SEL_NONE;
			intel_de_write(dev_priv, reg, temp);

			/* disable DPLL_SEL */
			temp = intel_de_read(dev_priv, PCH_DPLL_SEL);
			temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
			intel_de_write(dev_priv, PCH_DPLL_SEL, temp);
		}

		ilk_fdi_pll_disable(crtc);
	}

	/* Pipe is down; unmask the underrun interrupts suppressed above. */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
7234
/*
 * Modeset disable sequence for HSW-style (DDI) pipes.  Unlike the
 * ILK/i9xx paths, everything beyond the encoder hooks is done inside
 * the encoders themselves.
 */
static void hsw_crtc_disable(struct intel_atomic_state *state,
			     struct intel_crtc *crtc)
{
	/*
	 * FIXME collapse everything to one hook.
	 * Need care with mst->ddi interactions.
	 */
	intel_encoders_disable(state, crtc);
	intel_encoders_post_disable(state, crtc);
}
7245
/*
 * Enable the GMCH panel fitter with the precomputed ratios and control
 * value from @crtc_state.  Must run while the pipe is still disabled,
 * per the asserts below.
 */
static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/* A zero control value means the pfit is not used for this mode. */
	if (!crtc_state->gmch_pfit.control)
		return;

	/*
	 * The panel fitter should only be adjusted whilst the pipe is disabled,
	 * according to register description and PRM.
	 */
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, PFIT_CONTROL) & PFIT_ENABLE);
	assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder);

	intel_de_write(dev_priv, PFIT_PGM_RATIOS,
		       crtc_state->gmch_pfit.pgm_ratios);
	intel_de_write(dev_priv, PFIT_CONTROL, crtc_state->gmch_pfit.control);

	/* Border color in case we don't scale up to the full screen. Black by
	 * default, change to something else for debugging. */
	intel_de_write(dev_priv, BCLRPAT(crtc->pipe), 0);
}
7270
7271bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy)
7272{
7273	if (phy == PHY_NONE)
7274		return false;
7275	else if (IS_ROCKETLAKE(dev_priv))
7276		return phy <= PHY_D;
7277	else if (IS_ELKHARTLAKE(dev_priv))
7278		return phy <= PHY_C;
7279	else if (INTEL_GEN(dev_priv) >= 11)
7280		return phy <= PHY_B;
7281	else
7282		return false;
7283}
7284
7285bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy)
7286{
7287	if (IS_ROCKETLAKE(dev_priv))
7288		return false;
7289	else if (INTEL_GEN(dev_priv) >= 12)
7290		return phy >= PHY_D && phy <= PHY_I;
7291	else if (INTEL_GEN(dev_priv) >= 11 && !IS_ELKHARTLAKE(dev_priv))
7292		return phy >= PHY_C && phy <= PHY_F;
7293	else
7294		return false;
7295}
7296
7297enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port)
7298{
7299	if (IS_ROCKETLAKE(i915) && port >= PORT_D)
7300		return (enum phy)port - 1;
7301	else if (IS_ELKHARTLAKE(i915) && port == PORT_D)
7302		return PHY_A;
7303
7304	return (enum phy)port;
7305}
7306
7307enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
7308{
7309	if (!intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv, port)))
7310		return PORT_TC_NONE;
7311
7312	if (INTEL_GEN(dev_priv) >= 12)
7313		return port - PORT_D;
7314
7315	return port - PORT_C;
7316}
7317
/*
 * Map a DDI port to the power domain covering its lanes.  Unknown ports
 * trigger MISSING_CASE() and fall back to POWER_DOMAIN_PORT_OTHER.
 */
enum intel_display_power_domain intel_port_to_power_domain(enum port port)
{
	switch (port) {
	case PORT_A:
		return POWER_DOMAIN_PORT_DDI_A_LANES;
	case PORT_B:
		return POWER_DOMAIN_PORT_DDI_B_LANES;
	case PORT_C:
		return POWER_DOMAIN_PORT_DDI_C_LANES;
	case PORT_D:
		return POWER_DOMAIN_PORT_DDI_D_LANES;
	case PORT_E:
		return POWER_DOMAIN_PORT_DDI_E_LANES;
	case PORT_F:
		return POWER_DOMAIN_PORT_DDI_F_LANES;
	case PORT_G:
		return POWER_DOMAIN_PORT_DDI_G_LANES;
	case PORT_H:
		return POWER_DOMAIN_PORT_DDI_H_LANES;
	case PORT_I:
		return POWER_DOMAIN_PORT_DDI_I_LANES;
	default:
		MISSING_CASE(port);
		return POWER_DOMAIN_PORT_OTHER;
	}
}
7344
/*
 * Return the power domain for @dig_port's AUX channel.  Type-C ports in
 * TBT-alt mode use the dedicated *_TBT AUX domains; all other ports are
 * handled by intel_legacy_aux_to_power_domain().
 */
enum intel_display_power_domain
intel_aux_power_domain(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);

	if (intel_phy_is_tc(dev_priv, phy) &&
	    dig_port->tc_mode == TC_PORT_TBT_ALT) {
		switch (dig_port->aux_ch) {
		case AUX_CH_C:
			return POWER_DOMAIN_AUX_C_TBT;
		case AUX_CH_D:
			return POWER_DOMAIN_AUX_D_TBT;
		case AUX_CH_E:
			return POWER_DOMAIN_AUX_E_TBT;
		case AUX_CH_F:
			return POWER_DOMAIN_AUX_F_TBT;
		case AUX_CH_G:
			return POWER_DOMAIN_AUX_G_TBT;
		case AUX_CH_H:
			return POWER_DOMAIN_AUX_H_TBT;
		case AUX_CH_I:
			return POWER_DOMAIN_AUX_I_TBT;
		default:
			/* Unknown AUX CH: warn and return a safe default. */
			MISSING_CASE(dig_port->aux_ch);
			return POWER_DOMAIN_AUX_C_TBT;
		}
	}

	return intel_legacy_aux_to_power_domain(dig_port->aux_ch);
}
7376
7377/*
7378 * Converts aux_ch to power_domain without caring about TBT ports for that use
7379 * intel_aux_power_domain()
7380 */
enum intel_display_power_domain
intel_legacy_aux_to_power_domain(enum aux_ch aux_ch)
{
	switch (aux_ch) {
	case AUX_CH_A:
		return POWER_DOMAIN_AUX_A;
	case AUX_CH_B:
		return POWER_DOMAIN_AUX_B;
	case AUX_CH_C:
		return POWER_DOMAIN_AUX_C;
	case AUX_CH_D:
		return POWER_DOMAIN_AUX_D;
	case AUX_CH_E:
		return POWER_DOMAIN_AUX_E;
	case AUX_CH_F:
		return POWER_DOMAIN_AUX_F;
	case AUX_CH_G:
		return POWER_DOMAIN_AUX_G;
	case AUX_CH_H:
		return POWER_DOMAIN_AUX_H;
	case AUX_CH_I:
		return POWER_DOMAIN_AUX_I;
	default:
		/* Unknown AUX CH: warn and return a safe default. */
		MISSING_CASE(aux_ch);
		return POWER_DOMAIN_AUX_A;
	}
}
7408
7409static u64 get_crtc_power_domains(struct intel_crtc_state *crtc_state)
7410{
7411	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
7412	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7413	struct drm_encoder *encoder;
7414	enum pipe pipe = crtc->pipe;
7415	u64 mask;
7416	enum transcoder transcoder = crtc_state->cpu_transcoder;
7417
7418	if (!crtc_state->hw.active)
7419		return 0;
7420
7421	mask = BIT_ULL(POWER_DOMAIN_PIPE(pipe));
7422	mask |= BIT_ULL(POWER_DOMAIN_TRANSCODER(transcoder));
7423	if (crtc_state->pch_pfit.enabled ||
7424	    crtc_state->pch_pfit.force_thru)
7425		mask |= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
7426
7427	drm_for_each_encoder_mask(encoder, &dev_priv->drm,
7428				  crtc_state->uapi.encoder_mask) {
7429		struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
7430
7431		mask |= BIT_ULL(intel_encoder->power_domain);
7432	}
7433
7434	if (HAS_DDI(dev_priv) && crtc_state->has_audio)
7435		mask |= BIT_ULL(POWER_DOMAIN_AUDIO);
7436
7437	if (crtc_state->shared_dpll)
7438		mask |= BIT_ULL(POWER_DOMAIN_DISPLAY_CORE);
7439
7440	return mask;
7441}
7442
7443static u64
7444modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state)
7445{
7446	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
7447	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7448	enum intel_display_power_domain domain;
7449	u64 domains, new_domains, old_domains;
7450
7451	old_domains = crtc->enabled_power_domains;
7452	crtc->enabled_power_domains = new_domains =
7453		get_crtc_power_domains(crtc_state);
7454
7455	domains = new_domains & ~old_domains;
7456
7457	for_each_power_domain(domain, domains)
7458		intel_display_power_get(dev_priv, domain);
7459
7460	return old_domains & ~new_domains;
7461}
7462
7463static void modeset_put_power_domains(struct drm_i915_private *dev_priv,
7464				      u64 domains)
7465{
7466	enum intel_display_power_domain domain;
7467
7468	for_each_power_domain(domain, domains)
7469		intel_display_power_put_unchecked(dev_priv, domain);
7470}
7471
/*
 * Modeset enable sequence for VLV/CHV pipes: programs the pipe state,
 * enables the DPLL via the platform-specific helpers, then runs pfit/LUT
 * setup and brings up the pipe.  The ordering below is hardware-mandated;
 * do not reorder.
 */
static void valleyview_crtc_enable(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Enabling an already-active pipe means our state tracking is broken. */
	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
		return;

	if (intel_crtc_has_dp_encoder(new_crtc_state))
		intel_dp_set_m_n(new_crtc_state, M1_N1);

	intel_set_pipe_timings(new_crtc_state);
	intel_set_pipe_src_size(new_crtc_state);

	/* CHV pipe B: select legacy blending and a zeroed canvas. */
	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
		intel_de_write(dev_priv, CHV_BLEND(pipe), CHV_BLEND_LEGACY);
		intel_de_write(dev_priv, CHV_CANVAS(pipe), 0);
	}

	i9xx_set_pipeconf(new_crtc_state);

	crtc->active = true;

	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	intel_encoders_pre_pll_enable(state, crtc);

	if (IS_CHERRYVIEW(dev_priv)) {
		chv_prepare_pll(crtc, new_crtc_state);
		chv_enable_pll(crtc, new_crtc_state);
	} else {
		vlv_prepare_pll(crtc, new_crtc_state);
		vlv_enable_pll(crtc, new_crtc_state);
	}

	intel_encoders_pre_enable(state, crtc);

	i9xx_pfit_enable(new_crtc_state);

	intel_color_load_luts(new_crtc_state);
	intel_color_commit(new_crtc_state);
	/* update DSPCNTR to configure gamma for pipe bottom color */
	intel_disable_primary_plane(new_crtc_state);

	dev_priv->display.initial_watermarks(state, crtc);
	intel_enable_pipe(new_crtc_state);

	intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);
}
7526
7527static void i9xx_set_pll_dividers(const struct intel_crtc_state *crtc_state)
7528{
7529	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
7530	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7531
7532	intel_de_write(dev_priv, FP0(crtc->pipe),
7533		       crtc_state->dpll_hw_state.fp0);
7534	intel_de_write(dev_priv, FP1(crtc->pipe),
7535		       crtc_state->dpll_hw_state.fp1);
7536}
7537
/*
 * Modeset enable sequence for gen2-4 (i9xx) pipes: programs the PLL
 * dividers and pipe state, enables the PLL and pfit, and brings up the
 * pipe.  The ordering below is hardware-mandated; do not reorder.
 */
static void i9xx_crtc_enable(struct intel_atomic_state *state,
			     struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Enabling an already-active pipe means our state tracking is broken. */
	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
		return;

	i9xx_set_pll_dividers(new_crtc_state);

	if (intel_crtc_has_dp_encoder(new_crtc_state))
		intel_dp_set_m_n(new_crtc_state, M1_N1);

	intel_set_pipe_timings(new_crtc_state);
	intel_set_pipe_src_size(new_crtc_state);

	i9xx_set_pipeconf(new_crtc_state);

	crtc->active = true;

	/* Gen2 has no CPU FIFO underrun reporting to enable. */
	if (!IS_GEN(dev_priv, 2))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	intel_encoders_pre_enable(state, crtc);

	i9xx_enable_pll(crtc, new_crtc_state);

	i9xx_pfit_enable(new_crtc_state);

	intel_color_load_luts(new_crtc_state);
	intel_color_commit(new_crtc_state);
	/* update DSPCNTR to configure gamma for pipe bottom color */
	intel_disable_primary_plane(new_crtc_state);

	if (dev_priv->display.initial_watermarks)
		dev_priv->display.initial_watermarks(state, crtc);
	else
		intel_update_watermarks(crtc);
	intel_enable_pipe(new_crtc_state);

	intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);

	/* prevents spurious underruns */
	if (IS_GEN(dev_priv, 2))
		intel_wait_for_vblank(dev_priv, pipe);
}
7589
/*
 * Turn off the GMCH panel fitter if the old crtc state had it enabled.
 * Must run while the pipe is disabled, per the assert below.
 */
static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/* Nothing to do if the pfit wasn't in use. */
	if (!old_crtc_state->gmch_pfit.control)
		return;

	assert_pipe_disabled(dev_priv, old_crtc_state->cpu_transcoder);

	drm_dbg_kms(&dev_priv->drm, "disabling pfit, current: 0x%08x\n",
		    intel_de_read(dev_priv, PFIT_CONTROL));
	intel_de_write(dev_priv, PFIT_CONTROL, 0);
}
7604
/*
 * Modeset disable sequence for gen2-4 (i9xx) pipes: runs the encoder
 * hooks, shuts down the pipe and pfit, then disables the DPLL via the
 * appropriate platform helper.  The ordering below is hardware-mandated;
 * do not reorder.
 */
static void i9xx_crtc_disable(struct intel_atomic_state *state,
			      struct intel_crtc *crtc)
{
	struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/*
	 * On gen2 planes are double buffered but the pipe isn't, so we must
	 * wait for planes to fully turn off before disabling the pipe.
	 */
	if (IS_GEN(dev_priv, 2))
		intel_wait_for_vblank(dev_priv, pipe);

	intel_encoders_disable(state, crtc);

	intel_crtc_vblank_off(old_crtc_state);

	intel_disable_pipe(old_crtc_state);

	i9xx_pfit_disable(old_crtc_state);

	intel_encoders_post_disable(state, crtc);

	/* DSI keeps its PLL under the encoder's control; skip it here. */
	if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) {
		if (IS_CHERRYVIEW(dev_priv))
			chv_disable_pll(dev_priv, pipe);
		else if (IS_VALLEYVIEW(dev_priv))
			vlv_disable_pll(dev_priv, pipe);
		else
			i9xx_disable_pll(old_crtc_state);
	}

	intel_encoders_post_pll_disable(state, crtc);

	if (!IS_GEN(dev_priv, 2))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	if (!dev_priv->display.initial_watermarks)
		intel_update_watermarks(crtc);

	/* clock the pipe down to 640x480@60 to potentially save power */
	if (IS_I830(dev_priv))
		i830_enable_pipe(dev_priv, pipe);
}
7651
/*
 * Force a crtc off outside of a full atomic commit.  Disables the planes,
 * invokes the platform crtc_disable hook through a minimal atomic state,
 * then scrubs all related software state: the crtc state itself, the
 * power domain references, and this pipe's slice of the cdclk/dbuf/bw
 * global state objects.
 */
static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
					struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_encoder *encoder;
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_bw_state *bw_state =
		to_intel_bw_state(dev_priv->bw_obj.state);
	struct intel_cdclk_state *cdclk_state =
		to_intel_cdclk_state(dev_priv->cdclk.obj.state);
	struct intel_dbuf_state *dbuf_state =
		to_intel_dbuf_state(dev_priv->dbuf.obj.state);
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->base.state);
	enum intel_display_power_domain domain;
	struct intel_plane *plane;
	struct drm_atomic_state *state;
	struct intel_crtc_state *temp_crtc_state;
	enum pipe pipe = crtc->pipe;
	u64 domains;
	int ret;

	if (!crtc_state->hw.active)
		return;

	/* Shut down all visible planes before touching the pipe itself. */
	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
		const struct intel_plane_state *plane_state =
			to_intel_plane_state(plane->base.state);

		if (plane_state->uapi.visible)
			intel_plane_disable_noatomic(crtc, plane);
	}

	/*
	 * Build a minimal atomic state so the crtc_disable hook can look
	 * up the old crtc/connector states it needs.
	 */
	state = drm_atomic_state_alloc(&dev_priv->drm);
	if (!state) {
		drm_dbg_kms(&dev_priv->drm,
			    "failed to disable [CRTC:%d:%s], out of memory",
			    crtc->base.base.id, crtc->base.name);
		return;
	}

	state->acquire_ctx = ctx;

	/* Everything's already locked, -EDEADLK can't happen. */
	temp_crtc_state = intel_atomic_get_crtc_state(state, crtc);
	ret = drm_atomic_add_affected_connectors(state, &crtc->base);

	drm_WARN_ON(&dev_priv->drm, IS_ERR(temp_crtc_state) || ret);

	dev_priv->display.crtc_disable(to_intel_atomic_state(state), crtc);

	drm_atomic_state_put(state);

	drm_dbg_kms(&dev_priv->drm,
		    "[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
		    crtc->base.base.id, crtc->base.name);

	crtc->active = false;
	crtc->base.enabled = false;

	/* Clear out the software crtc state to match the disabled hw. */
	drm_WARN_ON(&dev_priv->drm,
		    drm_atomic_set_mode_for_crtc(&crtc_state->uapi, NULL) < 0);
	crtc_state->uapi.active = false;
	crtc_state->uapi.connector_mask = 0;
	crtc_state->uapi.encoder_mask = 0;
	intel_crtc_free_hw_state(crtc_state);
	memset(&crtc_state->hw, 0, sizeof(crtc_state->hw));

	for_each_encoder_on_crtc(&dev_priv->drm, &crtc->base, encoder)
		encoder->base.crtc = NULL;

	intel_fbc_disable(crtc);
	intel_update_watermarks(crtc);
	intel_disable_shared_dpll(crtc_state);

	/* Release every power domain reference this crtc was holding. */
	domains = crtc->enabled_power_domains;
	for_each_power_domain(domain, domains)
		intel_display_power_put_unchecked(dev_priv, domain);
	crtc->enabled_power_domains = 0;

	/* Remove this pipe from the global cdclk/dbuf/bw bookkeeping. */
	dev_priv->active_pipes &= ~BIT(pipe);
	cdclk_state->min_cdclk[pipe] = 0;
	cdclk_state->min_voltage_level[pipe] = 0;
	cdclk_state->active_pipes &= ~BIT(pipe);

	dbuf_state->active_pipes &= ~BIT(pipe);

	bw_state->data_rate[pipe] = 0;
	bw_state->num_active_planes[pipe] = 0;
}
7741
7742/*
7743 * turn all crtc's off, but do not adjust state
7744 * This has to be paired with a call to intel_modeset_setup_hw_state.
7745 */
7746int intel_display_suspend(struct drm_device *dev)
7747{
7748	struct drm_i915_private *dev_priv = to_i915(dev);
7749	struct drm_atomic_state *state;
7750	int ret;
7751
7752	state = drm_atomic_helper_suspend(dev);
7753	ret = PTR_ERR_OR_ZERO(state);
7754	if (ret)
7755		drm_err(&dev_priv->drm, "Suspending crtc's failed with %i\n",
7756			ret);
7757	else
7758		dev_priv->modeset_restore_state = state;
7759	return ret;
7760}
7761
void intel_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_encoder *intel_enc = to_intel_encoder(encoder);

	/* Unregister from the DRM core first, then free the embedding struct. */
	drm_encoder_cleanup(encoder);
	kfree(intel_enc);
}
7769
/*
 * Cross check the actual hw state with our own modeset state tracking
 * (and its internal consistency).
 */
static void intel_connector_verify_state(struct intel_crtc_state *crtc_state,
					 struct drm_connector_state *conn_state)
{
	struct intel_connector *connector = to_intel_connector(conn_state->connector);
	struct drm_i915_private *i915 = to_i915(connector->base.dev);

	drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s]\n",
		    connector->base.base.id, connector->base.name);

	if (connector->get_hw_state(connector)) {
		struct intel_encoder *encoder = intel_attached_encoder(connector);

		/* Hw says enabled: we must have a crtc, and it must be active. */
		I915_STATE_WARN(!crtc_state,
			 "connector enabled without attached crtc\n");

		if (!crtc_state)
			return;

		I915_STATE_WARN(!crtc_state->hw.active,
				"connector is active, but attached crtc isn't\n");

		/* No encoder, or DP MST: the encoder/crtc checks below don't apply. */
		if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
			return;

		I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
			"atomic encoder doesn't match attached encoder\n");

		I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
			"attached encoder crtc differs from connector crtc\n");
	} else {
		/* Hw says disabled: crtc must be inactive, encoder unset. */
		I915_STATE_WARN(crtc_state && crtc_state->hw.active,
				"attached crtc is active, but connector isn't\n");
		I915_STATE_WARN(!crtc_state && conn_state->best_encoder,
			"best encoder set without crtc!\n");
	}
}
7808
7809static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
7810{
7811	if (crtc_state->hw.enable && crtc_state->has_pch_encoder)
7812		return crtc_state->fdi_lanes;
7813
7814	return 0;
7815}
7816
/*
 * Validate the FDI lane count requested for @pipe, accounting for lanes
 * shared between pipes on 3-pipe (Ivybridge-style) parts. Returns 0 if
 * the config is achievable, -EINVAL if it is not, or an error (such as
 * -EDEADLK) from acquiring the other pipe's crtc state.
 */
static int ilk_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
			       struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state = pipe_config->uapi.state;
	struct intel_crtc *other_crtc;
	struct intel_crtc_state *other_crtc_state;

	drm_dbg_kms(&dev_priv->drm,
		    "checking fdi config on pipe %c, lanes %i\n",
		    pipe_name(pipe), pipe_config->fdi_lanes);
	/* 4 lanes is the absolute per-pipe maximum. */
	if (pipe_config->fdi_lanes > 4) {
		drm_dbg_kms(&dev_priv->drm,
			    "invalid fdi lane config on pipe %c: %i lanes\n",
			    pipe_name(pipe), pipe_config->fdi_lanes);
		return -EINVAL;
	}

	/* HSW/BDW cap FDI at 2 lanes, with no cross-pipe sharing rules. */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		if (pipe_config->fdi_lanes > 2) {
			drm_dbg_kms(&dev_priv->drm,
				    "only 2 lanes on haswell, required: %i lanes\n",
				    pipe_config->fdi_lanes);
			return -EINVAL;
		} else {
			return 0;
		}
	}

	/* 2-pipe parts have no lane sharing to worry about. */
	if (INTEL_NUM_PIPES(dev_priv) == 2)
		return 0;

	/* Ivybridge 3 pipe is really complicated */
	switch (pipe) {
	case PIPE_A:
		return 0;
	case PIPE_B:
		/* Pipe B only conflicts with pipe C when using > 2 lanes. */
		if (pipe_config->fdi_lanes <= 2)
			return 0;

		other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_C);
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		/* > 2 lanes on B leaves nothing for pipe C. */
		if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
			drm_dbg_kms(&dev_priv->drm,
				    "invalid shared fdi lane config on pipe %c: %i lanes\n",
				    pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}
		return 0;
	case PIPE_C:
		if (pipe_config->fdi_lanes > 2) {
			drm_dbg_kms(&dev_priv->drm,
				    "only 2 lanes on pipe %c: required %i lanes\n",
				    pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}

		other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_B);
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		/* Pipe C can only be used if pipe B stays at <= 2 lanes. */
		if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
			drm_dbg_kms(&dev_priv->drm,
				    "fdi link B uses too many lanes to enable link C\n");
			return -EINVAL;
		}
		return 0;
	default:
		BUG();
	}
}
7894
/* Returned to ask the caller to re-run compute with the reduced bpp. */
#define RETRY 1
static int ilk_fdi_compute_config(struct intel_crtc *intel_crtc,
				  struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *i915 = to_i915(dev);
	const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
	int lane, link_bw, fdi_dotclock, ret;
	bool needs_recompute = false;

retry:
	/* FDI is a binary signal running at ~2.7GHz, encoding
	 * each output octet as 10 bits. The actual frequency
	 * is stored as a divider into a 100MHz clock, and the
	 * mode pixel clock is stored in units of 1KHz.
	 * Hence the bw of each lane in terms of the mode signal
	 * is:
	 */
	link_bw = intel_fdi_link_freq(i915, pipe_config);

	fdi_dotclock = adjusted_mode->crtc_clock;

	lane = ilk_get_lanes_required(fdi_dotclock, link_bw,
				      pipe_config->pipe_bpp);

	pipe_config->fdi_lanes = lane;

	intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
			       link_bw, &pipe_config->fdi_m_n, false, false);

	ret = ilk_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
	/* -EDEADLK must propagate so the atomic core can back off locks. */
	if (ret == -EDEADLK)
		return ret;

	/*
	 * Lane config impossible: retry with bpp lowered by 2 bits per
	 * channel, down to a floor of 6 bpc (6*3 bpp).
	 */
	if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
		pipe_config->pipe_bpp -= 2*3;
		drm_dbg_kms(&i915->drm,
			    "fdi link bw constraint, reducing pipe bpp to %i\n",
			    pipe_config->pipe_bpp);
		needs_recompute = true;
		pipe_config->bw_constrained = true;

		goto retry;
	}

	/* Success after a bpp reduction: caller must recompute the mode. */
	if (needs_recompute)
		return RETRY;

	return ret;
}
7945
7946bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
7947{
7948	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
7949	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7950
7951	/* IPS only exists on ULT machines and is tied to pipe A. */
7952	if (!hsw_crtc_supports_ips(crtc))
7953		return false;
7954
7955	if (!dev_priv->params.enable_ips)
7956		return false;
7957
7958	if (crtc_state->pipe_bpp > 24)
7959		return false;
7960
7961	/*
7962	 * We compare against max which means we must take
7963	 * the increased cdclk requirement into account when
7964	 * calculating the new cdclk.
7965	 *
7966	 * Should measure whether using a lower cdclk w/o IPS
7967	 */
7968	if (IS_BROADWELL(dev_priv) &&
7969	    crtc_state->pixel_rate > dev_priv->max_cdclk_freq * 95 / 100)
7970		return false;
7971
7972	return true;
7973}
7974
7975static int hsw_compute_ips_config(struct intel_crtc_state *crtc_state)
7976{
7977	struct drm_i915_private *dev_priv =
7978		to_i915(crtc_state->uapi.crtc->dev);
7979	struct intel_atomic_state *state =
7980		to_intel_atomic_state(crtc_state->uapi.state);
7981
7982	crtc_state->ips_enabled = false;
7983
7984	if (!hsw_crtc_state_ips_capable(crtc_state))
7985		return 0;
7986
7987	/*
7988	 * When IPS gets enabled, the pipe CRC changes. Since IPS gets
7989	 * enabled and disabled dynamically based on package C states,
7990	 * user space can't make reliable use of the CRCs, so let's just
7991	 * completely disable it.
7992	 */
7993	if (crtc_state->crc_enabled)
7994		return 0;
7995
7996	/* IPS should be fine as long as at least one plane is enabled. */
7997	if (!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)))
7998		return 0;
7999
8000	if (IS_BROADWELL(dev_priv)) {
8001		const struct intel_cdclk_state *cdclk_state;
8002
8003		cdclk_state = intel_atomic_get_cdclk_state(state);
8004		if (IS_ERR(cdclk_state))
8005			return PTR_ERR(cdclk_state);
8006
8007		/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
8008		if (crtc_state->pixel_rate > cdclk_state->logical.cdclk * 95 / 100)
8009			return 0;
8010	}
8011
8012	crtc_state->ips_enabled = true;
8013
8014	return 0;
8015}
8016
8017static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
8018{
8019	const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8020
8021	/* GDG double wide on either pipe, otherwise pipe A only */
8022	return INTEL_GEN(dev_priv) < 4 &&
8023		(crtc->pipe == PIPE_A || IS_I915G(dev_priv));
8024}
8025
8026static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *crtc_state)
8027{
8028	u32 pixel_rate = crtc_state->hw.adjusted_mode.crtc_clock;
8029	unsigned int pipe_w, pipe_h, pfit_w, pfit_h;
8030
8031	/*
8032	 * We only use IF-ID interlacing. If we ever use
8033	 * PF-ID we'll need to adjust the pixel_rate here.
8034	 */
8035
8036	if (!crtc_state->pch_pfit.enabled)
8037		return pixel_rate;
8038
8039	pipe_w = crtc_state->pipe_src_w;
8040	pipe_h = crtc_state->pipe_src_h;
8041
8042	pfit_w = drm_rect_width(&crtc_state->pch_pfit.dst);
8043	pfit_h = drm_rect_height(&crtc_state->pch_pfit.dst);
8044
8045	if (pipe_w < pfit_w)
8046		pipe_w = pfit_w;
8047	if (pipe_h < pfit_h)
8048		pipe_h = pfit_h;
8049
8050	if (drm_WARN_ON(crtc_state->uapi.crtc->dev,
8051			!pfit_w || !pfit_h))
8052		return pixel_rate;
8053
8054	return div_u64(mul_u32_u32(pixel_rate, pipe_w * pipe_h),
8055		       pfit_w * pfit_h);
8056}
8057
8058static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
8059{
8060	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
8061
8062	if (HAS_GMCH(dev_priv))
8063		/* FIXME calculate proper pipe pixel rate for GMCH pfit */
8064		crtc_state->pixel_rate =
8065			crtc_state->hw.adjusted_mode.crtc_clock;
8066	else
8067		crtc_state->pixel_rate =
8068			ilk_pipe_pixel_rate(crtc_state);
8069}
8070
/*
 * Validate and finalize platform-level constraints on the crtc config:
 * dotclock limits (and double wide mode on pre-gen4), YCbCr/CTM
 * exclusivity, even-width requirements, hsync quirks, pixel rate, and
 * FDI lane configuration. Returns 0 or a negative error code.
 */
static int intel_crtc_compute_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
	int clock_limit = dev_priv->max_dotclk_freq;

	if (INTEL_GEN(dev_priv) < 4) {
		/* Pre-gen4: single wide pipe limited to 90% of cdclk. */
		clock_limit = dev_priv->max_cdclk_freq * 9 / 10;

		/*
		 * Enable double wide mode when the dot clock
		 * is > 90% of the (display) core speed.
		 */
		if (intel_crtc_supports_double_wide(crtc) &&
		    adjusted_mode->crtc_clock > clock_limit) {
			clock_limit = dev_priv->max_dotclk_freq;
			pipe_config->double_wide = true;
		}
	}

	if (adjusted_mode->crtc_clock > clock_limit) {
		drm_dbg_kms(&dev_priv->drm,
			    "requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
			    adjusted_mode->crtc_clock, clock_limit,
			    yesno(pipe_config->double_wide));
		return -EINVAL;
	}

	if ((pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
	     pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) &&
	     pipe_config->hw.ctm) {
		/*
		 * There is only one pipe CSC unit per pipe, and we need that
		 * for output conversion from RGB->YCBCR. So if CTM is already
		 * applied we can't support YCBCR420 output.
		 */
		drm_dbg_kms(&dev_priv->drm,
			    "YCBCR420 and CTM together are not possible\n");
		return -EINVAL;
	}

	/*
	 * Pipe horizontal size must be even in:
	 * - DVO ganged mode
	 * - LVDS dual channel mode
	 * - Double wide pipe
	 */
	if (pipe_config->pipe_src_w & 1) {
		if (pipe_config->double_wide) {
			drm_dbg_kms(&dev_priv->drm,
				    "Odd pipe source width not supported with double wide pipe\n");
			return -EINVAL;
		}

		if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) &&
		    intel_is_dual_link_lvds(dev_priv)) {
			drm_dbg_kms(&dev_priv->drm,
				    "Odd pipe source width not supported with dual link LVDS\n");
			return -EINVAL;
		}
	}

	/* Cantiga+ cannot handle modes with a hsync front porch of 0.
	 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
	 */
	if ((INTEL_GEN(dev_priv) > 4 || IS_G4X(dev_priv)) &&
		adjusted_mode->crtc_hsync_start == adjusted_mode->crtc_hdisplay)
		return -EINVAL;

	intel_crtc_compute_pixel_rate(pipe_config);

	/* PCH encoders need a valid FDI lane configuration as well. */
	if (pipe_config->has_pch_encoder)
		return ilk_fdi_compute_config(crtc, pipe_config);

	return 0;
}
8148
8149static void
8150intel_reduce_m_n_ratio(u32 *num, u32 *den)
8151{
8152	while (*num > DATA_LINK_M_N_MASK ||
8153	       *den > DATA_LINK_M_N_MASK) {
8154		*num >>= 1;
8155		*den >>= 1;
8156	}
8157}
8158
8159static void compute_m_n(unsigned int m, unsigned int n,
8160			u32 *ret_m, u32 *ret_n,
8161			bool constant_n)
8162{
8163	/*
8164	 * Several DP dongles in particular seem to be fussy about
8165	 * too large link M/N values. Give N value as 0x8000 that
8166	 * should be acceptable by specific devices. 0x8000 is the
8167	 * specified fixed N value for asynchronous clock mode,
8168	 * which the devices expect also in synchronous clock mode.
8169	 */
8170	if (constant_n)
8171		*ret_n = DP_LINK_CONSTANT_N_VALUE;
8172	else
8173		*ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
8174
8175	*ret_m = div_u64(mul_u32_u32(m, *ret_n), n);
8176	intel_reduce_m_n_ratio(ret_m, ret_n);
8177}
8178
8179void
8180intel_link_compute_m_n(u16 bits_per_pixel, int nlanes,
8181		       int pixel_clock, int link_clock,
8182		       struct intel_link_m_n *m_n,
8183		       bool constant_n, bool fec_enable)
8184{
8185	u32 data_clock = bits_per_pixel * pixel_clock;
8186
8187	if (fec_enable)
8188		data_clock = intel_dp_mode_to_fec_clock(data_clock);
8189
8190	m_n->tu = 64;
8191	compute_m_n(data_clock,
8192		    link_clock * nlanes * 8,
8193		    &m_n->gmch_m, &m_n->gmch_n,
8194		    constant_n);
8195
8196	compute_m_n(pixel_clock, link_clock,
8197		    &m_n->link_m, &m_n->link_n,
8198		    constant_n);
8199}
8200
8201static void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv)
8202{
8203	/*
8204	 * There may be no VBT; and if the BIOS enabled SSC we can
8205	 * just keep using it to avoid unnecessary flicker.  Whereas if the
8206	 * BIOS isn't using it, don't assume it will work even if the VBT
8207	 * indicates as much.
8208	 */
8209	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
8210		bool bios_lvds_use_ssc = intel_de_read(dev_priv,
8211						       PCH_DREF_CONTROL) &
8212			DREF_SSC1_ENABLE;
8213
8214		if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
8215			drm_dbg_kms(&dev_priv->drm,
8216				    "SSC %s by BIOS, overriding VBT which says %s\n",
8217				    enableddisabled(bios_lvds_use_ssc),
8218				    enableddisabled(dev_priv->vbt.lvds_use_ssc));
8219			dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
8220		}
8221	}
8222}
8223
8224static bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
8225{
8226	if (dev_priv->params.panel_use_ssc >= 0)
8227		return dev_priv->params.panel_use_ssc != 0;
8228	return dev_priv->vbt.lvds_use_ssc
8229		&& !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
8230}
8231
8232static u32 pnv_dpll_compute_fp(struct dpll *dpll)
8233{
8234	return (1 << dpll->n) << 16 | dpll->m2;
8235}
8236
8237static u32 i9xx_dpll_compute_fp(struct dpll *dpll)
8238{
8239	return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
8240}
8241
8242static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
8243				     struct intel_crtc_state *crtc_state,
8244				     struct dpll *reduced_clock)
8245{
8246	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8247	u32 fp, fp2 = 0;
8248
8249	if (IS_PINEVIEW(dev_priv)) {
8250		fp = pnv_dpll_compute_fp(&crtc_state->dpll);
8251		if (reduced_clock)
8252			fp2 = pnv_dpll_compute_fp(reduced_clock);
8253	} else {
8254		fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
8255		if (reduced_clock)
8256			fp2 = i9xx_dpll_compute_fp(reduced_clock);
8257	}
8258
8259	crtc_state->dpll_hw_state.fp0 = fp;
8260
8261	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
8262	    reduced_clock) {
8263		crtc_state->dpll_hw_state.fp1 = fp2;
8264	} else {
8265		crtc_state->dpll_hw_state.fp1 = fp;
8266	}
8267}
8268
static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
		pipe)
{
	u32 reg_val;

	/*
	 * PLLB opamp always calibrates to max value of 0x3f, force enable it
	 * and set it to a reasonable value instead.
	 */
	/* NOTE: the magic constants below come from hw programming
	 * sequences; they are not decomposed into named fields here. */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
	reg_val &= 0xffffff00;
	reg_val |= 0x00000030;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);

	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
	reg_val &= 0x00ffffff;
	reg_val |= 0x8c000000;
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);

	/* Clear the low byte again after calibration has been forced. */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
	reg_val &= 0xffffff00;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);

	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
	reg_val &= 0x00ffffff;
	reg_val |= 0xb0000000;
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
}
8297
8298static void intel_pch_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
8299					 const struct intel_link_m_n *m_n)
8300{
8301	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
8302	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8303	enum pipe pipe = crtc->pipe;
8304
8305	intel_de_write(dev_priv, PCH_TRANS_DATA_M1(pipe),
8306		       TU_SIZE(m_n->tu) | m_n->gmch_m);
8307	intel_de_write(dev_priv, PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
8308	intel_de_write(dev_priv, PCH_TRANS_LINK_M1(pipe), m_n->link_m);
8309	intel_de_write(dev_priv, PCH_TRANS_LINK_N1(pipe), m_n->link_n);
8310}
8311
8312static bool transcoder_has_m2_n2(struct drm_i915_private *dev_priv,
8313				 enum transcoder transcoder)
8314{
8315	if (IS_HASWELL(dev_priv))
8316		return transcoder == TRANSCODER_EDP;
8317
8318	/*
8319	 * Strictly speaking some registers are available before
8320	 * gen7, but we only support DRRS on gen7+
8321	 */
8322	return IS_GEN(dev_priv, 7) || IS_CHERRYVIEW(dev_priv);
8323}
8324
/*
 * Program the CPU transcoder data/link M/N registers; @m2_n2 is the
 * optional second set used for DRRS, written only where supported.
 */
static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
					 const struct intel_link_m_n *m_n,
					 const struct intel_link_m_n *m2_n2)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum transcoder transcoder = crtc_state->cpu_transcoder;

	/* Gen5+ registers are indexed by transcoder, older by pipe. */
	if (INTEL_GEN(dev_priv) >= 5) {
		intel_de_write(dev_priv, PIPE_DATA_M1(transcoder),
			       TU_SIZE(m_n->tu) | m_n->gmch_m);
		intel_de_write(dev_priv, PIPE_DATA_N1(transcoder),
			       m_n->gmch_n);
		intel_de_write(dev_priv, PIPE_LINK_M1(transcoder),
			       m_n->link_m);
		intel_de_write(dev_priv, PIPE_LINK_N1(transcoder),
			       m_n->link_n);
		/*
		 *  M2_N2 registers are set only if DRRS is supported
		 * (to make sure the registers are not unnecessarily accessed).
		 */
		if (m2_n2 && crtc_state->has_drrs &&
		    transcoder_has_m2_n2(dev_priv, transcoder)) {
			intel_de_write(dev_priv, PIPE_DATA_M2(transcoder),
				       TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
			intel_de_write(dev_priv, PIPE_DATA_N2(transcoder),
				       m2_n2->gmch_n);
			intel_de_write(dev_priv, PIPE_LINK_M2(transcoder),
				       m2_n2->link_m);
			intel_de_write(dev_priv, PIPE_LINK_N2(transcoder),
				       m2_n2->link_n);
		}
	} else {
		intel_de_write(dev_priv, PIPE_DATA_M_G4X(pipe),
			       TU_SIZE(m_n->tu) | m_n->gmch_m);
		intel_de_write(dev_priv, PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
		intel_de_write(dev_priv, PIPE_LINK_M_G4X(pipe), m_n->link_m);
		intel_de_write(dev_priv, PIPE_LINK_N_G4X(pipe), m_n->link_n);
	}
}
8366
8367void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, enum link_m_n_set m_n)
8368{
8369	const struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;
8370	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
8371
8372	if (m_n == M1_N1) {
8373		dp_m_n = &crtc_state->dp_m_n;
8374		dp_m2_n2 = &crtc_state->dp_m2_n2;
8375	} else if (m_n == M2_N2) {
8376
8377		/*
8378		 * M2_N2 registers are not supported. Hence m2_n2 divider value
8379		 * needs to be programmed into M1_N1.
8380		 */
8381		dp_m_n = &crtc_state->dp_m2_n2;
8382	} else {
8383		drm_err(&i915->drm, "Unsupported divider value\n");
8384		return;
8385	}
8386
8387	if (crtc_state->has_pch_encoder)
8388		intel_pch_transcoder_set_m_n(crtc_state, &crtc_state->dp_m_n);
8389	else
8390		intel_cpu_transcoder_set_m_n(crtc_state, dp_m_n, dp_m2_n2);
8391}
8392
8393static void vlv_compute_dpll(struct intel_crtc *crtc,
8394			     struct intel_crtc_state *pipe_config)
8395{
8396	pipe_config->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV |
8397		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
8398	if (crtc->pipe != PIPE_A)
8399		pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
8400
8401	/* DPLL not used with DSI, but still need the rest set up */
8402	if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
8403		pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE |
8404			DPLL_EXT_BUFFER_ENABLE_VLV;
8405
8406	pipe_config->dpll_hw_state.dpll_md =
8407		(pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
8408}
8409
8410static void chv_compute_dpll(struct intel_crtc *crtc,
8411			     struct intel_crtc_state *pipe_config)
8412{
8413	pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
8414		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
8415	if (crtc->pipe != PIPE_A)
8416		pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
8417
8418	/* DPLL not used with DSI, but still need the rest set up */
8419	if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
8420		pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE;
8421
8422	pipe_config->dpll_hw_state.dpll_md =
8423		(pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
8424}
8425
/*
 * Program the VLV DPIO PHY registers for the PLL described in
 * @pipe_config->dpll, ahead of enabling it. The write sequence and the
 * magic constants follow the hw programming notes referenced below.
 */
static void vlv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	u32 mdiv;
	u32 bestn, bestm1, bestm2, bestp1, bestp2;
	u32 coreclk, reg_val;

	/* Enable Refclk */
	intel_de_write(dev_priv, DPLL(pipe),
		       pipe_config->dpll_hw_state.dpll & ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));

	/* No need to actually set up the DPLL with DSI */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	vlv_dpio_get(dev_priv);

	bestn = pipe_config->dpll.n;
	bestm1 = pipe_config->dpll.m1;
	bestm2 = pipe_config->dpll.m2;
	bestp1 = pipe_config->dpll.p1;
	bestp2 = pipe_config->dpll.p2;

	/* See eDP HDMI DPIO driver vbios notes doc */

	/* PLL B needs special handling */
	if (pipe == PIPE_B)
		vlv_pllb_recal_opamp(dev_priv, pipe);

	/* Set up Tx target for periodic Rcomp update */
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);

	/* Disable target IRef on PLL */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
	reg_val &= 0x00ffffff;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);

	/* Disable fast lock */
	vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);

	/* Set idtafcrecal before PLL is enabled */
	mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
	mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
	mdiv |= ((bestn << DPIO_N_SHIFT));
	mdiv |= (1 << DPIO_K_SHIFT);

	/*
	 * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
	 * but we don't support that).
	 * Note: don't use the DAC post divider as it seems unstable.
	 */
	mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);

	/* Write dividers first, then enable calibration in a second write. */
	mdiv |= DPIO_ENABLE_CALIBRATION;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);

	/* Set HBR and RBR LPF coefficients */
	if (pipe_config->port_clock == 162000 ||
	    intel_crtc_has_type(pipe_config, INTEL_OUTPUT_ANALOG) ||
	    intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI))
		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
				 0x009f0003);
	else
		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
				 0x00d0000f);

	if (intel_crtc_has_dp_encoder(pipe_config)) {
		/* Use SSC source */
		if (pipe == PIPE_A)
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df40000);
		else
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df70000);
	} else { /* HDMI or VGA */
		/* Use bend source */
		if (pipe == PIPE_A)
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df70000);
		else
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df40000);
	}

	/* Core clock control; extra bit set for DP encoders. */
	coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
	coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
	if (intel_crtc_has_dp_encoder(pipe_config))
		coreclk |= 0x01000000;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);

	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);

	vlv_dpio_put(dev_priv);
}
8524
/*
 * Program the CHV DPIO PHY registers (dividers, lock detect, loop
 * filter) for the PLL described in @pipe_config->dpll, ahead of
 * enabling it. Register write order follows the hw sequence.
 */
static void chv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 loopfilter, tribuf_calcntr;
	u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
	u32 dpio_val;
	int vco;

	/* Enable Refclk and SSC */
	intel_de_write(dev_priv, DPLL(pipe),
		       pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);

	/* No need to actually set up the DPLL with DSI */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	/* m2 is split: low 22 bits are the fraction, the rest the integer. */
	bestn = pipe_config->dpll.n;
	bestm2_frac = pipe_config->dpll.m2 & 0x3fffff;
	bestm1 = pipe_config->dpll.m1;
	bestm2 = pipe_config->dpll.m2 >> 22;
	bestp1 = pipe_config->dpll.p1;
	bestp2 = pipe_config->dpll.p2;
	vco = pipe_config->dpll.vco;
	dpio_val = 0;
	loopfilter = 0;

	vlv_dpio_get(dev_priv);

	/* p1 and p2 divider */
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
			5 << DPIO_CHV_S1_DIV_SHIFT |
			bestp1 << DPIO_CHV_P1_DIV_SHIFT |
			bestp2 << DPIO_CHV_P2_DIV_SHIFT |
			1 << DPIO_CHV_K_DIV_SHIFT);

	/* Feedback post-divider - m2 */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2);

	/* Feedback refclk divider - n and m1 */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port),
			DPIO_CHV_M1_DIV_BY_2 |
			1 << DPIO_CHV_N_DIV_SHIFT);

	/* M2 fraction division */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);

	/* M2 fraction division enable */
	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
	dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
	dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT);
	if (bestm2_frac)
		dpio_val |= DPIO_CHV_FRAC_DIV_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val);

	/* Program digital lock detect threshold */
	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port));
	dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
					DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
	dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT);
	if (!bestm2_frac)
		dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val);

	/* Loop filter coefficients are selected by the VCO frequency band. */
	if (vco == 5400000) {
		loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x9;
	} else if (vco <= 6200000) {
		loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x9;
	} else if (vco <= 6480000) {
		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x8;
	} else {
		/* Not supported. Apply the same limits as in the max case */
		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0;
	}
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);

	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port));
	dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
	dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT);
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val);

	/* AFC Recal */
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
			vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
			DPIO_AFC_RECAL);

	vlv_dpio_put(dev_priv);
}
8629
8630/**
8631 * vlv_force_pll_on - forcibly enable just the PLL
8632 * @dev_priv: i915 private structure
8633 * @pipe: pipe PLL to enable
8634 * @dpll: PLL configuration
8635 *
8636 * Enable the PLL for @pipe using the supplied @dpll config. To be used
8637 * in cases where we need the PLL enabled even when @pipe is not going to
8638 * be enabled.
8639 */
8640int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
8641		     const struct dpll *dpll)
8642{
8643	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
8644	struct intel_crtc_state *pipe_config;
8645
8646	pipe_config = intel_crtc_state_alloc(crtc);
8647	if (!pipe_config)
8648		return -ENOMEM;
8649
8650	pipe_config->cpu_transcoder = (enum transcoder)pipe;
8651	pipe_config->pixel_multiplier = 1;
8652	pipe_config->dpll = *dpll;
8653
8654	if (IS_CHERRYVIEW(dev_priv)) {
8655		chv_compute_dpll(crtc, pipe_config);
8656		chv_prepare_pll(crtc, pipe_config);
8657		chv_enable_pll(crtc, pipe_config);
8658	} else {
8659		vlv_compute_dpll(crtc, pipe_config);
8660		vlv_prepare_pll(crtc, pipe_config);
8661		vlv_enable_pll(crtc, pipe_config);
8662	}
8663
8664	kfree(pipe_config);
8665
8666	return 0;
8667}
8668
8669/**
8670 * vlv_force_pll_off - forcibly disable just the PLL
8671 * @dev_priv: i915 private structure
8672 * @pipe: pipe PLL to disable
8673 *
8674 * Disable the PLL for @pipe. To be used in cases where we need
8675 * the PLL enabled even when @pipe is not going to be enabled.
8676 */
8677void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe)
8678{
8679	if (IS_CHERRYVIEW(dev_priv))
8680		chv_disable_pll(dev_priv, pipe);
8681	else
8682		vlv_disable_pll(dev_priv, pipe);
8683}
8684
/*
 * Assemble the gen3/gen4-era DPLL control register value (and DPLL_MD
 * on gen4+) from the computed divider state, storing the result in
 * crtc_state->dpll_hw_state.
 */
static void i9xx_compute_dpll(struct intel_crtc *crtc,
			      struct intel_crtc_state *crtc_state,
			      struct dpll *reduced_clock)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 dpll;
	struct dpll *clock = &crtc_state->dpll;

	/* FP0/FP1 divider registers are filled in first. */
	i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);

	dpll = DPLL_VGA_MODE_DIS;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;

	/* SDVO pixel multiplier, encoded in the hires field on these parts. */
	if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
	    IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
		dpll |= (crtc_state->pixel_multiplier - 1)
			<< SDVO_MULTIPLIER_SHIFT_HIRES;
	}

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	if (intel_crtc_has_dp_encoder(crtc_state))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/* compute bitmask from p1 value */
	if (IS_PINEVIEW(dev_priv))
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
	else {
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
		if (IS_G4X(dev_priv) && reduced_clock)
			dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
	}
	switch (clock->p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}
	if (INTEL_GEN(dev_priv) >= 4)
		dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);

	/* Reference clock selection: TV clock, SSC for LVDS, or DREFCLK. */
	if (crtc_state->sdvo_tv_clock)
		dpll |= PLL_REF_INPUT_TVCLKINBC;
	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
		 intel_panel_use_ssc(dev_priv))
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;
	crtc_state->dpll_hw_state.dpll = dpll;

	/* Gen4+ carries the pixel multiplier in DPLL_MD instead. */
	if (INTEL_GEN(dev_priv) >= 4) {
		u32 dpll_md = (crtc_state->pixel_multiplier - 1)
			<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
		crtc_state->dpll_hw_state.dpll_md = dpll_md;
	}
}
8757
/*
 * i8xx_compute_dpll - assemble the DPLL register value for gen2 pipes
 * @crtc: CRTC being configured
 * @crtc_state: state holding the computed dividers (crtc_state->dpll)
 * @reduced_clock: optional reduced (downclocked) dividers, or NULL
 *
 * Fills in crtc_state->dpll_hw_state.dpll from the previously computed
 * clock dividers; no hardware access happens here.
 */
static void i8xx_compute_dpll(struct intel_crtc *crtc,
			      struct intel_crtc_state *crtc_state,
			      struct dpll *reduced_clock)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 dpll;
	struct dpll *clock = &crtc_state->dpll;

	/* Update the PLL divider state first (helper shared with i9xx). */
	i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);

	dpll = DPLL_VGA_MODE_DIS;

	/* P1/P2 encoding differs between LVDS and the other outputs. */
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
	} else {
		if (clock->p1 == 2)
			dpll |= PLL_P1_DIVIDE_BY_TWO;
		else
			dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
		if (clock->p2 == 4)
			dpll |= PLL_P2_DIVIDE_BY_4;
	}

	/*
	 * Bspec:
	 * "[Almador Errata]: For the correct operation of the muxed DVO pins
	 *  (GDEVSELB/I2Cdata, GIRDBY/I2CClk) and (GFRAMEB/DVI_Data,
	 *  GTRDYB/DVI_Clk): Bit 31 (DPLL VCO Enable) and Bit 30 (2X Clock
	 *  Enable) must be set to “1” in both the DPLL A Control Register
	 *  (06014h-06017h) and DPLL B Control Register (06018h-0601Bh)."
	 *
	 * For simplicity We simply keep both bits always enabled in
	 * both DPLLS. The spec says we should disable the DVO 2X clock
	 * when not needed, but this seems to work fine in practice.
	 */
	if (IS_I830(dev_priv) ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO))
		dpll |= DPLL_DVO_2X_MODE;

	/* LVDS panels may run off the spread spectrum reference. */
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
	    intel_panel_use_ssc(dev_priv))
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;
	crtc_state->dpll_hw_state.dpll = dpll;
}
8807
/*
 * Program the transcoder timing registers (H/V TOTAL, BLANK, SYNC and
 * VSYNCSHIFT) from the adjusted mode in @crtc_state. The hardware uses
 * a "value - 1" encoding for all timing fields.
 */
static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
	u32 crtc_vtotal, crtc_vblank_end;
	int vsyncshift = 0;

	/* We need to be careful not to change the adjusted mode, for otherwise
	 * the hw state checker will get angry at the mismatch. */
	crtc_vtotal = adjusted_mode->crtc_vtotal;
	crtc_vblank_end = adjusted_mode->crtc_vblank_end;

	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		/* the chip adds 2 halflines automatically */
		crtc_vtotal -= 1;
		crtc_vblank_end -= 1;

		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
		else
			vsyncshift = adjusted_mode->crtc_hsync_start -
				adjusted_mode->crtc_htotal / 2;
		/* keep the shift within a single scanline */
		if (vsyncshift < 0)
			vsyncshift += adjusted_mode->crtc_htotal;
	}

	/* VSYNCSHIFT is only programmed on gen4+ */
	if (INTEL_GEN(dev_priv) > 3)
		intel_de_write(dev_priv, VSYNCSHIFT(cpu_transcoder),
		               vsyncshift);

	intel_de_write(dev_priv, HTOTAL(cpu_transcoder),
		       (adjusted_mode->crtc_hdisplay - 1) | ((adjusted_mode->crtc_htotal - 1) << 16));
	intel_de_write(dev_priv, HBLANK(cpu_transcoder),
		       (adjusted_mode->crtc_hblank_start - 1) | ((adjusted_mode->crtc_hblank_end - 1) << 16));
	intel_de_write(dev_priv, HSYNC(cpu_transcoder),
		       (adjusted_mode->crtc_hsync_start - 1) | ((adjusted_mode->crtc_hsync_end - 1) << 16));

	intel_de_write(dev_priv, VTOTAL(cpu_transcoder),
		       (adjusted_mode->crtc_vdisplay - 1) | ((crtc_vtotal - 1) << 16));
	intel_de_write(dev_priv, VBLANK(cpu_transcoder),
		       (adjusted_mode->crtc_vblank_start - 1) | ((crtc_vblank_end - 1) << 16));
	intel_de_write(dev_priv, VSYNC(cpu_transcoder),
		       (adjusted_mode->crtc_vsync_start - 1) | ((adjusted_mode->crtc_vsync_end - 1) << 16));

	/* Workaround: when the EDP input selection is B, the VTOTAL_B must be
	 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
	 * documented on the DDI_FUNC_CTL register description, EDP Input Select
	 * bits. */
	if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP &&
	    (pipe == PIPE_B || pipe == PIPE_C))
		intel_de_write(dev_priv, VTOTAL(pipe),
		               intel_de_read(dev_priv, VTOTAL(cpu_transcoder)));

}
8865
8866static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
8867{
8868	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
8869	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8870	enum pipe pipe = crtc->pipe;
8871
8872	/* pipesrc controls the size that is scaled from, which should
8873	 * always be the user's requested size.
8874	 */
8875	intel_de_write(dev_priv, PIPESRC(pipe),
8876		       ((crtc_state->pipe_src_w - 1) << 16) | (crtc_state->pipe_src_h - 1));
8877}
8878
8879static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state)
8880{
8881	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
8882	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
8883
8884	if (IS_GEN(dev_priv, 2))
8885		return false;
8886
8887	if (INTEL_GEN(dev_priv) >= 9 ||
8888	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
8889		return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK_HSW;
8890	else
8891		return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK;
8892}
8893
/*
 * Read back the transcoder timing registers into @pipe_config's
 * adjusted mode. The hardware stores every timing field in a
 * "value - 1" encoding, hence the +1 on each read.
 */
static void intel_get_pipe_timings(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
	u32 tmp;

	tmp = intel_de_read(dev_priv, HTOTAL(cpu_transcoder));
	pipe_config->hw.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
	pipe_config->hw.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;

	/* blanking registers are not read back for DSI transcoders */
	if (!transcoder_is_dsi(cpu_transcoder)) {
		tmp = intel_de_read(dev_priv, HBLANK(cpu_transcoder));
		pipe_config->hw.adjusted_mode.crtc_hblank_start =
							(tmp & 0xffff) + 1;
		pipe_config->hw.adjusted_mode.crtc_hblank_end =
						((tmp >> 16) & 0xffff) + 1;
	}
	tmp = intel_de_read(dev_priv, HSYNC(cpu_transcoder));
	pipe_config->hw.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
	pipe_config->hw.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;

	tmp = intel_de_read(dev_priv, VTOTAL(cpu_transcoder));
	pipe_config->hw.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
	pipe_config->hw.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;

	if (!transcoder_is_dsi(cpu_transcoder)) {
		tmp = intel_de_read(dev_priv, VBLANK(cpu_transcoder));
		pipe_config->hw.adjusted_mode.crtc_vblank_start =
							(tmp & 0xffff) + 1;
		pipe_config->hw.adjusted_mode.crtc_vblank_end =
						((tmp >> 16) & 0xffff) + 1;
	}
	tmp = intel_de_read(dev_priv, VSYNC(cpu_transcoder));
	pipe_config->hw.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
	pipe_config->hw.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;

	/*
	 * Undo the halfline adjustment that intel_set_pipe_timings()
	 * applies when programming an interlaced mode.
	 */
	if (intel_pipe_is_interlaced(pipe_config)) {
		pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
		pipe_config->hw.adjusted_mode.crtc_vtotal += 1;
		pipe_config->hw.adjusted_mode.crtc_vblank_end += 1;
	}
}
8938
8939static void intel_get_pipe_src_size(struct intel_crtc *crtc,
8940				    struct intel_crtc_state *pipe_config)
8941{
8942	struct drm_device *dev = crtc->base.dev;
8943	struct drm_i915_private *dev_priv = to_i915(dev);
8944	u32 tmp;
8945
8946	tmp = intel_de_read(dev_priv, PIPESRC(crtc->pipe));
8947	pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
8948	pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
8949
8950	pipe_config->hw.mode.vdisplay = pipe_config->pipe_src_h;
8951	pipe_config->hw.mode.hdisplay = pipe_config->pipe_src_w;
8952}
8953
8954void intel_mode_from_pipe_config(struct drm_display_mode *mode,
8955				 struct intel_crtc_state *pipe_config)
8956{
8957	mode->hdisplay = pipe_config->hw.adjusted_mode.crtc_hdisplay;
8958	mode->htotal = pipe_config->hw.adjusted_mode.crtc_htotal;
8959	mode->hsync_start = pipe_config->hw.adjusted_mode.crtc_hsync_start;
8960	mode->hsync_end = pipe_config->hw.adjusted_mode.crtc_hsync_end;
8961
8962	mode->vdisplay = pipe_config->hw.adjusted_mode.crtc_vdisplay;
8963	mode->vtotal = pipe_config->hw.adjusted_mode.crtc_vtotal;
8964	mode->vsync_start = pipe_config->hw.adjusted_mode.crtc_vsync_start;
8965	mode->vsync_end = pipe_config->hw.adjusted_mode.crtc_vsync_end;
8966
8967	mode->flags = pipe_config->hw.adjusted_mode.flags;
8968	mode->type = DRM_MODE_TYPE_DRIVER;
8969
8970	mode->clock = pipe_config->hw.adjusted_mode.crtc_clock;
8971
8972	drm_mode_set_name(mode);
8973}
8974
/*
 * Program PIPECONF for gmch platforms from @crtc_state: double wide
 * mode, dithering/bpc, interlace mode, color range and gamma mode.
 */
static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 pipeconf;

	pipeconf = 0;

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		pipeconf |= intel_de_read(dev_priv, PIPECONF(crtc->pipe)) & PIPECONF_ENABLE;

	if (crtc_state->double_wide)
		pipeconf |= PIPECONF_DOUBLE_WIDE;

	/* only g4x and later have fancy bpc/dither controls */
	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)) {
		/* Bspec claims that we can't use dithering for 30bpp pipes. */
		if (crtc_state->dither && crtc_state->pipe_bpp != 30)
			pipeconf |= PIPECONF_DITHER_EN |
				    PIPECONF_DITHER_TYPE_SP;

		switch (crtc_state->pipe_bpp) {
		case 18:
			pipeconf |= PIPECONF_6BPC;
			break;
		case 24:
			pipeconf |= PIPECONF_8BPC;
			break;
		case 30:
			pipeconf |= PIPECONF_10BPC;
			break;
		default:
			/* Case prevented by intel_choose_pipe_bpp_dither. */
			BUG();
		}
	}

	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
		/* pre-gen4 and SDVO need the field indication variant */
		if (INTEL_GEN(dev_priv) < 4 ||
		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
		else
			pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
	} else {
		pipeconf |= PIPECONF_PROGRESSIVE;
	}

	/* limited color range is only selected here on VLV/CHV */
	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	     crtc_state->limited_color_range)
		pipeconf |= PIPECONF_COLOR_RANGE_SELECT;

	pipeconf |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);

	pipeconf |= PIPECONF_FRAME_START_DELAY(0);

	intel_de_write(dev_priv, PIPECONF(crtc->pipe), pipeconf);
	intel_de_posting_read(dev_priv, PIPECONF(crtc->pipe));
}
9035
9036static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
9037				   struct intel_crtc_state *crtc_state)
9038{
9039	struct drm_device *dev = crtc->base.dev;
9040	struct drm_i915_private *dev_priv = to_i915(dev);
9041	const struct intel_limit *limit;
9042	int refclk = 48000;
9043
9044	memset(&crtc_state->dpll_hw_state, 0,
9045	       sizeof(crtc_state->dpll_hw_state));
9046
9047	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
9048		if (intel_panel_use_ssc(dev_priv)) {
9049			refclk = dev_priv->vbt.lvds_ssc_freq;
9050			drm_dbg_kms(&dev_priv->drm,
9051				    "using SSC reference clock of %d kHz\n",
9052				    refclk);
9053		}
9054
9055		limit = &intel_limits_i8xx_lvds;
9056	} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) {
9057		limit = &intel_limits_i8xx_dvo;
9058	} else {
9059		limit = &intel_limits_i8xx_dac;
9060	}
9061
9062	if (!crtc_state->clock_set &&
9063	    !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
9064				 refclk, NULL, &crtc_state->dpll)) {
9065		drm_err(&dev_priv->drm,
9066			"Couldn't find PLL settings for mode!\n");
9067		return -EINVAL;
9068	}
9069
9070	i8xx_compute_dpll(crtc, crtc_state, NULL);
9071
9072	return 0;
9073}
9074
/*
 * Compute the DPLL state for G4x. Picks the divider limits based on
 * the attached output type, finds suitable dividers (unless the encoder
 * already fixed the clock) and fills crtc_state->dpll_hw_state.
 *
 * Returns 0 on success, -EINVAL if no usable divider combination exists.
 */
static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
				  struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct intel_limit *limit;
	int refclk = 96000;

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		/* LVDS may run off the SSC reference from the VBT instead. */
		if (intel_panel_use_ssc(dev_priv)) {
			refclk = dev_priv->vbt.lvds_ssc_freq;
			drm_dbg_kms(&dev_priv->drm,
				    "using SSC reference clock of %d kHz\n",
				    refclk);
		}

		/* dual link LVDS has different divider limits */
		if (intel_is_dual_link_lvds(dev_priv))
			limit = &intel_limits_g4x_dual_channel_lvds;
		else
			limit = &intel_limits_g4x_single_channel_lvds;
	} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
		   intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
		limit = &intel_limits_g4x_hdmi;
	} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) {
		limit = &intel_limits_g4x_sdvo;
	} else {
		/* The option is for other outputs */
		limit = &intel_limits_i9xx_sdvo;
	}

	if (!crtc_state->clock_set &&
	    !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
				refclk, NULL, &crtc_state->dpll)) {
		drm_err(&dev_priv->drm,
			"Couldn't find PLL settings for mode!\n");
		return -EINVAL;
	}

	i9xx_compute_dpll(crtc, crtc_state, NULL);

	return 0;
}
9119
9120static int pnv_crtc_compute_clock(struct intel_crtc *crtc,
9121				  struct intel_crtc_state *crtc_state)
9122{
9123	struct drm_device *dev = crtc->base.dev;
9124	struct drm_i915_private *dev_priv = to_i915(dev);
9125	const struct intel_limit *limit;
9126	int refclk = 96000;
9127
9128	memset(&crtc_state->dpll_hw_state, 0,
9129	       sizeof(crtc_state->dpll_hw_state));
9130
9131	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
9132		if (intel_panel_use_ssc(dev_priv)) {
9133			refclk = dev_priv->vbt.lvds_ssc_freq;
9134			drm_dbg_kms(&dev_priv->drm,
9135				    "using SSC reference clock of %d kHz\n",
9136				    refclk);
9137		}
9138
9139		limit = &pnv_limits_lvds;
9140	} else {
9141		limit = &pnv_limits_sdvo;
9142	}
9143
9144	if (!crtc_state->clock_set &&
9145	    !pnv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
9146				refclk, NULL, &crtc_state->dpll)) {
9147		drm_err(&dev_priv->drm,
9148			"Couldn't find PLL settings for mode!\n");
9149		return -EINVAL;
9150	}
9151
9152	i9xx_compute_dpll(crtc, crtc_state, NULL);
9153
9154	return 0;
9155}
9156
/*
 * Compute the DPLL state for i9xx. Picks LVDS or SDVO divider limits,
 * finds suitable dividers (unless the encoder already fixed the clock)
 * and fills crtc_state->dpll_hw_state.
 *
 * Returns 0 on success, -EINVAL if no usable divider combination exists.
 */
static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
				   struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct intel_limit *limit;
	int refclk = 96000;

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		/* LVDS may run off the SSC reference from the VBT instead. */
		if (intel_panel_use_ssc(dev_priv)) {
			refclk = dev_priv->vbt.lvds_ssc_freq;
			drm_dbg_kms(&dev_priv->drm,
				    "using SSC reference clock of %d kHz\n",
				    refclk);
		}

		limit = &intel_limits_i9xx_lvds;
	} else {
		limit = &intel_limits_i9xx_sdvo;
	}

	if (!crtc_state->clock_set &&
	    !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
				 refclk, NULL, &crtc_state->dpll)) {
		drm_err(&dev_priv->drm,
			"Couldn't find PLL settings for mode!\n");
		return -EINVAL;
	}

	i9xx_compute_dpll(crtc, crtc_state, NULL);

	return 0;
}
9193
9194static int chv_crtc_compute_clock(struct intel_crtc *crtc,
9195				  struct intel_crtc_state *crtc_state)
9196{
9197	int refclk = 100000;
9198	const struct intel_limit *limit = &intel_limits_chv;
9199	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
9200
9201	memset(&crtc_state->dpll_hw_state, 0,
9202	       sizeof(crtc_state->dpll_hw_state));
9203
9204	if (!crtc_state->clock_set &&
9205	    !chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
9206				refclk, NULL, &crtc_state->dpll)) {
9207		drm_err(&i915->drm, "Couldn't find PLL settings for mode!\n");
9208		return -EINVAL;
9209	}
9210
9211	chv_compute_dpll(crtc, crtc_state);
9212
9213	return 0;
9214}
9215
9216static int vlv_crtc_compute_clock(struct intel_crtc *crtc,
9217				  struct intel_crtc_state *crtc_state)
9218{
9219	int refclk = 100000;
9220	const struct intel_limit *limit = &intel_limits_vlv;
9221	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
9222
9223	memset(&crtc_state->dpll_hw_state, 0,
9224	       sizeof(crtc_state->dpll_hw_state));
9225
9226	if (!crtc_state->clock_set &&
9227	    !vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
9228				refclk, NULL, &crtc_state->dpll)) {
9229		drm_err(&i915->drm,  "Couldn't find PLL settings for mode!\n");
9230		return -EINVAL;
9231	}
9232
9233	vlv_compute_dpll(crtc, crtc_state);
9234
9235	return 0;
9236}
9237
9238static bool i9xx_has_pfit(struct drm_i915_private *dev_priv)
9239{
9240	if (IS_I830(dev_priv))
9241		return false;
9242
9243	return INTEL_GEN(dev_priv) >= 4 ||
9244		IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
9245}
9246
/*
 * Read back the panel fitter state into @crtc_state, but only if the
 * pfit exists, is enabled, and is attached to this crtc's pipe.
 */
static void i9xx_get_pfit_config(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 tmp;

	if (!i9xx_has_pfit(dev_priv))
		return;

	tmp = intel_de_read(dev_priv, PFIT_CONTROL);
	if (!(tmp & PFIT_ENABLE))
		return;

	/* Check whether the pfit is attached to our pipe. */
	if (INTEL_GEN(dev_priv) < 4) {
		/* pre-gen4: the pfit can only serve pipe B */
		if (crtc->pipe != PIPE_B)
			return;
	} else {
		/* gen4+: PFIT_CONTROL records which pipe owns the pfit */
		if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
			return;
	}

	crtc_state->gmch_pfit.control = tmp;
	crtc_state->gmch_pfit.pgm_ratios =
		intel_de_read(dev_priv, PFIT_PGM_RATIOS);
}
9273
/*
 * Read the VLV DPLL dividers back from DPIO and compute the resulting
 * port clock into @pipe_config->port_clock.
 */
static void vlv_crtc_clock_get(struct intel_crtc *crtc,
			       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	struct dpll clock;
	u32 mdiv;
	int refclk = 100000;

	/* In case of DSI, DPLL will not be used */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	vlv_dpio_get(dev_priv);
	mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
	vlv_dpio_put(dev_priv);

	/* unpack the M/N/P dividers from PLL_DW3 */
	clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
	clock.m2 = mdiv & DPIO_M2DIV_MASK;
	clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
	clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
	clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;

	pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
}
9300
/*
 * Read back the primary plane's hardware state (control, surface base,
 * stride, size) into a freshly allocated intel_framebuffer and store it
 * in @plane_config, so the framebuffer already being scanned out can be
 * described. Returns silently if the plane is disabled or the fb
 * allocation fails (plane_config->fb is then left unset).
 */
static void
i9xx_get_initial_plane_config(struct intel_crtc *crtc,
			      struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	enum pipe pipe;
	u32 val, base, offset;
	int fourcc, pixel_format;
	unsigned int aligned_height;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;

	/* nothing to do if the plane isn't enabled */
	if (!plane->get_hw_state(plane, &pipe))
		return;

	drm_WARN_ON(dev, pipe != crtc->pipe);

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb) {
		drm_dbg_kms(&dev_priv->drm, "failed to alloc fb\n");
		return;
	}

	fb = &intel_fb->base;

	fb->dev = dev;

	val = intel_de_read(dev_priv, DSPCNTR(i9xx_plane));

	/* tiling and rotation bits only exist on gen4+ */
	if (INTEL_GEN(dev_priv) >= 4) {
		if (val & DISPPLANE_TILED) {
			plane_config->tiling = I915_TILING_X;
			fb->modifier = I915_FORMAT_MOD_X_TILED;
		}

		if (val & DISPPLANE_ROTATE_180)
			plane_config->rotation = DRM_MODE_ROTATE_180;
	}

	/* horizontal mirroring is only read out on CHV pipe B */
	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B &&
	    val & DISPPLANE_MIRROR)
		plane_config->rotation |= DRM_MODE_REFLECT_X;

	pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
	fourcc = i9xx_format_to_fourcc(pixel_format);
	fb->format = drm_format_info(fourcc);

	/* the surface base/offset registers differ per platform */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		offset = intel_de_read(dev_priv, DSPOFFSET(i9xx_plane));
		base = intel_de_read(dev_priv, DSPSURF(i9xx_plane)) & 0xfffff000;
	} else if (INTEL_GEN(dev_priv) >= 4) {
		if (plane_config->tiling)
			offset = intel_de_read(dev_priv,
					       DSPTILEOFF(i9xx_plane));
		else
			offset = intel_de_read(dev_priv,
					       DSPLINOFF(i9xx_plane));
		base = intel_de_read(dev_priv, DSPSURF(i9xx_plane)) & 0xfffff000;
	} else {
		base = intel_de_read(dev_priv, DSPADDR(i9xx_plane));
	}
	plane_config->base = base;

	/* derive the fb size from the pipe source size */
	val = intel_de_read(dev_priv, PIPESRC(pipe));
	fb->width = ((val >> 16) & 0xfff) + 1;
	fb->height = ((val >> 0) & 0xfff) + 1;

	val = intel_de_read(dev_priv, DSPSTRIDE(i9xx_plane));
	fb->pitches[0] = val & 0xffffffc0;

	aligned_height = intel_fb_align_height(fb, 0, fb->height);

	plane_config->size = fb->pitches[0] * aligned_height;

	drm_dbg_kms(&dev_priv->drm,
		    "%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
		    crtc->base.name, plane->base.name, fb->width, fb->height,
		    fb->format->cpp[0] * 8, base, fb->pitches[0],
		    plane_config->size);

	plane_config->fb = intel_fb;
}
9386
/*
 * Read the CHV DPLL dividers back from DPIO and compute the resulting
 * port clock into @pipe_config->port_clock.
 */
static void chv_crtc_clock_get(struct intel_crtc *crtc,
			       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	struct dpll clock;
	u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
	int refclk = 100000;

	/* In case of DSI, DPLL will not be used */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	vlv_dpio_get(dev_priv);
	cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
	pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
	pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
	pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
	pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
	vlv_dpio_put(dev_priv);

	clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
	/* integer part of M2 in the upper bits ... */
	clock.m2 = (pll_dw0 & 0xff) << 22;
	/* ... plus the fractional part, when enabled */
	if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
		clock.m2 |= pll_dw2 & 0x3fffff;
	clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
	clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
	clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;

	pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
}
9420
9421static enum intel_output_format
9422bdw_get_pipemisc_output_format(struct intel_crtc *crtc)
9423{
9424	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9425	u32 tmp;
9426
9427	tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe));
9428
9429	if (tmp & PIPEMISC_YUV420_ENABLE) {
9430		/* We support 4:2:0 in full blend mode only */
9431		drm_WARN_ON(&dev_priv->drm,
9432			    (tmp & PIPEMISC_YUV420_MODE_FULL_BLEND) == 0);
9433
9434		return INTEL_OUTPUT_FORMAT_YCBCR420;
9435	} else if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) {
9436		return INTEL_OUTPUT_FORMAT_YCBCR444;
9437	} else {
9438		return INTEL_OUTPUT_FORMAT_RGB;
9439	}
9440}
9441
9442static void i9xx_get_pipe_color_config(struct intel_crtc_state *crtc_state)
9443{
9444	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
9445	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
9446	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9447	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
9448	u32 tmp;
9449
9450	tmp = intel_de_read(dev_priv, DSPCNTR(i9xx_plane));
9451
9452	if (tmp & DISPPLANE_GAMMA_ENABLE)
9453		crtc_state->gamma_enable = true;
9454
9455	if (!HAS_GMCH(dev_priv) &&
9456	    tmp & DISPPLANE_PIPE_CSC_ENABLE)
9457		crtc_state->csc_enable = true;
9458}
9459
/*
 * Read out the complete pipe hardware state on gmch-style platforms:
 * PIPECONF (bpc, color range, gamma, double wide), timings, source
 * size, panel fitter, DPLL state and clock.
 *
 * Returns true and fills @pipe_config when the pipe's power domain is
 * up and the pipe is enabled; false otherwise.
 */
static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
				 struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;
	u32 tmp;
	bool ret;

	/* bail out (without touching registers) if the pipe is powered down */
	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wakeref)
		return false;

	pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = NULL;

	ret = false;

	tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe));
	if (!(tmp & PIPECONF_ENABLE))
		goto out;

	/* only g4x and later have the bpc bits in PIPECONF */
	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)) {
		switch (tmp & PIPECONF_BPC_MASK) {
		case PIPECONF_6BPC:
			pipe_config->pipe_bpp = 18;
			break;
		case PIPECONF_8BPC:
			pipe_config->pipe_bpp = 24;
			break;
		case PIPECONF_10BPC:
			pipe_config->pipe_bpp = 30;
			break;
		default:
			break;
		}
	}

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    (tmp & PIPECONF_COLOR_RANGE_SELECT))
		pipe_config->limited_color_range = true;

	pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_I9XX) >>
		PIPECONF_GAMMA_MODE_SHIFT;

	if (IS_CHERRYVIEW(dev_priv))
		pipe_config->cgm_mode = intel_de_read(dev_priv,
						      CGM_PIPE_MODE(crtc->pipe));

	i9xx_get_pipe_color_config(pipe_config);
	intel_color_get_config(pipe_config);

	if (INTEL_GEN(dev_priv) < 4)
		pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;

	intel_get_pipe_timings(crtc, pipe_config);
	intel_get_pipe_src_size(crtc, pipe_config);

	i9xx_get_pfit_config(pipe_config);

	/* read back the pixel multiplier; its location varies by platform */
	if (INTEL_GEN(dev_priv) >= 4) {
		/* No way to read it out on pipes B and C */
		if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
			tmp = dev_priv->chv_dpll_md[crtc->pipe];
		else
			tmp = intel_de_read(dev_priv, DPLL_MD(crtc->pipe));
		pipe_config->pixel_multiplier =
			((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
			 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
		pipe_config->dpll_hw_state.dpll_md = tmp;
	} else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
		   IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
		tmp = intel_de_read(dev_priv, DPLL(crtc->pipe));
		pipe_config->pixel_multiplier =
			((tmp & SDVO_MULTIPLIER_MASK)
			 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
	} else {
		/* Note that on i915G/GM the pixel multiplier is in the sdvo
		 * port and will be fixed up in the encoder->get_config
		 * function. */
		pipe_config->pixel_multiplier = 1;
	}
	pipe_config->dpll_hw_state.dpll = intel_de_read(dev_priv,
							DPLL(crtc->pipe));
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
		pipe_config->dpll_hw_state.fp0 = intel_de_read(dev_priv,
							       FP0(crtc->pipe));
		pipe_config->dpll_hw_state.fp1 = intel_de_read(dev_priv,
							       FP1(crtc->pipe));
	} else {
		/* Mask out read-only status bits. */
		pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
						     DPLL_PORTC_READY_MASK |
						     DPLL_PORTB_READY_MASK);
	}

	if (IS_CHERRYVIEW(dev_priv))
		chv_crtc_clock_get(crtc, pipe_config);
	else if (IS_VALLEYVIEW(dev_priv))
		vlv_crtc_clock_get(crtc, pipe_config);
	else
		i9xx_crtc_clock_get(crtc, pipe_config);

	/*
	 * Normally the dotclock is filled in by the encoder .get_config()
	 * but in case the pipe is enabled w/o any ports we need a sane
	 * default.
	 */
	pipe_config->hw.adjusted_mode.crtc_clock =
		pipe_config->port_clock / pipe_config->pixel_multiplier;

	ret = true;

out:
	intel_display_power_put(dev_priv, power_domain, wakeref);

	return ret;
}
9581
9582static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
9583{
9584	struct intel_encoder *encoder;
9585	int i;
9586	u32 val, final;
9587	bool has_lvds = false;
9588	bool has_cpu_edp = false;
9589	bool has_panel = false;
9590	bool has_ck505 = false;
9591	bool can_ssc = false;
9592	bool using_ssc_source = false;
9593
9594	/* We need to take the global config into account */
9595	for_each_intel_encoder(&dev_priv->drm, encoder) {
9596		switch (encoder->type) {
9597		case INTEL_OUTPUT_LVDS:
9598			has_panel = true;
9599			has_lvds = true;
9600			break;
9601		case INTEL_OUTPUT_EDP:
9602			has_panel = true;
9603			if (encoder->port == PORT_A)
9604				has_cpu_edp = true;
9605			break;
9606		default:
9607			break;
9608		}
9609	}
9610
9611	if (HAS_PCH_IBX(dev_priv)) {
9612		has_ck505 = dev_priv->vbt.display_clock_mode;
9613		can_ssc = has_ck505;
9614	} else {
9615		has_ck505 = false;
9616		can_ssc = true;
9617	}
9618
9619	/* Check if any DPLLs are using the SSC source */
9620	for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
9621		u32 temp = intel_de_read(dev_priv, PCH_DPLL(i));
9622
9623		if (!(temp & DPLL_VCO_ENABLE))
9624			continue;
9625
9626		if ((temp & PLL_REF_INPUT_MASK) ==
9627		    PLLB_REF_INPUT_SPREADSPECTRUMIN) {
9628			using_ssc_source = true;
9629			break;
9630		}
9631	}
9632
9633	drm_dbg_kms(&dev_priv->drm,
9634		    "has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
9635		    has_panel, has_lvds, has_ck505, using_ssc_source);
9636
9637	/* Ironlake: try to setup display ref clock before DPLL
9638	 * enabling. This is only under driver's control after
9639	 * PCH B stepping, previous chipset stepping should be
9640	 * ignoring this setting.
9641	 */
9642	val = intel_de_read(dev_priv, PCH_DREF_CONTROL);
9643
9644	/* As we must carefully and slowly disable/enable each source in turn,
9645	 * compute the final state we want first and check if we need to
9646	 * make any changes at all.
9647	 */
9648	final = val;
9649	final &= ~DREF_NONSPREAD_SOURCE_MASK;
9650	if (has_ck505)
9651		final |= DREF_NONSPREAD_CK505_ENABLE;
9652	else
9653		final |= DREF_NONSPREAD_SOURCE_ENABLE;
9654
9655	final &= ~DREF_SSC_SOURCE_MASK;
9656	final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
9657	final &= ~DREF_SSC1_ENABLE;
9658
9659	if (has_panel) {
9660		final |= DREF_SSC_SOURCE_ENABLE;
9661
9662		if (intel_panel_use_ssc(dev_priv) && can_ssc)
9663			final |= DREF_SSC1_ENABLE;
9664
9665		if (has_cpu_edp) {
9666			if (intel_panel_use_ssc(dev_priv) && can_ssc)
9667				final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
9668			else
9669				final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
9670		} else
9671			final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
9672	} else if (using_ssc_source) {
9673		final |= DREF_SSC_SOURCE_ENABLE;
9674		final |= DREF_SSC1_ENABLE;
9675	}
9676
9677	if (final == val)
9678		return;
9679
9680	/* Always enable nonspread source */
9681	val &= ~DREF_NONSPREAD_SOURCE_MASK;
9682
9683	if (has_ck505)
9684		val |= DREF_NONSPREAD_CK505_ENABLE;
9685	else
9686		val |= DREF_NONSPREAD_SOURCE_ENABLE;
9687
9688	if (has_panel) {
9689		val &= ~DREF_SSC_SOURCE_MASK;
9690		val |= DREF_SSC_SOURCE_ENABLE;
9691
9692		/* SSC must be turned on before enabling the CPU output  */
9693		if (intel_panel_use_ssc(dev_priv) && can_ssc) {
9694			drm_dbg_kms(&dev_priv->drm, "Using SSC on panel\n");
9695			val |= DREF_SSC1_ENABLE;
9696		} else
9697			val &= ~DREF_SSC1_ENABLE;
9698
9699		/* Get SSC going before enabling the outputs */
9700		intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
9701		intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
9702		udelay(200);
9703
9704		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
9705
9706		/* Enable CPU source on CPU attached eDP */
9707		if (has_cpu_edp) {
9708			if (intel_panel_use_ssc(dev_priv) && can_ssc) {
9709				drm_dbg_kms(&dev_priv->drm,
9710					    "Using SSC on eDP\n");
9711				val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
9712			} else
9713				val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
9714		} else
9715			val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
9716
9717		intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
9718		intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
9719		udelay(200);
9720	} else {
9721		drm_dbg_kms(&dev_priv->drm, "Disabling CPU source output\n");
9722
9723		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
9724
9725		/* Turn off CPU output */
9726		val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
9727
9728		intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
9729		intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
9730		udelay(200);
9731
9732		if (!using_ssc_source) {
9733			drm_dbg_kms(&dev_priv->drm, "Disabling SSC source\n");
9734
9735			/* Turn off the SSC source */
9736			val &= ~DREF_SSC_SOURCE_MASK;
9737			val |= DREF_SSC_SOURCE_DISABLE;
9738
9739			/* Turn off SSC1 */
9740			val &= ~DREF_SSC1_ENABLE;
9741
9742			intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
9743			intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
9744			udelay(200);
9745		}
9746	}
9747
9748	BUG_ON(val != final);
9749}
9750
/*
 * Pulse the FDI mPHY reset via the IOSF sideband reset control in
 * SOUTH_CHICKEN2: assert, wait for the status bit, de-assert, wait for it
 * to clear. Timeouts are only logged; the sequence continues regardless.
 */
static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
{
	u32 tmp;

	/* Assert the mPHY reset. */
	tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2);
	tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
	intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp);

	/* Wait up to 100 us for the hardware to acknowledge the assert. */
	if (wait_for_us(intel_de_read(dev_priv, SOUTH_CHICKEN2) &
			FDI_MPHY_IOSFSB_RESET_STATUS, 100))
		drm_err(&dev_priv->drm, "FDI mPHY reset assert timeout\n");

	/* De-assert the reset again ... */
	tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2);
	tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
	intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp);

	/* ... and wait up to 100 us for the status bit to clear. */
	if (wait_for_us((intel_de_read(dev_priv, SOUTH_CHICKEN2) &
			 FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
		drm_err(&dev_priv->drm, "FDI mPHY reset de-assert timeout\n");
}
9771
/* WaMPhyProgramming:hsw */
/*
 * Program the FDI mPHY over the SBI_MPHY sideband. The offsets and values
 * are opaque workaround tables; the registers come in 0x20xx/0x21xx pairs,
 * presumably one per FDI mPHY channel — TODO confirm against the
 * WaMPhyProgramming:hsw tables. Caller must hold the sideband lock
 * (called from lpt_enable_clkout_dp() under sb_lock).
 */
static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
{
	u32 tmp;

	tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
	tmp &= ~(0xFF << 24);
	tmp |= (0x12 << 24);
	intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
	intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
	intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
	tmp &= ~(7 << 13);
	tmp |= (5 << 13);
	intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
	tmp &= ~(7 << 13);
	tmp |= (5 << 13);
	intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
	tmp &= ~0xFF;
	tmp |= 0x1C;
	intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
	tmp &= ~0xFF;
	tmp |= 0x1C;
	intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	tmp |= (0x1C << 16);
	intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	tmp |= (0x1C << 16);
	intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
	tmp |= (1 << 27);
	intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
	tmp |= (1 << 27);
	intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
	tmp &= ~(0xF << 28);
	tmp |= (4 << 28);
	intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
	tmp &= ~(0xF << 28);
	tmp |= (4 << 28);
	intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
}
9846
/* Implements 3 different sequences from BSpec chapter "Display iCLK
 * Programming" based on the parameters passed:
 * - Sequence to enable CLKOUT_DP
 * - Sequence to enable CLKOUT_DP without spread
 * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
 *
 * @with_spread: enable spread spectrum on the clock output
 * @with_fdi: also reset and program the FDI mPHY (implies @with_spread,
 *            and is rejected on LP PCH which has no FDI)
 */
static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
				 bool with_spread, bool with_fdi)
{
	u32 reg, tmp;

	/* Sanitize contradictory parameter combinations (warn + fix up). */
	if (drm_WARN(&dev_priv->drm, with_fdi && !with_spread,
		     "FDI requires downspread\n"))
		with_spread = true;
	if (drm_WARN(&dev_priv->drm, HAS_PCH_LPT_LP(dev_priv) &&
		     with_fdi, "LP PCH doesn't have FDI\n"))
		with_fdi = false;

	/* All SBI accesses below must happen under the sideband lock. */
	mutex_lock(&dev_priv->sb_lock);

	/*
	 * Un-gate the SSC while keeping PATHALT set — presumably the
	 * non-spread bypass path; cleared below only when spread is wanted.
	 */
	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	tmp &= ~SBI_SSCCTL_DISABLE;
	tmp |= SBI_SSCCTL_PATHALT;
	intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

	udelay(24);

	if (with_spread) {
		/* Leave the bypass path so the spread actually applies. */
		tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
		tmp &= ~SBI_SSCCTL_PATHALT;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

		if (with_fdi) {
			lpt_reset_fdi_mphy(dev_priv);
			lpt_program_fdi_mphy(dev_priv);
		}
	}

	/* The buffer config bit lives in GEN0 on LP PCH, DBUFF0 otherwise. */
	reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}
9892
/* Sequence to disable CLKOUT_DP */
void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv)
{
	u32 reg, tmp;

	mutex_lock(&dev_priv->sb_lock);

	/*
	 * Clear the buffer config bit first (the enable sequence sets it);
	 * GEN0 on LP PCH, DBUFF0 otherwise.
	 */
	reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	/*
	 * If the SSC is still running, first switch to the PATHALT path
	 * (if not already on it) and settle, then gate the SSC entirely.
	 */
	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	if (!(tmp & SBI_SSCCTL_DISABLE)) {
		if (!(tmp & SBI_SSCCTL_PATHALT)) {
			tmp |= SBI_SSCCTL_PATHALT;
			intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
			udelay(32);
		}
		tmp |= SBI_SSCCTL_DISABLE;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
	}

	mutex_unlock(&dev_priv->sb_lock);
}
9918
/*
 * Map a bend amount in "steps" (-50..+50, multiples of 5) to an index
 * into the sscdivintphase[] table below: -50 -> 0, 0 -> 10, +50 -> 20.
 */
#define BEND_IDX(steps) ((50 + (steps)) / 5)

/*
 * Per-bend values for the low 16 bits of the SBI SSCDIVINTPHASE register,
 * consumed by lpt_bend_clkout_dp(). Neighboring steps often share the same
 * divider value; the half-step difference is handled via SSCDITHPHASE.
 */
static const u16 sscdivintphase[] = {
	[BEND_IDX( 50)] = 0x3B23,
	[BEND_IDX( 45)] = 0x3B23,
	[BEND_IDX( 40)] = 0x3C23,
	[BEND_IDX( 35)] = 0x3C23,
	[BEND_IDX( 30)] = 0x3D23,
	[BEND_IDX( 25)] = 0x3D23,
	[BEND_IDX( 20)] = 0x3E23,
	[BEND_IDX( 15)] = 0x3E23,
	[BEND_IDX( 10)] = 0x3F23,
	[BEND_IDX(  5)] = 0x3F23,
	[BEND_IDX(  0)] = 0x0025,
	[BEND_IDX( -5)] = 0x0025,
	[BEND_IDX(-10)] = 0x0125,
	[BEND_IDX(-15)] = 0x0125,
	[BEND_IDX(-20)] = 0x0225,
	[BEND_IDX(-25)] = 0x0225,
	[BEND_IDX(-30)] = 0x0325,
	[BEND_IDX(-35)] = 0x0325,
	[BEND_IDX(-40)] = 0x0425,
	[BEND_IDX(-45)] = 0x0425,
	[BEND_IDX(-50)] = 0x0525,
};
9944
9945/*
9946 * Bend CLKOUT_DP
9947 * steps -50 to 50 inclusive, in steps of 5
9948 * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz)
9949 * change in clock period = -(steps / 10) * 5.787 ps
9950 */
9951static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
9952{
9953	u32 tmp;
9954	int idx = BEND_IDX(steps);
9955
9956	if (drm_WARN_ON(&dev_priv->drm, steps % 5 != 0))
9957		return;
9958
9959	if (drm_WARN_ON(&dev_priv->drm, idx >= ARRAY_SIZE(sscdivintphase)))
9960		return;
9961
9962	mutex_lock(&dev_priv->sb_lock);
9963
9964	if (steps % 10 != 0)
9965		tmp = 0xAAAAAAAB;
9966	else
9967		tmp = 0x00000000;
9968	intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK);
9969
9970	tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK);
9971	tmp &= 0xffff0000;
9972	tmp |= sscdivintphase[idx];
9973	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);
9974
9975	mutex_unlock(&dev_priv->sb_lock);
9976}
9977
9978#undef BEND_IDX
9979
9980static bool spll_uses_pch_ssc(struct drm_i915_private *dev_priv)
9981{
9982	u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP);
9983	u32 ctl = intel_de_read(dev_priv, SPLL_CTL);
9984
9985	if ((ctl & SPLL_PLL_ENABLE) == 0)
9986		return false;
9987
9988	if ((ctl & SPLL_REF_MASK) == SPLL_REF_MUXED_SSC &&
9989	    (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
9990		return true;
9991
9992	if (IS_BROADWELL(dev_priv) &&
9993	    (ctl & SPLL_REF_MASK) == SPLL_REF_PCH_SSC_BDW)
9994		return true;
9995
9996	return false;
9997}
9998
9999static bool wrpll_uses_pch_ssc(struct drm_i915_private *dev_priv,
10000			       enum intel_dpll_id id)
10001{
10002	u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP);
10003	u32 ctl = intel_de_read(dev_priv, WRPLL_CTL(id));
10004
10005	if ((ctl & WRPLL_PLL_ENABLE) == 0)
10006		return false;
10007
10008	if ((ctl & WRPLL_REF_MASK) == WRPLL_REF_PCH_SSC)
10009		return true;
10010
10011	if ((IS_BROADWELL(dev_priv) || IS_HSW_ULT(dev_priv)) &&
10012	    (ctl & WRPLL_REF_MASK) == WRPLL_REF_MUXED_SSC_BDW &&
10013	    (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
10014		return true;
10015
10016	return false;
10017}
10018
/*
 * Initialize the LPT PCH reference clocks (CLKOUT_DP and clock bending),
 * taking care not to disturb a PCH SSC reference the BIOS may have left
 * in use by an active PLL.
 */
static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	bool has_fdi = false;

	/* The analog (CRT) output is the FDI consumer we care about here. */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		switch (encoder->type) {
		case INTEL_OUTPUT_ANALOG:
			has_fdi = true;
			break;
		default:
			break;
		}
	}

	/*
	 * The BIOS may have decided to use the PCH SSC
	 * reference so we must not disable it until the
	 * relevant PLLs have stopped relying on it. We'll
	 * just leave the PCH SSC reference enabled in case
	 * any active PLL is using it. It will get disabled
	 * after runtime suspend if we don't have FDI.
	 *
	 * TODO: Move the whole reference clock handling
	 * to the modeset sequence proper so that we can
	 * actually enable/disable/reconfigure these things
	 * safely. To do that we need to introduce a real
	 * clock hierarchy. That would also allow us to do
	 * clock bending finally.
	 */
	/* Record which PLLs are currently consuming the PCH SSC reference. */
	dev_priv->pch_ssc_use = 0;

	if (spll_uses_pch_ssc(dev_priv)) {
		drm_dbg_kms(&dev_priv->drm, "SPLL using PCH SSC\n");
		dev_priv->pch_ssc_use |= BIT(DPLL_ID_SPLL);
	}

	if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL1)) {
		drm_dbg_kms(&dev_priv->drm, "WRPLL1 using PCH SSC\n");
		dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL1);
	}

	if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL2)) {
		drm_dbg_kms(&dev_priv->drm, "WRPLL2 using PCH SSC\n");
		dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL2);
	}

	/* Leave everything alone while any PLL still uses the PCH SSC. */
	if (dev_priv->pch_ssc_use)
		return;

	/* No SSC users: enable CLKOUT_DP (with spread, for FDI) or gate it. */
	if (has_fdi) {
		lpt_bend_clkout_dp(dev_priv, 0);
		lpt_enable_clkout_dp(dev_priv, true, true);
	} else {
		lpt_disable_clkout_dp(dev_priv);
	}
}
10076
10077/*
10078 * Initialize reference clocks when the driver loads
10079 */
10080void intel_init_pch_refclk(struct drm_i915_private *dev_priv)
10081{
10082	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
10083		ilk_init_pch_refclk(dev_priv);
10084	else if (HAS_PCH_LPT(dev_priv))
10085		lpt_init_pch_refclk(dev_priv);
10086}
10087
/*
 * Program PIPECONF for ILK-style pipes: bits-per-color, dithering,
 * interlace mode, color range select, gamma mode and frame start delay.
 */
static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	u32 val;

	val = 0;

	/* pipe_bpp counts all three channels, hence 18/24/30/36. */
	switch (crtc_state->pipe_bpp) {
	case 18:
		val |= PIPECONF_6BPC;
		break;
	case 24:
		val |= PIPECONF_8BPC;
		break;
	case 30:
		val |= PIPECONF_10BPC;
		break;
	case 36:
		val |= PIPECONF_12BPC;
		break;
	default:
		/* Case prevented by intel_choose_pipe_bpp_dither. */
		BUG();
	}

	if (crtc_state->dither)
		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);

	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
		val |= PIPECONF_INTERLACED_ILK;
	else
		val |= PIPECONF_PROGRESSIVE;

	/*
	 * This would end up with an odd purple hue over
	 * the entire display. Make sure we don't do it.
	 */
	drm_WARN_ON(&dev_priv->drm, crtc_state->limited_color_range &&
		    crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);

	/*
	 * NOTE(review): SDVO outputs skip the pipe's range select —
	 * presumably range compression is handled by the SDVO encoder
	 * itself; confirm against the SDVO code.
	 */
	if (crtc_state->limited_color_range &&
	    !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
		val |= PIPECONF_COLOR_RANGE_SELECT;

	if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
		val |= PIPECONF_OUTPUT_COLORSPACE_YUV709;

	val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);

	/* No extra frame start delay. */
	val |= PIPECONF_FRAME_START_DELAY(0);

	intel_de_write(dev_priv, PIPECONF(pipe), val);
	intel_de_posting_read(dev_priv, PIPECONF(pipe));
}
10144
10145static void hsw_set_pipeconf(const struct intel_crtc_state *crtc_state)
10146{
10147	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
10148	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10149	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
10150	u32 val = 0;
10151
10152	if (IS_HASWELL(dev_priv) && crtc_state->dither)
10153		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
10154
10155	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
10156		val |= PIPECONF_INTERLACED_ILK;
10157	else
10158		val |= PIPECONF_PROGRESSIVE;
10159
10160	if (IS_HASWELL(dev_priv) &&
10161	    crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
10162		val |= PIPECONF_OUTPUT_COLORSPACE_YUV_HSW;
10163
10164	intel_de_write(dev_priv, PIPECONF(cpu_transcoder), val);
10165	intel_de_posting_read(dev_priv, PIPECONF(cpu_transcoder));
10166}
10167
/*
 * Program PIPEMISC for BDW+ pipes: dithering BPC and enable, output
 * colorspace, YCbCr 4:2:0 subsampling, HDR precision and pixel rounding.
 */
static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 val = 0;

	/* pipe_bpp counts all three channels, hence 18/24/30/36. */
	switch (crtc_state->pipe_bpp) {
	case 18:
		val |= PIPEMISC_DITHER_6_BPC;
		break;
	case 24:
		val |= PIPEMISC_DITHER_8_BPC;
		break;
	case 30:
		val |= PIPEMISC_DITHER_10_BPC;
		break;
	case 36:
		val |= PIPEMISC_DITHER_12_BPC;
		break;
	default:
		MISSING_CASE(crtc_state->pipe_bpp);
		break;
	}

	if (crtc_state->dither)
		val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;

	/* Both 4:4:4 and 4:2:0 YCbCr select the YUV colorspace ... */
	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
	    crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
		val |= PIPEMISC_OUTPUT_COLORSPACE_YUV;

	/* ... but only 4:2:0 enables the subsampling machinery. */
	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
		val |= PIPEMISC_YUV420_ENABLE |
			PIPEMISC_YUV420_MODE_FULL_BLEND;

	/*
	 * Gen11+: set HDR mode precision only when every active plane
	 * (cursor excepted) is an HDR-capable plane.
	 */
	if (INTEL_GEN(dev_priv) >= 11 &&
	    (crtc_state->active_planes & ~(icl_hdr_plane_mask() |
					   BIT(PLANE_CURSOR))) == 0)
		val |= PIPEMISC_HDR_MODE_PRECISION;

	if (INTEL_GEN(dev_priv) >= 12)
		val |= PIPEMISC_PIXEL_ROUNDING_TRUNC;

	intel_de_write(dev_priv, PIPEMISC(crtc->pipe), val);
}
10213
10214int bdw_get_pipemisc_bpp(struct intel_crtc *crtc)
10215{
10216	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10217	u32 tmp;
10218
10219	tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe));
10220
10221	switch (tmp & PIPEMISC_DITHER_BPC_MASK) {
10222	case PIPEMISC_DITHER_6_BPC:
10223		return 18;
10224	case PIPEMISC_DITHER_8_BPC:
10225		return 24;
10226	case PIPEMISC_DITHER_10_BPC:
10227		return 30;
10228	case PIPEMISC_DITHER_12_BPC:
10229		return 36;
10230	default:
10231		MISSING_CASE(tmp);
10232		return 0;
10233	}
10234}
10235
/*
 * Number of FDI lanes needed to carry @target_clock (kHz) at @bpp bits
 * per pixel over lanes of @link_bw (kHz symbol rate, 8 bits/symbol).
 */
int ilk_get_lanes_required(int target_clock, int link_bw, int bpp)
{
	/*
	 * Derate the payload by 5% (max center spread is 2.5%, doubled
	 * for safety) so spread spectrum can't oversubscribe the link.
	 */
	unsigned int padded_bps = (unsigned int)target_clock * bpp * 21 / 20;
	unsigned int lane_bps = (unsigned int)link_bw * 8;

	/* Round up: a fractional lane requirement still costs a whole lane. */
	return (padded_bps + lane_bps - 1) / lane_bps;
}
10246
/*
 * True when the DPLL's effective M value falls below @factor times N;
 * the caller (ilk_compute_dpll) then sets FP_CB_TUNE in the FP register.
 */
static bool ilk_needs_fb_cb_tune(struct dpll *dpll, int factor)
{
	return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
}
10251
/*
 * Compute the ILK DPLL register values (dpll, fp0, fp1) for the given
 * crtc state and store them in crtc_state->dpll_hw_state. @reduced_clock,
 * if non-NULL, provides the divider settings used for fp1 (downclocked
 * mode); otherwise fp1 mirrors fp0.
 */
static void ilk_compute_dpll(struct intel_crtc *crtc,
			     struct intel_crtc_state *crtc_state,
			     struct dpll *reduced_clock)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 dpll, fp, fp2;
	int factor;

	/* Enable autotuning of the PLL clock (if permissible) */
	factor = 21;
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		if ((intel_panel_use_ssc(dev_priv) &&
		     dev_priv->vbt.lvds_ssc_freq == 100000) ||
		    (HAS_PCH_IBX(dev_priv) &&
		     intel_is_dual_link_lvds(dev_priv)))
			factor = 25;
	} else if (crtc_state->sdvo_tv_clock) {
		factor = 20;
	}

	fp = i9xx_dpll_compute_fp(&crtc_state->dpll);

	if (ilk_needs_fb_cb_tune(&crtc_state->dpll, factor))
		fp |= FP_CB_TUNE;

	/* fp2 is the downclocked variant; same tuning rule applies. */
	if (reduced_clock) {
		fp2 = i9xx_dpll_compute_fp(reduced_clock);

		if (reduced_clock->m < factor * reduced_clock->n)
			fp2 |= FP_CB_TUNE;
	} else {
		fp2 = fp;
	}

	dpll = 0;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;

	/* Pixel multiplier field is stored as (multiplier - 1). */
	dpll |= (crtc_state->pixel_multiplier - 1)
		<< PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	if (intel_crtc_has_dp_encoder(crtc_state))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/*
	 * The high speed IO clock is only really required for
	 * SDVO/HDMI/DP, but we also enable it for CRT to make it
	 * possible to share the DPLL between CRT and HDMI. Enabling
	 * the clock needlessly does no real harm, except use up a
	 * bit of power potentially.
	 *
	 * We'll limit this to IVB with 3 pipes, since it has only two
	 * DPLLs and so DPLL sharing is the only way to get three pipes
	 * driving PCH ports at the same time. On SNB we could do this,
	 * and potentially avoid enabling the second DPLL, but it's not
	 * clear if it's a win or loss power wise. No point in doing
	 * this on ILK at all since it has a fixed DPLL<->pipe mapping.
	 */
	if (INTEL_NUM_PIPES(dev_priv) == 3 &&
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/* compute bitmask from p1 value */
	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
	/* also FPA1 */
	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;

	switch (crtc_state->dpll.p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}

	/* SSC reference only for LVDS panels that asked for it. */
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
	    intel_panel_use_ssc(dev_priv))
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;

	crtc_state->dpll_hw_state.dpll = dpll;
	crtc_state->dpll_hw_state.fp0 = fp;
	crtc_state->dpll_hw_state.fp1 = fp2;
}
10353
/*
 * Compute the PCH DPLL settings for an ILK-style crtc and reserve a
 * shared DPLL for it. Returns 0 on success (or when no PCH PLL is
 * needed), -EINVAL when no suitable divider settings or no free shared
 * DPLL can be found.
 */
static int ilk_crtc_compute_clock(struct intel_crtc *crtc,
				  struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_atomic_state *state =
		to_intel_atomic_state(crtc_state->uapi.state);
	const struct intel_limit *limit;
	int refclk = 120000;	/* default non-SSC reference, in kHz */

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
	if (!crtc_state->has_pch_encoder)
		return 0;

	/*
	 * LVDS may run off the SSC reference instead; pick the divider
	 * limits matching single/dual link and the 100 MHz SSC case.
	 */
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		if (intel_panel_use_ssc(dev_priv)) {
			drm_dbg_kms(&dev_priv->drm,
				    "using SSC reference clock of %d kHz\n",
				    dev_priv->vbt.lvds_ssc_freq);
			refclk = dev_priv->vbt.lvds_ssc_freq;
		}

		if (intel_is_dual_link_lvds(dev_priv)) {
			if (refclk == 100000)
				limit = &ilk_limits_dual_lvds_100m;
			else
				limit = &ilk_limits_dual_lvds;
		} else {
			if (refclk == 100000)
				limit = &ilk_limits_single_lvds_100m;
			else
				limit = &ilk_limits_single_lvds;
		}
	} else {
		limit = &ilk_limits_dac;
	}

	/* Respect user-forced dividers (clock_set); otherwise search. */
	if (!crtc_state->clock_set &&
	    !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
				refclk, NULL, &crtc_state->dpll)) {
		drm_err(&dev_priv->drm,
			"Couldn't find PLL settings for mode!\n");
		return -EINVAL;
	}

	ilk_compute_dpll(crtc, crtc_state, NULL);

	if (!intel_reserve_shared_dplls(state, crtc, NULL)) {
		drm_dbg_kms(&dev_priv->drm,
			    "failed to find PLL for pipe %c\n",
			    pipe_name(crtc->pipe));
		return -EINVAL;
	}

	return 0;
}
10412
/*
 * Read back the PCH transcoder link M1/N1 and data M1/N1 ratios into
 * @m_n. The DATA_M1 register also carries the TU size in its high bits;
 * the field stores TU-1, hence the +1 below.
 */
static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;

	m_n->link_m = intel_de_read(dev_priv, PCH_TRANS_LINK_M1(pipe));
	m_n->link_n = intel_de_read(dev_priv, PCH_TRANS_LINK_N1(pipe));
	m_n->gmch_m = intel_de_read(dev_priv, PCH_TRANS_DATA_M1(pipe))
		& ~TU_SIZE_MASK;
	m_n->gmch_n = intel_de_read(dev_priv, PCH_TRANS_DATA_N1(pipe));
	m_n->tu = ((intel_de_read(dev_priv, PCH_TRANS_DATA_M1(pipe))
		    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
}
10428
/*
 * Read back the CPU transcoder link/data M/N ratios into @m_n, and the
 * secondary M2/N2 set into @m2_n2 when requested and supported. Gen5+
 * uses transcoder-indexed registers; older hardware uses the pipe-indexed
 * G4X registers. TU size is stored as TU-1 in the DATA_M high bits.
 */
static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
					 enum transcoder transcoder,
					 struct intel_link_m_n *m_n,
					 struct intel_link_m_n *m2_n2)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (INTEL_GEN(dev_priv) >= 5) {
		m_n->link_m = intel_de_read(dev_priv,
					    PIPE_LINK_M1(transcoder));
		m_n->link_n = intel_de_read(dev_priv,
					    PIPE_LINK_N1(transcoder));
		m_n->gmch_m = intel_de_read(dev_priv,
					    PIPE_DATA_M1(transcoder))
			& ~TU_SIZE_MASK;
		m_n->gmch_n = intel_de_read(dev_priv,
					    PIPE_DATA_N1(transcoder));
		m_n->tu = ((intel_de_read(dev_priv, PIPE_DATA_M1(transcoder))
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;

		/* The M2/N2 set only exists on some transcoders. */
		if (m2_n2 && transcoder_has_m2_n2(dev_priv, transcoder)) {
			m2_n2->link_m = intel_de_read(dev_priv,
						      PIPE_LINK_M2(transcoder));
			m2_n2->link_n =	intel_de_read(dev_priv,
							     PIPE_LINK_N2(transcoder));
			m2_n2->gmch_m =	intel_de_read(dev_priv,
							     PIPE_DATA_M2(transcoder))
					& ~TU_SIZE_MASK;
			m2_n2->gmch_n =	intel_de_read(dev_priv,
							     PIPE_DATA_N2(transcoder));
			m2_n2->tu = ((intel_de_read(dev_priv, PIPE_DATA_M2(transcoder))
					& TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
		}
	} else {
		m_n->link_m = intel_de_read(dev_priv, PIPE_LINK_M_G4X(pipe));
		m_n->link_n = intel_de_read(dev_priv, PIPE_LINK_N_G4X(pipe));
		m_n->gmch_m = intel_de_read(dev_priv, PIPE_DATA_M_G4X(pipe))
			& ~TU_SIZE_MASK;
		m_n->gmch_n = intel_de_read(dev_priv, PIPE_DATA_N_G4X(pipe));
		m_n->tu = ((intel_de_read(dev_priv, PIPE_DATA_M_G4X(pipe))
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
	}
}
10473
10474void intel_dp_get_m_n(struct intel_crtc *crtc,
10475		      struct intel_crtc_state *pipe_config)
10476{
10477	if (pipe_config->has_pch_encoder)
10478		intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
10479	else
10480		intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
10481					     &pipe_config->dp_m_n,
10482					     &pipe_config->dp_m2_n2);
10483}
10484
/* Read back the FDI link M/N values from the CPU transcoder registers. */
static void ilk_get_fdi_m_n_config(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config)
{
	intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
				     &pipe_config->fdi_m_n, NULL);
}
10491
10492static void ilk_get_pfit_pos_size(struct intel_crtc_state *crtc_state,
10493				  u32 pos, u32 size)
10494{
10495	drm_rect_init(&crtc_state->pch_pfit.dst,
10496		      pos >> 16, pos & 0xffff,
10497		      size >> 16, size & 0xffff);
10498}
10499
/*
 * Read back the SKL+ panel fitter (pipe scaler) configuration: find the
 * first scaler that is enabled and bound to the pipe (not to a plane),
 * record its window and mark it in the scaler state.
 */
static void skl_get_pfit_config(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state;
	int id = -1;
	int i;

	/* find scaler attached to this pipe */
	for (i = 0; i < crtc->num_scalers; i++) {
		u32 ctl, pos, size;

		/* Skip scalers that are off or assigned to a plane. */
		ctl = intel_de_read(dev_priv, SKL_PS_CTRL(crtc->pipe, i));
		if ((ctl & (PS_SCALER_EN | PS_PLANE_SEL_MASK)) != PS_SCALER_EN)
			continue;

		id = i;
		crtc_state->pch_pfit.enabled = true;

		pos = intel_de_read(dev_priv, SKL_PS_WIN_POS(crtc->pipe, i));
		size = intel_de_read(dev_priv, SKL_PS_WIN_SZ(crtc->pipe, i));

		ilk_get_pfit_pos_size(crtc_state, pos, size);

		/* Only one scaler can act as the pipe scaler; stop here. */
		scaler_state->scalers[i].in_use = true;
		break;
	}

	/* id == -1 means no pipe scaler is in use. */
	scaler_state->scaler_id = id;
	if (id >= 0)
		scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
	else
		scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
}
10534
/*
 * Read back the hardware state of the primary plane and build a
 * struct intel_framebuffer describing the firmware-programmed scanout
 * buffer, stored in @plane_config (used for initial fb takeover).
 * Returns silently when the plane is disabled; frees the partially
 * built fb and bails on unsupported tiling or 90/270 rotation.
 */
static void
skl_get_initial_plane_config(struct intel_crtc *crtc,
			     struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
	enum plane_id plane_id = plane->id;
	enum pipe pipe;
	u32 val, base, offset, stride_mult, tiling, alpha;
	int fourcc, pixel_format;
	unsigned int aligned_height;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;

	/* Nothing to do if the plane isn't enabled. */
	if (!plane->get_hw_state(plane, &pipe))
		return;

	drm_WARN_ON(dev, pipe != crtc->pipe);

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb) {
		drm_dbg_kms(&dev_priv->drm, "failed to alloc fb\n");
		return;
	}

	fb = &intel_fb->base;

	fb->dev = dev;

	val = intel_de_read(dev_priv, PLANE_CTL(pipe, plane_id));

	/* The pixel format field layout changed on gen11. */
	if (INTEL_GEN(dev_priv) >= 11)
		pixel_format = val & ICL_PLANE_CTL_FORMAT_MASK;
	else
		pixel_format = val & PLANE_CTL_FORMAT_MASK;

	/* Alpha mode moved to PLANE_COLOR_CTL on GLK/gen10+. */
	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
		alpha = intel_de_read(dev_priv,
				      PLANE_COLOR_CTL(pipe, plane_id));
		alpha &= PLANE_COLOR_ALPHA_MASK;
	} else {
		alpha = val & PLANE_CTL_ALPHA_MASK;
	}

	fourcc = skl_format_to_fourcc(pixel_format,
				      val & PLANE_CTL_ORDER_RGBX, alpha);
	fb->format = drm_format_info(fourcc);

	/* Translate the hw tiling + compression bits to a fb modifier. */
	tiling = val & PLANE_CTL_TILED_MASK;
	switch (tiling) {
	case PLANE_CTL_TILED_LINEAR:
		fb->modifier = DRM_FORMAT_MOD_LINEAR;
		break;
	case PLANE_CTL_TILED_X:
		plane_config->tiling = I915_TILING_X;
		fb->modifier = I915_FORMAT_MOD_X_TILED;
		break;
	case PLANE_CTL_TILED_Y:
		plane_config->tiling = I915_TILING_Y;
		if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
			fb->modifier = INTEL_GEN(dev_priv) >= 12 ?
				I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS :
				I915_FORMAT_MOD_Y_TILED_CCS;
		else if (val & PLANE_CTL_MEDIA_DECOMPRESSION_ENABLE)
			fb->modifier = I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS;
		else
			fb->modifier = I915_FORMAT_MOD_Y_TILED;
		break;
	case PLANE_CTL_TILED_YF:
		if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
			fb->modifier = I915_FORMAT_MOD_Yf_TILED_CCS;
		else
			fb->modifier = I915_FORMAT_MOD_Yf_TILED;
		break;
	default:
		MISSING_CASE(tiling);
		goto error;
	}

	/*
	 * DRM_MODE_ROTATE_ is counter clockwise to stay compatible with Xrandr
	 * while i915 HW rotation is clockwise, thats why this swapping.
	 */
	switch (val & PLANE_CTL_ROTATE_MASK) {
	case PLANE_CTL_ROTATE_0:
		plane_config->rotation = DRM_MODE_ROTATE_0;
		break;
	case PLANE_CTL_ROTATE_90:
		plane_config->rotation = DRM_MODE_ROTATE_270;
		break;
	case PLANE_CTL_ROTATE_180:
		plane_config->rotation = DRM_MODE_ROTATE_180;
		break;
	case PLANE_CTL_ROTATE_270:
		plane_config->rotation = DRM_MODE_ROTATE_90;
		break;
	}

	if (INTEL_GEN(dev_priv) >= 10 &&
	    val & PLANE_CTL_FLIP_HORIZONTAL)
		plane_config->rotation |= DRM_MODE_REFLECT_X;

	/* 90/270 degree rotation would require extra work */
	if (drm_rotation_90_or_270(plane_config->rotation))
		goto error;

	/* Surface base address is 4K aligned; low bits are flags/zero. */
	base = intel_de_read(dev_priv, PLANE_SURF(pipe, plane_id)) & 0xfffff000;
	plane_config->base = base;

	offset = intel_de_read(dev_priv, PLANE_OFFSET(pipe, plane_id));

	/* PLANE_SIZE stores (height-1) << 16 | (width-1). */
	val = intel_de_read(dev_priv, PLANE_SIZE(pipe, plane_id));
	fb->height = ((val >> 16) & 0xffff) + 1;
	fb->width = ((val >> 0) & 0xffff) + 1;

	/* Stride register is in units depending on format/modifier. */
	val = intel_de_read(dev_priv, PLANE_STRIDE(pipe, plane_id));
	stride_mult = skl_plane_stride_mult(fb, 0, DRM_MODE_ROTATE_0);
	fb->pitches[0] = (val & 0x3ff) * stride_mult;

	aligned_height = intel_fb_align_height(fb, 0, fb->height);

	plane_config->size = fb->pitches[0] * aligned_height;

	drm_dbg_kms(&dev_priv->drm,
		    "%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
		    crtc->base.name, plane->base.name, fb->width, fb->height,
		    fb->format->cpp[0] * 8, base, fb->pitches[0],
		    plane_config->size);

	/* Hand ownership of intel_fb to the plane_config. */
	plane_config->fb = intel_fb;
	return;

error:
	kfree(intel_fb);
}
10671
/*
 * Read out the ILK-style PCH panel fitter state for the crtc.
 *
 * Leaves crtc_state->pch_pfit untouched when the fitter is disabled;
 * otherwise marks it enabled and decodes the window position/size
 * registers into the crtc state.
 */
static void ilk_get_pfit_config(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 ctl, pos, size;

	ctl = intel_de_read(dev_priv, PF_CTL(crtc->pipe));
	if ((ctl & PF_ENABLE) == 0)
		return;

	crtc_state->pch_pfit.enabled = true;

	pos = intel_de_read(dev_priv, PF_WIN_POS(crtc->pipe));
	size = intel_de_read(dev_priv, PF_WIN_SZ(crtc->pipe));

	ilk_get_pfit_pos_size(crtc_state, pos, size);

	/*
	 * We currently do not free assignments of panel fitters on
	 * ivb/hsw (since we don't use the higher upscaling modes which
	 * differentiate them) so just WARN about this case for now.
	 */
	drm_WARN_ON(&dev_priv->drm, IS_GEN(dev_priv, 7) &&
		    (ctl & PF_PIPE_SEL_MASK_IVB) != PF_PIPE_SEL_IVB(crtc->pipe));
}
10697
10698static bool ilk_get_pipe_config(struct intel_crtc *crtc,
10699				struct intel_crtc_state *pipe_config)
10700{
10701	struct drm_device *dev = crtc->base.dev;
10702	struct drm_i915_private *dev_priv = to_i915(dev);
10703	enum intel_display_power_domain power_domain;
10704	intel_wakeref_t wakeref;
10705	u32 tmp;
10706	bool ret;
10707
10708	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
10709	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
10710	if (!wakeref)
10711		return false;
10712
10713	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
10714	pipe_config->shared_dpll = NULL;
10715
10716	ret = false;
10717	tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe));
10718	if (!(tmp & PIPECONF_ENABLE))
10719		goto out;
10720
10721	switch (tmp & PIPECONF_BPC_MASK) {
10722	case PIPECONF_6BPC:
10723		pipe_config->pipe_bpp = 18;
10724		break;
10725	case PIPECONF_8BPC:
10726		pipe_config->pipe_bpp = 24;
10727		break;
10728	case PIPECONF_10BPC:
10729		pipe_config->pipe_bpp = 30;
10730		break;
10731	case PIPECONF_12BPC:
10732		pipe_config->pipe_bpp = 36;
10733		break;
10734	default:
10735		break;
10736	}
10737
10738	if (tmp & PIPECONF_COLOR_RANGE_SELECT)
10739		pipe_config->limited_color_range = true;
10740
10741	switch (tmp & PIPECONF_OUTPUT_COLORSPACE_MASK) {
10742	case PIPECONF_OUTPUT_COLORSPACE_YUV601:
10743	case PIPECONF_OUTPUT_COLORSPACE_YUV709:
10744		pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
10745		break;
10746	default:
10747		pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
10748		break;
10749	}
10750
10751	pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_ILK) >>
10752		PIPECONF_GAMMA_MODE_SHIFT;
10753
10754	pipe_config->csc_mode = intel_de_read(dev_priv,
10755					      PIPE_CSC_MODE(crtc->pipe));
10756
10757	i9xx_get_pipe_color_config(pipe_config);
10758	intel_color_get_config(pipe_config);
10759
10760	if (intel_de_read(dev_priv, PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
10761		struct intel_shared_dpll *pll;
10762		enum intel_dpll_id pll_id;
10763
10764		pipe_config->has_pch_encoder = true;
10765
10766		tmp = intel_de_read(dev_priv, FDI_RX_CTL(crtc->pipe));
10767		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
10768					  FDI_DP_PORT_WIDTH_SHIFT) + 1;
10769
10770		ilk_get_fdi_m_n_config(crtc, pipe_config);
10771
10772		if (HAS_PCH_IBX(dev_priv)) {
10773			/*
10774			 * The pipe->pch transcoder and pch transcoder->pll
10775			 * mapping is fixed.
10776			 */
10777			pll_id = (enum intel_dpll_id) crtc->pipe;
10778		} else {
10779			tmp = intel_de_read(dev_priv, PCH_DPLL_SEL);
10780			if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
10781				pll_id = DPLL_ID_PCH_PLL_B;
10782			else
10783				pll_id= DPLL_ID_PCH_PLL_A;
10784		}
10785
10786		pipe_config->shared_dpll =
10787			intel_get_shared_dpll_by_id(dev_priv, pll_id);
10788		pll = pipe_config->shared_dpll;
10789
10790		drm_WARN_ON(dev, !pll->info->funcs->get_hw_state(dev_priv, pll,
10791						 &pipe_config->dpll_hw_state));
10792
10793		tmp = pipe_config->dpll_hw_state.dpll;
10794		pipe_config->pixel_multiplier =
10795			((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
10796			 >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
10797
10798		ilk_pch_clock_get(crtc, pipe_config);
10799	} else {
10800		pipe_config->pixel_multiplier = 1;
10801	}
10802
10803	intel_get_pipe_timings(crtc, pipe_config);
10804	intel_get_pipe_src_size(crtc, pipe_config);
10805
10806	ilk_get_pfit_config(pipe_config);
10807
10808	ret = true;
10809
10810out:
10811	intel_display_power_put(dev_priv, power_domain, wakeref);
10812
10813	return ret;
10814}
10815
10816static int hsw_crtc_compute_clock(struct intel_crtc *crtc,
10817				  struct intel_crtc_state *crtc_state)
10818{
10819	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10820	struct intel_atomic_state *state =
10821		to_intel_atomic_state(crtc_state->uapi.state);
10822
10823	if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) ||
10824	    INTEL_GEN(dev_priv) >= 11) {
10825		struct intel_encoder *encoder =
10826			intel_get_crtc_new_encoder(state, crtc_state);
10827
10828		if (!intel_reserve_shared_dplls(state, crtc, encoder)) {
10829			drm_dbg_kms(&dev_priv->drm,
10830				    "failed to find PLL for pipe %c\n",
10831				    pipe_name(crtc->pipe));
10832			return -EINVAL;
10833		}
10834	}
10835
10836	return 0;
10837}
10838
10839static void cnl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
10840			    struct intel_crtc_state *pipe_config)
10841{
10842	enum intel_dpll_id id;
10843	u32 temp;
10844
10845	temp = intel_de_read(dev_priv, DPCLKA_CFGCR0) & DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
10846	id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port);
10847
10848	if (drm_WARN_ON(&dev_priv->drm, id < SKL_DPLL0 || id > SKL_DPLL2))
10849		return;
10850
10851	pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
10852}
10853
/*
 * Read out which DPLL drives @port on ICL+ and record it in the
 * matching per-port dpll slot of @pipe_config, then mark that slot
 * as the active one.
 */
static void icl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
			    struct intel_crtc_state *pipe_config)
{
	enum phy phy = intel_port_to_phy(dev_priv, port);
	enum icl_port_dpll_id port_dpll_id;
	enum intel_dpll_id id;
	u32 temp;

	if (intel_phy_is_combo(dev_priv, phy)) {
		u32 mask, shift;

		/* RKL uses a different DPCLKA_CFGCR0 field layout. */
		if (IS_ROCKETLAKE(dev_priv)) {
			mask = RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy);
			shift = RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy);
		} else {
			mask = ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy);
			shift = ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy);
		}

		temp = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0) & mask;
		id = temp >> shift;
		port_dpll_id = ICL_PORT_DPLL_DEFAULT;
	} else if (intel_phy_is_tc(dev_priv, phy)) {
		u32 clk_sel = intel_de_read(dev_priv, DDI_CLK_SEL(port)) & DDI_CLK_SEL_MASK;

		if (clk_sel == DDI_CLK_SEL_MG) {
			id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv,
								    port));
			port_dpll_id = ICL_PORT_DPLL_MG_PHY;
		} else {
			/* Anything else on a TC phy is expected to be TBT. */
			drm_WARN_ON(&dev_priv->drm,
				    clk_sel < DDI_CLK_SEL_TBT_162);
			id = DPLL_ID_ICL_TBTPLL;
			port_dpll_id = ICL_PORT_DPLL_DEFAULT;
		}
	} else {
		drm_WARN(&dev_priv->drm, 1, "Invalid port %x\n", port);
		return;
	}

	pipe_config->icl_port_dplls[port_dpll_id].pll =
		intel_get_shared_dpll_by_id(dev_priv, id);

	icl_set_active_port_dpll(pipe_config, port_dpll_id);
}
10899
10900static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
10901				enum port port,
10902				struct intel_crtc_state *pipe_config)
10903{
10904	enum intel_dpll_id id;
10905
10906	switch (port) {
10907	case PORT_A:
10908		id = DPLL_ID_SKL_DPLL0;
10909		break;
10910	case PORT_B:
10911		id = DPLL_ID_SKL_DPLL1;
10912		break;
10913	case PORT_C:
10914		id = DPLL_ID_SKL_DPLL2;
10915		break;
10916	default:
10917		drm_err(&dev_priv->drm, "Incorrect port type\n");
10918		return;
10919	}
10920
10921	pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
10922}
10923
/*
 * Read out which shared DPLL (DPLL0-3) feeds @port from DPLL_CTRL2
 * and record it in @pipe_config.
 */
static void skl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
			    struct intel_crtc_state *pipe_config)
{
	enum intel_dpll_id id;
	u32 temp;

	temp = intel_de_read(dev_priv, DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
	/*
	 * NOTE(review): open-coded field shift (3 bits per port plus one) —
	 * consider using the matching *_DDI_CLK_SEL_SHIFT macro for
	 * consistency with cnl_get_ddi_pll(); confirm against i915_reg.h.
	 */
	id = temp >> (port * 3 + 1);

	if (drm_WARN_ON(&dev_priv->drm, id < SKL_DPLL0 || id > SKL_DPLL3))
		return;

	pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
}
10938
/*
 * Read out which PLL (WRPLL/SPLL/LCPLL) clocks @port on HSW/BDW and
 * record it in @pipe_config. Leaves @pipe_config untouched when no
 * clock is selected for the port (or the selection is unrecognized).
 */
static void hsw_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
			    struct intel_crtc_state *pipe_config)
{
	enum intel_dpll_id id;
	u32 ddi_pll_sel = intel_de_read(dev_priv, PORT_CLK_SEL(port));

	switch (ddi_pll_sel) {
	case PORT_CLK_SEL_WRPLL1:
		id = DPLL_ID_WRPLL1;
		break;
	case PORT_CLK_SEL_WRPLL2:
		id = DPLL_ID_WRPLL2;
		break;
	case PORT_CLK_SEL_SPLL:
		id = DPLL_ID_SPLL;
		break;
	case PORT_CLK_SEL_LCPLL_810:
		id = DPLL_ID_LCPLL_810;
		break;
	case PORT_CLK_SEL_LCPLL_1350:
		id = DPLL_ID_LCPLL_1350;
		break;
	case PORT_CLK_SEL_LCPLL_2700:
		id = DPLL_ID_LCPLL_2700;
		break;
	default:
		/* Unknown selections warn, then bail out like SEL_NONE. */
		MISSING_CASE(ddi_pll_sel);
		fallthrough;
	case PORT_CLK_SEL_NONE:
		return;
	}

	pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
}
10973
/*
 * hsw_get_transcoder_state - find the CPU transcoder driving @crtc
 * @crtc: crtc to query
 * @pipe_config: state to fill with the transcoder readout
 * @power_domain_mask: in/out bitmask of power domains already referenced
 * @wakerefs: wakeref array indexed by power domain
 *
 * Determines which CPU transcoder (per-pipe, eDP or DSI) feeds the crtc
 * and stores it in @pipe_config->cpu_transcoder. On success a reference
 * on the transcoder's power domain is recorded in @wakerefs and
 * @power_domain_mask; the caller is responsible for releasing it.
 *
 * Returns true if the transcoder's power well is on and the pipe is
 * enabled.
 */
static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config,
				     u64 *power_domain_mask,
				     intel_wakeref_t *wakerefs)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum intel_display_power_domain power_domain;
	unsigned long panel_transcoder_mask = BIT(TRANSCODER_EDP);
	unsigned long enabled_panel_transcoders = 0;
	enum transcoder panel_transcoder;
	intel_wakeref_t wf;
	u32 tmp;

	/* Gen11+ adds the two DSI transcoders to the panel candidates. */
	if (INTEL_GEN(dev_priv) >= 11)
		panel_transcoder_mask |=
			BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);

	/*
	 * The pipe->transcoder mapping is fixed with the exception of the eDP
	 * and DSI transcoders handled below.
	 */
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;

	/*
	 * XXX: Do intel_display_power_get_if_enabled before reading this (for
	 * consistency and less surprising code; it's in always on power).
	 */
	for_each_cpu_transcoder_masked(dev_priv, panel_transcoder,
				       panel_transcoder_mask) {
		bool force_thru = false;
		enum pipe trans_pipe;

		tmp = intel_de_read(dev_priv,
				    TRANS_DDI_FUNC_CTL(panel_transcoder));
		if (!(tmp & TRANS_DDI_FUNC_ENABLE))
			continue;

		/*
		 * Log all enabled ones, only use the first one.
		 *
		 * FIXME: This won't work for two separate DSI displays.
		 */
		enabled_panel_transcoders |= BIT(panel_transcoder);
		if (enabled_panel_transcoders != BIT(panel_transcoder))
			continue;

		/* Map the transcoder's EDP input select back to a pipe. */
		switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
		default:
			drm_WARN(dev, 1,
				 "unknown pipe linked to transcoder %s\n",
				 transcoder_name(panel_transcoder));
			fallthrough;
		case TRANS_DDI_EDP_INPUT_A_ONOFF:
			force_thru = true;
			fallthrough;
		case TRANS_DDI_EDP_INPUT_A_ON:
			trans_pipe = PIPE_A;
			break;
		case TRANS_DDI_EDP_INPUT_B_ONOFF:
			trans_pipe = PIPE_B;
			break;
		case TRANS_DDI_EDP_INPUT_C_ONOFF:
			trans_pipe = PIPE_C;
			break;
		case TRANS_DDI_EDP_INPUT_D_ONOFF:
			trans_pipe = PIPE_D;
			break;
		}

		if (trans_pipe == crtc->pipe) {
			pipe_config->cpu_transcoder = panel_transcoder;
			pipe_config->pch_pfit.force_thru = force_thru;
		}
	}

	/*
	 * Valid combos: none, eDP, DSI0, DSI1, DSI0+DSI1
	 */
	drm_WARN_ON(dev, (enabled_panel_transcoders & BIT(TRANSCODER_EDP)) &&
		    enabled_panel_transcoders != BIT(TRANSCODER_EDP));

	power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
	drm_WARN_ON(dev, *power_domain_mask & BIT_ULL(power_domain));

	wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wf)
		return false;

	/* Hand the reference back to the caller via the wakeref array. */
	wakerefs[power_domain] = wf;
	*power_domain_mask |= BIT_ULL(power_domain);

	tmp = intel_de_read(dev_priv, PIPECONF(pipe_config->cpu_transcoder));

	return tmp & PIPECONF_ENABLE;
}
11070
/*
 * bxt_get_dsi_transcoder_state - check whether a DSI transcoder drives
 * @crtc on BXT/GLK
 * @crtc: crtc to query
 * @pipe_config: state to fill with the DSI transcoder, if found
 * @power_domain_mask: in/out bitmask of power domains already referenced
 * @wakerefs: wakeref array indexed by power domain
 *
 * Walks both DSI ports (A and C); for each powered-up transcoder a
 * reference is recorded in @wakerefs / @power_domain_mask for the caller
 * to release. If an enabled port selects this crtc's pipe, its
 * transcoder is stored in @pipe_config->cpu_transcoder.
 *
 * Returns true if the crtc is driven by a DSI transcoder.
 */
static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
					 struct intel_crtc_state *pipe_config,
					 u64 *power_domain_mask,
					 intel_wakeref_t *wakerefs)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum intel_display_power_domain power_domain;
	enum transcoder cpu_transcoder;
	intel_wakeref_t wf;
	enum port port;
	u32 tmp;

	for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
		if (port == PORT_A)
			cpu_transcoder = TRANSCODER_DSI_A;
		else
			cpu_transcoder = TRANSCODER_DSI_C;

		power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
		drm_WARN_ON(dev, *power_domain_mask & BIT_ULL(power_domain));

		wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
		if (!wf)
			continue;

		wakerefs[power_domain] = wf;
		*power_domain_mask |= BIT_ULL(power_domain);

		/*
		 * The PLL needs to be enabled with a valid divider
		 * configuration, otherwise accessing DSI registers will hang
		 * the machine. See BSpec North Display Engine
		 * registers/MIPI[BXT]. We can break out here early, since we
		 * need the same DSI PLL to be enabled for both DSI ports.
		 */
		if (!bxt_dsi_pll_is_enabled(dev_priv))
			break;

		/* XXX: this works for video mode only */
		tmp = intel_de_read(dev_priv, BXT_MIPI_PORT_CTRL(port));
		if (!(tmp & DPI_ENABLE))
			continue;

		tmp = intel_de_read(dev_priv, MIPI_CTRL(port));
		if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
			continue;

		pipe_config->cpu_transcoder = cpu_transcoder;
		break;
	}

	return transcoder_is_dsi(pipe_config->cpu_transcoder);
}
11125
/*
 * Read out the DDI port / DPLL state for the crtc: determine which port
 * feeds the cpu transcoder, look up and verify its PLL, and detect an
 * active PCH encoder (HSW FDI via DDI E).
 */
static void hsw_get_ddi_port_state(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
	struct intel_shared_dpll *pll;
	enum port port;
	u32 tmp;

	if (transcoder_is_dsi(cpu_transcoder)) {
		/* DSI transcoders have a fixed port assignment. */
		port = (cpu_transcoder == TRANSCODER_DSI_A) ?
						PORT_A : PORT_B;
	} else {
		tmp = intel_de_read(dev_priv,
				    TRANS_DDI_FUNC_CTL(cpu_transcoder));
		/* TGL+ encode the port differently in TRANS_DDI_FUNC_CTL. */
		if (INTEL_GEN(dev_priv) >= 12)
			port = TGL_TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);
		else
			port = TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);
	}

	/* Platform-specific DDI clock/PLL readout. */
	if (INTEL_GEN(dev_priv) >= 11)
		icl_get_ddi_pll(dev_priv, port, pipe_config);
	else if (IS_CANNONLAKE(dev_priv))
		cnl_get_ddi_pll(dev_priv, port, pipe_config);
	else if (IS_GEN9_BC(dev_priv))
		skl_get_ddi_pll(dev_priv, port, pipe_config);
	else if (IS_GEN9_LP(dev_priv))
		bxt_get_ddi_pll(dev_priv, port, pipe_config);
	else
		hsw_get_ddi_pll(dev_priv, port, pipe_config);

	pll = pipe_config->shared_dpll;
	if (pll) {
		drm_WARN_ON(&dev_priv->drm,
			    !pll->info->funcs->get_hw_state(dev_priv, pll,
						&pipe_config->dpll_hw_state));
	}

	/*
	 * Haswell has only one FDI/PCH transcoder A, which is connected to
	 * DDI E. So just check whether this pipe is wired to DDI E and
	 * whether the PCH transcoder is on.
	 */
	if (INTEL_GEN(dev_priv) < 9 &&
	    (port == PORT_E) && intel_de_read(dev_priv, LPT_TRANSCONF) & TRANS_ENABLE) {
		pipe_config->has_pch_encoder = true;

		tmp = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A));
		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
					  FDI_DP_PORT_WIDTH_SHIFT) + 1;

		ilk_get_fdi_m_n_config(crtc, pipe_config);
	}
}
11181
/*
 * hsw_get_pipe_config - read out the hw pipe state on HSW+ platforms
 * @crtc: crtc to read out
 * @pipe_config: crtc state to fill in
 *
 * Takes references on each power domain it touches, tracking them in a
 * local mask/wakeref array, and drops them all before returning.
 * Returns true if the pipe is active and @pipe_config was populated.
 */
static bool hsw_get_pipe_config(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	intel_wakeref_t wakerefs[POWER_DOMAIN_NUM], wf;
	enum intel_display_power_domain power_domain;
	u64 power_domain_mask;
	bool active;
	u32 tmp;

	pipe_config->master_transcoder = INVALID_TRANSCODER;

	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
	wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wf)
		return false;

	wakerefs[power_domain] = wf;
	power_domain_mask = BIT_ULL(power_domain);

	pipe_config->shared_dpll = NULL;

	active = hsw_get_transcoder_state(crtc, pipe_config,
					  &power_domain_mask, wakerefs);

	/* A BXT DSI transcoder may drive the pipe instead. */
	if (IS_GEN9_LP(dev_priv) &&
	    bxt_get_dsi_transcoder_state(crtc, pipe_config,
					 &power_domain_mask, wakerefs)) {
		drm_WARN_ON(&dev_priv->drm, active);
		active = true;
	}

	if (!active)
		goto out;

	if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
	    INTEL_GEN(dev_priv) >= 11) {
		hsw_get_ddi_port_state(crtc, pipe_config);
		intel_get_pipe_timings(crtc, pipe_config);
	}

	intel_get_pipe_src_size(crtc, pipe_config);

	if (IS_HASWELL(dev_priv)) {
		u32 tmp = intel_de_read(dev_priv,
					PIPECONF(pipe_config->cpu_transcoder));

		if (tmp & PIPECONF_OUTPUT_COLORSPACE_YUV_HSW)
			pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
		else
			pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
	} else {
		pipe_config->output_format =
			bdw_get_pipemisc_output_format(crtc);

		/*
		 * Currently there is no interface defined to
		 * check user preference between RGB/YCBCR444
		 * or YCBCR420. So the only possible case for
		 * YCBCR444 usage is driving YCBCR420 output
		 * with LSPCON, when pipe is configured for
		 * YCBCR444 output and LSPCON takes care of
		 * downsampling it.
		 */
		pipe_config->lspcon_downsampling =
			pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR444;
	}

	pipe_config->gamma_mode = intel_de_read(dev_priv,
						GAMMA_MODE(crtc->pipe));

	pipe_config->csc_mode = intel_de_read(dev_priv,
					      PIPE_CSC_MODE(crtc->pipe));

	if (INTEL_GEN(dev_priv) >= 9) {
		tmp = intel_de_read(dev_priv, SKL_BOTTOM_COLOR(crtc->pipe));

		if (tmp & SKL_BOTTOM_COLOR_GAMMA_ENABLE)
			pipe_config->gamma_enable = true;

		if (tmp & SKL_BOTTOM_COLOR_CSC_ENABLE)
			pipe_config->csc_enable = true;
	} else {
		i9xx_get_pipe_color_config(pipe_config);
	}

	intel_color_get_config(pipe_config);

	tmp = intel_de_read(dev_priv, WM_LINETIME(crtc->pipe));
	pipe_config->linetime = REG_FIELD_GET(HSW_LINETIME_MASK, tmp);
	if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
		pipe_config->ips_linetime =
			REG_FIELD_GET(HSW_IPS_LINETIME_MASK, tmp);

	/* The panel fitter has its own power domain; read it only if on. */
	power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
	drm_WARN_ON(&dev_priv->drm, power_domain_mask & BIT_ULL(power_domain));

	wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (wf) {
		wakerefs[power_domain] = wf;
		power_domain_mask |= BIT_ULL(power_domain);

		if (INTEL_GEN(dev_priv) >= 9)
			skl_get_pfit_config(pipe_config);
		else
			ilk_get_pfit_config(pipe_config);
	}

	if (hsw_crtc_supports_ips(crtc)) {
		if (IS_HASWELL(dev_priv))
			pipe_config->ips_enabled = intel_de_read(dev_priv,
								 IPS_CTL) & IPS_ENABLE;
		else {
			/*
			 * We cannot readout IPS state on broadwell, set to
			 * true so we can set it to a defined state on first
			 * commit.
			 */
			pipe_config->ips_enabled = true;
		}
	}

	if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
	    !transcoder_is_dsi(pipe_config->cpu_transcoder)) {
		pipe_config->pixel_multiplier =
			intel_de_read(dev_priv,
				      PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
	} else {
		pipe_config->pixel_multiplier = 1;
	}

out:
	/* Release every power reference taken during the readout. */
	for_each_power_domain(power_domain, power_domain_mask)
		intel_display_power_put(dev_priv,
					power_domain, wakerefs[power_domain]);

	return active;
}
11320
11321static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
11322{
11323	struct drm_i915_private *dev_priv =
11324		to_i915(plane_state->uapi.plane->dev);
11325	const struct drm_framebuffer *fb = plane_state->hw.fb;
11326	const struct drm_i915_gem_object *obj = intel_fb_obj(fb);
11327	u32 base;
11328
11329	if (INTEL_INFO(dev_priv)->display.cursor_needs_physical)
11330		base = sg_dma_address(obj->mm.pages->sgl);
11331	else
11332		base = intel_plane_ggtt_offset(plane_state);
11333
11334	return base + plane_state->color_plane[0].offset;
11335}
11336
11337static u32 intel_cursor_position(const struct intel_plane_state *plane_state)
11338{
11339	int x = plane_state->uapi.dst.x1;
11340	int y = plane_state->uapi.dst.y1;
11341	u32 pos = 0;
11342
11343	if (x < 0) {
11344		pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
11345		x = -x;
11346	}
11347	pos |= x << CURSOR_X_SHIFT;
11348
11349	if (y < 0) {
11350		pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
11351		y = -y;
11352	}
11353	pos |= y << CURSOR_Y_SHIFT;
11354
11355	return pos;
11356}
11357
11358static bool intel_cursor_size_ok(const struct intel_plane_state *plane_state)
11359{
11360	const struct drm_mode_config *config =
11361		&plane_state->uapi.plane->dev->mode_config;
11362	int width = drm_rect_width(&plane_state->uapi.dst);
11363	int height = drm_rect_height(&plane_state->uapi.dst);
11364
11365	return width > 0 && width <= config->cursor_width &&
11366		height > 0 && height <= config->cursor_height;
11367}
11368
/*
 * Compute and validate the surface offset/coordinates for a cursor.
 *
 * Cursors cannot be panned arbitrarily: after offset alignment the
 * remaining source coordinates must be 0,0. Returns 0 on success or a
 * negative error code.
 */
static int intel_cursor_check_surface(struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->uapi.plane->dev);
	unsigned int rotation = plane_state->hw.rotation;
	int src_x, src_y;
	u32 offset;
	int ret;

	ret = intel_plane_compute_gtt(plane_state);
	if (ret)
		return ret;

	/* Nothing to compute for an invisible plane. */
	if (!plane_state->uapi.visible)
		return 0;

	src_x = plane_state->uapi.src.x1 >> 16;
	src_y = plane_state->uapi.src.y1 >> 16;

	intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
	offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
						    plane_state, 0);

	if (src_x != 0 || src_y != 0) {
		drm_dbg_kms(&dev_priv->drm,
			    "Arbitrary cursor panning not supported\n");
		return -EINVAL;
	}

	/*
	 * Put the final coordinates back so that the src
	 * coordinate checks will see the right values.
	 */
	drm_rect_translate_to(&plane_state->uapi.src,
			      src_x << 16, src_y << 16);

	/* ILK+ do this automagically in hardware */
	if (HAS_GMCH(dev_priv) && rotation & DRM_MODE_ROTATE_180) {
		const struct drm_framebuffer *fb = plane_state->hw.fb;
		int src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
		int src_h = drm_rect_height(&plane_state->uapi.src) >> 16;

		/* Point at the last pixel for 180 degree software rotation. */
		offset += (src_h * src_w - 1) * fb->format->cpp[0];
	}

	plane_state->color_plane[0].offset = offset;
	plane_state->color_plane[0].x = src_x;
	plane_state->color_plane[0].y = src_y;

	return 0;
}
11420
11421static int intel_check_cursor(struct intel_crtc_state *crtc_state,
11422			      struct intel_plane_state *plane_state)
11423{
11424	const struct drm_framebuffer *fb = plane_state->hw.fb;
11425	struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
11426	int ret;
11427
11428	if (fb && fb->modifier != DRM_FORMAT_MOD_LINEAR) {
11429		drm_dbg_kms(&i915->drm, "cursor cannot be tiled\n");
11430		return -EINVAL;
11431	}
11432
11433	ret = drm_atomic_helper_check_plane_state(&plane_state->uapi,
11434						  &crtc_state->uapi,
11435						  DRM_PLANE_HELPER_NO_SCALING,
11436						  DRM_PLANE_HELPER_NO_SCALING,
11437						  true, true);
11438	if (ret)
11439		return ret;
11440
11441	/* Use the unclipped src/dst rectangles, which we program to hw */
11442	plane_state->uapi.src = drm_plane_state_src(&plane_state->uapi);
11443	plane_state->uapi.dst = drm_plane_state_dest(&plane_state->uapi);
11444
11445	ret = intel_cursor_check_surface(plane_state);
11446	if (ret)
11447		return ret;
11448
11449	if (!plane_state->uapi.visible)
11450		return 0;
11451
11452	ret = intel_plane_check_src_coordinates(plane_state);
11453	if (ret)
11454		return ret;
11455
11456	return 0;
11457}
11458
/*
 * Maximum cursor stride on 845g/865g: a fixed 2048 bytes, independent
 * of the given format, modifier and rotation.
 */
static unsigned int
i845_cursor_max_stride(struct intel_plane *plane,
		       u32 pixel_format, u64 modifier,
		       unsigned int rotation)
{
	return 2048;
}
11466
11467static u32 i845_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
11468{
11469	u32 cntl = 0;
11470
11471	if (crtc_state->gamma_enable)
11472		cntl |= CURSOR_GAMMA_ENABLE;
11473
11474	return cntl;
11475}
11476
11477static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state,
11478			   const struct intel_plane_state *plane_state)
11479{
11480	return CURSOR_ENABLE |
11481		CURSOR_FORMAT_ARGB |
11482		CURSOR_STRIDE(plane_state->color_plane[0].stride);
11483}
11484
11485static bool i845_cursor_size_ok(const struct intel_plane_state *plane_state)
11486{
11487	int width = drm_rect_width(&plane_state->uapi.dst);
11488
11489	/*
11490	 * 845g/865g are only limited by the width of their cursors,
11491	 * the height is arbitrary up to the precision of the register.
11492	 */
11493	return intel_cursor_size_ok(plane_state) && IS_ALIGNED(width, 64);
11494}
11495
11496static int i845_check_cursor(struct intel_crtc_state *crtc_state,
11497			     struct intel_plane_state *plane_state)
11498{
11499	const struct drm_framebuffer *fb = plane_state->hw.fb;
11500	struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
11501	int ret;
11502
11503	ret = intel_check_cursor(crtc_state, plane_state);
11504	if (ret)
11505		return ret;
11506
11507	/* if we want to turn off the cursor ignore width and height */
11508	if (!fb)
11509		return 0;
11510
11511	/* Check for which cursor types we support */
11512	if (!i845_cursor_size_ok(plane_state)) {
11513		drm_dbg_kms(&i915->drm,
11514			    "Cursor dimension %dx%d not supported\n",
11515			    drm_rect_width(&plane_state->uapi.dst),
11516			    drm_rect_height(&plane_state->uapi.dst));
11517		return -EINVAL;
11518	}
11519
11520	drm_WARN_ON(&i915->drm, plane_state->uapi.visible &&
11521		    plane_state->color_plane[0].stride != fb->pitches[0]);
11522
11523	switch (fb->pitches[0]) {
11524	case 256:
11525	case 512:
11526	case 1024:
11527	case 2048:
11528		break;
11529	default:
11530		 drm_dbg_kms(&i915->drm, "Invalid cursor stride (%u)\n",
11531			     fb->pitches[0]);
11532		return -EINVAL;
11533	}
11534
11535	plane_state->ctl = i845_cursor_ctl(crtc_state, plane_state);
11536
11537	return 0;
11538}
11539
/*
 * i845_update_cursor - program the 845g/865g cursor registers
 * @plane: cursor plane
 * @crtc_state: current crtc state
 * @plane_state: cursor state to program, or NULL to disable the cursor
 *
 * All register writes are done with the _fw accessors under the uncore
 * lock.
 */
static void i845_update_cursor(struct intel_plane *plane,
			       const struct intel_crtc_state *crtc_state,
			       const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	u32 cntl = 0, base = 0, pos = 0, size = 0;
	unsigned long irqflags;

	if (plane_state && plane_state->uapi.visible) {
		unsigned int width = drm_rect_width(&plane_state->uapi.dst);
		unsigned int height = drm_rect_height(&plane_state->uapi.dst);

		cntl = plane_state->ctl |
			i845_cursor_ctl_crtc(crtc_state);

		size = (height << 12) | width;

		base = intel_cursor_base(plane_state);
		pos = intel_cursor_position(plane_state);
	}

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* On these chipsets we can only modify the base/size/stride
	 * whilst the cursor is disabled.
	 */
	if (plane->cursor.base != base ||
	    plane->cursor.size != size ||
	    plane->cursor.cntl != cntl) {
		/* Disable first, then reprogram everything and re-enable. */
		intel_de_write_fw(dev_priv, CURCNTR(PIPE_A), 0);
		intel_de_write_fw(dev_priv, CURBASE(PIPE_A), base);
		intel_de_write_fw(dev_priv, CURSIZE, size);
		intel_de_write_fw(dev_priv, CURPOS(PIPE_A), pos);
		intel_de_write_fw(dev_priv, CURCNTR(PIPE_A), cntl);

		plane->cursor.base = base;
		plane->cursor.size = size;
		plane->cursor.cntl = cntl;
	} else {
		/* Unchanged config: position-only update is safe. */
		intel_de_write_fw(dev_priv, CURPOS(PIPE_A), pos);
	}

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
11584
/* Disable the 845g/865g cursor by programming a NULL plane state. */
static void i845_disable_cursor(struct intel_plane *plane,
				const struct intel_crtc_state *crtc_state)
{
	i845_update_cursor(plane, crtc_state, NULL);
}
11590
11591static bool i845_cursor_get_hw_state(struct intel_plane *plane,
11592				     enum pipe *pipe)
11593{
11594	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
11595	enum intel_display_power_domain power_domain;
11596	intel_wakeref_t wakeref;
11597	bool ret;
11598
11599	power_domain = POWER_DOMAIN_PIPE(PIPE_A);
11600	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
11601	if (!wakeref)
11602		return false;
11603
11604	ret = intel_de_read(dev_priv, CURCNTR(PIPE_A)) & CURSOR_ENABLE;
11605
11606	*pipe = PIPE_A;
11607
11608	intel_display_power_put(dev_priv, power_domain, wakeref);
11609
11610	return ret;
11611}
11612
/*
 * Maximum cursor stride on i9xx+: the device's maximum cursor width
 * times 4 (presumably the ARGB bytes-per-pixel — TODO confirm),
 * independent of the given format, modifier and rotation.
 */
static unsigned int
i9xx_cursor_max_stride(struct intel_plane *plane,
		       u32 pixel_format, u64 modifier,
		       unsigned int rotation)
{
	return plane->base.dev->mode_config.cursor_width * 4;
}
11620
11621static u32 i9xx_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
11622{
11623	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
11624	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
11625	u32 cntl = 0;
11626
11627	if (INTEL_GEN(dev_priv) >= 11)
11628		return cntl;
11629
11630	if (crtc_state->gamma_enable)
11631		cntl = MCURSOR_GAMMA_ENABLE;
11632
11633	if (crtc_state->csc_enable)
11634		cntl |= MCURSOR_PIPE_CSC_ENABLE;
11635
11636	if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
11637		cntl |= MCURSOR_PIPE_SELECT(crtc->pipe);
11638
11639	return cntl;
11640}
11641
11642static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
11643			   const struct intel_plane_state *plane_state)
11644{
11645	struct drm_i915_private *dev_priv =
11646		to_i915(plane_state->uapi.plane->dev);
11647	u32 cntl = 0;
11648
11649	if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
11650		cntl |= MCURSOR_TRICKLE_FEED_DISABLE;
11651
11652	switch (drm_rect_width(&plane_state->uapi.dst)) {
11653	case 64:
11654		cntl |= MCURSOR_MODE_64_ARGB_AX;
11655		break;
11656	case 128:
11657		cntl |= MCURSOR_MODE_128_ARGB_AX;
11658		break;
11659	case 256:
11660		cntl |= MCURSOR_MODE_256_ARGB_AX;
11661		break;
11662	default:
11663		MISSING_CASE(drm_rect_width(&plane_state->uapi.dst));
11664		return 0;
11665	}
11666
11667	if (plane_state->hw.rotation & DRM_MODE_ROTATE_180)
11668		cntl |= MCURSOR_ROTATE_180;
11669
11670	return cntl;
11671}
11672
11673static bool i9xx_cursor_size_ok(const struct intel_plane_state *plane_state)
11674{
11675	struct drm_i915_private *dev_priv =
11676		to_i915(plane_state->uapi.plane->dev);
11677	int width = drm_rect_width(&plane_state->uapi.dst);
11678	int height = drm_rect_height(&plane_state->uapi.dst);
11679
11680	if (!intel_cursor_size_ok(plane_state))
11681		return false;
11682
11683	/* Cursor width is limited to a few power-of-two sizes */
11684	switch (width) {
11685	case 256:
11686	case 128:
11687	case 64:
11688		break;
11689	default:
11690		return false;
11691	}
11692
11693	/*
11694	 * IVB+ have CUR_FBC_CTL which allows an arbitrary cursor
11695	 * height from 8 lines up to the cursor width, when the
11696	 * cursor is not rotated. Everything else requires square
11697	 * cursors.
11698	 */
11699	if (HAS_CUR_FBC(dev_priv) &&
11700	    plane_state->hw.rotation & DRM_MODE_ROTATE_0) {
11701		if (height < 8 || height > width)
11702			return false;
11703	} else {
11704		if (height != width)
11705			return false;
11706	}
11707
11708	return true;
11709}
11710
/*
 * Validate an i9xx+ cursor plane update: common cursor checks, supported
 * dimensions, stride consistency and the CHV pipe C left-edge quirk.
 * On success also computes plane_state->ctl. Returns 0 or -EINVAL.
 */
static int i9xx_check_cursor(struct intel_crtc_state *crtc_state,
			     struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	enum pipe pipe = plane->pipe;
	int ret;

	ret = intel_check_cursor(crtc_state, plane_state);
	if (ret)
		return ret;

	/* if we want to turn off the cursor ignore width and height */
	if (!fb)
		return 0;

	/* Check for which cursor types we support */
	if (!i9xx_cursor_size_ok(plane_state)) {
		drm_dbg(&dev_priv->drm,
			"Cursor dimension %dx%d not supported\n",
			drm_rect_width(&plane_state->uapi.dst),
			drm_rect_height(&plane_state->uapi.dst));
		return -EINVAL;
	}

	/* the computed stride must agree with the fb's pitch */
	drm_WARN_ON(&dev_priv->drm, plane_state->uapi.visible &&
		    plane_state->color_plane[0].stride != fb->pitches[0]);

	/* hardware expects a packed cursor: pitch == width * cpp */
	if (fb->pitches[0] !=
	    drm_rect_width(&plane_state->uapi.dst) * fb->format->cpp[0]) {
		drm_dbg_kms(&dev_priv->drm,
			    "Invalid cursor stride (%u) (cursor width %d)\n",
			    fb->pitches[0],
			    drm_rect_width(&plane_state->uapi.dst));
		return -EINVAL;
	}

	/*
	 * There's something wrong with the cursor on CHV pipe C.
	 * If it straddles the left edge of the screen then
	 * moving it away from the edge or disabling it often
	 * results in a pipe underrun, and often that can lead to
	 * dead pipe (constant underrun reported, and it scans
	 * out just a solid color). To recover from that, the
	 * display power well must be turned off and on again.
	 * Refuse to put the cursor into that compromised position.
	 */
	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_C &&
	    plane_state->uapi.visible && plane_state->uapi.dst.x1 < 0) {
		drm_dbg_kms(&dev_priv->drm,
			    "CHV cursor C not allowed to straddle the left screen edge\n");
		return -EINVAL;
	}

	plane_state->ctl = i9xx_cursor_ctl(crtc_state, plane_state);

	return 0;
}
11770
/*
 * Program the i9xx+ cursor registers. Called with a NULL plane_state to
 * disable the cursor. The register write order below is deliberate and
 * must not be changed; see the comment inside.
 */
static void i9xx_update_cursor(struct intel_plane *plane,
			       const struct intel_crtc_state *crtc_state,
			       const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum pipe pipe = plane->pipe;
	u32 cntl = 0, base = 0, pos = 0, fbc_ctl = 0;
	unsigned long irqflags;

	if (plane_state && plane_state->uapi.visible) {
		unsigned width = drm_rect_width(&plane_state->uapi.dst);
		unsigned height = drm_rect_height(&plane_state->uapi.dst);

		cntl = plane_state->ctl |
			i9xx_cursor_ctl_crtc(crtc_state);

		/* non-square cursor needs CUR_FBC_CTL to set the height */
		if (width != height)
			fbc_ctl = CUR_FBC_CTL_EN | (height - 1);

		base = intel_cursor_base(plane_state);
		pos = intel_cursor_position(plane_state);
	}

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/*
	 * On some platforms writing CURCNTR first will also
	 * cause CURPOS to be armed by the CURBASE write.
	 * Without the CURCNTR write the CURPOS write would
	 * arm itself. Thus we always update CURCNTR before
	 * CURPOS.
	 *
	 * On other platforms CURPOS always requires the
	 * CURBASE write to arm the update. Additionally
	 * a write to any of the cursor register will cancel
	 * an already armed cursor update. Thus leaving out
	 * the CURBASE write after CURPOS could lead to a
	 * cursor that doesn't appear to move, or even change
	 * shape. Thus we always write CURBASE.
	 *
	 * The other registers are armed by the CURBASE write
	 * except when the plane is getting enabled at which time
	 * the CURCNTR write arms the update.
	 */

	if (INTEL_GEN(dev_priv) >= 9)
		skl_write_cursor_wm(plane, crtc_state);

	/* full reprogram only when something other than the position changed */
	if (plane->cursor.base != base ||
	    plane->cursor.size != fbc_ctl ||
	    plane->cursor.cntl != cntl) {
		if (HAS_CUR_FBC(dev_priv))
			intel_de_write_fw(dev_priv, CUR_FBC_CTL(pipe),
					  fbc_ctl);
		intel_de_write_fw(dev_priv, CURCNTR(pipe), cntl);
		intel_de_write_fw(dev_priv, CURPOS(pipe), pos);
		intel_de_write_fw(dev_priv, CURBASE(pipe), base);

		plane->cursor.base = base;
		plane->cursor.size = fbc_ctl;
		plane->cursor.cntl = cntl;
	} else {
		intel_de_write_fw(dev_priv, CURPOS(pipe), pos);
		intel_de_write_fw(dev_priv, CURBASE(pipe), base);
	}

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
11839
/* Turn the i9xx+ cursor off by "updating" it with a NULL plane state. */
static void i9xx_disable_cursor(struct intel_plane *plane,
				const struct intel_crtc_state *crtc_state)
{
	i9xx_update_cursor(plane, crtc_state, NULL);
}
11845
/*
 * Read back whether the i9xx+ cursor plane is enabled, and which pipe
 * it is currently assigned to.
 */
static bool i9xx_cursor_get_hw_state(struct intel_plane *plane,
				     enum pipe *pipe)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;
	bool ret;
	u32 val;

	/*
	 * Not 100% correct for planes that can move between pipes,
	 * but that's only the case for gen2-3 which don't have any
	 * display power wells.
	 */
	power_domain = POWER_DOMAIN_PIPE(plane->pipe);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, CURCNTR(plane->pipe));

	/* any non-zero cursor mode means the cursor is enabled */
	ret = val & MCURSOR_MODE;

	/* pre-ilk, non-g4x hardware reports the pipe in the register itself */
	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
		*pipe = plane->pipe;
	else
		*pipe = (val & MCURSOR_PIPE_SELECT_MASK) >>
			MCURSOR_PIPE_SELECT_SHIFT;

	intel_display_power_put(dev_priv, power_domain, wakeref);

	return ret;
}
11879
/* VESA 640x480x72Hz mode to set on the pipe during load detection */
static const struct drm_display_mode load_detect_mode = {
	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
};
11885
11886struct drm_framebuffer *
11887intel_framebuffer_create(struct drm_i915_gem_object *obj,
11888			 struct drm_mode_fb_cmd2 *mode_cmd)
11889{
11890	struct intel_framebuffer *intel_fb;
11891	int ret;
11892
11893	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
11894	if (!intel_fb)
11895		return ERR_PTR(-ENOMEM);
11896
11897	ret = intel_framebuffer_init(intel_fb, obj, mode_cmd);
11898	if (ret)
11899		goto err;
11900
11901	return &intel_fb->base;
11902
11903err:
11904	kfree(intel_fb);
11905	return ERR_PTR(ret);
11906}
11907
/*
 * Add every plane on @crtc to @state, detach it from the crtc and drop
 * its framebuffer, so that a subsequent commit scans out nothing.
 * Used by the load-detect path. Returns 0 or a negative error code.
 */
static int intel_modeset_disable_planes(struct drm_atomic_state *state,
					struct drm_crtc *crtc)
{
	struct drm_plane *plane;
	struct drm_plane_state *plane_state;
	int ret, i;

	ret = drm_atomic_add_affected_planes(state, crtc);
	if (ret)
		return ret;

	for_each_new_plane_in_state(state, plane, plane_state, i) {
		if (plane_state->crtc != crtc)
			continue;

		ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
		if (ret)
			return ret;

		drm_atomic_set_fb_for_plane(plane_state, NULL);
	}

	return 0;
}
11932
/*
 * Grab a pipe to drive @connector for load detection (e.g. CRT presence
 * detect): reuse the connector's current crtc if it has one, otherwise
 * pick the first unused crtc the encoder can drive, then commit a
 * 640x480 mode with all planes disabled. A duplicate of the previous
 * state is stashed in old->restore_state for
 * intel_release_load_detect_pipe() to commit afterwards.
 *
 * NOTE(review): declared int but returns true/false as well as
 * -EDEADLK — callers appear to treat positive as success and must
 * handle -EDEADLK for modeset-lock backoff; confirm before changing.
 */
int intel_get_load_detect_pipe(struct drm_connector *connector,
			       struct intel_load_detect_pipe *old,
			       struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_crtc *intel_crtc;
	struct intel_encoder *intel_encoder =
		intel_attached_encoder(to_intel_connector(connector));
	struct drm_crtc *possible_crtc;
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_crtc *crtc = NULL;
	struct drm_device *dev = encoder->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_mode_config *config = &dev->mode_config;
	struct drm_atomic_state *state = NULL, *restore_state = NULL;
	struct drm_connector_state *connector_state;
	struct intel_crtc_state *crtc_state;
	int ret, i = -1;

	drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		    connector->base.id, connector->name,
		    encoder->base.id, encoder->name);

	old->restore_state = NULL;

	drm_WARN_ON(dev, !drm_modeset_is_locked(&config->connection_mutex));

	/*
	 * Algorithm gets a little messy:
	 *
	 *   - if the connector already has an assigned crtc, use it (but make
	 *     sure it's on first)
	 *
	 *   - try to find the first unused crtc that can drive this connector,
	 *     and use that if we find one
	 */

	/* See if we already have a CRTC for this connector */
	if (connector->state->crtc) {
		crtc = connector->state->crtc;

		ret = drm_modeset_lock(&crtc->mutex, ctx);
		if (ret)
			goto fail;

		/* Make sure the crtc and connector are running */
		goto found;
	}

	/* Find an unused one (if possible) */
	for_each_crtc(dev, possible_crtc) {
		i++;
		/* i tracks the crtc index for the possible_crtcs bitmask */
		if (!(encoder->possible_crtcs & (1 << i)))
			continue;

		ret = drm_modeset_lock(&possible_crtc->mutex, ctx);
		if (ret)
			goto fail;

		if (possible_crtc->state->enable) {
			drm_modeset_unlock(&possible_crtc->mutex);
			continue;
		}

		crtc = possible_crtc;
		break;
	}

	/*
	 * If we didn't find an unused CRTC, don't use any.
	 */
	if (!crtc) {
		drm_dbg_kms(&dev_priv->drm,
			    "no pipe available for load-detect\n");
		ret = -ENODEV;
		goto fail;
	}

found:
	intel_crtc = to_intel_crtc(crtc);

	state = drm_atomic_state_alloc(dev);
	restore_state = drm_atomic_state_alloc(dev);
	if (!state || !restore_state) {
		ret = -ENOMEM;
		goto fail;
	}

	state->acquire_ctx = ctx;
	restore_state->acquire_ctx = ctx;

	/* state: the load-detect configuration we are about to commit */
	connector_state = drm_atomic_get_connector_state(state, connector);
	if (IS_ERR(connector_state)) {
		ret = PTR_ERR(connector_state);
		goto fail;
	}

	ret = drm_atomic_set_crtc_for_connector(connector_state, crtc);
	if (ret)
		goto fail;

	crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
	if (IS_ERR(crtc_state)) {
		ret = PTR_ERR(crtc_state);
		goto fail;
	}

	crtc_state->uapi.active = true;

	ret = drm_atomic_set_mode_for_crtc(&crtc_state->uapi,
					   &load_detect_mode);
	if (ret)
		goto fail;

	ret = intel_modeset_disable_planes(state, crtc);
	if (ret)
		goto fail;

	/* restore_state: duplicate of the pre-load-detect state */
	ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
	if (!ret)
		ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc));
	if (!ret)
		ret = drm_atomic_add_affected_planes(restore_state, crtc);
	if (ret) {
		drm_dbg_kms(&dev_priv->drm,
			    "Failed to create a copy of old state to restore: %i\n",
			    ret);
		goto fail;
	}

	ret = drm_atomic_commit(state);
	if (ret) {
		drm_dbg_kms(&dev_priv->drm,
			    "failed to set mode on load-detect pipe\n");
		goto fail;
	}

	old->restore_state = restore_state;
	drm_atomic_state_put(state);

	/* let the connector get through one full cycle before testing */
	intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
	return true;

fail:
	if (state) {
		drm_atomic_state_put(state);
		state = NULL;
	}
	if (restore_state) {
		drm_atomic_state_put(restore_state);
		restore_state = NULL;
	}

	/* -EDEADLK must be propagated for the modeset-lock backoff dance */
	if (ret == -EDEADLK)
		return ret;

	return false;
}
12091
/*
 * Undo intel_get_load_detect_pipe(): commit the duplicated pre-load-detect
 * state saved in old->restore_state and release it. A no-op when no state
 * was saved.
 */
void intel_release_load_detect_pipe(struct drm_connector *connector,
				    struct intel_load_detect_pipe *old,
				    struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_encoder *intel_encoder =
		intel_attached_encoder(to_intel_connector(connector));
	struct drm_i915_private *i915 = to_i915(intel_encoder->base.dev);
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_atomic_state *state = old->restore_state;
	int ret;

	drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		    connector->base.id, connector->name,
		    encoder->base.id, encoder->name);

	if (!state)
		return;

	/* best effort: log but otherwise ignore a failed restore */
	ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
	if (ret)
		drm_dbg_kms(&i915->drm,
			    "Couldn't release load detect pipe: %i\n", ret);
	drm_atomic_state_put(state);
}
12116
12117static int i9xx_pll_refclk(struct drm_device *dev,
12118			   const struct intel_crtc_state *pipe_config)
12119{
12120	struct drm_i915_private *dev_priv = to_i915(dev);
12121	u32 dpll = pipe_config->dpll_hw_state.dpll;
12122
12123	if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
12124		return dev_priv->vbt.lvds_ssc_freq;
12125	else if (HAS_PCH_SPLIT(dev_priv))
12126		return 120000;
12127	else if (!IS_GEN(dev_priv, 2))
12128		return 96000;
12129	else
12130		return 48000;
12131}
12132
/*
 * Returns the clock of the currently programmed mode of the given pipe,
 * by decoding the DPLL/FP register values stashed in
 * pipe_config->dpll_hw_state back into m/n/p dividers and recomputing
 * the port clock from the reference clock.
 */
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	u32 dpll = pipe_config->dpll_hw_state.dpll;
	u32 fp;
	struct dpll clock;
	int port_clock;
	int refclk = i9xx_pll_refclk(dev, pipe_config);

	/* pick FP0 or FP1 depending on the rate-select bit */
	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
		fp = pipe_config->dpll_hw_state.fp0;
	else
		fp = pipe_config->dpll_hw_state.fp1;

	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
	if (IS_PINEVIEW(dev_priv)) {
		/* note: Pineview's N field is decoded via ffs(), unlike others */
		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
	} else {
		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
	}

	if (!IS_GEN(dev_priv, 2)) {
		/* P1 post divider is decoded with ffs() from the field */
		if (IS_PINEVIEW(dev_priv))
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
				DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
		else
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
			       DPLL_FPA01_P1_POST_DIV_SHIFT);

		/* P2 depends on the DPLL operating mode (DAC vs LVDS) */
		switch (dpll & DPLL_MODE_MASK) {
		case DPLLB_MODE_DAC_SERIAL:
			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
				5 : 10;
			break;
		case DPLLB_MODE_LVDS:
			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
				7 : 14;
			break;
		default:
			drm_dbg_kms(&dev_priv->drm,
				    "Unknown DPLL mode %08x in programmed "
				    "mode\n", (int)(dpll & DPLL_MODE_MASK));
			return;
		}

		if (IS_PINEVIEW(dev_priv))
			port_clock = pnv_calc_dpll_params(refclk, &clock);
		else
			port_clock = i9xx_calc_dpll_params(refclk, &clock);
	} else {
		/* gen2; i830 has no LVDS register, so treat it as all zero */
		u32 lvds = IS_I830(dev_priv) ? 0 : intel_de_read(dev_priv,
								 LVDS);
		bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);

		if (is_lvds) {
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT);

			if (lvds & LVDS_CLKB_POWER_UP)
				clock.p2 = 7;
			else
				clock.p2 = 14;
		} else {
			if (dpll & PLL_P1_DIVIDE_BY_TWO)
				clock.p1 = 2;
			else {
				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
			}
			if (dpll & PLL_P2_DIVIDE_BY_4)
				clock.p2 = 4;
			else
				clock.p2 = 2;
		}

		port_clock = i9xx_calc_dpll_params(refclk, &clock);
	}

	/*
	 * This value includes pixel_multiplier. We will use
	 * port_clock to compute adjusted_mode.crtc_clock in the
	 * encoder's get_config() function.
	 */
	pipe_config->port_clock = port_clock;
}
12224
/*
 * Compute the dotclock from the link frequency and the link M/N values.
 * Returns 0 when link_n is zero (no valid M/N programmed).
 */
int intel_dotclock_calculate(int link_freq,
			     const struct intel_link_m_n *m_n)
{
	/*
	 * The calculation for the data clock is:
	 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
	 * But we want to avoid losing precision if possible, so:
	 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
	 *
	 * and the link clock is simpler:
	 * link_clock = (m * link_clock) / n
	 */

	if (!m_n->link_n)
		return 0;

	/* 64-bit intermediate to avoid overflow of m * link_freq */
	return div_u64(mul_u32_u32(m_n->link_m, link_freq), m_n->link_n);
}
12243
/*
 * Read out the port clock from the DPLL on PCH (ILK-style) platforms,
 * and derive a dotclock estimate from the FDI link configuration.
 */
static void ilk_pch_clock_get(struct intel_crtc *crtc,
			      struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/* read out port_clock from the DPLL */
	i9xx_crtc_clock_get(crtc, pipe_config);

	/*
	 * In case there is an active pipe without active ports,
	 * we may need some idea for the dotclock anyway.
	 * Calculate one based on the FDI configuration.
	 */
	pipe_config->hw.adjusted_mode.crtc_clock =
		intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
					 &pipe_config->fdi_m_n);
}
12261
/*
 * Zero @crtc_state and (re)initialize it to its "invalid" defaults.
 * The INVALID_* / -1 markers distinguish "never set" from legitimate
 * zero-valued hardware state during readout and sanity checks.
 */
static void intel_crtc_state_reset(struct intel_crtc_state *crtc_state,
				   struct intel_crtc *crtc)
{
	memset(crtc_state, 0, sizeof(*crtc_state));

	__drm_atomic_helper_crtc_state_reset(&crtc_state->uapi, &crtc->base);

	crtc_state->cpu_transcoder = INVALID_TRANSCODER;
	crtc_state->master_transcoder = INVALID_TRANSCODER;
	crtc_state->hsw_workaround_pipe = INVALID_PIPE;
	crtc_state->output_format = INTEL_OUTPUT_FORMAT_INVALID;
	crtc_state->scaler_state.scaler_id = -1;
	crtc_state->mst_master_transcoder = INVALID_TRANSCODER;
}
12276
12277static struct intel_crtc_state *intel_crtc_state_alloc(struct intel_crtc *crtc)
12278{
12279	struct intel_crtc_state *crtc_state;
12280
12281	crtc_state = kmalloc(sizeof(*crtc_state), GFP_KERNEL);
12282
12283	if (crtc_state)
12284		intel_crtc_state_reset(crtc_state, crtc);
12285
12286	return crtc_state;
12287}
12288
12289/* Returns the currently programmed mode of the given encoder. */
12290struct drm_display_mode *
12291intel_encoder_current_mode(struct intel_encoder *encoder)
12292{
12293	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
12294	struct intel_crtc_state *crtc_state;
12295	struct drm_display_mode *mode;
12296	struct intel_crtc *crtc;
12297	enum pipe pipe;
12298
12299	if (!encoder->get_hw_state(encoder, &pipe))
12300		return NULL;
12301
12302	crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
12303
12304	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
12305	if (!mode)
12306		return NULL;
12307
12308	crtc_state = intel_crtc_state_alloc(crtc);
12309	if (!crtc_state) {
12310		kfree(mode);
12311		return NULL;
12312	}
12313
12314	if (!dev_priv->display.get_pipe_config(crtc, crtc_state)) {
12315		kfree(crtc_state);
12316		kfree(mode);
12317		return NULL;
12318	}
12319
12320	encoder->get_config(encoder, crtc_state);
12321
12322	intel_mode_from_pipe_config(mode, crtc_state);
12323
12324	kfree(crtc_state);
12325
12326	return mode;
12327}
12328
/* drm_crtc_funcs.destroy: tear down the drm core state and free the crtc. */
static void intel_crtc_destroy(struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	drm_crtc_cleanup(crtc);
	kfree(intel_crtc);
}
12336
12337/**
12338 * intel_wm_need_update - Check whether watermarks need updating
12339 * @cur: current plane state
12340 * @new: new plane state
12341 *
12342 * Check current plane state versus the new one to determine whether
12343 * watermarks need to be recalculated.
12344 *
12345 * Returns true or false.
12346 */
12347static bool intel_wm_need_update(const struct intel_plane_state *cur,
12348				 struct intel_plane_state *new)
12349{
12350	/* Update watermarks on tiling or size changes. */
12351	if (new->uapi.visible != cur->uapi.visible)
12352		return true;
12353
12354	if (!cur->hw.fb || !new->hw.fb)
12355		return false;
12356
12357	if (cur->hw.fb->modifier != new->hw.fb->modifier ||
12358	    cur->hw.rotation != new->hw.rotation ||
12359	    drm_rect_width(&new->uapi.src) != drm_rect_width(&cur->uapi.src) ||
12360	    drm_rect_height(&new->uapi.src) != drm_rect_height(&cur->uapi.src) ||
12361	    drm_rect_width(&new->uapi.dst) != drm_rect_width(&cur->uapi.dst) ||
12362	    drm_rect_height(&new->uapi.dst) != drm_rect_height(&cur->uapi.dst))
12363		return true;
12364
12365	return false;
12366}
12367
12368static bool needs_scaling(const struct intel_plane_state *state)
12369{
12370	int src_w = drm_rect_width(&state->uapi.src) >> 16;
12371	int src_h = drm_rect_height(&state->uapi.src) >> 16;
12372	int dst_w = drm_rect_width(&state->uapi.dst);
12373	int dst_h = drm_rect_height(&state->uapi.dst);
12374
12375	return (src_w != dst_w || src_h != dst_h);
12376}
12377
/*
 * Compute the derived crtc state needed for a plane update: scaler usage,
 * watermark recomputation flags, cxsr/LP watermark disables and
 * frontbuffer bits, based on the plane's visibility transition.
 * Returns 0 or a negative error code from the scaler setup.
 */
int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
				    struct intel_crtc_state *crtc_state,
				    const struct intel_plane_state *old_plane_state,
				    struct intel_plane_state *plane_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	bool mode_changed = needs_modeset(crtc_state);
	bool was_crtc_enabled = old_crtc_state->hw.active;
	bool is_crtc_enabled = crtc_state->hw.active;
	bool turn_off, turn_on, visible, was_visible;
	int ret;

	if (INTEL_GEN(dev_priv) >= 9 && plane->id != PLANE_CURSOR) {
		ret = skl_update_scaler_plane(crtc_state, plane_state);
		if (ret)
			return ret;
	}

	was_visible = old_plane_state->uapi.visible;
	visible = plane_state->uapi.visible;

	/* a plane cannot have been visible on an inactive crtc */
	if (!was_crtc_enabled && drm_WARN_ON(&dev_priv->drm, was_visible))
		was_visible = false;

	/*
	 * Visibility is calculated as if the crtc was on, but
	 * after scaler setup everything depends on it being off
	 * when the crtc isn't active.
	 *
	 * FIXME this is wrong for watermarks. Watermarks should also
	 * be computed as if the pipe would be active. Perhaps move
	 * per-plane wm computation to the .check_plane() hook, and
	 * only combine the results from all planes in the current place?
	 */
	if (!is_crtc_enabled) {
		intel_plane_set_invisible(crtc_state, plane_state);
		visible = false;
	}

	if (!was_visible && !visible)
		return 0;

	turn_off = was_visible && (!visible || mode_changed);
	turn_on = visible && (!was_visible || mode_changed);

	drm_dbg_atomic(&dev_priv->drm,
		       "[CRTC:%d:%s] with [PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
		       crtc->base.base.id, crtc->base.name,
		       plane->base.base.id, plane->base.name,
		       was_visible, visible,
		       turn_off, turn_on, mode_changed);

	if (turn_on) {
		if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
			crtc_state->update_wm_pre = true;

		/* must disable cxsr around plane enable/disable */
		if (plane->id != PLANE_CURSOR)
			crtc_state->disable_cxsr = true;
	} else if (turn_off) {
		if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
			crtc_state->update_wm_post = true;

		/* must disable cxsr around plane enable/disable */
		if (plane->id != PLANE_CURSOR)
			crtc_state->disable_cxsr = true;
	} else if (intel_wm_need_update(old_plane_state, plane_state)) {
		if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) {
			/* FIXME bollocks */
			crtc_state->update_wm_pre = true;
			crtc_state->update_wm_post = true;
		}
	}

	if (visible || was_visible)
		crtc_state->fb_bits |= plane->frontbuffer_bit;

	/*
	 * ILK/SNB DVSACNTR/Sprite Enable
	 * IVB SPR_CTL/Sprite Enable
	 * "When in Self Refresh Big FIFO mode, a write to enable the
	 *  plane will be internally buffered and delayed while Big FIFO
	 *  mode is exiting."
	 *
	 * Which means that enabling the sprite can take an extra frame
	 * when we start in big FIFO mode (LP1+). Thus we need to drop
	 * down to LP0 and wait for vblank in order to make sure the
	 * sprite gets enabled on the next vblank after the register write.
	 * Doing otherwise would risk enabling the sprite one frame after
	 * we've already signalled flip completion. We can resume LP1+
	 * once the sprite has been enabled.
	 *
	 *
	 * WaCxSRDisabledForSpriteScaling:ivb
	 * IVB SPR_SCALE/Scaling Enable
	 * "Low Power watermarks must be disabled for at least one
	 *  frame before enabling sprite scaling, and kept disabled
	 *  until sprite scaling is disabled."
	 *
	 * ILK/SNB DVSASCALE/Scaling Enable
	 * "When in Self Refresh Big FIFO mode, scaling enable will be
	 *  masked off while Big FIFO mode is exiting."
	 *
	 * Despite the w/a only being listed for IVB we assume that
	 * the ILK/SNB note has similar ramifications, hence we apply
	 * the w/a on all three platforms.
	 *
	 * With experimental results seems this is needed also for primary
	 * plane, not only sprite plane.
	 */
	if (plane->id != PLANE_CURSOR &&
	    (IS_GEN_RANGE(dev_priv, 5, 6) ||
	     IS_IVYBRIDGE(dev_priv)) &&
	    (turn_on || (!needs_scaling(old_plane_state) &&
			 needs_scaling(plane_state))))
		crtc_state->disable_lp_wm = true;

	return 0;
}
12499
12500static bool encoders_cloneable(const struct intel_encoder *a,
12501			       const struct intel_encoder *b)
12502{
12503	/* masks could be asymmetric, so check both ways */
12504	return a == b || (a->cloneable & (1 << b->type) &&
12505			  b->cloneable & (1 << a->type));
12506}
12507
/*
 * Check whether @encoder can be added to @crtc: it must be cloneable
 * with every encoder already routed to that crtc in @state.
 */
static bool check_single_encoder_cloning(struct drm_atomic_state *state,
					 struct intel_crtc *crtc,
					 struct intel_encoder *encoder)
{
	struct intel_encoder *source_encoder;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int i;

	for_each_new_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != &crtc->base)
			continue;

		source_encoder =
			to_intel_encoder(connector_state->best_encoder);
		if (!encoders_cloneable(encoder, source_encoder))
			return false;
	}

	return true;
}
12529
/*
 * For every plane in @state that has a planar (NV12 Y/UV) link, pull its
 * linked partner into the state as well, so both are updated together.
 * The WARNs sanity-check that the link is mutual and master/slave roles
 * differ. Returns 0 or a negative error code.
 */
static int icl_add_linked_planes(struct intel_atomic_state *state)
{
	struct intel_plane *plane, *linked;
	struct intel_plane_state *plane_state, *linked_plane_state;
	int i;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		linked = plane_state->planar_linked_plane;

		if (!linked)
			continue;

		linked_plane_state = intel_atomic_get_plane_state(state, linked);
		if (IS_ERR(linked_plane_state))
			return PTR_ERR(linked_plane_state);

		drm_WARN_ON(state->base.dev,
			    linked_plane_state->planar_linked_plane != plane);
		drm_WARN_ON(state->base.dev,
			    linked_plane_state->planar_slave == plane_state->planar_slave);
	}

	return 0;
}
12554
/*
 * On gen11+ planar (NV12) formats need a second plane to scan out the
 * Y component. Tear down all stale Y/UV plane links on this crtc, then
 * for each NV12 plane pick a free Y-capable plane, link the pair and
 * copy the master's parameters to the slave. Returns 0, -EINVAL when no
 * free Y plane is available, or a negative error from state acquisition.
 */
static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
	struct intel_plane *plane, *linked;
	struct intel_plane_state *plane_state;
	int i;

	if (INTEL_GEN(dev_priv) < 11)
		return 0;

	/*
	 * Destroy all old plane links and make the slave plane invisible
	 * in the crtc_state->active_planes mask.
	 */
	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		if (plane->pipe != crtc->pipe || !plane_state->planar_linked_plane)
			continue;

		plane_state->planar_linked_plane = NULL;
		if (plane_state->planar_slave && !plane_state->uapi.visible) {
			crtc_state->active_planes &= ~BIT(plane->id);
			crtc_state->update_planes |= BIT(plane->id);
		}

		plane_state->planar_slave = false;
	}

	/* no NV12 planes on this crtc: nothing left to do */
	if (!crtc_state->nv12_planes)
		return 0;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		struct intel_plane_state *linked_state = NULL;

		if (plane->pipe != crtc->pipe ||
		    !(crtc_state->nv12_planes & BIT(plane->id)))
			continue;

		/* find a free Y-capable plane on this crtc */
		for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, linked) {
			if (!icl_is_nv12_y_plane(dev_priv, linked->id))
				continue;

			if (crtc_state->active_planes & BIT(linked->id))
				continue;

			linked_state = intel_atomic_get_plane_state(state, linked);
			if (IS_ERR(linked_state))
				return PTR_ERR(linked_state);

			break;
		}

		if (!linked_state) {
			drm_dbg_kms(&dev_priv->drm,
				    "Need %d free Y planes for planar YUV\n",
				    hweight8(crtc_state->nv12_planes));

			return -EINVAL;
		}

		plane_state->planar_linked_plane = linked;

		linked_state->planar_slave = true;
		linked_state->planar_linked_plane = plane;
		crtc_state->active_planes |= BIT(linked->id);
		crtc_state->update_planes |= BIT(linked->id);
		drm_dbg_kms(&dev_priv->drm, "Using %s as Y plane for %s\n",
			    linked->base.name, plane->base.name);

		/* Copy parameters to slave plane */
		linked_state->ctl = plane_state->ctl | PLANE_CTL_YUV420_Y_PLANE;
		linked_state->color_ctl = plane_state->color_ctl;
		linked_state->view = plane_state->view;
		memcpy(linked_state->color_plane, plane_state->color_plane,
		       sizeof(linked_state->color_plane));

		intel_plane_copy_uapi_to_hw_state(linked_state, plane_state);
		linked_state->uapi.src = plane_state->uapi.src;
		linked_state->uapi.dst = plane_state->uapi.dst;

		if (icl_is_hdr_plane(dev_priv, plane->id)) {
			/* HDR master plane must point CUS at the chosen slave */
			if (linked->id == PLANE_SPRITE5)
				plane_state->cus_ctl |= PLANE_CUS_PLANE_7;
			else if (linked->id == PLANE_SPRITE4)
				plane_state->cus_ctl |= PLANE_CUS_PLANE_6;
			else if (linked->id == PLANE_SPRITE3)
				plane_state->cus_ctl |= PLANE_CUS_PLANE_5_RKL;
			else if (linked->id == PLANE_SPRITE2)
				plane_state->cus_ctl |= PLANE_CUS_PLANE_4_RKL;
			else
				MISSING_CASE(linked->id);
		}
	}

	return 0;
}
12652
12653static bool c8_planes_changed(const struct intel_crtc_state *new_crtc_state)
12654{
12655	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
12656	struct intel_atomic_state *state =
12657		to_intel_atomic_state(new_crtc_state->uapi.state);
12658	const struct intel_crtc_state *old_crtc_state =
12659		intel_atomic_get_old_crtc_state(state, crtc);
12660
12661	return !old_crtc_state->c8_planes != !new_crtc_state->c8_planes;
12662}
12663
12664static u16 hsw_linetime_wm(const struct intel_crtc_state *crtc_state)
12665{
12666	const struct drm_display_mode *adjusted_mode =
12667		&crtc_state->hw.adjusted_mode;
12668	int linetime_wm;
12669
12670	if (!crtc_state->hw.enable)
12671		return 0;
12672
12673	linetime_wm = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
12674					adjusted_mode->crtc_clock);
12675
12676	return min(linetime_wm, 0x1ff);
12677}
12678
12679static u16 hsw_ips_linetime_wm(const struct intel_crtc_state *crtc_state,
12680			       const struct intel_cdclk_state *cdclk_state)
12681{
12682	const struct drm_display_mode *adjusted_mode =
12683		&crtc_state->hw.adjusted_mode;
12684	int linetime_wm;
12685
12686	if (!crtc_state->hw.enable)
12687		return 0;
12688
12689	linetime_wm = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
12690					cdclk_state->logical.cdclk);
12691
12692	return min(linetime_wm, 0x1ff);
12693}
12694
12695static u16 skl_linetime_wm(const struct intel_crtc_state *crtc_state)
12696{
12697	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
12698	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
12699	const struct drm_display_mode *adjusted_mode =
12700		&crtc_state->hw.adjusted_mode;
12701	int linetime_wm;
12702
12703	if (!crtc_state->hw.enable)
12704		return 0;
12705
12706	linetime_wm = DIV_ROUND_UP(adjusted_mode->crtc_htotal * 1000 * 8,
12707				   crtc_state->pixel_rate);
12708
12709	/* Display WA #1135: BXT:ALL GLK:ALL */
12710	if (IS_GEN9_LP(dev_priv) && dev_priv->ipc_enabled)
12711		linetime_wm /= 2;
12712
12713	return min(linetime_wm, 0x1ff);
12714}
12715
/*
 * Compute the linetime watermarks for the new crtc state: the pipe
 * linetime and, on crtcs that support IPS, the IPS linetime which is
 * based on the cdclk instead of the pixel clock.  Returns 0 or the
 * error from acquiring the cdclk state.
 */
static int hsw_compute_linetime_wm(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct intel_cdclk_state *cdclk_state;

	if (INTEL_GEN(dev_priv) >= 9)
		crtc_state->linetime = skl_linetime_wm(crtc_state);
	else
		crtc_state->linetime = hsw_linetime_wm(crtc_state);

	if (!hsw_crtc_supports_ips(crtc))
		return 0;

	/* IPS linetime needs the logical cdclk from the global state. */
	cdclk_state = intel_atomic_get_cdclk_state(state);
	if (IS_ERR(cdclk_state))
		return PTR_ERR(cdclk_state);

	crtc_state->ips_linetime = hsw_ips_linetime_wm(crtc_state,
						       cdclk_state);

	return 0;
}
12741
/*
 * Per-crtc atomic check: validate and finish computing the new crtc
 * state (dpll clocks, color management, watermarks, scalers, IPS,
 * linetime, PSR2 selective fetch).  Returns 0 or a negative error code.
 */
static int intel_crtc_atomic_check(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	bool mode_changed = needs_modeset(crtc_state);
	int ret;

	/* Pre-gen5 (except G4X): pipe being turned off still needs a wm update. */
	if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv) &&
	    mode_changed && !crtc_state->hw.active)
		crtc_state->update_wm_post = true;

	/* Compute a new dpll config unless one was already assigned. */
	if (mode_changed && crtc_state->hw.enable &&
	    dev_priv->display.crtc_compute_clock &&
	    !drm_WARN_ON(&dev_priv->drm, crtc_state->shared_dpll)) {
		ret = dev_priv->display.crtc_compute_clock(crtc, crtc_state);
		if (ret)
			return ret;
	}

	/*
	 * May need to update pipe gamma enable bits
	 * when C8 planes are getting enabled/disabled.
	 */
	if (c8_planes_changed(crtc_state))
		crtc_state->uapi.color_mgmt_changed = true;

	if (mode_changed || crtc_state->update_pipe ||
	    crtc_state->uapi.color_mgmt_changed) {
		ret = intel_color_check(crtc_state);
		if (ret)
			return ret;
	}

	/* Optimal ("target") watermarks for the new state. */
	if (dev_priv->display.compute_pipe_wm) {
		ret = dev_priv->display.compute_pipe_wm(crtc_state);
		if (ret) {
			drm_dbg_kms(&dev_priv->drm,
				    "Target pipe watermarks are invalid\n");
			return ret;
		}
	}

	if (dev_priv->display.compute_intermediate_wm) {
		if (drm_WARN_ON(&dev_priv->drm,
				!dev_priv->display.compute_pipe_wm))
			return 0;

		/*
		 * Calculate 'intermediate' watermarks that satisfy both the
		 * old state and the new state.  We can program these
		 * immediately.
		 */
		ret = dev_priv->display.compute_intermediate_wm(crtc_state);
		if (ret) {
			drm_dbg_kms(&dev_priv->drm,
				    "No valid intermediate pipe watermarks are possible\n");
			return ret;
		}
	}

	/* gen9+: (re)assign pipe/plane scalers for the new state. */
	if (INTEL_GEN(dev_priv) >= 9) {
		if (mode_changed || crtc_state->update_pipe) {
			ret = skl_update_scaler_crtc(crtc_state);
			if (ret)
				return ret;
		}

		ret = intel_atomic_setup_scalers(dev_priv, crtc, crtc_state);
		if (ret)
			return ret;
	}

	if (HAS_IPS(dev_priv)) {
		ret = hsw_compute_ips_config(crtc_state);
		if (ret)
			return ret;
	}

	if (INTEL_GEN(dev_priv) >= 9 ||
	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
		ret = hsw_compute_linetime_wm(state, crtc);
		if (ret)
			return ret;

	}

	/* PSR2 selective fetch only needs recomputing on non-modeset updates. */
	if (!mode_changed)
		intel_psr2_sel_fetch_update(state, crtc);

	return 0;
}
12835
/*
 * Bring every connector's atomic state back in sync with the legacy
 * encoder/crtc pointers (used after hw state readout/sanitization),
 * keeping the connector reference that a crtc binding implies balanced.
 */
static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
{
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;

	drm_connector_list_iter_begin(dev, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		/* Drop the reference held for the previous crtc binding. */
		if (connector->base.state->crtc)
			drm_connector_put(&connector->base);

		if (connector->base.encoder) {
			connector->base.state->best_encoder =
				connector->base.encoder;
			connector->base.state->crtc =
				connector->base.encoder->crtc;

			/* Take a new reference for the new crtc binding. */
			drm_connector_get(&connector->base);
		} else {
			connector->base.state->best_encoder = NULL;
			connector->base.state->crtc = NULL;
		}
	}
	drm_connector_list_iter_end(&conn_iter);
}
12860
12861static int
12862compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
12863		      struct intel_crtc_state *pipe_config)
12864{
12865	struct drm_connector *connector = conn_state->connector;
12866	struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
12867	const struct drm_display_info *info = &connector->display_info;
12868	int bpp;
12869
12870	switch (conn_state->max_bpc) {
12871	case 6 ... 7:
12872		bpp = 6 * 3;
12873		break;
12874	case 8 ... 9:
12875		bpp = 8 * 3;
12876		break;
12877	case 10 ... 11:
12878		bpp = 10 * 3;
12879		break;
12880	case 12 ... 16:
12881		bpp = 12 * 3;
12882		break;
12883	default:
12884		MISSING_CASE(conn_state->max_bpc);
12885		return -EINVAL;
12886	}
12887
12888	if (bpp < pipe_config->pipe_bpp) {
12889		drm_dbg_kms(&i915->drm,
12890			    "[CONNECTOR:%d:%s] Limiting display bpp to %d instead of "
12891			    "EDID bpp %d, requested bpp %d, max platform bpp %d\n",
12892			    connector->base.id, connector->name,
12893			    bpp, 3 * info->bpc,
12894			    3 * conn_state->max_requested_bpc,
12895			    pipe_config->pipe_bpp);
12896
12897		pipe_config->pipe_bpp = bpp;
12898	}
12899
12900	return 0;
12901}
12902
12903static int
12904compute_baseline_pipe_bpp(struct intel_crtc *crtc,
12905			  struct intel_crtc_state *pipe_config)
12906{
12907	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
12908	struct drm_atomic_state *state = pipe_config->uapi.state;
12909	struct drm_connector *connector;
12910	struct drm_connector_state *connector_state;
12911	int bpp, i;
12912
12913	if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
12914	    IS_CHERRYVIEW(dev_priv)))
12915		bpp = 10*3;
12916	else if (INTEL_GEN(dev_priv) >= 5)
12917		bpp = 12*3;
12918	else
12919		bpp = 8*3;
12920
12921	pipe_config->pipe_bpp = bpp;
12922
12923	/* Clamp display bpp to connector max bpp */
12924	for_each_new_connector_in_state(state, connector, connector_state, i) {
12925		int ret;
12926
12927		if (connector_state->crtc != &crtc->base)
12928			continue;
12929
12930		ret = compute_sink_pipe_bpp(connector_state, pipe_config);
12931		if (ret)
12932			return ret;
12933	}
12934
12935	return 0;
12936}
12937
/* Dump the crtc_* (hw) timings of @mode to the KMS debug log. */
static void intel_dump_crtc_timings(struct drm_i915_private *i915,
				    const struct drm_display_mode *mode)
{
	drm_dbg_kms(&i915->drm, "crtc timings: %d %d %d %d %d %d %d %d %d, "
		    "type: 0x%x flags: 0x%x\n",
		    mode->crtc_clock,
		    mode->crtc_hdisplay, mode->crtc_hsync_start,
		    mode->crtc_hsync_end, mode->crtc_htotal,
		    mode->crtc_vdisplay, mode->crtc_vsync_start,
		    mode->crtc_vsync_end, mode->crtc_vtotal,
		    mode->type, mode->flags);
}
12950
/* Dump one set of link m/n values (labelled by @id) to the KMS debug log. */
static void
intel_dump_m_n_config(const struct intel_crtc_state *pipe_config,
		      const char *id, unsigned int lane_count,
		      const struct intel_link_m_n *m_n)
{
	struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);

	drm_dbg_kms(&i915->drm,
		    "%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
		    id, lane_count,
		    m_n->gmch_m, m_n->gmch_n,
		    m_n->link_m, m_n->link_n, m_n->tu);
}
12964
12965static void
12966intel_dump_infoframe(struct drm_i915_private *dev_priv,
12967		     const union hdmi_infoframe *frame)
12968{
12969	if (!drm_debug_enabled(DRM_UT_KMS))
12970		return;
12971
12972	hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, frame);
12973}
12974
12975static void
12976intel_dump_dp_vsc_sdp(struct drm_i915_private *dev_priv,
12977		      const struct drm_dp_vsc_sdp *vsc)
12978{
12979	if (!drm_debug_enabled(DRM_UT_KMS))
12980		return;
12981
12982	drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, vsc);
12983}
12984
/* Expands to a designated initializer: [INTEL_OUTPUT_FOO] = "FOO". */
#define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x

/* Human-readable names for enum intel_output_type, indexed by type. */
static const char * const output_type_str[] = {
	OUTPUT_TYPE(UNUSED),
	OUTPUT_TYPE(ANALOG),
	OUTPUT_TYPE(DVO),
	OUTPUT_TYPE(SDVO),
	OUTPUT_TYPE(LVDS),
	OUTPUT_TYPE(TVOUT),
	OUTPUT_TYPE(HDMI),
	OUTPUT_TYPE(DP),
	OUTPUT_TYPE(EDP),
	OUTPUT_TYPE(DSI),
	OUTPUT_TYPE(DDI),
	OUTPUT_TYPE(DP_MST),
};

#undef OUTPUT_TYPE
13003
13004static void snprintf_output_types(char *buf, size_t len,
13005				  unsigned int output_types)
13006{
13007	char *str = buf;
13008	int i;
13009
13010	str[0] = '\0';
13011
13012	for (i = 0; i < ARRAY_SIZE(output_type_str); i++) {
13013		int r;
13014
13015		if ((output_types & BIT(i)) == 0)
13016			continue;
13017
13018		r = snprintf(str, len, "%s%s",
13019			     str != buf ? "," : "", output_type_str[i]);
13020		if (r >= len)
13021			break;
13022		str += r;
13023		len -= r;
13024
13025		output_types &= ~BIT(i);
13026	}
13027
13028	WARN_ON_ONCE(output_types != 0);
13029}
13030
13031static const char * const output_format_str[] = {
13032	[INTEL_OUTPUT_FORMAT_INVALID] = "Invalid",
13033	[INTEL_OUTPUT_FORMAT_RGB] = "RGB",
13034	[INTEL_OUTPUT_FORMAT_YCBCR420] = "YCBCR4:2:0",
13035	[INTEL_OUTPUT_FORMAT_YCBCR444] = "YCBCR4:4:4",
13036};
13037
13038static const char *output_formats(enum intel_output_format format)
13039{
13040	if (format >= ARRAY_SIZE(output_format_str))
13041		format = INTEL_OUTPUT_FORMAT_INVALID;
13042	return output_format_str[format];
13043}
13044
/* Dump one plane's fb, format, rotation, scaler and src/dst rects. */
static void intel_dump_plane_state(const struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *i915 = to_i915(plane->base.dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	struct drm_format_name_buf format_name;

	/* No fb attached: just report visibility and bail. */
	if (!fb) {
		drm_dbg_kms(&i915->drm,
			    "[PLANE:%d:%s] fb: [NOFB], visible: %s\n",
			    plane->base.base.id, plane->base.name,
			    yesno(plane_state->uapi.visible));
		return;
	}

	drm_dbg_kms(&i915->drm,
		    "[PLANE:%d:%s] fb: [FB:%d] %ux%u format = %s, visible: %s\n",
		    plane->base.base.id, plane->base.name,
		    fb->base.id, fb->width, fb->height,
		    drm_get_format_name(fb->format->format, &format_name),
		    yesno(plane_state->uapi.visible));
	drm_dbg_kms(&i915->drm, "\trotation: 0x%x, scaler: %d\n",
		    plane_state->hw.rotation, plane_state->scaler_id);
	/* src/dst rects are only meaningful for a visible plane. */
	if (plane_state->uapi.visible)
		drm_dbg_kms(&i915->drm,
			    "\tsrc: " DRM_RECT_FP_FMT " dst: " DRM_RECT_FMT "\n",
			    DRM_RECT_FP_ARG(&plane_state->uapi.src),
			    DRM_RECT_ARG(&plane_state->uapi.dst));
}
13074
/*
 * Dump the full crtc state (and, when @state is given, the state of all
 * planes on this crtc) to the KMS debug log.  @context describes why
 * the dump is being made.  Disabled crtcs only get the plane dump.
 */
static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
				   struct intel_atomic_state *state,
				   const char *context)
{
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct intel_plane_state *plane_state;
	struct intel_plane *plane;
	char buf[64];
	int i;

	drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] enable: %s %s\n",
		    crtc->base.base.id, crtc->base.name,
		    yesno(pipe_config->hw.enable), context);

	if (!pipe_config->hw.enable)
		goto dump_planes;

	snprintf_output_types(buf, sizeof(buf), pipe_config->output_types);
	drm_dbg_kms(&dev_priv->drm,
		    "active: %s, output_types: %s (0x%x), output format: %s\n",
		    yesno(pipe_config->hw.active),
		    buf, pipe_config->output_types,
		    output_formats(pipe_config->output_format));

	drm_dbg_kms(&dev_priv->drm,
		    "cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
		    transcoder_name(pipe_config->cpu_transcoder),
		    pipe_config->pipe_bpp, pipe_config->dither);

	drm_dbg_kms(&dev_priv->drm,
		    "port sync: master transcoder: %s, slave transcoder bitmask = 0x%x\n",
		    transcoder_name(pipe_config->master_transcoder),
		    pipe_config->sync_mode_slaves_mask);

	if (pipe_config->has_pch_encoder)
		intel_dump_m_n_config(pipe_config, "fdi",
				      pipe_config->fdi_lanes,
				      &pipe_config->fdi_m_n);

	if (intel_crtc_has_dp_encoder(pipe_config)) {
		intel_dump_m_n_config(pipe_config, "dp m_n",
				pipe_config->lane_count, &pipe_config->dp_m_n);
		/* m2_n2 is the alternate (DRRS) link rate */
		if (pipe_config->has_drrs)
			intel_dump_m_n_config(pipe_config, "dp m2_n2",
					      pipe_config->lane_count,
					      &pipe_config->dp_m2_n2);
	}

	drm_dbg_kms(&dev_priv->drm,
		    "audio: %i, infoframes: %i, infoframes enabled: 0x%x\n",
		    pipe_config->has_audio, pipe_config->has_infoframe,
		    pipe_config->infoframes.enable);

	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL))
		drm_dbg_kms(&dev_priv->drm, "GCP: 0x%x\n",
			    pipe_config->infoframes.gcp);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.avi);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_SPD))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.spd);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_VENDOR))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.hdmi);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_DRM))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.drm);
	/*
	 * NOTE(review): the gamut metadata packet also dumps .drm —
	 * presumably because HDR (DRM) metadata is carried in the gamut
	 * metadata packet on some platforms; confirm before "fixing".
	 */
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.drm);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(DP_SDP_VSC))
		intel_dump_dp_vsc_sdp(dev_priv, &pipe_config->infoframes.vsc);

	drm_dbg_kms(&dev_priv->drm, "requested mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->hw.mode);
	drm_dbg_kms(&dev_priv->drm, "adjusted mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->hw.adjusted_mode);
	intel_dump_crtc_timings(dev_priv, &pipe_config->hw.adjusted_mode);
	drm_dbg_kms(&dev_priv->drm,
		    "port clock: %d, pipe src size: %dx%d, pixel rate %d\n",
		    pipe_config->port_clock,
		    pipe_config->pipe_src_w, pipe_config->pipe_src_h,
		    pipe_config->pixel_rate);

	drm_dbg_kms(&dev_priv->drm, "linetime: %d, ips linetime: %d\n",
		    pipe_config->linetime, pipe_config->ips_linetime);

	if (INTEL_GEN(dev_priv) >= 9)
		drm_dbg_kms(&dev_priv->drm,
			    "num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
			    crtc->num_scalers,
			    pipe_config->scaler_state.scaler_users,
			    pipe_config->scaler_state.scaler_id);

	if (HAS_GMCH(dev_priv))
		drm_dbg_kms(&dev_priv->drm,
			    "gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
			    pipe_config->gmch_pfit.control,
			    pipe_config->gmch_pfit.pgm_ratios,
			    pipe_config->gmch_pfit.lvds_border_bits);
	else
		drm_dbg_kms(&dev_priv->drm,
			    "pch pfit: " DRM_RECT_FMT ", %s, force thru: %s\n",
			    DRM_RECT_ARG(&pipe_config->pch_pfit.dst),
			    enableddisabled(pipe_config->pch_pfit.enabled),
			    yesno(pipe_config->pch_pfit.force_thru));

	drm_dbg_kms(&dev_priv->drm, "ips: %i, double wide: %i\n",
		    pipe_config->ips_enabled, pipe_config->double_wide);

	intel_dpll_dump_hw_state(dev_priv, &pipe_config->dpll_hw_state);

	if (IS_CHERRYVIEW(dev_priv))
		drm_dbg_kms(&dev_priv->drm,
			    "cgm_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
			    pipe_config->cgm_mode, pipe_config->gamma_mode,
			    pipe_config->gamma_enable, pipe_config->csc_enable);
	else
		drm_dbg_kms(&dev_priv->drm,
			    "csc_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
			    pipe_config->csc_mode, pipe_config->gamma_mode,
			    pipe_config->gamma_enable, pipe_config->csc_enable);

	drm_dbg_kms(&dev_priv->drm, "MST master transcoder: %s\n",
		    transcoder_name(pipe_config->mst_master_transcoder));

dump_planes:
	if (!state)
		return;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		if (plane->pipe == crtc->pipe)
			intel_dump_plane_state(plane_state);
	}
}
13214
/*
 * Verify that no digital port is claimed by more than one encoder, and
 * that MST and SST/HDMI use are not mixed on the same port.  Returns
 * true when the configuration is consistent.
 */
static bool check_digital_port_conflicts(struct intel_atomic_state *state)
{
	struct drm_device *dev = state->base.dev;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	unsigned int used_ports = 0;
	unsigned int used_mst_ports = 0;
	bool ret = true;

	/*
	 * We're going to peek into connector->state,
	 * hence connection_mutex must be held.
	 */
	drm_modeset_lock_assert_held(&dev->mode_config.connection_mutex);

	/*
	 * Walk the connector list instead of the encoder
	 * list to detect the problem on ddi platforms
	 * where there's just one encoder per digital port.
	 */
	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct drm_connector_state *connector_state;
		struct intel_encoder *encoder;

		/* Fall back to the current state if not part of this update. */
		connector_state =
			drm_atomic_get_new_connector_state(&state->base,
							   connector);
		if (!connector_state)
			connector_state = connector->state;

		if (!connector_state->best_encoder)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		drm_WARN_ON(dev, !connector_state->crtc);

		switch (encoder->type) {
		case INTEL_OUTPUT_DDI:
			if (drm_WARN_ON(dev, !HAS_DDI(to_i915(dev))))
				break;
			fallthrough;
		case INTEL_OUTPUT_DP:
		case INTEL_OUTPUT_HDMI:
		case INTEL_OUTPUT_EDP:
			/* the same port mustn't appear more than once */
			if (used_ports & BIT(encoder->port))
				ret = false;

			used_ports |= BIT(encoder->port);
			break;
		case INTEL_OUTPUT_DP_MST:
			/* MST ports may carry several streams legitimately. */
			used_mst_ports |=
				1 << encoder->port;
			break;
		default:
			break;
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	/* can't mix MST and SST/HDMI on the same port */
	if (used_ports & used_mst_ports)
		return false;

	return ret;
}
13283
/*
 * Copy the uapi state that may change without a full modeset (currently
 * just the color management blobs) into the hw state.
 */
static void
intel_crtc_copy_uapi_to_hw_state_nomodeset(struct intel_crtc_state *crtc_state)
{
	intel_crtc_copy_color_blobs(crtc_state);
}
13289
/*
 * Copy the full uapi crtc state (enable/active, modes, plus the
 * nomodeset parts) into the hw state, which is what the hardware is
 * actually programmed from.
 */
static void
intel_crtc_copy_uapi_to_hw_state(struct intel_crtc_state *crtc_state)
{
	crtc_state->hw.enable = crtc_state->uapi.enable;
	crtc_state->hw.active = crtc_state->uapi.active;
	crtc_state->hw.mode = crtc_state->uapi.mode;
	crtc_state->hw.adjusted_mode = crtc_state->uapi.adjusted_mode;
	intel_crtc_copy_uapi_to_hw_state_nomodeset(crtc_state);
}
13299
/*
 * Propagate the hw crtc state back into the uapi state so userspace
 * sees what was actually programmed (used e.g. after state readout).
 */
static void intel_crtc_copy_hw_to_uapi_state(struct intel_crtc_state *crtc_state)
{
	crtc_state->uapi.enable = crtc_state->hw.enable;
	crtc_state->uapi.active = crtc_state->hw.active;
	/* also updates uapi.mode_blob; only fails on blob allocation */
	drm_WARN_ON(crtc_state->uapi.crtc->dev,
		    drm_atomic_set_mode_for_crtc(&crtc_state->uapi, &crtc_state->hw.mode) < 0);

	crtc_state->uapi.adjusted_mode = crtc_state->hw.adjusted_mode;

	/* copy color blobs to uapi */
	drm_property_replace_blob(&crtc_state->uapi.degamma_lut,
				  crtc_state->hw.degamma_lut);
	drm_property_replace_blob(&crtc_state->uapi.gamma_lut,
				  crtc_state->hw.gamma_lut);
	drm_property_replace_blob(&crtc_state->uapi.ctm,
				  crtc_state->hw.ctm);
}
13317
/*
 * Reset @crtc_state to a freshly-allocated state before recomputing a
 * full modeset config, preserving only the fields listed below (uapi
 * state, dpll selection, scalers, etc.).  Returns 0 or -ENOMEM.
 */
static int
intel_crtc_prepare_cleared_state(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *saved_state;

	saved_state = intel_crtc_state_alloc(crtc);
	if (!saved_state)
		return -ENOMEM;

	/* free the old crtc_state->hw members */
	intel_crtc_free_hw_state(crtc_state);

	/* FIXME: before the switch to atomic started, a new pipe_config was
	 * kzalloc'd. Code that depends on any field being zero should be
	 * fixed, so that the crtc_state can be safely duplicated. For now,
	 * only fields that are know to not cause problems are preserved. */

	saved_state->uapi = crtc_state->uapi;
	saved_state->inherited = crtc_state->inherited;
	saved_state->scaler_state = crtc_state->scaler_state;
	saved_state->shared_dpll = crtc_state->shared_dpll;
	saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
	memcpy(saved_state->icl_port_dplls, crtc_state->icl_port_dplls,
	       sizeof(saved_state->icl_port_dplls));
	saved_state->crc_enabled = crtc_state->crc_enabled;
	/* these platforms track watermarks in the crtc state */
	if (IS_G4X(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		saved_state->wm = crtc_state->wm;

	/* overwrite in place so existing pointers to crtc_state stay valid */
	memcpy(crtc_state, saved_state, sizeof(*crtc_state));
	kfree(saved_state);

	intel_crtc_copy_uapi_to_hw_state(crtc_state);

	return 0;
}
13356
/*
 * Compute the full pipe config for a modeset: sanitize sync flags,
 * derive the baseline bpp, let each encoder adjust the config, then let
 * the crtc fix things up.  The encoder/crtc loop may run twice when the
 * crtc asks for a bandwidth-constrained RETRY.  Returns 0, -EDEADLK for
 * lock back-off, or a negative error code.
 */
static int
intel_modeset_pipe_config(struct intel_crtc_state *pipe_config)
{
	struct drm_crtc *crtc = pipe_config->uapi.crtc;
	struct drm_atomic_state *state = pipe_config->uapi.state;
	struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int base_bpp, ret, i;
	bool retry = true;

	/* default the transcoder to the pipe; encoders may override */
	pipe_config->cpu_transcoder =
		(enum transcoder) to_intel_crtc(crtc)->pipe;

	/*
	 * Sanitize sync polarity flags based on requested ones. If neither
	 * positive or negative polarity is requested, treat this as meaning
	 * negative polarity.
	 */
	if (!(pipe_config->hw.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
		pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;

	if (!(pipe_config->hw.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
		pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;

	ret = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
					pipe_config);
	if (ret)
		return ret;

	base_bpp = pipe_config->pipe_bpp;

	/*
	 * Determine the real pipe dimensions. Note that stereo modes can
	 * increase the actual pipe size due to the frame doubling and
	 * insertion of additional space for blanks between the frame. This
	 * is stored in the crtc timings. We use the requested mode to do this
	 * computation to clearly distinguish it from the adjusted mode, which
	 * can be changed by the connectors in the below retry loop.
	 */
	drm_mode_get_hv_timing(&pipe_config->hw.mode,
			       &pipe_config->pipe_src_w,
			       &pipe_config->pipe_src_h);

	for_each_new_connector_in_state(state, connector, connector_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(connector_state->best_encoder);

		if (connector_state->crtc != crtc)
			continue;

		if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) {
			drm_dbg_kms(&i915->drm,
				    "rejecting invalid cloning configuration\n");
			return -EINVAL;
		}

		/*
		 * Determine output_types before calling the .compute_config()
		 * hooks so that the hooks can use this information safely.
		 */
		if (encoder->compute_output_type)
			pipe_config->output_types |=
				BIT(encoder->compute_output_type(encoder, pipe_config,
								 connector_state));
		else
			pipe_config->output_types |= BIT(encoder->type);
	}

encoder_retry:
	/* Ensure the port clock defaults are reset when retrying. */
	pipe_config->port_clock = 0;
	pipe_config->pixel_multiplier = 1;

	/* Fill in default crtc timings, allow encoders to overwrite them. */
	drm_mode_set_crtcinfo(&pipe_config->hw.adjusted_mode,
			      CRTC_STEREO_DOUBLE);

	/* Pass our mode to the connectors and the CRTC to give them a chance to
	 * adjust it according to limitations or connector properties, and also
	 * a chance to reject the mode entirely.
	 */
	for_each_new_connector_in_state(state, connector, connector_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(connector_state->best_encoder);

		if (connector_state->crtc != crtc)
			continue;

		ret = encoder->compute_config(encoder, pipe_config,
					      connector_state);
		if (ret < 0) {
			/* -EDEADLK is a normal lock back-off, don't log it */
			if (ret != -EDEADLK)
				drm_dbg_kms(&i915->drm,
					    "Encoder config failure: %d\n",
					    ret);
			return ret;
		}
	}

	/* Set default port clock if not overwritten by the encoder. Needs to be
	 * done afterwards in case the encoder adjusts the mode. */
	if (!pipe_config->port_clock)
		pipe_config->port_clock = pipe_config->hw.adjusted_mode.crtc_clock
			* pipe_config->pixel_multiplier;

	ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
	if (ret == -EDEADLK)
		return ret;
	if (ret < 0) {
		drm_dbg_kms(&i915->drm, "CRTC fixup failed\n");
		return ret;
	}

	/* A single retry is allowed when the crtc lowers the bandwidth. */
	if (ret == RETRY) {
		if (drm_WARN(&i915->drm, !retry,
			     "loop in pipe configuration computation\n"))
			return -EINVAL;

		drm_dbg_kms(&i915->drm, "CRTC bw constrained, retrying\n");
		retry = false;
		goto encoder_retry;
	}

	/* Dithering seems to not pass-through bits correctly when it should, so
	 * only enable it on 6bpc panels and when its not a compliance
	 * test requesting 6bpc video pattern.
	 */
	pipe_config->dither = (pipe_config->pipe_bpp == 6*3) &&
		!pipe_config->dither_force_disable;
	drm_dbg_kms(&i915->drm,
		    "hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
		    base_bpp, pipe_config->pipe_bpp, pipe_config->dither);

	return 0;
}
13495
13496static int
13497intel_modeset_pipe_config_late(struct intel_crtc_state *crtc_state)
13498{
13499	struct intel_atomic_state *state =
13500		to_intel_atomic_state(crtc_state->uapi.state);
13501	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
13502	struct drm_connector_state *conn_state;
13503	struct drm_connector *connector;
13504	int i;
13505
13506	for_each_new_connector_in_state(&state->base, connector,
13507					conn_state, i) {
13508		struct intel_encoder *encoder =
13509			to_intel_encoder(conn_state->best_encoder);
13510		int ret;
13511
13512		if (conn_state->crtc != &crtc->base ||
13513		    !encoder->compute_config_late)
13514			continue;
13515
13516		ret = encoder->compute_config_late(encoder, crtc_state,
13517						   conn_state);
13518		if (ret)
13519			return ret;
13520	}
13521
13522	return 0;
13523}
13524
/*
 * Fuzzy clock comparison: equal clocks always match, a zero clock never
 * matches a non-zero one, and otherwise the difference must stay below
 * roughly 5% relative to the sum of the two clocks.
 */
bool intel_fuzzy_clock_check(int clock1, int clock2)
{
	int delta;

	if (clock1 == clock2)
		return true;

	if (clock1 == 0 || clock2 == 0)
		return false;

	delta = abs(clock1 - clock2);

	return (delta + clock1 + clock2) * 100 / (clock1 + clock2) < 105;
}
13542
13543static bool
13544intel_compare_m_n(unsigned int m, unsigned int n,
13545		  unsigned int m2, unsigned int n2,
13546		  bool exact)
13547{
13548	if (m == m2 && n == n2)
13549		return true;
13550
13551	if (exact || !m || !n || !m2 || !n2)
13552		return false;
13553
13554	BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);
13555
13556	if (n > n2) {
13557		while (n > n2) {
13558			m2 <<= 1;
13559			n2 <<= 1;
13560		}
13561	} else if (n < n2) {
13562		while (n < n2) {
13563			m <<= 1;
13564			n <<= 1;
13565		}
13566	}
13567
13568	if (n != n2)
13569		return false;
13570
13571	return intel_fuzzy_clock_check(m, m2);
13572}
13573
13574static bool
13575intel_compare_link_m_n(const struct intel_link_m_n *m_n,
13576		       const struct intel_link_m_n *m2_n2,
13577		       bool exact)
13578{
13579	return m_n->tu == m2_n2->tu &&
13580		intel_compare_m_n(m_n->gmch_m, m_n->gmch_n,
13581				  m2_n2->gmch_m, m2_n2->gmch_n, exact) &&
13582		intel_compare_m_n(m_n->link_m, m_n->link_n,
13583				  m2_n2->link_m, m2_n2->link_n, exact);
13584}
13585
13586static bool
13587intel_compare_infoframe(const union hdmi_infoframe *a,
13588			const union hdmi_infoframe *b)
13589{
13590	return memcmp(a, b, sizeof(*a)) == 0;
13591}
13592
13593static bool
13594intel_compare_dp_vsc_sdp(const struct drm_dp_vsc_sdp *a,
13595			 const struct drm_dp_vsc_sdp *b)
13596{
13597	return memcmp(a, b, sizeof(*a)) == 0;
13598}
13599
13600static void
13601pipe_config_infoframe_mismatch(struct drm_i915_private *dev_priv,
13602			       bool fastset, const char *name,
13603			       const union hdmi_infoframe *a,
13604			       const union hdmi_infoframe *b)
13605{
13606	if (fastset) {
13607		if (!drm_debug_enabled(DRM_UT_KMS))
13608			return;
13609
13610		drm_dbg_kms(&dev_priv->drm,
13611			    "fastset mismatch in %s infoframe\n", name);
13612		drm_dbg_kms(&dev_priv->drm, "expected:\n");
13613		hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, a);
13614		drm_dbg_kms(&dev_priv->drm, "found:\n");
13615		hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, b);
13616	} else {
13617		drm_err(&dev_priv->drm, "mismatch in %s infoframe\n", name);
13618		drm_err(&dev_priv->drm, "expected:\n");
13619		hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, a);
13620		drm_err(&dev_priv->drm, "found:\n");
13621		hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, b);
13622	}
13623}
13624
13625static void
13626pipe_config_dp_vsc_sdp_mismatch(struct drm_i915_private *dev_priv,
13627				bool fastset, const char *name,
13628				const struct drm_dp_vsc_sdp *a,
13629				const struct drm_dp_vsc_sdp *b)
13630{
13631	if (fastset) {
13632		if (!drm_debug_enabled(DRM_UT_KMS))
13633			return;
13634
13635		drm_dbg_kms(&dev_priv->drm,
13636			    "fastset mismatch in %s dp sdp\n", name);
13637		drm_dbg_kms(&dev_priv->drm, "expected:\n");
13638		drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, a);
13639		drm_dbg_kms(&dev_priv->drm, "found:\n");
13640		drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, b);
13641	} else {
13642		drm_err(&dev_priv->drm, "mismatch in %s dp sdp\n", name);
13643		drm_err(&dev_priv->drm, "expected:\n");
13644		drm_dp_vsc_sdp_log(KERN_ERR, dev_priv->drm.dev, a);
13645		drm_err(&dev_priv->drm, "found:\n");
13646		drm_dp_vsc_sdp_log(KERN_ERR, dev_priv->drm.dev, b);
13647	}
13648}
13649
13650static void __printf(4, 5)
13651pipe_config_mismatch(bool fastset, const struct intel_crtc *crtc,
13652		     const char *name, const char *format, ...)
13653{
13654	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
13655	struct va_format vaf;
13656	va_list args;
13657
13658	va_start(args, format);
13659	vaf.fmt = format;
13660	vaf.va = &args;
13661
13662	if (fastset)
13663		drm_dbg_kms(&i915->drm,
13664			    "[CRTC:%d:%s] fastset mismatch in %s %pV\n",
13665			    crtc->base.base.id, crtc->base.name, name, &vaf);
13666	else
13667		drm_err(&i915->drm, "[CRTC:%d:%s] mismatch in %s %pV\n",
13668			crtc->base.base.id, crtc->base.name, name, &vaf);
13669
13670	va_end(args);
13671}
13672
13673static bool fastboot_enabled(struct drm_i915_private *dev_priv)
13674{
13675	if (dev_priv->params.fastboot != -1)
13676		return dev_priv->params.fastboot;
13677
13678	/* Enable fastboot by default on Skylake and newer */
13679	if (INTEL_GEN(dev_priv) >= 9)
13680		return true;
13681
13682	/* Enable fastboot by default on VLV and CHV */
13683	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
13684		return true;
13685
13686	/* Disabled by default on all others */
13687	return false;
13688}
13689
13690static bool
13691intel_pipe_config_compare(const struct intel_crtc_state *current_config,
13692			  const struct intel_crtc_state *pipe_config,
13693			  bool fastset)
13694{
13695	struct drm_i915_private *dev_priv = to_i915(current_config->uapi.crtc->dev);
13696	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
13697	bool ret = true;
13698	u32 bp_gamma = 0;
13699	bool fixup_inherited = fastset &&
13700		current_config->inherited && !pipe_config->inherited;
13701
13702	if (fixup_inherited && !fastboot_enabled(dev_priv)) {
13703		drm_dbg_kms(&dev_priv->drm,
13704			    "initial modeset and fastboot not set\n");
13705		ret = false;
13706	}
13707
13708#define PIPE_CONF_CHECK_X(name) do { \
13709	if (current_config->name != pipe_config->name) { \
13710		pipe_config_mismatch(fastset, crtc, __stringify(name), \
13711				     "(expected 0x%08x, found 0x%08x)", \
13712				     current_config->name, \
13713				     pipe_config->name); \
13714		ret = false; \
13715	} \
13716} while (0)
13717
13718#define PIPE_CONF_CHECK_I(name) do { \
13719	if (current_config->name != pipe_config->name) { \
13720		pipe_config_mismatch(fastset, crtc, __stringify(name), \
13721				     "(expected %i, found %i)", \
13722				     current_config->name, \
13723				     pipe_config->name); \
13724		ret = false; \
13725	} \
13726} while (0)
13727
13728#define PIPE_CONF_CHECK_BOOL(name) do { \
13729	if (current_config->name != pipe_config->name) { \
13730		pipe_config_mismatch(fastset, crtc,  __stringify(name), \
13731				     "(expected %s, found %s)", \
13732				     yesno(current_config->name), \
13733				     yesno(pipe_config->name)); \
13734		ret = false; \
13735	} \
13736} while (0)
13737
13738/*
13739 * Checks state where we only read out the enabling, but not the entire
13740 * state itself (like full infoframes or ELD for audio). These states
13741 * require a full modeset on bootup to fix up.
13742 */
13743#define PIPE_CONF_CHECK_BOOL_INCOMPLETE(name) do { \
13744	if (!fixup_inherited || (!current_config->name && !pipe_config->name)) { \
13745		PIPE_CONF_CHECK_BOOL(name); \
13746	} else { \
13747		pipe_config_mismatch(fastset, crtc, __stringify(name), \
13748				     "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)", \
13749				     yesno(current_config->name), \
13750				     yesno(pipe_config->name)); \
13751		ret = false; \
13752	} \
13753} while (0)
13754
13755#define PIPE_CONF_CHECK_P(name) do { \
13756	if (current_config->name != pipe_config->name) { \
13757		pipe_config_mismatch(fastset, crtc, __stringify(name), \
13758				     "(expected %p, found %p)", \
13759				     current_config->name, \
13760				     pipe_config->name); \
13761		ret = false; \
13762	} \
13763} while (0)
13764
13765#define PIPE_CONF_CHECK_M_N(name) do { \
13766	if (!intel_compare_link_m_n(&current_config->name, \
13767				    &pipe_config->name,\
13768				    !fastset)) { \
13769		pipe_config_mismatch(fastset, crtc, __stringify(name), \
13770				     "(expected tu %i gmch %i/%i link %i/%i, " \
13771				     "found tu %i, gmch %i/%i link %i/%i)", \
13772				     current_config->name.tu, \
13773				     current_config->name.gmch_m, \
13774				     current_config->name.gmch_n, \
13775				     current_config->name.link_m, \
13776				     current_config->name.link_n, \
13777				     pipe_config->name.tu, \
13778				     pipe_config->name.gmch_m, \
13779				     pipe_config->name.gmch_n, \
13780				     pipe_config->name.link_m, \
13781				     pipe_config->name.link_n); \
13782		ret = false; \
13783	} \
13784} while (0)
13785
13786/* This is required for BDW+ where there is only one set of registers for
13787 * switching between high and low RR.
13788 * This macro can be used whenever a comparison has to be made between one
13789 * hw state and multiple sw state variables.
13790 */
13791#define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) do { \
13792	if (!intel_compare_link_m_n(&current_config->name, \
13793				    &pipe_config->name, !fastset) && \
13794	    !intel_compare_link_m_n(&current_config->alt_name, \
13795				    &pipe_config->name, !fastset)) { \
13796		pipe_config_mismatch(fastset, crtc, __stringify(name), \
13797				     "(expected tu %i gmch %i/%i link %i/%i, " \
13798				     "or tu %i gmch %i/%i link %i/%i, " \
13799				     "found tu %i, gmch %i/%i link %i/%i)", \
13800				     current_config->name.tu, \
13801				     current_config->name.gmch_m, \
13802				     current_config->name.gmch_n, \
13803				     current_config->name.link_m, \
13804				     current_config->name.link_n, \
13805				     current_config->alt_name.tu, \
13806				     current_config->alt_name.gmch_m, \
13807				     current_config->alt_name.gmch_n, \
13808				     current_config->alt_name.link_m, \
13809				     current_config->alt_name.link_n, \
13810				     pipe_config->name.tu, \
13811				     pipe_config->name.gmch_m, \
13812				     pipe_config->name.gmch_n, \
13813				     pipe_config->name.link_m, \
13814				     pipe_config->name.link_n); \
13815		ret = false; \
13816	} \
13817} while (0)
13818
13819#define PIPE_CONF_CHECK_FLAGS(name, mask) do { \
13820	if ((current_config->name ^ pipe_config->name) & (mask)) { \
13821		pipe_config_mismatch(fastset, crtc, __stringify(name), \
13822				     "(%x) (expected %i, found %i)", \
13823				     (mask), \
13824				     current_config->name & (mask), \
13825				     pipe_config->name & (mask)); \
13826		ret = false; \
13827	} \
13828} while (0)
13829
13830#define PIPE_CONF_CHECK_CLOCK_FUZZY(name) do { \
13831	if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
13832		pipe_config_mismatch(fastset, crtc, __stringify(name), \
13833				     "(expected %i, found %i)", \
13834				     current_config->name, \
13835				     pipe_config->name); \
13836		ret = false; \
13837	} \
13838} while (0)
13839
13840#define PIPE_CONF_CHECK_INFOFRAME(name) do { \
13841	if (!intel_compare_infoframe(&current_config->infoframes.name, \
13842				     &pipe_config->infoframes.name)) { \
13843		pipe_config_infoframe_mismatch(dev_priv, fastset, __stringify(name), \
13844					       &current_config->infoframes.name, \
13845					       &pipe_config->infoframes.name); \
13846		ret = false; \
13847	} \
13848} while (0)
13849
13850#define PIPE_CONF_CHECK_DP_VSC_SDP(name) do { \
13851	if (!current_config->has_psr && !pipe_config->has_psr && \
13852	    !intel_compare_dp_vsc_sdp(&current_config->infoframes.name, \
13853				      &pipe_config->infoframes.name)) { \
13854		pipe_config_dp_vsc_sdp_mismatch(dev_priv, fastset, __stringify(name), \
13855						&current_config->infoframes.name, \
13856						&pipe_config->infoframes.name); \
13857		ret = false; \
13858	} \
13859} while (0)
13860
13861#define PIPE_CONF_CHECK_COLOR_LUT(name1, name2, bit_precision) do { \
13862	if (current_config->name1 != pipe_config->name1) { \
13863		pipe_config_mismatch(fastset, crtc, __stringify(name1), \
13864				"(expected %i, found %i, won't compare lut values)", \
13865				current_config->name1, \
13866				pipe_config->name1); \
13867		ret = false;\
13868	} else { \
13869		if (!intel_color_lut_equal(current_config->name2, \
13870					pipe_config->name2, pipe_config->name1, \
13871					bit_precision)) { \
13872			pipe_config_mismatch(fastset, crtc, __stringify(name2), \
13873					"hw_state doesn't match sw_state"); \
13874			ret = false; \
13875		} \
13876	} \
13877} while (0)
13878
13879#define PIPE_CONF_QUIRK(quirk) \
13880	((current_config->quirks | pipe_config->quirks) & (quirk))
13881
13882	PIPE_CONF_CHECK_I(cpu_transcoder);
13883
13884	PIPE_CONF_CHECK_BOOL(has_pch_encoder);
13885	PIPE_CONF_CHECK_I(fdi_lanes);
13886	PIPE_CONF_CHECK_M_N(fdi_m_n);
13887
13888	PIPE_CONF_CHECK_I(lane_count);
13889	PIPE_CONF_CHECK_X(lane_lat_optim_mask);
13890
13891	if (INTEL_GEN(dev_priv) < 8) {
13892		PIPE_CONF_CHECK_M_N(dp_m_n);
13893
13894		if (current_config->has_drrs)
13895			PIPE_CONF_CHECK_M_N(dp_m2_n2);
13896	} else
13897		PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
13898
13899	PIPE_CONF_CHECK_X(output_types);
13900
13901	PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hdisplay);
13902	PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_htotal);
13903	PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_start);
13904	PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_end);
13905	PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_start);
13906	PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_end);
13907
13908	PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vdisplay);
13909	PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vtotal);
13910	PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_start);
13911	PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_end);
13912	PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_start);
13913	PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_end);
13914
13915	PIPE_CONF_CHECK_I(pixel_multiplier);
13916	PIPE_CONF_CHECK_I(output_format);
13917	PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
13918	if ((INTEL_GEN(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
13919	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
13920		PIPE_CONF_CHECK_BOOL(limited_color_range);
13921
13922	PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
13923	PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio);
13924	PIPE_CONF_CHECK_BOOL(has_infoframe);
13925	PIPE_CONF_CHECK_BOOL(fec_enable);
13926
13927	PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio);
13928
13929	PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
13930			      DRM_MODE_FLAG_INTERLACE);
13931
13932	if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
13933		PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
13934				      DRM_MODE_FLAG_PHSYNC);
13935		PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
13936				      DRM_MODE_FLAG_NHSYNC);
13937		PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
13938				      DRM_MODE_FLAG_PVSYNC);
13939		PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
13940				      DRM_MODE_FLAG_NVSYNC);
13941	}
13942
13943	PIPE_CONF_CHECK_X(gmch_pfit.control);
13944	/* pfit ratios are autocomputed by the hw on gen4+ */
13945	if (INTEL_GEN(dev_priv) < 4)
13946		PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
13947	PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);
13948
13949	/*
13950	 * Changing the EDP transcoder input mux
13951	 * (A_ONOFF vs. A_ON) requires a full modeset.
13952	 */
13953	PIPE_CONF_CHECK_BOOL(pch_pfit.force_thru);
13954
13955	if (!fastset) {
13956		PIPE_CONF_CHECK_I(pipe_src_w);
13957		PIPE_CONF_CHECK_I(pipe_src_h);
13958
13959		PIPE_CONF_CHECK_BOOL(pch_pfit.enabled);
13960		if (current_config->pch_pfit.enabled) {
13961			PIPE_CONF_CHECK_I(pch_pfit.dst.x1);
13962			PIPE_CONF_CHECK_I(pch_pfit.dst.y1);
13963			PIPE_CONF_CHECK_I(pch_pfit.dst.x2);
13964			PIPE_CONF_CHECK_I(pch_pfit.dst.y2);
13965		}
13966
13967		PIPE_CONF_CHECK_I(scaler_state.scaler_id);
13968		PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate);
13969
13970		PIPE_CONF_CHECK_X(gamma_mode);
13971		if (IS_CHERRYVIEW(dev_priv))
13972			PIPE_CONF_CHECK_X(cgm_mode);
13973		else
13974			PIPE_CONF_CHECK_X(csc_mode);
13975		PIPE_CONF_CHECK_BOOL(gamma_enable);
13976		PIPE_CONF_CHECK_BOOL(csc_enable);
13977
13978		PIPE_CONF_CHECK_I(linetime);
13979		PIPE_CONF_CHECK_I(ips_linetime);
13980
13981		bp_gamma = intel_color_get_gamma_bit_precision(pipe_config);
13982		if (bp_gamma)
13983			PIPE_CONF_CHECK_COLOR_LUT(gamma_mode, hw.gamma_lut, bp_gamma);
13984	}
13985
13986	PIPE_CONF_CHECK_BOOL(double_wide);
13987
13988	PIPE_CONF_CHECK_P(shared_dpll);
13989	PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
13990	PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
13991	PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
13992	PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
13993	PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
13994	PIPE_CONF_CHECK_X(dpll_hw_state.spll);
13995	PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
13996	PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
13997	PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
13998	PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr0);
13999	PIPE_CONF_CHECK_X(dpll_hw_state.ebb0);
14000	PIPE_CONF_CHECK_X(dpll_hw_state.ebb4);
14001	PIPE_CONF_CHECK_X(dpll_hw_state.pll0);
14002	PIPE_CONF_CHECK_X(dpll_hw_state.pll1);
14003	PIPE_CONF_CHECK_X(dpll_hw_state.pll2);
14004	PIPE_CONF_CHECK_X(dpll_hw_state.pll3);
14005	PIPE_CONF_CHECK_X(dpll_hw_state.pll6);
14006	PIPE_CONF_CHECK_X(dpll_hw_state.pll8);
14007	PIPE_CONF_CHECK_X(dpll_hw_state.pll9);
14008	PIPE_CONF_CHECK_X(dpll_hw_state.pll10);
14009	PIPE_CONF_CHECK_X(dpll_hw_state.pcsdw12);
14010	PIPE_CONF_CHECK_X(dpll_hw_state.mg_refclkin_ctl);
14011	PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_coreclkctl1);
14012	PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_hsclkctl);
14013	PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div0);
14014	PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div1);
14015	PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_lf);
14016	PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_frac_lock);
14017	PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_ssc);
14018	PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_bias);
14019	PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_tdc_coldst_bias);
14020
14021	PIPE_CONF_CHECK_X(dsi_pll.ctrl);
14022	PIPE_CONF_CHECK_X(dsi_pll.div);
14023
14024	if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5)
14025		PIPE_CONF_CHECK_I(pipe_bpp);
14026
14027	PIPE_CONF_CHECK_CLOCK_FUZZY(hw.adjusted_mode.crtc_clock);
14028	PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
14029
14030	PIPE_CONF_CHECK_I(min_voltage_level);
14031
14032	PIPE_CONF_CHECK_X(infoframes.enable);
14033	PIPE_CONF_CHECK_X(infoframes.gcp);
14034	PIPE_CONF_CHECK_INFOFRAME(avi);
14035	PIPE_CONF_CHECK_INFOFRAME(spd);
14036	PIPE_CONF_CHECK_INFOFRAME(hdmi);
14037	PIPE_CONF_CHECK_INFOFRAME(drm);
14038	PIPE_CONF_CHECK_DP_VSC_SDP(vsc);
14039
14040	PIPE_CONF_CHECK_X(sync_mode_slaves_mask);
14041	PIPE_CONF_CHECK_I(master_transcoder);
14042
14043	PIPE_CONF_CHECK_I(dsc.compression_enable);
14044	PIPE_CONF_CHECK_I(dsc.dsc_split);
14045	PIPE_CONF_CHECK_I(dsc.compressed_bpp);
14046
14047	PIPE_CONF_CHECK_I(mst_master_transcoder);
14048
14049#undef PIPE_CONF_CHECK_X
14050#undef PIPE_CONF_CHECK_I
14051#undef PIPE_CONF_CHECK_BOOL
14052#undef PIPE_CONF_CHECK_BOOL_INCOMPLETE
14053#undef PIPE_CONF_CHECK_P
14054#undef PIPE_CONF_CHECK_FLAGS
14055#undef PIPE_CONF_CHECK_CLOCK_FUZZY
14056#undef PIPE_CONF_CHECK_COLOR_LUT
14057#undef PIPE_CONF_QUIRK
14058
14059	return ret;
14060}
14061
14062static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
14063					   const struct intel_crtc_state *pipe_config)
14064{
14065	if (pipe_config->has_pch_encoder) {
14066		int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
14067							    &pipe_config->fdi_m_n);
14068		int dotclock = pipe_config->hw.adjusted_mode.crtc_clock;
14069
14070		/*
14071		 * FDI already provided one idea for the dotclock.
14072		 * Yell if the encoder disagrees.
14073		 */
14074		drm_WARN(&dev_priv->drm,
14075			 !intel_fuzzy_clock_check(fdi_dotclock, dotclock),
14076			 "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
14077			 fdi_dotclock, dotclock);
14078	}
14079}
14080
/*
 * Read the SKL+ watermark and DDB state back from the hardware and
 * compare it against the software state in @new_crtc_state, logging a
 * drm_err() for every discrepancy.  No-op on pre-gen9 or when the pipe
 * is not active.
 */
static void verify_wm_state(struct intel_crtc *crtc,
			    struct intel_crtc_state *new_crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	/* Scratch copy of the hw state; heap-allocated (kzalloc below). */
	struct skl_hw_state {
		struct skl_ddb_entry ddb_y[I915_MAX_PLANES];
		struct skl_ddb_entry ddb_uv[I915_MAX_PLANES];
		struct skl_pipe_wm wm;
	} *hw;
	struct skl_pipe_wm *sw_wm;
	struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
	u8 hw_enabled_slices;
	const enum pipe pipe = crtc->pipe;
	int plane, level, max_level = ilk_wm_max_level(dev_priv);

	if (INTEL_GEN(dev_priv) < 9 || !new_crtc_state->hw.active)
		return;

	hw = kzalloc(sizeof(*hw), GFP_KERNEL);
	if (!hw)
		return;

	skl_pipe_wm_get_hw_state(crtc, &hw->wm);
	sw_wm = &new_crtc_state->wm.skl.optimal;

	skl_pipe_ddb_get_hw_state(crtc, hw->ddb_y, hw->ddb_uv);

	hw_enabled_slices = intel_enabled_dbuf_slices_mask(dev_priv);

	/* On gen11+ also verify the DBUF slice enablement mask. */
	if (INTEL_GEN(dev_priv) >= 11 &&
	    hw_enabled_slices != dev_priv->dbuf.enabled_slices)
		drm_err(&dev_priv->drm,
			"mismatch in DBUF Slices (expected 0x%x, got 0x%x)\n",
			dev_priv->dbuf.enabled_slices,
			hw_enabled_slices);

	/* planes */
	for_each_universal_plane(dev_priv, pipe, plane) {
		struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;

		hw_plane_wm = &hw->wm.planes[plane];
		sw_plane_wm = &sw_wm->planes[plane];

		/*
		 * Watermarks: each level must match the sw state; level 0
		 * may alternatively match the SAGV wm0 value.
		 */
		for (level = 0; level <= max_level; level++) {
			if (skl_wm_level_equals(&hw_plane_wm->wm[level],
						&sw_plane_wm->wm[level]) ||
			    (level == 0 && skl_wm_level_equals(&hw_plane_wm->wm[level],
							       &sw_plane_wm->sagv_wm0)))
				continue;

			drm_err(&dev_priv->drm,
				"mismatch in WM pipe %c plane %d level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
				pipe_name(pipe), plane + 1, level,
				sw_plane_wm->wm[level].plane_en,
				sw_plane_wm->wm[level].plane_res_b,
				sw_plane_wm->wm[level].plane_res_l,
				hw_plane_wm->wm[level].plane_en,
				hw_plane_wm->wm[level].plane_res_b,
				hw_plane_wm->wm[level].plane_res_l);
		}

		/* Transition watermark. */
		if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
					 &sw_plane_wm->trans_wm)) {
			drm_err(&dev_priv->drm,
				"mismatch in trans WM pipe %c plane %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
				pipe_name(pipe), plane + 1,
				sw_plane_wm->trans_wm.plane_en,
				sw_plane_wm->trans_wm.plane_res_b,
				sw_plane_wm->trans_wm.plane_res_l,
				hw_plane_wm->trans_wm.plane_en,
				hw_plane_wm->trans_wm.plane_res_b,
				hw_plane_wm->trans_wm.plane_res_l);
		}

		/* DDB */
		hw_ddb_entry = &hw->ddb_y[plane];
		sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[plane];

		if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
			drm_err(&dev_priv->drm,
				"mismatch in DDB state pipe %c plane %d (expected (%u,%u), found (%u,%u))\n",
				pipe_name(pipe), plane + 1,
				sw_ddb_entry->start, sw_ddb_entry->end,
				hw_ddb_entry->start, hw_ddb_entry->end);
		}
	}

	/*
	 * cursor
	 * If the cursor plane isn't active, we may not have updated its ddb
	 * allocation. In that case since the ddb allocation will be updated
	 * once the plane becomes visible, we can skip this check
	 */
	/*
	 * NOTE(review): the `if (1)` looks like a leftover from when this
	 * cursor check was conditional (see comment above) — confirm
	 * whether the visibility condition should be restored.
	 */
	if (1) {
		struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;

		hw_plane_wm = &hw->wm.planes[PLANE_CURSOR];
		sw_plane_wm = &sw_wm->planes[PLANE_CURSOR];

		/* Watermarks: same rules as for the universal planes above. */
		for (level = 0; level <= max_level; level++) {
			if (skl_wm_level_equals(&hw_plane_wm->wm[level],
						&sw_plane_wm->wm[level]) ||
			    (level == 0 && skl_wm_level_equals(&hw_plane_wm->wm[level],
							       &sw_plane_wm->sagv_wm0)))
				continue;

			drm_err(&dev_priv->drm,
				"mismatch in WM pipe %c cursor level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
				pipe_name(pipe), level,
				sw_plane_wm->wm[level].plane_en,
				sw_plane_wm->wm[level].plane_res_b,
				sw_plane_wm->wm[level].plane_res_l,
				hw_plane_wm->wm[level].plane_en,
				hw_plane_wm->wm[level].plane_res_b,
				hw_plane_wm->wm[level].plane_res_l);
		}

		if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
					 &sw_plane_wm->trans_wm)) {
			drm_err(&dev_priv->drm,
				"mismatch in trans WM pipe %c cursor (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
				pipe_name(pipe),
				sw_plane_wm->trans_wm.plane_en,
				sw_plane_wm->trans_wm.plane_res_b,
				sw_plane_wm->trans_wm.plane_res_l,
				hw_plane_wm->trans_wm.plane_en,
				hw_plane_wm->trans_wm.plane_res_b,
				hw_plane_wm->trans_wm.plane_res_l);
		}

		/* DDB */
		hw_ddb_entry = &hw->ddb_y[PLANE_CURSOR];
		sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];

		if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
			drm_err(&dev_priv->drm,
				"mismatch in DDB state pipe %c cursor (expected (%u,%u), found (%u,%u))\n",
				pipe_name(pipe),
				sw_ddb_entry->start, sw_ddb_entry->end,
				hw_ddb_entry->start, hw_ddb_entry->end);
		}
	}

	kfree(hw);
}
14228
/*
 * Verify each connector in @state that is assigned to @crtc: check the
 * connector's own state consistency and that its atomic best_encoder
 * matches the legacy connector->encoder pointer.
 */
static void
verify_connector_state(struct intel_atomic_state *state,
		       struct intel_crtc *crtc)
{
	struct drm_connector *connector;
	struct drm_connector_state *new_conn_state;
	int i;

	for_each_new_connector_in_state(&state->base, connector, new_conn_state, i) {
		struct drm_encoder *encoder = connector->encoder;
		struct intel_crtc_state *crtc_state = NULL;

		if (new_conn_state->crtc != &crtc->base)
			continue;

		/*
		 * NOTE(review): this NULL check appears redundant — &crtc->base
		 * was already formed in the comparison above, which can only
		 * be reached with a non-NULL crtc. Confirm whether a NULL-crtc
		 * caller is actually possible.
		 */
		if (crtc)
			crtc_state = intel_atomic_get_new_crtc_state(state, crtc);

		intel_connector_verify_state(crtc_state, new_conn_state);

		I915_STATE_WARN(new_conn_state->best_encoder != encoder,
		     "connector's atomic encoder doesn't match legacy encoder\n");
	}
}
14253
/*
 * Verify every encoder touched by @state: its sw enabled/disabled
 * bookkeeping must agree with the connector states, and a detached
 * encoder must not still be enabled in hardware.
 */
static void
verify_encoder_state(struct drm_i915_private *dev_priv, struct intel_atomic_state *state)
{
	struct intel_encoder *encoder;
	struct drm_connector *connector;
	struct drm_connector_state *old_conn_state, *new_conn_state;
	int i;

	for_each_intel_encoder(&dev_priv->drm, encoder) {
		/*
		 * found: the encoder appears in this commit (old or new
		 * connector state references it).
		 * enabled: some connector still uses it in the new state.
		 */
		bool enabled = false, found = false;
		enum pipe pipe;

		drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s]\n",
			    encoder->base.base.id,
			    encoder->base.name);

		for_each_oldnew_connector_in_state(&state->base, connector, old_conn_state,
						   new_conn_state, i) {
			if (old_conn_state->best_encoder == &encoder->base)
				found = true;

			if (new_conn_state->best_encoder != &encoder->base)
				continue;
			found = enabled = true;

			I915_STATE_WARN(new_conn_state->crtc !=
					encoder->base.crtc,
			     "connector's crtc doesn't match encoder crtc\n");
		}

		/* Encoders untouched by this commit are not checked. */
		if (!found)
			continue;

		I915_STATE_WARN(!!encoder->base.crtc != enabled,
		     "encoder's enabled state mismatch "
		     "(expected %i, found %i)\n",
		     !!encoder->base.crtc, enabled);

		/*
		 * A detached encoder must also be off in hardware; read the
		 * hw state back to confirm.
		 */
		if (!encoder->base.crtc) {
			bool active;

			active = encoder->get_hw_state(encoder, &pipe);
			I915_STATE_WARN(active,
			     "encoder detached but still enabled on pipe %c.\n",
			     pipe_name(pipe));
		}
	}
}
14302
/*
 * Read the full crtc state back from the hardware and compare it with
 * the just-committed software state in @new_crtc_state.
 *
 * @old_crtc_state is no longer needed at this point: it is destroyed,
 * reset, and reused (as "pipe_config") as scratch storage for the
 * hardware readout, preserving only its uapi.state pointer.
 */
static void
verify_crtc_state(struct intel_crtc *crtc,
		  struct intel_crtc_state *old_crtc_state,
		  struct intel_crtc_state *new_crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_encoder *encoder;
	struct intel_crtc_state *pipe_config = old_crtc_state;
	struct drm_atomic_state *state = old_crtc_state->uapi.state;

	/* Recycle the old state as a clean buffer for the hw readout. */
	__drm_atomic_helper_crtc_destroy_state(&old_crtc_state->uapi);
	intel_crtc_free_hw_state(old_crtc_state);
	intel_crtc_state_reset(old_crtc_state, crtc);
	old_crtc_state->uapi.state = state;

	drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s]\n", crtc->base.base.id,
		    crtc->base.name);

	pipe_config->hw.enable = new_crtc_state->hw.enable;

	pipe_config->hw.active =
		dev_priv->display.get_pipe_config(crtc, pipe_config);

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv) && pipe_config->hw.active)
		pipe_config->hw.active = new_crtc_state->hw.active;

	I915_STATE_WARN(new_crtc_state->hw.active != pipe_config->hw.active,
			"crtc active state doesn't match with hw state "
			"(expected %i, found %i)\n",
			new_crtc_state->hw.active, pipe_config->hw.active);

	I915_STATE_WARN(crtc->active != new_crtc_state->hw.active,
			"transitional active state does not match atomic hw state "
			"(expected %i, found %i)\n",
			new_crtc_state->hw.active, crtc->active);

	/* Every encoder on this crtc must agree on active state and pipe. */
	for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
		enum pipe pipe;
		bool active;

		active = encoder->get_hw_state(encoder, &pipe);
		I915_STATE_WARN(active != new_crtc_state->hw.active,
				"[ENCODER:%i] active %i with crtc active %i\n",
				encoder->base.base.id, active,
				new_crtc_state->hw.active);

		I915_STATE_WARN(active && crtc->pipe != pipe,
				"Encoder connected to wrong pipe %c\n",
				pipe_name(pipe));

		/* Fold the encoder's hw state into the readout. */
		if (active)
			encoder->get_config(encoder, pipe_config);
	}

	intel_crtc_compute_pixel_rate(pipe_config);

	/* Nothing more to compare when the pipe is off. */
	if (!new_crtc_state->hw.active)
		return;

	intel_pipe_config_sanity_check(dev_priv, pipe_config);

	if (!intel_pipe_config_compare(new_crtc_state,
				       pipe_config, false)) {
		I915_STATE_WARN(1, "pipe state doesn't match!\n");
		intel_dump_pipe_config(pipe_config, NULL, "[hw state]");
		intel_dump_pipe_config(new_crtc_state, NULL, "[sw state]");
	}
}
14373
14374static void
14375intel_verify_planes(struct intel_atomic_state *state)
14376{
14377	struct intel_plane *plane;
14378	const struct intel_plane_state *plane_state;
14379	int i;
14380
14381	for_each_new_intel_plane_in_state(state, plane,
14382					  plane_state, i)
14383		assert_plane(plane, plane_state->planar_slave ||
14384			     plane_state->uapi.visible);
14385}
14386
/*
 * Verify one shared DPLL's software tracking against its hardware
 * state.  When @crtc is NULL only the global pll bookkeeping is
 * checked; otherwise the pll's active/enabled crtc masks are checked
 * against @crtc and @new_crtc_state.
 */
static void
verify_single_dpll_state(struct drm_i915_private *dev_priv,
			 struct intel_shared_dpll *pll,
			 struct intel_crtc *crtc,
			 struct intel_crtc_state *new_crtc_state)
{
	struct intel_dpll_hw_state dpll_hw_state;
	unsigned int crtc_mask;
	bool active;

	memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));

	drm_dbg_kms(&dev_priv->drm, "%s\n", pll->info->name);

	active = pll->info->funcs->get_hw_state(dev_priv, pll, &dpll_hw_state);

	/* Always-on plls legitimately run with no sw users. */
	if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) {
		I915_STATE_WARN(!pll->on && pll->active_mask,
		     "pll in active use but not on in sw tracking\n");
		I915_STATE_WARN(pll->on && !pll->active_mask,
		     "pll is on but not used by any active crtc\n");
		I915_STATE_WARN(pll->on != active,
		     "pll on state mismatch (expected %i, found %i)\n",
		     pll->on, active);
	}

	if (!crtc) {
		I915_STATE_WARN(pll->active_mask & ~pll->state.crtc_mask,
				"more active pll users than references: %x vs %x\n",
				pll->active_mask, pll->state.crtc_mask);

		return;
	}

	crtc_mask = drm_crtc_mask(&crtc->base);

	/* An active crtc must be in the pll's active mask, and vice versa. */
	if (new_crtc_state->hw.active)
		I915_STATE_WARN(!(pll->active_mask & crtc_mask),
				"pll active mismatch (expected pipe %c in active mask 0x%02x)\n",
				pipe_name(crtc->pipe), pll->active_mask);
	else
		I915_STATE_WARN(pll->active_mask & crtc_mask,
				"pll active mismatch (didn't expect pipe %c in active mask 0x%02x)\n",
				pipe_name(crtc->pipe), pll->active_mask);

	I915_STATE_WARN(!(pll->state.crtc_mask & crtc_mask),
			"pll enabled crtcs mismatch (expected 0x%x in 0x%02x)\n",
			crtc_mask, pll->state.crtc_mask);

	/* The pll's cached hw state must match what's really programmed. */
	I915_STATE_WARN(pll->on && memcmp(&pll->state.hw_state,
					  &dpll_hw_state,
					  sizeof(dpll_hw_state)),
			"pll hw state mismatch\n");
}
14441
14442static void
14443verify_shared_dpll_state(struct intel_crtc *crtc,
14444			 struct intel_crtc_state *old_crtc_state,
14445			 struct intel_crtc_state *new_crtc_state)
14446{
14447	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
14448
14449	if (new_crtc_state->shared_dpll)
14450		verify_single_dpll_state(dev_priv, new_crtc_state->shared_dpll, crtc, new_crtc_state);
14451
14452	if (old_crtc_state->shared_dpll &&
14453	    old_crtc_state->shared_dpll != new_crtc_state->shared_dpll) {
14454		unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
14455		struct intel_shared_dpll *pll = old_crtc_state->shared_dpll;
14456
14457		I915_STATE_WARN(pll->active_mask & crtc_mask,
14458				"pll active mismatch (didn't expect pipe %c in active mask)\n",
14459				pipe_name(crtc->pipe));
14460		I915_STATE_WARN(pll->state.crtc_mask & crtc_mask,
14461				"pll enabled crtcs mismatch (found %x in enabled mask)\n",
14462				pipe_name(crtc->pipe));
14463	}
14464}
14465
14466static void
14467intel_modeset_verify_crtc(struct intel_crtc *crtc,
14468			  struct intel_atomic_state *state,
14469			  struct intel_crtc_state *old_crtc_state,
14470			  struct intel_crtc_state *new_crtc_state)
14471{
14472	if (!needs_modeset(new_crtc_state) && !new_crtc_state->update_pipe)
14473		return;
14474
14475	verify_wm_state(crtc, new_crtc_state);
14476	verify_connector_state(state, crtc);
14477	verify_crtc_state(crtc, old_crtc_state, new_crtc_state);
14478	verify_shared_dpll_state(crtc, old_crtc_state, new_crtc_state);
14479}
14480
14481static void
14482verify_disabled_dpll_state(struct drm_i915_private *dev_priv)
14483{
14484	int i;
14485
14486	for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++)
14487		verify_single_dpll_state(dev_priv,
14488					 &dev_priv->dpll.shared_dplls[i],
14489					 NULL, NULL);
14490}
14491
/*
 * Verify the parts of the modeset state not tied to a specific crtc:
 * encoder state, connectors without a crtc, and plls that should
 * currently have no crtc attached.
 */
static void
intel_modeset_verify_disabled(struct drm_i915_private *dev_priv,
			      struct intel_atomic_state *state)
{
	verify_encoder_state(dev_priv, state);
	verify_connector_state(state, NULL);
	verify_disabled_dpll_state(dev_priv);
}
14500
/*
 * Refresh the crtc's vblank timestamping constants and scanline counter
 * offset for the newly programmed mode. Must run with up-to-date
 * adjusted_mode timings, before any vblank-evasion logic relies on them.
 */
static void
intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->hw.adjusted_mode;

	drm_calc_timestamping_constants(&crtc->base, adjusted_mode);

	crtc->mode_flags = crtc_state->mode_flags;

	/*
	 * The scanline counter increments at the leading edge of hsync.
	 *
	 * On most platforms it starts counting from vtotal-1 on the
	 * first active line. That means the scanline counter value is
	 * always one less than what we would expect. Ie. just after
	 * start of vblank, which also occurs at start of hsync (on the
	 * last active line), the scanline counter will read vblank_start-1.
	 *
	 * On gen2 the scanline counter starts counting from 1 instead
	 * of vtotal-1, so we have to subtract one (or rather add vtotal-1
	 * to keep the value positive), instead of adding one.
	 *
	 * On HSW+ the behaviour of the scanline counter depends on the output
	 * type. For DP ports it behaves like most other platforms, but on HDMI
	 * there's an extra 1 line difference. So we need to add two instead of
	 * one to the value.
	 *
	 * On VLV/CHV DSI the scanline counter would appear to increment
	 * approx. 1/3 of a scanline before start of vblank. Unfortunately
	 * that means we can't tell whether we're in vblank or not while
	 * we're on that particular line. We must still set scanline_offset
	 * to 1 so that the vblank timestamps come out correct when we query
	 * the scanline counter from within the vblank interrupt handler.
	 * However if queried just before the start of vblank we'll get an
	 * answer that's slightly in the future.
	 */
	if (IS_GEN(dev_priv, 2)) {
		int vtotal;

		vtotal = adjusted_mode->crtc_vtotal;
		if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
			vtotal /= 2;

		crtc->scanline_offset = vtotal - 1;
	} else if (HAS_DDI(dev_priv) &&
		   intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
		crtc->scanline_offset = 2;
	} else {
		crtc->scanline_offset = 1;
	}
}
14555
14556static void intel_modeset_clear_plls(struct intel_atomic_state *state)
14557{
14558	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
14559	struct intel_crtc_state *new_crtc_state;
14560	struct intel_crtc *crtc;
14561	int i;
14562
14563	if (!dev_priv->display.crtc_compute_clock)
14564		return;
14565
14566	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
14567		if (!needs_modeset(new_crtc_state))
14568			continue;
14569
14570		intel_release_shared_dplls(state, crtc);
14571	}
14572}
14573
14574/*
14575 * This implements the workaround described in the "notes" section of the mode
14576 * set sequence documentation. When going from no pipes or single pipe to
14577 * multiple pipes, and planes are enabled after the pipe, we need to wait at
14578 * least 2 vblanks on the first pipe before enabling planes on the second pipe.
14579 */
14580static int hsw_mode_set_planes_workaround(struct intel_atomic_state *state)
14581{
14582	struct intel_crtc_state *crtc_state;
14583	struct intel_crtc *crtc;
14584	struct intel_crtc_state *first_crtc_state = NULL;
14585	struct intel_crtc_state *other_crtc_state = NULL;
14586	enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
14587	int i;
14588
14589	/* look at all crtc's that are going to be enabled in during modeset */
14590	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
14591		if (!crtc_state->hw.active ||
14592		    !needs_modeset(crtc_state))
14593			continue;
14594
14595		if (first_crtc_state) {
14596			other_crtc_state = crtc_state;
14597			break;
14598		} else {
14599			first_crtc_state = crtc_state;
14600			first_pipe = crtc->pipe;
14601		}
14602	}
14603
14604	/* No workaround needed? */
14605	if (!first_crtc_state)
14606		return 0;
14607
14608	/* w/a possibly needed, check how many crtc's are already enabled. */
14609	for_each_intel_crtc(state->base.dev, crtc) {
14610		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
14611		if (IS_ERR(crtc_state))
14612			return PTR_ERR(crtc_state);
14613
14614		crtc_state->hsw_workaround_pipe = INVALID_PIPE;
14615
14616		if (!crtc_state->hw.active ||
14617		    needs_modeset(crtc_state))
14618			continue;
14619
14620		/* 2 or more enabled crtcs means no need for w/a */
14621		if (enabled_pipe != INVALID_PIPE)
14622			return 0;
14623
14624		enabled_pipe = crtc->pipe;
14625	}
14626
14627	if (enabled_pipe != INVALID_PIPE)
14628		first_crtc_state->hsw_workaround_pipe = enabled_pipe;
14629	else if (other_crtc_state)
14630		other_crtc_state->hsw_workaround_pipe = first_pipe;
14631
14632	return 0;
14633}
14634
14635u8 intel_calc_active_pipes(struct intel_atomic_state *state,
14636			   u8 active_pipes)
14637{
14638	const struct intel_crtc_state *crtc_state;
14639	struct intel_crtc *crtc;
14640	int i;
14641
14642	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
14643		if (crtc_state->hw.active)
14644			active_pipes |= BIT(crtc->pipe);
14645		else
14646			active_pipes &= ~BIT(crtc->pipe);
14647	}
14648
14649	return active_pipes;
14650}
14651
14652static int intel_modeset_checks(struct intel_atomic_state *state)
14653{
14654	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
14655
14656	state->modeset = true;
14657
14658	if (IS_HASWELL(dev_priv))
14659		return hsw_mode_set_planes_workaround(state);
14660
14661	return 0;
14662}
14663
14664/*
14665 * Handle calculation of various watermark data at the end of the atomic check
14666 * phase.  The code here should be run after the per-crtc and per-plane 'check'
14667 * handlers to ensure that all derived state has been updated.
14668 */
14669static int calc_watermark_data(struct intel_atomic_state *state)
14670{
14671	struct drm_device *dev = state->base.dev;
14672	struct drm_i915_private *dev_priv = to_i915(dev);
14673
14674	/* Is there platform-specific watermark information to calculate? */
14675	if (dev_priv->display.compute_global_watermarks)
14676		return dev_priv->display.compute_global_watermarks(state);
14677
14678	return 0;
14679}
14680
14681static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_state,
14682				     struct intel_crtc_state *new_crtc_state)
14683{
14684	if (!intel_pipe_config_compare(old_crtc_state, new_crtc_state, true))
14685		return;
14686
14687	new_crtc_state->uapi.mode_changed = false;
14688	new_crtc_state->update_pipe = true;
14689}
14690
14691static void intel_crtc_copy_fastset(const struct intel_crtc_state *old_crtc_state,
14692				    struct intel_crtc_state *new_crtc_state)
14693{
14694	/*
14695	 * If we're not doing the full modeset we want to
14696	 * keep the current M/N values as they may be
14697	 * sufficiently different to the computed values
14698	 * to cause problems.
14699	 *
14700	 * FIXME: should really copy more fuzzy state here
14701	 */
14702	new_crtc_state->fdi_m_n = old_crtc_state->fdi_m_n;
14703	new_crtc_state->dp_m_n = old_crtc_state->dp_m_n;
14704	new_crtc_state->dp_m2_n2 = old_crtc_state->dp_m2_n2;
14705	new_crtc_state->has_drrs = old_crtc_state->has_drrs;
14706}
14707
14708static int intel_crtc_add_planes_to_state(struct intel_atomic_state *state,
14709					  struct intel_crtc *crtc,
14710					  u8 plane_ids_mask)
14711{
14712	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
14713	struct intel_plane *plane;
14714
14715	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
14716		struct intel_plane_state *plane_state;
14717
14718		if ((plane_ids_mask & BIT(plane->id)) == 0)
14719			continue;
14720
14721		plane_state = intel_atomic_get_plane_state(state, plane);
14722		if (IS_ERR(plane_state))
14723			return PTR_ERR(plane_state);
14724	}
14725
14726	return 0;
14727}
14728
14729static bool active_planes_affects_min_cdclk(struct drm_i915_private *dev_priv)
14730{
14731	/* See {hsw,vlv,ivb}_plane_ratio() */
14732	return IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv) ||
14733		IS_CHERRYVIEW(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
14734		IS_IVYBRIDGE(dev_priv) || (INTEL_GEN(dev_priv) >= 11);
14735}
14736
/*
 * Run the per-plane atomic checks for @state and pull in any extra
 * planes whose presence affects the minimum cdclk computation.
 * Returns 0 on success or a negative error code.
 */
static int intel_atomic_check_planes(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	struct intel_plane_state *plane_state;
	struct intel_plane *plane;
	struct intel_crtc *crtc;
	int i, ret;

	/* Pair up Y/UV planes for planar YUV before the per-plane checks. */
	ret = icl_add_linked_planes(state);
	if (ret)
		return ret;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		ret = intel_plane_atomic_check(state, plane);
		if (ret) {
			drm_dbg_atomic(&dev_priv->drm,
				       "[PLANE:%d:%s] atomic driver check failed\n",
				       plane->base.base.id, plane->base.name);
			return ret;
		}
	}

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		u8 old_active_planes, new_active_planes;

		ret = icl_check_nv12_planes(new_crtc_state);
		if (ret)
			return ret;

		/*
		 * On some platforms the number of active planes affects
		 * the planes' minimum cdclk calculation. Add such planes
		 * to the state before we compute the minimum cdclk.
		 */
		if (!active_planes_affects_min_cdclk(dev_priv))
			continue;

		/* The cursor plane does not factor into the plane ratios. */
		old_active_planes = old_crtc_state->active_planes & ~BIT(PLANE_CURSOR);
		new_active_planes = new_crtc_state->active_planes & ~BIT(PLANE_CURSOR);

		/*
		 * Not only the number of planes, but if the plane configuration had
		 * changed might already mean we need to recompute min CDCLK,
		 * because different planes might consume different amount of Dbuf bandwidth
		 * according to formula: Bw per plane = Pixel rate * bpp * pipe/plane scale factor
		 */
		if (old_active_planes == new_active_planes)
			continue;

		ret = intel_crtc_add_planes_to_state(state, crtc, new_active_planes);
		if (ret)
			return ret;
	}

	return 0;
}
14795
/*
 * Decide whether the cdclk must be recomputed for this commit. Sets
 * *need_cdclk_calc when a plane's min cdclk changed, the forced min
 * cdclk changed, or the bandwidth-derived min cdclk exceeds any pipe's
 * current value. Returns 0 on success or a negative error code.
 */
static int intel_atomic_check_cdclk(struct intel_atomic_state *state,
				    bool *need_cdclk_calc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_cdclk_state *old_cdclk_state;
	const struct intel_cdclk_state *new_cdclk_state;
	struct intel_plane_state *plane_state;
	struct intel_bw_state *new_bw_state;
	struct intel_plane *plane;
	int min_cdclk = 0;
	enum pipe pipe;
	int ret;
	int i;
	/*
	 * active_planes bitmask has been updated, and potentially
	 * affected planes are part of the state. We can now
	 * compute the minimum cdclk for each plane.
	 */
	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		ret = intel_plane_calc_min_cdclk(state, plane, need_cdclk_calc);
		if (ret)
			return ret;
	}

	old_cdclk_state = intel_atomic_get_old_cdclk_state(state);
	new_cdclk_state = intel_atomic_get_new_cdclk_state(state);

	if (new_cdclk_state &&
	    old_cdclk_state->force_min_cdclk != new_cdclk_state->force_min_cdclk)
		*need_cdclk_calc = true;

	ret = dev_priv->display.bw_calc_min_cdclk(state);
	if (ret)
		return ret;

	new_bw_state = intel_atomic_get_new_bw_state(state);

	/* Neither cdclk nor bw state in this commit: nothing to compare. */
	if (!new_cdclk_state || !new_bw_state)
		return 0;

	for_each_pipe(dev_priv, pipe) {
		min_cdclk = max(new_cdclk_state->min_cdclk[pipe], min_cdclk);

		/*
		 * Currently do this change only if we need to increase
		 */
		if (new_bw_state->min_cdclk > min_cdclk)
			*need_cdclk_calc = true;
	}

	return 0;
}
14848
14849static int intel_atomic_check_crtcs(struct intel_atomic_state *state)
14850{
14851	struct intel_crtc_state *crtc_state;
14852	struct intel_crtc *crtc;
14853	int i;
14854
14855	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
14856		int ret = intel_crtc_atomic_check(state, crtc);
14857		struct drm_i915_private *i915 = to_i915(crtc->base.dev);
14858		if (ret) {
14859			drm_dbg_atomic(&i915->drm,
14860				       "[CRTC:%d:%s] atomic driver check failed\n",
14861				       crtc->base.base.id, crtc->base.name);
14862			return ret;
14863		}
14864	}
14865
14866	return 0;
14867}
14868
14869static bool intel_cpu_transcoders_need_modeset(struct intel_atomic_state *state,
14870					       u8 transcoders)
14871{
14872	const struct intel_crtc_state *new_crtc_state;
14873	struct intel_crtc *crtc;
14874	int i;
14875
14876	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
14877		if (new_crtc_state->hw.enable &&
14878		    transcoders & BIT(new_crtc_state->cpu_transcoder) &&
14879		    needs_modeset(new_crtc_state))
14880			return true;
14881	}
14882
14883	return false;
14884}
14885
14886/**
14887 * intel_atomic_check - validate state object
14888 * @dev: drm device
14889 * @_state: state to validate
14890 */
14891static int intel_atomic_check(struct drm_device *dev,
14892			      struct drm_atomic_state *_state)
14893{
14894	struct drm_i915_private *dev_priv = to_i915(dev);
14895	struct intel_atomic_state *state = to_intel_atomic_state(_state);
14896	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
14897	struct intel_crtc *crtc;
14898	int ret, i;
14899	bool any_ms = false;
14900
14901	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
14902					    new_crtc_state, i) {
14903		if (new_crtc_state->inherited != old_crtc_state->inherited)
14904			new_crtc_state->uapi.mode_changed = true;
14905	}
14906
14907	ret = drm_atomic_helper_check_modeset(dev, &state->base);
14908	if (ret)
14909		goto fail;
14910
14911	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
14912					    new_crtc_state, i) {
14913		if (!needs_modeset(new_crtc_state)) {
14914			/* Light copy */
14915			intel_crtc_copy_uapi_to_hw_state_nomodeset(new_crtc_state);
14916
14917			continue;
14918		}
14919
14920		ret = intel_crtc_prepare_cleared_state(new_crtc_state);
14921		if (ret)
14922			goto fail;
14923
14924		if (!new_crtc_state->hw.enable)
14925			continue;
14926
14927		ret = intel_modeset_pipe_config(new_crtc_state);
14928		if (ret)
14929			goto fail;
14930	}
14931
14932	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
14933					    new_crtc_state, i) {
14934		if (!needs_modeset(new_crtc_state))
14935			continue;
14936
14937		ret = intel_modeset_pipe_config_late(new_crtc_state);
14938		if (ret)
14939			goto fail;
14940
14941		intel_crtc_check_fastset(old_crtc_state, new_crtc_state);
14942	}
14943
14944	/**
14945	 * Check if fastset is allowed by external dependencies like other
14946	 * pipes and transcoders.
14947	 *
14948	 * Right now it only forces a fullmodeset when the MST master
14949	 * transcoder did not changed but the pipe of the master transcoder
14950	 * needs a fullmodeset so all slaves also needs to do a fullmodeset or
14951	 * in case of port synced crtcs, if one of the synced crtcs
14952	 * needs a full modeset, all other synced crtcs should be
14953	 * forced a full modeset.
14954	 */
14955	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
14956		if (!new_crtc_state->hw.enable || needs_modeset(new_crtc_state))
14957			continue;
14958
14959		if (intel_dp_mst_is_slave_trans(new_crtc_state)) {
14960			enum transcoder master = new_crtc_state->mst_master_transcoder;
14961
14962			if (intel_cpu_transcoders_need_modeset(state, BIT(master))) {
14963				new_crtc_state->uapi.mode_changed = true;
14964				new_crtc_state->update_pipe = false;
14965			}
14966		}
14967
14968		if (is_trans_port_sync_mode(new_crtc_state)) {
14969			u8 trans = new_crtc_state->sync_mode_slaves_mask;
14970
14971			if (new_crtc_state->master_transcoder != INVALID_TRANSCODER)
14972				trans |= BIT(new_crtc_state->master_transcoder);
14973
14974			if (intel_cpu_transcoders_need_modeset(state, trans)) {
14975				new_crtc_state->uapi.mode_changed = true;
14976				new_crtc_state->update_pipe = false;
14977			}
14978		}
14979	}
14980
14981	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
14982					    new_crtc_state, i) {
14983		if (needs_modeset(new_crtc_state)) {
14984			any_ms = true;
14985			continue;
14986		}
14987
14988		if (!new_crtc_state->update_pipe)
14989			continue;
14990
14991		intel_crtc_copy_fastset(old_crtc_state, new_crtc_state);
14992	}
14993
14994	if (any_ms && !check_digital_port_conflicts(state)) {
14995		drm_dbg_kms(&dev_priv->drm,
14996			    "rejecting conflicting digital port configuration\n");
14997		ret = -EINVAL;
14998		goto fail;
14999	}
15000
15001	ret = drm_dp_mst_atomic_check(&state->base);
15002	if (ret)
15003		goto fail;
15004
15005	ret = intel_atomic_check_planes(state);
15006	if (ret)
15007		goto fail;
15008
15009	/*
15010	 * distrust_bios_wm will force a full dbuf recomputation
15011	 * but the hardware state will only get updated accordingly
15012	 * if state->modeset==true. Hence distrust_bios_wm==true &&
15013	 * state->modeset==false is an invalid combination which
15014	 * would cause the hardware and software dbuf state to get
15015	 * out of sync. We must prevent that.
15016	 *
15017	 * FIXME clean up this mess and introduce better
15018	 * state tracking for dbuf.
15019	 */
15020	if (dev_priv->wm.distrust_bios_wm)
15021		any_ms = true;
15022
15023	intel_fbc_choose_crtc(dev_priv, state);
15024	ret = calc_watermark_data(state);
15025	if (ret)
15026		goto fail;
15027
15028	ret = intel_bw_atomic_check(state);
15029	if (ret)
15030		goto fail;
15031
15032	ret = intel_atomic_check_cdclk(state, &any_ms);
15033	if (ret)
15034		goto fail;
15035
15036	if (any_ms) {
15037		ret = intel_modeset_checks(state);
15038		if (ret)
15039			goto fail;
15040
15041		ret = intel_modeset_calc_cdclk(state);
15042		if (ret)
15043			return ret;
15044
15045		intel_modeset_clear_plls(state);
15046	}
15047
15048	ret = intel_atomic_check_crtcs(state);
15049	if (ret)
15050		goto fail;
15051
15052	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
15053					    new_crtc_state, i) {
15054		if (!needs_modeset(new_crtc_state) &&
15055		    !new_crtc_state->update_pipe)
15056			continue;
15057
15058		intel_dump_pipe_config(new_crtc_state, state,
15059				       needs_modeset(new_crtc_state) ?
15060				       "[modeset]" : "[fastset]");
15061	}
15062
15063	return 0;
15064
15065 fail:
15066	if (ret == -EDEADLK)
15067		return ret;
15068
15069	/*
15070	 * FIXME would probably be nice to know which crtc specifically
15071	 * caused the failure, in cases where we can pinpoint it.
15072	 */
15073	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
15074					    new_crtc_state, i)
15075		intel_dump_pipe_config(new_crtc_state, state, "[failed]");
15076
15077	return ret;
15078}
15079
15080static int intel_atomic_prepare_commit(struct intel_atomic_state *state)
15081{
15082	struct intel_crtc_state *crtc_state;
15083	struct intel_crtc *crtc;
15084	int i, ret;
15085
15086	ret = drm_atomic_helper_prepare_planes(state->base.dev, &state->base);
15087	if (ret < 0)
15088		return ret;
15089
15090	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
15091		bool mode_changed = needs_modeset(crtc_state);
15092
15093		if (mode_changed || crtc_state->update_pipe ||
15094		    crtc_state->uapi.color_mgmt_changed) {
15095			intel_dsb_prepare(crtc_state);
15096		}
15097	}
15098
15099	return 0;
15100}
15101
15102u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
15103{
15104	struct drm_device *dev = crtc->base.dev;
15105	struct drm_vblank_crtc *vblank = &dev->vblank[drm_crtc_index(&crtc->base)];
15106
15107	if (!vblank->max_vblank_count)
15108		return (u32)drm_crtc_accurate_vblank_count(&crtc->base);
15109
15110	return crtc->base.funcs->get_vblank_counter(&crtc->base);
15111}
15112
15113void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
15114				  struct intel_crtc_state *crtc_state)
15115{
15116	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
15117
15118	if (!IS_GEN(dev_priv, 2) || crtc_state->active_planes)
15119		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
15120
15121	if (crtc_state->has_pch_encoder) {
15122		enum pipe pch_transcoder =
15123			intel_crtc_pch_transcoder(crtc);
15124
15125		intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true);
15126	}
15127}
15128
/*
 * Apply the pipe-level changes that are safe to make during a fastset
 * (no full modeset): pipe source size, panel fitter, linetime
 * watermarks and pipe chicken bits.
 */
static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
			       const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/*
	 * Update pipe size and adjust fitter if needed: the reason for this is
	 * that in compute_mode_changes we check the native mode (not the pfit
	 * mode) to see if we can flip rather than do a full mode set. In the
	 * fastboot case, we'll flip, but if we don't update the pipesrc and
	 * pfit state, we'll end up with a big fb scanned out into the wrong
	 * sized surface.
	 */
	intel_set_pipe_src_size(new_crtc_state);

	/* on skylake this is done by detaching scalers */
	if (INTEL_GEN(dev_priv) >= 9) {
		skl_detach_scalers(new_crtc_state);

		if (new_crtc_state->pch_pfit.enabled)
			skl_pfit_enable(new_crtc_state);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		if (new_crtc_state->pch_pfit.enabled)
			ilk_pfit_enable(new_crtc_state);
		else if (old_crtc_state->pch_pfit.enabled)
			ilk_pfit_disable(old_crtc_state);
	}

	/*
	 * The register is supposedly single buffered so perhaps
	 * not 100% correct to do this here. But SKL+ calculate
	 * this based on the adjust pixel rate so pfit changes do
	 * affect it and so it must be updated for fastsets.
	 * HSW/BDW only really need this here for fastboot, after
	 * that the value should not change without a full modeset.
	 */
	if (INTEL_GEN(dev_priv) >= 9 ||
	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
		hsw_set_linetime_wm(new_crtc_state);

	if (INTEL_GEN(dev_priv) >= 11)
		icl_set_pipe_chicken(crtc);
}
15173
/*
 * Program the pipe-level (non-plane) configuration during the commit.
 * For full modesets the pipe was already programmed as the crtc was
 * enabled, so only fastset/color-management updates happen here;
 * watermarks are updated in either case.
 */
static void commit_pipe_config(struct intel_atomic_state *state,
			       struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	bool modeset = needs_modeset(new_crtc_state);

	/*
	 * During modesets pipe configuration was programmed as the
	 * CRTC was enabled.
	 */
	if (!modeset) {
		if (new_crtc_state->uapi.color_mgmt_changed ||
		    new_crtc_state->update_pipe)
			intel_color_commit(new_crtc_state);

		if (INTEL_GEN(dev_priv) >= 9)
			skl_detach_scalers(new_crtc_state);

		if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
			bdw_set_pipemisc(new_crtc_state);

		if (new_crtc_state->update_pipe)
			intel_pipe_fastset(old_crtc_state, new_crtc_state);

		intel_psr2_program_trans_man_trk_ctl(new_crtc_state);
	}

	if (dev_priv->display.atomic_update_watermarks)
		dev_priv->display.atomic_update_watermarks(state, crtc);
}
15208
15209static void intel_enable_crtc(struct intel_atomic_state *state,
15210			      struct intel_crtc *crtc)
15211{
15212	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
15213	const struct intel_crtc_state *new_crtc_state =
15214		intel_atomic_get_new_crtc_state(state, crtc);
15215
15216	if (!needs_modeset(new_crtc_state))
15217		return;
15218
15219	intel_crtc_update_active_timings(new_crtc_state);
15220
15221	dev_priv->display.crtc_enable(state, crtc);
15222
15223	/* vblanks work again, re-enable pipe CRC. */
15224	intel_crtc_enable_pipe_crc(crtc);
15225}
15226
/*
 * Reprogram a running crtc for this commit: LUTs, encoder fastset
 * updates, FBC, pipe config and planes, with the pipe/plane writes
 * performed inside the vblank-evasion critical section.
 */
static void intel_update_crtc(struct intel_atomic_state *state,
			      struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	bool modeset = needs_modeset(new_crtc_state);

	if (!modeset) {
		/* Preload the LUTs before the vblank-evasion window. */
		if (new_crtc_state->preload_luts &&
		    (new_crtc_state->uapi.color_mgmt_changed ||
		     new_crtc_state->update_pipe))
			intel_color_load_luts(new_crtc_state);

		intel_pre_plane_update(state, crtc);

		if (new_crtc_state->update_pipe)
			intel_encoders_update_pipe(state, crtc);
	}

	if (new_crtc_state->update_pipe && !new_crtc_state->enable_fbc)
		intel_fbc_disable(crtc);
	else
		intel_fbc_enable(state, crtc);

	/* Perform vblank evasion around commit operation */
	intel_pipe_update_start(new_crtc_state);

	commit_pipe_config(state, crtc);

	if (INTEL_GEN(dev_priv) >= 9)
		skl_update_planes_on_crtc(state, crtc);
	else
		i9xx_update_planes_on_crtc(state, crtc);

	intel_pipe_update_end(new_crtc_state);

	/*
	 * We usually enable FIFO underrun interrupts as part of the
	 * CRTC enable sequence during modesets.  But when we inherit a
	 * valid pipe configuration from the BIOS we need to take care
	 * of enabling them on the CRTC's first fastset.
	 */
	if (new_crtc_state->update_pipe && !modeset &&
	    old_crtc_state->inherited)
		intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
}
15276
15277
/*
 * Disable a crtc as part of a modeset: planes first, then pipe CRC
 * (to avoid racing against vblank off), then the crtc itself, followed
 * by FBC/DPLL cleanup and, on non-GMCH platforms, intermediate
 * watermark programming.
 */
static void intel_old_crtc_state_disables(struct intel_atomic_state *state,
					  struct intel_crtc_state *old_crtc_state,
					  struct intel_crtc_state *new_crtc_state,
					  struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);

	intel_crtc_disable_planes(state, crtc);

	/*
	 * We need to disable pipe CRC before disabling the pipe,
	 * or we race against vblank off.
	 */
	intel_crtc_disable_pipe_crc(crtc);

	dev_priv->display.crtc_disable(state, crtc);
	crtc->active = false;
	intel_fbc_disable(crtc);
	intel_disable_shared_dpll(old_crtc_state);

	/* FIXME unify this for all platforms */
	if (!new_crtc_state->hw.active &&
	    !HAS_GMCH(dev_priv) &&
	    dev_priv->display.initial_watermarks)
		dev_priv->display.initial_watermarks(state, crtc);
}
15304
/*
 * Disable every crtc that needs a full modeset, making sure port sync
 * and MST slave crtcs are taken down before their masters.
 */
static void intel_commit_modeset_disables(struct intel_atomic_state *state)
{
	struct intel_crtc_state *new_crtc_state, *old_crtc_state;
	struct intel_crtc *crtc;
	u32 handled = 0;
	int i;

	/* Only disable port sync and MST slaves */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (!needs_modeset(new_crtc_state))
			continue;

		if (!old_crtc_state->hw.active)
			continue;

		/* In case of Transcoder port Sync master slave CRTCs can be
		 * assigned in any order and we need to make sure that
		 * slave CRTCs are disabled first and then master CRTC since
		 * Slave vblanks are masked till Master Vblanks.
		 */
		if (!is_trans_port_sync_slave(old_crtc_state) &&
		    !intel_dp_mst_is_slave_trans(old_crtc_state))
			continue;

		intel_pre_plane_update(state, crtc);
		intel_old_crtc_state_disables(state, old_crtc_state,
					      new_crtc_state, crtc);
		handled |= BIT(crtc->pipe);
	}

	/* Disable everything else left on */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (!needs_modeset(new_crtc_state) ||
		    (handled & BIT(crtc->pipe)))
			continue;

		intel_pre_plane_update(state, crtc);
		if (old_crtc_state->hw.active)
			intel_old_crtc_state_disables(state, old_crtc_state,
						      new_crtc_state, crtc);
	}
}
15349
15350static void intel_commit_modeset_enables(struct intel_atomic_state *state)
15351{
15352	struct intel_crtc_state *new_crtc_state;
15353	struct intel_crtc *crtc;
15354	int i;
15355
15356	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
15357		if (!new_crtc_state->hw.active)
15358			continue;
15359
15360		intel_enable_crtc(state, crtc);
15361		intel_update_crtc(state, crtc);
15362	}
15363}
15364
/*
 * skl+ enable path: commits crtcs in an order that guarantees the DDB
 * (display data buffer) allocation of a pipe never transiently overlaps
 * another pipe's current allocation, which would cause underruns.
 */
static void skl_commit_modeset_enables(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc *crtc;
	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
	u8 update_pipes = 0, modeset_pipes = 0;
	int i;

	/*
	 * Classify the pipes that will be active afterwards:
	 * update_pipes stay active and only need a plane/ddb update,
	 * modeset_pipes need a full modeset. 'entries' starts out holding
	 * the pre-commit ddb allocation of each already-active pipe.
	 */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if (!new_crtc_state->hw.active)
			continue;

		/* ignore allocations for crtc's that have been turned off. */
		if (!needs_modeset(new_crtc_state)) {
			entries[pipe] = old_crtc_state->wm.skl.ddb;
			update_pipes |= BIT(pipe);
		} else {
			modeset_pipes |= BIT(pipe);
		}
	}

	/*
	 * Whenever the number of active pipes changes, we need to make sure we
	 * update the pipes in the right order so that their ddb allocations
	 * never overlap with each other between CRTC updates. Otherwise we'll
	 * cause pipe underruns and other bad stuff.
	 *
	 * So first lets enable all pipes that do not need a fullmodeset as
	 * those don't have any external dependency.
	 */
	while (update_pipes) {
		/*
		 * Each inner pass commits only the pipes whose new allocation
		 * no longer collides with any other pipe's current one; the
		 * outer loop repeats until every update_pipe has been done.
		 */
		for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
						    new_crtc_state, i) {
			enum pipe pipe = crtc->pipe;

			if ((update_pipes & BIT(pipe)) == 0)
				continue;

			if (skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
							entries, I915_MAX_PIPES, pipe))
				continue;

			entries[pipe] = new_crtc_state->wm.skl.ddb;
			update_pipes &= ~BIT(pipe);

			intel_update_crtc(state, crtc);

			/*
			 * If this is an already active pipe, it's DDB changed,
			 * and this isn't the last pipe that needs updating
			 * then we need to wait for a vblank to pass for the
			 * new ddb allocation to take effect.
			 */
			if (!skl_ddb_entry_equal(&new_crtc_state->wm.skl.ddb,
						 &old_crtc_state->wm.skl.ddb) &&
			    (update_pipes | modeset_pipes))
				intel_wait_for_vblank(dev_priv, pipe);
		}
	}

	/* modeset pipes get their planes etc. committed in the last loop below */
	update_pipes = modeset_pipes;

	/*
	 * Enable all pipes that needs a modeset and do not depends on other
	 * pipes
	 */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if ((modeset_pipes & BIT(pipe)) == 0)
			continue;

		if (intel_dp_mst_is_slave_trans(new_crtc_state) ||
		    is_trans_port_sync_master(new_crtc_state))
			continue;

		modeset_pipes &= ~BIT(pipe);

		intel_enable_crtc(state, crtc);
	}

	/*
	 * Then we enable all remaining pipes that depend on other
	 * pipes: MST slaves and port sync masters.
	 */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if ((modeset_pipes & BIT(pipe)) == 0)
			continue;

		modeset_pipes &= ~BIT(pipe);

		intel_enable_crtc(state, crtc);
	}

	/*
	 * Finally we do the plane updates/etc. for all pipes that got enabled.
	 */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if ((update_pipes & BIT(pipe)) == 0)
			continue;

		/* The modeset pipes' ddb must not overlap anything still live. */
		drm_WARN_ON(&dev_priv->drm, skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
									entries, I915_MAX_PIPES, pipe));

		entries[pipe] = new_crtc_state->wm.skl.ddb;
		update_pipes &= ~BIT(pipe);

		intel_update_crtc(state, crtc);
	}

	/* Every pipe classified above must have been handled by now. */
	drm_WARN_ON(&dev_priv->drm, modeset_pipes);
	drm_WARN_ON(&dev_priv->drm, update_pipes);
}
15485
15486static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
15487{
15488	struct intel_atomic_state *state, *next;
15489	struct llist_node *freed;
15490
15491	freed = llist_del_all(&dev_priv->atomic_helper.free_list);
15492	llist_for_each_entry_safe(state, next, freed, freed)
15493		drm_atomic_state_put(&state->base);
15494}
15495
15496static void intel_atomic_helper_free_state_worker(struct work_struct *work)
15497{
15498	struct drm_i915_private *dev_priv =
15499		container_of(work, typeof(*dev_priv), atomic_helper.free_work);
15500
15501	intel_atomic_helper_free_state(dev_priv);
15502}
15503
/*
 * Block until the commit's sw fence has signalled, or a modeset-affecting
 * GPU reset is pending. We wait on both queues simultaneously so a reset
 * can't deadlock against a commit whose fence it would need to complete.
 */
static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state)
{
	struct wait_queue_entry wait_fence, wait_reset;
	struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev);

	init_wait_entry(&wait_fence, 0);
	init_wait_entry(&wait_reset, 0);
	for (;;) {
		/* Queue ourselves on both waitqueues before checking either condition. */
		prepare_to_wait(&intel_state->commit_ready.wait,
				&wait_fence, TASK_UNINTERRUPTIBLE);
		prepare_to_wait(bit_waitqueue(&dev_priv->gt.reset.flags,
					      I915_RESET_MODESET),
				&wait_reset, TASK_UNINTERRUPTIBLE);


		if (i915_sw_fence_done(&intel_state->commit_ready) ||
		    test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags))
			break;

		schedule();
	}
	finish_wait(&intel_state->commit_ready.wait, &wait_fence);
	finish_wait(bit_waitqueue(&dev_priv->gt.reset.flags,
				  I915_RESET_MODESET),
		    &wait_reset);
}
15530
15531static void intel_cleanup_dsbs(struct intel_atomic_state *state)
15532{
15533	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
15534	struct intel_crtc *crtc;
15535	int i;
15536
15537	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
15538					    new_crtc_state, i)
15539		intel_dsb_cleanup(old_crtc_state);
15540}
15541
15542static void intel_atomic_cleanup_work(struct work_struct *work)
15543{
15544	struct intel_atomic_state *state =
15545		container_of(work, struct intel_atomic_state, base.commit_work);
15546	struct drm_i915_private *i915 = to_i915(state->base.dev);
15547
15548	intel_cleanup_dsbs(state);
15549	drm_atomic_helper_cleanup_planes(&i915->drm, &state->base);
15550	drm_atomic_helper_commit_cleanup_done(&state->base);
15551	drm_atomic_state_put(&state->base);
15552
15553	intel_atomic_helper_free_state(i915);
15554}
15555
15556static void intel_atomic_commit_tail(struct intel_atomic_state *state)
15557{
15558	struct drm_device *dev = state->base.dev;
15559	struct drm_i915_private *dev_priv = to_i915(dev);
15560	struct intel_crtc_state *new_crtc_state, *old_crtc_state;
15561	struct intel_crtc *crtc;
15562	u64 put_domains[I915_MAX_PIPES] = {};
15563	intel_wakeref_t wakeref = 0;
15564	int i;
15565
15566	intel_atomic_commit_fence_wait(state);
15567
15568	drm_atomic_helper_wait_for_dependencies(&state->base);
15569
15570	if (state->modeset)
15571		wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
15572
15573	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
15574					    new_crtc_state, i) {
15575		if (needs_modeset(new_crtc_state) ||
15576		    new_crtc_state->update_pipe) {
15577
15578			put_domains[crtc->pipe] =
15579				modeset_get_crtc_power_domains(new_crtc_state);
15580		}
15581	}
15582
15583	intel_commit_modeset_disables(state);
15584
15585	/* FIXME: Eventually get rid of our crtc->config pointer */
15586	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
15587		crtc->config = new_crtc_state;
15588
15589	if (state->modeset) {
15590		drm_atomic_helper_update_legacy_modeset_state(dev, &state->base);
15591
15592		intel_set_cdclk_pre_plane_update(state);
15593
15594		intel_modeset_verify_disabled(dev_priv, state);
15595	}
15596
15597	intel_sagv_pre_plane_update(state);
15598
15599	/* Complete the events for pipes that have now been disabled */
15600	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
15601		bool modeset = needs_modeset(new_crtc_state);
15602
15603		/* Complete events for now disable pipes here. */
15604		if (modeset && !new_crtc_state->hw.active && new_crtc_state->uapi.event) {
15605			spin_lock_irq(&dev->event_lock);
15606			drm_crtc_send_vblank_event(&crtc->base,
15607						   new_crtc_state->uapi.event);
15608			spin_unlock_irq(&dev->event_lock);
15609
15610			new_crtc_state->uapi.event = NULL;
15611		}
15612	}
15613
15614	if (state->modeset)
15615		intel_encoders_update_prepare(state);
15616
15617	intel_dbuf_pre_plane_update(state);
15618
15619	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
15620	dev_priv->display.commit_modeset_enables(state);
15621
15622	if (state->modeset) {
15623		intel_encoders_update_complete(state);
15624
15625		intel_set_cdclk_post_plane_update(state);
15626	}
15627
15628	/* FIXME: We should call drm_atomic_helper_commit_hw_done() here
15629	 * already, but still need the state for the delayed optimization. To
15630	 * fix this:
15631	 * - wrap the optimization/post_plane_update stuff into a per-crtc work.
15632	 * - schedule that vblank worker _before_ calling hw_done
15633	 * - at the start of commit_tail, cancel it _synchrously
15634	 * - switch over to the vblank wait helper in the core after that since
15635	 *   we don't need out special handling any more.
15636	 */
15637	drm_atomic_helper_wait_for_flip_done(dev, &state->base);
15638
15639	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
15640		if (new_crtc_state->hw.active &&
15641		    !needs_modeset(new_crtc_state) &&
15642		    !new_crtc_state->preload_luts &&
15643		    (new_crtc_state->uapi.color_mgmt_changed ||
15644		     new_crtc_state->update_pipe))
15645			intel_color_load_luts(new_crtc_state);
15646	}
15647
15648	/*
15649	 * Now that the vblank has passed, we can go ahead and program the
15650	 * optimal watermarks on platforms that need two-step watermark
15651	 * programming.
15652	 *
15653	 * TODO: Move this (and other cleanup) to an async worker eventually.
15654	 */
15655	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
15656					    new_crtc_state, i) {
15657		/*
15658		 * Gen2 reports pipe underruns whenever all planes are disabled.
15659		 * So re-enable underrun reporting after some planes get enabled.
15660		 *
15661		 * We do this before .optimize_watermarks() so that we have a
15662		 * chance of catching underruns with the intermediate watermarks
15663		 * vs. the new plane configuration.
15664		 */
15665		if (IS_GEN(dev_priv, 2) && planes_enabling(old_crtc_state, new_crtc_state))
15666			intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
15667
15668		if (dev_priv->display.optimize_watermarks)
15669			dev_priv->display.optimize_watermarks(state, crtc);
15670	}
15671
15672	intel_dbuf_post_plane_update(state);
15673
15674	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
15675		intel_post_plane_update(state, crtc);
15676
15677		if (put_domains[i])
15678			modeset_put_power_domains(dev_priv, put_domains[i]);
15679
15680		intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);
15681
15682		/*
15683		 * DSB cleanup is done in cleanup_work aligning with framebuffer
15684		 * cleanup. So copy and reset the dsb structure to sync with
15685		 * commit_done and later do dsb cleanup in cleanup_work.
15686		 */
15687		old_crtc_state->dsb = fetch_and_zero(&new_crtc_state->dsb);
15688	}
15689
15690	/* Underruns don't always raise interrupts, so check manually */
15691	intel_check_cpu_fifo_underruns(dev_priv);
15692	intel_check_pch_fifo_underruns(dev_priv);
15693
15694	if (state->modeset)
15695		intel_verify_planes(state);
15696
15697	intel_sagv_post_plane_update(state);
15698
15699	drm_atomic_helper_commit_hw_done(&state->base);
15700
15701	if (state->modeset) {
15702		/* As one of the primary mmio accessors, KMS has a high
15703		 * likelihood of triggering bugs in unclaimed access. After we
15704		 * finish modesetting, see if an error has been flagged, and if
15705		 * so enable debugging for the next modeset - and hope we catch
15706		 * the culprit.
15707		 */
15708		intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
15709		intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref);
15710	}
15711	intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
15712
15713	/*
15714	 * Defer the cleanup of the old state to a separate worker to not
15715	 * impede the current task (userspace for blocking modesets) that
15716	 * are executed inline. For out-of-line asynchronous modesets/flips,
15717	 * deferring to a new worker seems overkill, but we would place a
15718	 * schedule point (cond_resched()) here anyway to keep latencies
15719	 * down.
15720	 */
15721	INIT_WORK(&state->base.commit_work, intel_atomic_cleanup_work);
15722	queue_work(system_highpri_wq, &state->base.commit_work);
15723}
15724
15725static void intel_atomic_commit_work(struct work_struct *work)
15726{
15727	struct intel_atomic_state *state =
15728		container_of(work, struct intel_atomic_state, base.commit_work);
15729
15730	intel_atomic_commit_tail(state);
15731}
15732
15733static int __i915_sw_fence_call
15734intel_atomic_commit_ready(struct i915_sw_fence *fence,
15735			  enum i915_sw_fence_notify notify)
15736{
15737	struct intel_atomic_state *state =
15738		container_of(fence, struct intel_atomic_state, commit_ready);
15739
15740	switch (notify) {
15741	case FENCE_COMPLETE:
15742		/* we do blocking waits in the worker, nothing to do here */
15743		break;
15744	case FENCE_FREE:
15745		{
15746			struct intel_atomic_helper *helper =
15747				&to_i915(state->base.dev)->atomic_helper;
15748
15749			if (llist_add(&state->freed, &helper->free_list))
15750				schedule_work(&helper->free_work);
15751			break;
15752		}
15753	}
15754
15755	return NOTIFY_DONE;
15756}
15757
15758static void intel_atomic_track_fbs(struct intel_atomic_state *state)
15759{
15760	struct intel_plane_state *old_plane_state, *new_plane_state;
15761	struct intel_plane *plane;
15762	int i;
15763
15764	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
15765					     new_plane_state, i)
15766		intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb),
15767					to_intel_frontbuffer(new_plane_state->hw.fb),
15768					plane->frontbuffer_bit);
15769}
15770
/*
 * i915's drm_mode_config_funcs.atomic_commit implementation: prepares
 * the commit, swaps state in, and runs (or queues) commit_tail.
 * Returns 0 on success or a negative error code; on error all
 * references and the runtime-pm wakeref taken here are released.
 */
static int intel_atomic_commit(struct drm_device *dev,
			       struct drm_atomic_state *_state,
			       bool nonblock)
{
	struct intel_atomic_state *state = to_intel_atomic_state(_state);
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret = 0;

	/* Held until commit_tail finishes touching the hardware. */
	state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	drm_atomic_state_get(&state->base);
	i915_sw_fence_init(&state->commit_ready,
			   intel_atomic_commit_ready);

	/*
	 * The intel_legacy_cursor_update() fast path takes care
	 * of avoiding the vblank waits for simple cursor
	 * movement and flips. For cursor on/off and size changes,
	 * we want to perform the vblank waits so that watermark
	 * updates happen during the correct frames. Gen9+ have
	 * double buffered watermarks and so shouldn't need this.
	 *
	 * Unset state->legacy_cursor_update before the call to
	 * drm_atomic_helper_setup_commit() because otherwise
	 * drm_atomic_helper_wait_for_flip_done() is a noop and
	 * we get FIFO underruns because we didn't wait
	 * for vblank.
	 *
	 * FIXME doing watermarks and fb cleanup from a vblank worker
	 * (assuming we had any) would solve these problems.
	 */
	if (INTEL_GEN(dev_priv) < 9 && state->base.legacy_cursor_update) {
		struct intel_crtc_state *new_crtc_state;
		struct intel_crtc *crtc;
		int i;

		for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
			if (new_crtc_state->wm.need_postvbl_update ||
			    new_crtc_state->update_wm_post)
				state->base.legacy_cursor_update = false;
	}

	ret = intel_atomic_prepare_commit(state);
	if (ret) {
		drm_dbg_atomic(&dev_priv->drm,
			       "Preparing state failed with %i\n", ret);
		/* Fire the fence so FENCE_FREE eventually releases the state. */
		i915_sw_fence_commit(&state->commit_ready);
		intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
		return ret;
	}

	ret = drm_atomic_helper_setup_commit(&state->base, nonblock);
	if (!ret)
		ret = drm_atomic_helper_swap_state(&state->base, true);
	if (!ret)
		intel_atomic_swap_global_state(state);

	if (ret) {
		/* Undo prepare_commit: release DSBs and plane resources. */
		struct intel_crtc_state *new_crtc_state;
		struct intel_crtc *crtc;
		int i;

		i915_sw_fence_commit(&state->commit_ready);

		for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
			intel_dsb_cleanup(new_crtc_state);

		drm_atomic_helper_cleanup_planes(dev, &state->base);
		intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
		return ret;
	}
	dev_priv->wm.distrust_bios_wm = false;
	intel_shared_dpll_swap_state(state);
	intel_atomic_track_fbs(state);

	/* Reference dropped by intel_atomic_cleanup_work(). */
	drm_atomic_state_get(&state->base);
	INIT_WORK(&state->base.commit_work, intel_atomic_commit_work);

	i915_sw_fence_commit(&state->commit_ready);
	if (nonblock && state->modeset) {
		queue_work(dev_priv->modeset_wq, &state->base.commit_work);
	} else if (nonblock) {
		queue_work(dev_priv->flip_wq, &state->base.commit_work);
	} else {
		/* Blocking commit: run the tail inline, after any queued modesets. */
		if (state->modeset)
			flush_workqueue(dev_priv->modeset_wq);
		intel_atomic_commit_tail(state);
	}

	return 0;
}
15862
/*
 * Book-keeping for boosting GPU frequency when a flip waits on a
 * request; armed by add_rps_boost_after_vblank(), fired and freed by
 * do_rps_boost().
 */
struct wait_rps_boost {
	/* entry on the crtc's vblank waitqueue */
	struct wait_queue_entry wait;

	/* crtc we hold a vblank reference on until the boost fires */
	struct drm_crtc *crtc;
	/* referenced request to boost if it hasn't started by the vblank */
	struct i915_request *request;
};
15869
15870static int do_rps_boost(struct wait_queue_entry *_wait,
15871			unsigned mode, int sync, void *key)
15872{
15873	struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait);
15874	struct i915_request *rq = wait->request;
15875
15876	/*
15877	 * If we missed the vblank, but the request is already running it
15878	 * is reasonable to assume that it will complete before the next
15879	 * vblank without our intervention, so leave RPS alone.
15880	 */
15881	if (!i915_request_started(rq))
15882		intel_rps_boost(rq);
15883	i915_request_put(rq);
15884
15885	drm_crtc_vblank_put(wait->crtc);
15886
15887	list_del(&wait->wait.entry);
15888	kfree(wait);
15889	return 1;
15890}
15891
15892static void add_rps_boost_after_vblank(struct drm_crtc *crtc,
15893				       struct dma_fence *fence)
15894{
15895	struct wait_rps_boost *wait;
15896
15897	if (!dma_fence_is_i915(fence))
15898		return;
15899
15900	if (INTEL_GEN(to_i915(crtc->dev)) < 6)
15901		return;
15902
15903	if (drm_crtc_vblank_get(crtc))
15904		return;
15905
15906	wait = kmalloc(sizeof(*wait), GFP_KERNEL);
15907	if (!wait) {
15908		drm_crtc_vblank_put(crtc);
15909		return;
15910	}
15911
15912	wait->request = to_request(dma_fence_get(fence));
15913	wait->crtc = crtc;
15914
15915	wait->wait.func = do_rps_boost;
15916	wait->wait.flags = 0;
15917
15918	add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait);
15919}
15920
15921static int intel_plane_pin_fb(struct intel_plane_state *plane_state)
15922{
15923	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
15924	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
15925	struct drm_framebuffer *fb = plane_state->hw.fb;
15926	struct i915_vma *vma;
15927
15928	if (plane->id == PLANE_CURSOR &&
15929	    INTEL_INFO(dev_priv)->display.cursor_needs_physical) {
15930		struct drm_i915_gem_object *obj = intel_fb_obj(fb);
15931		const int align = intel_cursor_alignment(dev_priv);
15932		int err;
15933
15934		err = i915_gem_object_attach_phys(obj, align);
15935		if (err)
15936			return err;
15937	}
15938
15939	vma = intel_pin_and_fence_fb_obj(fb,
15940					 &plane_state->view,
15941					 intel_plane_uses_fence(plane_state),
15942					 &plane_state->flags);
15943	if (IS_ERR(vma))
15944		return PTR_ERR(vma);
15945
15946	plane_state->vma = vma;
15947
15948	return 0;
15949}
15950
15951static void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
15952{
15953	struct i915_vma *vma;
15954
15955	vma = fetch_and_zero(&old_plane_state->vma);
15956	if (vma)
15957		intel_unpin_fb_vma(vma, old_plane_state->flags);
15958}
15959
15960static void fb_obj_bump_render_priority(struct drm_i915_gem_object *obj)
15961{
15962	struct i915_sched_attr attr = {
15963		.priority = I915_USER_PRIORITY(I915_PRIORITY_DISPLAY),
15964	};
15965
15966	i915_gem_object_wait_priority(obj, 0, &attr);
15967}
15968
15969/**
15970 * intel_prepare_plane_fb - Prepare fb for usage on plane
15971 * @_plane: drm plane to prepare for
15972 * @_new_plane_state: the plane state being prepared
15973 *
15974 * Prepares a framebuffer for usage on a display plane.  Generally this
15975 * involves pinning the underlying object and updating the frontbuffer tracking
15976 * bits.  Some older platforms need special physical address handling for
15977 * cursor planes.
15978 *
15979 * Returns 0 on success, negative error code on failure.
15980 */
15981int
15982intel_prepare_plane_fb(struct drm_plane *_plane,
15983		       struct drm_plane_state *_new_plane_state)
15984{
15985	struct intel_plane *plane = to_intel_plane(_plane);
15986	struct intel_plane_state *new_plane_state =
15987		to_intel_plane_state(_new_plane_state);
15988	struct intel_atomic_state *state =
15989		to_intel_atomic_state(new_plane_state->uapi.state);
15990	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
15991	const struct intel_plane_state *old_plane_state =
15992		intel_atomic_get_old_plane_state(state, plane);
15993	struct drm_i915_gem_object *obj = intel_fb_obj(new_plane_state->hw.fb);
15994	struct drm_i915_gem_object *old_obj = intel_fb_obj(old_plane_state->hw.fb);
15995	int ret;
15996
15997	if (old_obj) {
15998		const struct intel_crtc_state *crtc_state =
15999			intel_atomic_get_new_crtc_state(state,
16000							to_intel_crtc(old_plane_state->hw.crtc));
16001
16002		/* Big Hammer, we also need to ensure that any pending
16003		 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
16004		 * current scanout is retired before unpinning the old
16005		 * framebuffer. Note that we rely on userspace rendering
16006		 * into the buffer attached to the pipe they are waiting
16007		 * on. If not, userspace generates a GPU hang with IPEHR
16008		 * point to the MI_WAIT_FOR_EVENT.
16009		 *
16010		 * This should only fail upon a hung GPU, in which case we
16011		 * can safely continue.
16012		 */
16013		if (needs_modeset(crtc_state)) {
16014			ret = i915_sw_fence_await_reservation(&state->commit_ready,
16015							      old_obj->base.resv, NULL,
16016							      false, 0,
16017							      GFP_KERNEL);
16018			if (ret < 0)
16019				return ret;
16020		}
16021	}
16022
16023	if (new_plane_state->uapi.fence) { /* explicit fencing */
16024		ret = i915_sw_fence_await_dma_fence(&state->commit_ready,
16025						    new_plane_state->uapi.fence,
16026						    i915_fence_timeout(dev_priv),
16027						    GFP_KERNEL);
16028		if (ret < 0)
16029			return ret;
16030	}
16031
16032	if (!obj)
16033		return 0;
16034
16035	ret = i915_gem_object_pin_pages(obj);
16036	if (ret)
16037		return ret;
16038
16039	ret = intel_plane_pin_fb(new_plane_state);
16040
16041	i915_gem_object_unpin_pages(obj);
16042	if (ret)
16043		return ret;
16044
16045	fb_obj_bump_render_priority(obj);
16046	i915_gem_object_flush_frontbuffer(obj, ORIGIN_DIRTYFB);
16047
16048	if (!new_plane_state->uapi.fence) { /* implicit fencing */
16049		struct dma_fence *fence;
16050
16051		ret = i915_sw_fence_await_reservation(&state->commit_ready,
16052						      obj->base.resv, NULL,
16053						      false,
16054						      i915_fence_timeout(dev_priv),
16055						      GFP_KERNEL);
16056		if (ret < 0)
16057			goto unpin_fb;
16058
16059		fence = dma_resv_get_excl_rcu(obj->base.resv);
16060		if (fence) {
16061			add_rps_boost_after_vblank(new_plane_state->hw.crtc,
16062						   fence);
16063			dma_fence_put(fence);
16064		}
16065	} else {
16066		add_rps_boost_after_vblank(new_plane_state->hw.crtc,
16067					   new_plane_state->uapi.fence);
16068	}
16069
16070	/*
16071	 * We declare pageflips to be interactive and so merit a small bias
16072	 * towards upclocking to deliver the frame on time. By only changing
16073	 * the RPS thresholds to sample more regularly and aim for higher
16074	 * clocks we can hopefully deliver low power workloads (like kodi)
16075	 * that are not quite steady state without resorting to forcing
16076	 * maximum clocks following a vblank miss (see do_rps_boost()).
16077	 */
16078	if (!state->rps_interactive) {
16079		intel_rps_mark_interactive(&dev_priv->gt.rps, true);
16080		state->rps_interactive = true;
16081	}
16082
16083	return 0;
16084
16085unpin_fb:
16086	intel_plane_unpin_fb(new_plane_state);
16087
16088	return ret;
16089}
16090
16091/**
16092 * intel_cleanup_plane_fb - Cleans up an fb after plane use
16093 * @plane: drm plane to clean up for
16094 * @_old_plane_state: the state from the previous modeset
16095 *
16096 * Cleans up a framebuffer that has just been removed from a plane.
16097 */
16098void
16099intel_cleanup_plane_fb(struct drm_plane *plane,
16100		       struct drm_plane_state *_old_plane_state)
16101{
16102	struct intel_plane_state *old_plane_state =
16103		to_intel_plane_state(_old_plane_state);
16104	struct intel_atomic_state *state =
16105		to_intel_atomic_state(old_plane_state->uapi.state);
16106	struct drm_i915_private *dev_priv = to_i915(plane->dev);
16107	struct drm_i915_gem_object *obj = intel_fb_obj(old_plane_state->hw.fb);
16108
16109	if (!obj)
16110		return;
16111
16112	if (state->rps_interactive) {
16113		intel_rps_mark_interactive(&dev_priv->gt.rps, false);
16114		state->rps_interactive = false;
16115	}
16116
16117	/* Should only be called after a successful intel_prepare_plane_fb()! */
16118	intel_plane_unpin_fb(old_plane_state);
16119}
16120
16121/**
16122 * intel_plane_destroy - destroy a plane
16123 * @plane: plane to destroy
16124 *
16125 * Common destruction function for all types of planes (primary, cursor,
16126 * sprite).
16127 */
16128void intel_plane_destroy(struct drm_plane *plane)
16129{
16130	drm_plane_cleanup(plane);
16131	kfree(to_intel_plane(plane));
16132}
16133
16134static bool i8xx_plane_format_mod_supported(struct drm_plane *_plane,
16135					    u32 format, u64 modifier)
16136{
16137	switch (modifier) {
16138	case DRM_FORMAT_MOD_LINEAR:
16139	case I915_FORMAT_MOD_X_TILED:
16140		break;
16141	default:
16142		return false;
16143	}
16144
16145	switch (format) {
16146	case DRM_FORMAT_C8:
16147	case DRM_FORMAT_RGB565:
16148	case DRM_FORMAT_XRGB1555:
16149	case DRM_FORMAT_XRGB8888:
16150		return modifier == DRM_FORMAT_MOD_LINEAR ||
16151			modifier == I915_FORMAT_MOD_X_TILED;
16152	default:
16153		return false;
16154	}
16155}
16156
16157static bool i965_plane_format_mod_supported(struct drm_plane *_plane,
16158					    u32 format, u64 modifier)
16159{
16160	switch (modifier) {
16161	case DRM_FORMAT_MOD_LINEAR:
16162	case I915_FORMAT_MOD_X_TILED:
16163		break;
16164	default:
16165		return false;
16166	}
16167
16168	switch (format) {
16169	case DRM_FORMAT_C8:
16170	case DRM_FORMAT_RGB565:
16171	case DRM_FORMAT_XRGB8888:
16172	case DRM_FORMAT_XBGR8888:
16173	case DRM_FORMAT_ARGB8888:
16174	case DRM_FORMAT_ABGR8888:
16175	case DRM_FORMAT_XRGB2101010:
16176	case DRM_FORMAT_XBGR2101010:
16177	case DRM_FORMAT_ARGB2101010:
16178	case DRM_FORMAT_ABGR2101010:
16179	case DRM_FORMAT_XBGR16161616F:
16180		return modifier == DRM_FORMAT_MOD_LINEAR ||
16181			modifier == I915_FORMAT_MOD_X_TILED;
16182	default:
16183		return false;
16184	}
16185}
16186
16187static bool intel_cursor_format_mod_supported(struct drm_plane *_plane,
16188					      u32 format, u64 modifier)
16189{
16190	return modifier == DRM_FORMAT_MOD_LINEAR &&
16191		format == DRM_FORMAT_ARGB8888;
16192}
16193
/* Plane vtable using the stock atomic helpers; i965 format checking. */
static const struct drm_plane_funcs i965_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = intel_plane_destroy,
	.atomic_duplicate_state = intel_plane_duplicate_state,
	.atomic_destroy_state = intel_plane_destroy_state,
	.format_mod_supported = i965_plane_format_mod_supported,
};
16202
/* Plane vtable using the stock atomic helpers; i8xx format checking. */
static const struct drm_plane_funcs i8xx_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = intel_plane_destroy,
	.atomic_duplicate_state = intel_plane_duplicate_state,
	.atomic_destroy_state = intel_plane_destroy_state,
	.format_mod_supported = i8xx_plane_format_mod_supported,
};
16211
/*
 * Fast path for legacy cursor ioctls: updates the cursor plane without
 * a full atomic commit when only the fb or position changed. Any case
 * the fast path can't handle safely falls back to
 * drm_atomic_helper_update_plane() (the "slow" label).
 */
static int
intel_legacy_cursor_update(struct drm_plane *_plane,
			   struct drm_crtc *_crtc,
			   struct drm_framebuffer *fb,
			   int crtc_x, int crtc_y,
			   unsigned int crtc_w, unsigned int crtc_h,
			   u32 src_x, u32 src_y,
			   u32 src_w, u32 src_h,
			   struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_plane *plane = to_intel_plane(_plane);
	struct intel_crtc *crtc = to_intel_crtc(_crtc);
	struct intel_plane_state *old_plane_state =
		to_intel_plane_state(plane->base.state);
	struct intel_plane_state *new_plane_state;
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->base.state);
	struct intel_crtc_state *new_crtc_state;
	int ret;

	/*
	 * When crtc is inactive or there is a modeset pending,
	 * wait for it to complete in the slowpath
	 */
	if (!crtc_state->hw.active || needs_modeset(crtc_state) ||
	    crtc_state->update_pipe)
		goto slow;

	/*
	 * Don't do an async update if there is an outstanding commit modifying
	 * the plane.  This prevents our async update's changes from getting
	 * overridden by a previous synchronous update's state.
	 */
	if (old_plane_state->uapi.commit &&
	    !try_wait_for_completion(&old_plane_state->uapi.commit->hw_done))
		goto slow;

	/*
	 * If any parameters change that may affect watermarks,
	 * take the slowpath. Only changing fb or position should be
	 * in the fastpath.
	 */
	if (old_plane_state->uapi.crtc != &crtc->base ||
	    old_plane_state->uapi.src_w != src_w ||
	    old_plane_state->uapi.src_h != src_h ||
	    old_plane_state->uapi.crtc_w != crtc_w ||
	    old_plane_state->uapi.crtc_h != crtc_h ||
	    !old_plane_state->uapi.fb != !fb)
		goto slow;

	new_plane_state = to_intel_plane_state(intel_plane_duplicate_state(&plane->base));
	if (!new_plane_state)
		return -ENOMEM;

	new_crtc_state = to_intel_crtc_state(intel_crtc_duplicate_state(&crtc->base));
	if (!new_crtc_state) {
		ret = -ENOMEM;
		goto out_free;
	}

	drm_atomic_set_fb_for_plane(&new_plane_state->uapi, fb);

	/* Fill the duplicated state with the requested geometry. */
	new_plane_state->uapi.src_x = src_x;
	new_plane_state->uapi.src_y = src_y;
	new_plane_state->uapi.src_w = src_w;
	new_plane_state->uapi.src_h = src_h;
	new_plane_state->uapi.crtc_x = crtc_x;
	new_plane_state->uapi.crtc_y = crtc_y;
	new_plane_state->uapi.crtc_w = crtc_w;
	new_plane_state->uapi.crtc_h = crtc_h;

	intel_plane_copy_uapi_to_hw_state(new_plane_state, new_plane_state);

	ret = intel_plane_atomic_check_with_state(crtc_state, new_crtc_state,
						  old_plane_state, new_plane_state);
	if (ret)
		goto out_free;

	ret = intel_plane_pin_fb(new_plane_state);
	if (ret)
		goto out_free;

	intel_frontbuffer_flush(to_intel_frontbuffer(new_plane_state->hw.fb),
				ORIGIN_FLIP);
	intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb),
				to_intel_frontbuffer(new_plane_state->hw.fb),
				plane->frontbuffer_bit);

	/* Swap plane state */
	plane->base.state = &new_plane_state->uapi;

	/*
	 * We cannot swap crtc_state as it may be in use by an atomic commit or
	 * page flip that's running simultaneously. If we swap crtc_state and
	 * destroy the old state, we will cause a use-after-free there.
	 *
	 * Only update active_planes, which is needed for our internal
	 * bookkeeping. Either value will do the right thing when updating
	 * planes atomically. If the cursor was part of the atomic update then
	 * we would have taken the slowpath.
	 */
	crtc_state->active_planes = new_crtc_state->active_planes;

	if (new_plane_state->uapi.visible)
		intel_update_plane(plane, crtc_state, new_plane_state);
	else
		intel_disable_plane(plane, crtc_state);

	intel_plane_unpin_fb(old_plane_state);

out_free:
	/* On success the old plane state is freed; on error the new one is. */
	if (new_crtc_state)
		intel_crtc_destroy_state(&crtc->base, &new_crtc_state->uapi);
	if (ret)
		intel_plane_destroy_state(&plane->base, &new_plane_state->uapi);
	else
		intel_plane_destroy_state(&plane->base, &old_plane_state->uapi);
	return ret;

slow:
	return drm_atomic_helper_update_plane(&plane->base, &crtc->base, fb,
					      crtc_x, crtc_y, crtc_w, crtc_h,
					      src_x, src_y, src_w, src_h, ctx);
}
16336
/*
 * Legacy plane funcs for the cursor plane. update_plane is routed to
 * intel_legacy_cursor_update() so that cursor moves can take a
 * synchronous fastpath instead of a full atomic commit.
 */
static const struct drm_plane_funcs intel_cursor_plane_funcs = {
	.update_plane = intel_legacy_cursor_update,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = intel_plane_destroy,
	.atomic_duplicate_state = intel_plane_duplicate_state,
	.atomic_destroy_state = intel_plane_destroy_state,
	.format_mod_supported = intel_cursor_format_mod_supported,
};
16345
16346static bool i9xx_plane_has_fbc(struct drm_i915_private *dev_priv,
16347			       enum i9xx_plane_id i9xx_plane)
16348{
16349	if (!HAS_FBC(dev_priv))
16350		return false;
16351
16352	if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
16353		return i9xx_plane == PLANE_A; /* tied to pipe A */
16354	else if (IS_IVYBRIDGE(dev_priv))
16355		return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B ||
16356			i9xx_plane == PLANE_C;
16357	else if (INTEL_GEN(dev_priv) >= 4)
16358		return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B;
16359	else
16360		return i9xx_plane == PLANE_A;
16361}
16362
/*
 * intel_primary_plane_create - create the primary plane for a pipe
 * @dev_priv: i915 device
 * @pipe: pipe the plane will be bound to
 *
 * Picks the plane<->pipe mapping, the supported formats/modifiers and
 * the per-platform plane vfuncs, then registers the plane with drm
 * core. Returns the new plane, or an ERR_PTR on failure.
 */
static struct intel_plane *
intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_plane *plane;
	const struct drm_plane_funcs *plane_funcs;
	unsigned int supported_rotations;
	const u32 *formats;
	int num_formats;
	int ret, zpos;

	/* skl+ primary planes are handled by the universal plane code */
	if (INTEL_GEN(dev_priv) >= 9)
		return skl_universal_plane_create(dev_priv, pipe,
						  PLANE_PRIMARY);

	plane = intel_plane_alloc();
	if (IS_ERR(plane))
		return plane;

	plane->pipe = pipe;
	/*
	 * On gen2/3 only plane A can do FBC, but the panel fitter and LVDS
	 * port is hooked to pipe B. Hence we want plane A feeding pipe B.
	 */
	if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4 &&
	    INTEL_NUM_PIPES(dev_priv) == 2)
		plane->i9xx_plane = (enum i9xx_plane_id) !pipe;
	else
		plane->i9xx_plane = (enum i9xx_plane_id) pipe;
	plane->id = PLANE_PRIMARY;
	plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane->id);

	/* Advertise this plane to FBC if the platform allows it */
	plane->has_fbc = i9xx_plane_has_fbc(dev_priv, plane->i9xx_plane);
	if (plane->has_fbc) {
		struct intel_fbc *fbc = &dev_priv->fbc;

		fbc->possible_framebuffer_bits |= plane->frontbuffer_bit;
	}

	/* Select the pixel format list for this platform */
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		formats = vlv_primary_formats;
		num_formats = ARRAY_SIZE(vlv_primary_formats);
	} else if (INTEL_GEN(dev_priv) >= 4) {
		/*
		 * WaFP16GammaEnabling:ivb
		 * "Workaround : When using the 64-bit format, the plane
		 *  output on each color channel has one quarter amplitude.
		 *  It can be brought up to full amplitude by using pipe
		 *  gamma correction or pipe color space conversion to
		 *  multiply the plane output by four."
		 *
		 * There is no dedicated plane gamma for the primary plane,
		 * and using the pipe gamma/csc could conflict with other
		 * planes, so we choose not to expose fp16 on IVB primary
		 * planes. HSW primary planes no longer have this problem.
		 */
		if (IS_IVYBRIDGE(dev_priv)) {
			formats = ivb_primary_formats;
			num_formats = ARRAY_SIZE(ivb_primary_formats);
		} else {
			formats = i965_primary_formats;
			num_formats = ARRAY_SIZE(i965_primary_formats);
		}
	} else {
		formats = i8xx_primary_formats;
		num_formats = ARRAY_SIZE(i8xx_primary_formats);
	}

	if (INTEL_GEN(dev_priv) >= 4)
		plane_funcs = &i965_plane_funcs;
	else
		plane_funcs = &i8xx_plane_funcs;

	/* Per-platform minimum cdclk requirement hook */
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		plane->min_cdclk = vlv_plane_min_cdclk;
	else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
		plane->min_cdclk = hsw_plane_min_cdclk;
	else if (IS_IVYBRIDGE(dev_priv))
		plane->min_cdclk = ivb_plane_min_cdclk;
	else
		plane->min_cdclk = i9xx_plane_min_cdclk;

	plane->max_stride = i9xx_plane_max_stride;
	plane->update_plane = i9xx_update_plane;
	plane->disable_plane = i9xx_disable_plane;
	plane->get_hw_state = i9xx_plane_get_hw_state;
	plane->check_plane = i9xx_plane_check;

	/*
	 * g4x+ names the plane after its pipe ("primary C"), older
	 * platforms keep the raw plane name ("plane A") since the
	 * plane<->pipe mapping may be crossed (see FBC quirk above).
	 */
	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
		ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
					       0, plane_funcs,
					       formats, num_formats,
					       i9xx_format_modifiers,
					       DRM_PLANE_TYPE_PRIMARY,
					       "primary %c", pipe_name(pipe));
	else
		ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
					       0, plane_funcs,
					       formats, num_formats,
					       i9xx_format_modifiers,
					       DRM_PLANE_TYPE_PRIMARY,
					       "plane %c",
					       plane_name(plane->i9xx_plane));
	if (ret)
		goto fail;

	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
		supported_rotations =
			DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 |
			DRM_MODE_REFLECT_X;
	} else if (INTEL_GEN(dev_priv) >= 4) {
		supported_rotations =
			DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180;
	} else {
		supported_rotations = DRM_MODE_ROTATE_0;
	}

	if (INTEL_GEN(dev_priv) >= 4)
		drm_plane_create_rotation_property(&plane->base,
						   DRM_MODE_ROTATE_0,
						   supported_rotations);

	/* Primary plane always sits at the bottom of the zpos stack */
	zpos = 0;
	drm_plane_create_zpos_immutable_property(&plane->base, zpos);

	drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs);

	return plane;

fail:
	intel_plane_free(plane);

	return ERR_PTR(ret);
}
16496
/*
 * intel_cursor_plane_create - create the cursor plane for a pipe
 * @dev_priv: i915 device
 * @pipe: pipe the cursor will be bound to
 *
 * Selects the i845- or i9xx-style cursor vfuncs and registers the
 * plane with drm core. Returns the new plane, or an ERR_PTR on
 * failure.
 */
static struct intel_plane *
intel_cursor_plane_create(struct drm_i915_private *dev_priv,
			  enum pipe pipe)
{
	struct intel_plane *cursor;
	int ret, zpos;

	cursor = intel_plane_alloc();
	if (IS_ERR(cursor))
		return cursor;

	cursor->pipe = pipe;
	cursor->i9xx_plane = (enum i9xx_plane_id) pipe;
	cursor->id = PLANE_CURSOR;
	cursor->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, cursor->id);

	if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
		cursor->max_stride = i845_cursor_max_stride;
		cursor->update_plane = i845_update_cursor;
		cursor->disable_plane = i845_disable_cursor;
		cursor->get_hw_state = i845_cursor_get_hw_state;
		cursor->check_plane = i845_check_cursor;
	} else {
		cursor->max_stride = i9xx_cursor_max_stride;
		cursor->update_plane = i9xx_update_cursor;
		cursor->disable_plane = i9xx_disable_cursor;
		cursor->get_hw_state = i9xx_cursor_get_hw_state;
		cursor->check_plane = i9xx_check_cursor;
	}

	/*
	 * NOTE(review): ~0 appears to be a "not yet programmed" sentinel
	 * for the cached cursor register values — confirm against the
	 * i845/i9xx update_cursor implementations.
	 */
	cursor->cursor.base = ~0;
	cursor->cursor.cntl = ~0;

	if (IS_I845G(dev_priv) || IS_I865G(dev_priv) || HAS_CUR_FBC(dev_priv))
		cursor->cursor.size = ~0;

	ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base,
				       0, &intel_cursor_plane_funcs,
				       intel_cursor_formats,
				       ARRAY_SIZE(intel_cursor_formats),
				       cursor_format_modifiers,
				       DRM_PLANE_TYPE_CURSOR,
				       "cursor %c", pipe_name(pipe));
	if (ret)
		goto fail;

	if (INTEL_GEN(dev_priv) >= 4)
		drm_plane_create_rotation_property(&cursor->base,
						   DRM_MODE_ROTATE_0,
						   DRM_MODE_ROTATE_0 |
						   DRM_MODE_ROTATE_180);

	/* Cursor always sits above all sprite planes */
	zpos = RUNTIME_INFO(dev_priv)->num_sprites[pipe] + 1;
	drm_plane_create_zpos_immutable_property(&cursor->base, zpos);

	if (INTEL_GEN(dev_priv) >= 12)
		drm_plane_enable_fb_damage_clips(&cursor->base);

	drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);

	return cursor;

fail:
	intel_plane_free(cursor);

	return ERR_PTR(ret);
}
16564
/*
 * drm_crtc_funcs entries common to every platform; the per-platform
 * tables below add only the vblank counter/irq hooks.
 */
#define INTEL_CRTC_FUNCS \
	.gamma_set = drm_atomic_helper_legacy_gamma_set, \
	.set_config = drm_atomic_helper_set_config, \
	.destroy = intel_crtc_destroy, \
	.page_flip = drm_atomic_helper_page_flip, \
	.atomic_duplicate_state = intel_crtc_duplicate_state, \
	.atomic_destroy_state = intel_crtc_destroy_state, \
	.set_crc_source = intel_crtc_set_crc_source, \
	.verify_crc_source = intel_crtc_verify_crc_source, \
	.get_crc_sources = intel_crtc_get_crc_sources
16575
/* Non-GMCH gen8+: g4x-style hw frame counter, bdw vblank irq hooks */
static const struct drm_crtc_funcs bdw_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = g4x_get_vblank_counter,
	.enable_vblank = bdw_enable_vblank,
	.disable_vblank = bdw_disable_vblank,
	.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};

/* Non-GMCH pre-gen8 (ilk..hsw): ilk vblank irq hooks */
static const struct drm_crtc_funcs ilk_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = g4x_get_vblank_counter,
	.enable_vblank = ilk_enable_vblank,
	.disable_vblank = ilk_disable_vblank,
	.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};

/* GMCH g4x/vlv/chv: g4x frame counter, i965 vblank irq hooks */
static const struct drm_crtc_funcs g4x_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = g4x_get_vblank_counter,
	.enable_vblank = i965_enable_vblank,
	.disable_vblank = i965_disable_vblank,
	.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};

/* GMCH gen4: i915-style frame counter, i965 vblank irq hooks */
static const struct drm_crtc_funcs i965_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = i915_get_vblank_counter,
	.enable_vblank = i965_enable_vblank,
	.disable_vblank = i965_disable_vblank,
	.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};

/* i915gm/i945gm: dedicated vblank irq hooks */
static const struct drm_crtc_funcs i915gm_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = i915_get_vblank_counter,
	.enable_vblank = i915gm_enable_vblank,
	.disable_vblank = i915gm_disable_vblank,
	.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};

/* Other gen3: i915 frame counter, i8xx vblank irq hooks */
static const struct drm_crtc_funcs i915_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = i915_get_vblank_counter,
	.enable_vblank = i8xx_enable_vblank,
	.disable_vblank = i8xx_disable_vblank,
	.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};

/* gen2: i8xx vblank irq hooks */
static const struct drm_crtc_funcs i8xx_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	/* no hw vblank counter */
	.enable_vblank = i8xx_enable_vblank,
	.disable_vblank = i8xx_disable_vblank,
	.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};
16638
16639static struct intel_crtc *intel_crtc_alloc(void)
16640{
16641	struct intel_crtc_state *crtc_state;
16642	struct intel_crtc *crtc;
16643
16644	crtc = kzalloc(sizeof(*crtc), GFP_KERNEL);
16645	if (!crtc)
16646		return ERR_PTR(-ENOMEM);
16647
16648	crtc_state = intel_crtc_state_alloc(crtc);
16649	if (!crtc_state) {
16650		kfree(crtc);
16651		return ERR_PTR(-ENOMEM);
16652	}
16653
16654	crtc->base.state = &crtc_state->uapi;
16655	crtc->config = crtc_state;
16656
16657	return crtc;
16658}
16659
/* Free a crtc allocated by intel_crtc_alloc(), including its state */
static void intel_crtc_free(struct intel_crtc *crtc)
{
	intel_crtc_destroy_state(&crtc->base, crtc->base.state);
	kfree(crtc);
}
16665
/*
 * Restrict every plane's possible_crtcs mask to the single crtc of the
 * pipe the plane is bound to.
 */
static void intel_plane_possible_crtcs_init(struct drm_i915_private *dev_priv)
{
	struct intel_plane *plane;

	for_each_intel_plane(&dev_priv->drm, plane) {
		struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv,
								  plane->pipe);

		plane->base.possible_crtcs = drm_crtc_mask(&crtc->base);
	}
}
16677
/*
 * intel_crtc_init - allocate and register the crtc for a pipe
 * @dev_priv: i915 device
 * @pipe: pipe to create the crtc for
 *
 * Creates the primary, sprite and cursor planes for the pipe, selects
 * the platform appropriate crtc vfuncs, registers the crtc with drm
 * core and records the pipe/plane -> crtc mappings.
 *
 * Returns 0 on success or a negative error code; all partially
 * created objects are freed on failure.
 */
static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_plane *primary, *cursor;
	const struct drm_crtc_funcs *funcs;
	struct intel_crtc *crtc;
	int sprite, ret;

	crtc = intel_crtc_alloc();
	if (IS_ERR(crtc))
		return PTR_ERR(crtc);

	crtc->pipe = pipe;
	crtc->num_scalers = RUNTIME_INFO(dev_priv)->num_scalers[pipe];

	primary = intel_primary_plane_create(dev_priv, pipe);
	if (IS_ERR(primary)) {
		ret = PTR_ERR(primary);
		goto fail;
	}
	crtc->plane_ids_mask |= BIT(primary->id);

	for_each_sprite(dev_priv, pipe, sprite) {
		struct intel_plane *plane;

		plane = intel_sprite_plane_create(dev_priv, pipe, sprite);
		if (IS_ERR(plane)) {
			ret = PTR_ERR(plane);
			goto fail;
		}
		crtc->plane_ids_mask |= BIT(plane->id);
	}

	cursor = intel_cursor_plane_create(dev_priv, pipe);
	if (IS_ERR(cursor)) {
		ret = PTR_ERR(cursor);
		goto fail;
	}
	crtc->plane_ids_mask |= BIT(cursor->id);

	/* Pick the vblank hooks matching the platform generation */
	if (HAS_GMCH(dev_priv)) {
		if (IS_CHERRYVIEW(dev_priv) ||
		    IS_VALLEYVIEW(dev_priv) || IS_G4X(dev_priv))
			funcs = &g4x_crtc_funcs;
		else if (IS_GEN(dev_priv, 4))
			funcs = &i965_crtc_funcs;
		else if (IS_I945GM(dev_priv) || IS_I915GM(dev_priv))
			funcs = &i915gm_crtc_funcs;
		else if (IS_GEN(dev_priv, 3))
			funcs = &i915_crtc_funcs;
		else
			funcs = &i8xx_crtc_funcs;
	} else {
		if (INTEL_GEN(dev_priv) >= 8)
			funcs = &bdw_crtc_funcs;
		else
			funcs = &ilk_crtc_funcs;
	}

	ret = drm_crtc_init_with_planes(&dev_priv->drm, &crtc->base,
					&primary->base, &cursor->base,
					funcs, "pipe %c", pipe_name(pipe));
	if (ret)
		goto fail;

	/* Each pipe must map to exactly one crtc */
	BUG_ON(pipe >= ARRAY_SIZE(dev_priv->pipe_to_crtc_mapping) ||
	       dev_priv->pipe_to_crtc_mapping[pipe] != NULL);
	dev_priv->pipe_to_crtc_mapping[pipe] = crtc;

	if (INTEL_GEN(dev_priv) < 9) {
		enum i9xx_plane_id i9xx_plane = primary->i9xx_plane;

		/* Likewise each pre-skl primary plane maps to one crtc */
		BUG_ON(i9xx_plane >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
		       dev_priv->plane_to_crtc_mapping[i9xx_plane] != NULL);
		dev_priv->plane_to_crtc_mapping[i9xx_plane] = crtc;
	}

	intel_color_init(crtc);

	intel_crtc_crc_init(crtc);

	drm_WARN_ON(&dev_priv->drm, drm_crtc_index(&crtc->base) != crtc->pipe);

	return 0;

fail:
	intel_crtc_free(crtc);

	return ret;
}
16767
16768int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
16769				      struct drm_file *file)
16770{
16771	struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
16772	struct drm_crtc *drmmode_crtc;
16773	struct intel_crtc *crtc;
16774
16775	drmmode_crtc = drm_crtc_find(dev, file, pipe_from_crtc_id->crtc_id);
16776	if (!drmmode_crtc)
16777		return -ENOENT;
16778
16779	crtc = to_intel_crtc(drmmode_crtc);
16780	pipe_from_crtc_id->pipe = crtc->pipe;
16781
16782	return 0;
16783}
16784
16785static u32 intel_encoder_possible_clones(struct intel_encoder *encoder)
16786{
16787	struct drm_device *dev = encoder->base.dev;
16788	struct intel_encoder *source_encoder;
16789	u32 possible_clones = 0;
16790
16791	for_each_intel_encoder(dev, source_encoder) {
16792		if (encoders_cloneable(encoder, source_encoder))
16793			possible_clones |= drm_encoder_mask(&source_encoder->base);
16794	}
16795
16796	return possible_clones;
16797}
16798
16799static u32 intel_encoder_possible_crtcs(struct intel_encoder *encoder)
16800{
16801	struct drm_device *dev = encoder->base.dev;
16802	struct intel_crtc *crtc;
16803	u32 possible_crtcs = 0;
16804
16805	for_each_intel_crtc(dev, crtc) {
16806		if (encoder->pipe_mask & BIT(crtc->pipe))
16807			possible_crtcs |= drm_crtc_mask(&crtc->base);
16808	}
16809
16810	return possible_crtcs;
16811}
16812
16813static bool ilk_has_edp_a(struct drm_i915_private *dev_priv)
16814{
16815	if (!IS_MOBILE(dev_priv))
16816		return false;
16817
16818	if ((intel_de_read(dev_priv, DP_A) & DP_DETECTED) == 0)
16819		return false;
16820
16821	if (IS_GEN(dev_priv, 5) && (intel_de_read(dev_priv, FUSE_STRAP) & ILK_eDP_A_DISABLE))
16822		return false;
16823
16824	return true;
16825}
16826
16827static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv)
16828{
16829	if (INTEL_GEN(dev_priv) >= 9)
16830		return false;
16831
16832	if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
16833		return false;
16834
16835	if (HAS_PCH_LPT_H(dev_priv) &&
16836	    intel_de_read(dev_priv, SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
16837		return false;
16838
16839	/* DDI E can't be used if DDI A requires 4 lanes */
16840	if (intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
16841		return false;
16842
16843	if (!dev_priv->vbt.int_crt_support)
16844		return false;
16845
16846	return true;
16847}
16848
16849void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv)
16850{
16851	int pps_num;
16852	int pps_idx;
16853
16854	if (HAS_DDI(dev_priv))
16855		return;
16856	/*
16857	 * This w/a is needed at least on CPT/PPT, but to be sure apply it
16858	 * everywhere where registers can be write protected.
16859	 */
16860	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
16861		pps_num = 2;
16862	else
16863		pps_num = 1;
16864
16865	for (pps_idx = 0; pps_idx < pps_num; pps_idx++) {
16866		u32 val = intel_de_read(dev_priv, PP_CONTROL(pps_idx));
16867
16868		val = (val & ~PANEL_UNLOCK_MASK) | PANEL_UNLOCK_REGS;
16869		intel_de_write(dev_priv, PP_CONTROL(pps_idx), val);
16870	}
16871}
16872
/*
 * Select the panel power sequencer register block base for this
 * platform and apply the PPS register-unlock workaround.
 */
static void intel_pps_init(struct drm_i915_private *dev_priv)
{
	if (HAS_PCH_SPLIT(dev_priv) || IS_GEN9_LP(dev_priv))
		dev_priv->pps_mmio_base = PCH_PPS_BASE;
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		dev_priv->pps_mmio_base = VLV_PPS_BASE;
	else
		dev_priv->pps_mmio_base = PPS_BASE;

	intel_pps_unlock_regs_wa(dev_priv);
}
16884
/*
 * intel_setup_outputs - probe and register all display outputs
 * @dev_priv: i915 device
 *
 * Walks the platform specific set of possible output ports (DDI, DP,
 * HDMI, SDVO, LVDS, CRT, DSI, TV), registering an encoder for each
 * port found present via hw straps, fuses and/or VBT. Afterwards the
 * possible_crtcs/possible_clones masks are computed for every
 * registered encoder. The order of the init calls within each branch
 * is significant (e.g. LVDS before eDP, SDVO before HDMI/DP probing
 * on shared pins).
 */
static void intel_setup_outputs(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	bool dpd_is_edp = false;

	intel_pps_init(dev_priv);

	if (!HAS_DISPLAY(dev_priv))
		return;

	if (IS_ROCKETLAKE(dev_priv)) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_D);	/* DDI TC1 */
		intel_ddi_init(dev_priv, PORT_E);	/* DDI TC2 */
	} else if (INTEL_GEN(dev_priv) >= 12) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_D);
		intel_ddi_init(dev_priv, PORT_E);
		intel_ddi_init(dev_priv, PORT_F);
		intel_ddi_init(dev_priv, PORT_G);
		intel_ddi_init(dev_priv, PORT_H);
		intel_ddi_init(dev_priv, PORT_I);
		icl_dsi_init(dev_priv);
	} else if (IS_ELKHARTLAKE(dev_priv)) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_C);
		intel_ddi_init(dev_priv, PORT_D);
		icl_dsi_init(dev_priv);
	} else if (IS_GEN(dev_priv, 11)) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_C);
		intel_ddi_init(dev_priv, PORT_D);
		intel_ddi_init(dev_priv, PORT_E);
		/*
		 * On some ICL SKUs port F is not present. No strap bits for
		 * this, so rely on VBT.
		 * Work around broken VBTs on SKUs known to have no port F.
		 */
		if (IS_ICL_WITH_PORT_F(dev_priv) &&
		    intel_bios_is_port_present(dev_priv, PORT_F))
			intel_ddi_init(dev_priv, PORT_F);

		icl_dsi_init(dev_priv);
	} else if (IS_GEN9_LP(dev_priv)) {
		/*
		 * FIXME: Broxton doesn't support port detection via the
		 * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to
		 * detect the ports.
		 */
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_C);

		vlv_dsi_init(dev_priv);
	} else if (HAS_DDI(dev_priv)) {
		int found;

		if (intel_ddi_crt_present(dev_priv))
			intel_crt_init(dev_priv);

		/*
		 * Haswell uses DDI functions to detect digital outputs.
		 * On SKL pre-D0 the strap isn't connected, so we assume
		 * it's there.
		 */
		found = intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
		/* WaIgnoreDDIAStrap: skl */
		if (found || IS_GEN9_BC(dev_priv))
			intel_ddi_init(dev_priv, PORT_A);

		/* DDI B, C, D, and F detection is indicated by the SFUSE_STRAP
		 * register */
		found = intel_de_read(dev_priv, SFUSE_STRAP);

		if (found & SFUSE_STRAP_DDIB_DETECTED)
			intel_ddi_init(dev_priv, PORT_B);
		if (found & SFUSE_STRAP_DDIC_DETECTED)
			intel_ddi_init(dev_priv, PORT_C);
		if (found & SFUSE_STRAP_DDID_DETECTED)
			intel_ddi_init(dev_priv, PORT_D);
		if (found & SFUSE_STRAP_DDIF_DETECTED)
			intel_ddi_init(dev_priv, PORT_F);
		/*
		 * On SKL we don't have a way to detect DDI-E so we rely on VBT.
		 */
		if (IS_GEN9_BC(dev_priv) &&
		    intel_bios_is_port_present(dev_priv, PORT_E))
			intel_ddi_init(dev_priv, PORT_E);

	} else if (HAS_PCH_SPLIT(dev_priv)) {
		int found;

		/*
		 * intel_edp_init_connector() depends on this completing first,
		 * to prevent the registration of both eDP and LVDS and the
		 * incorrect sharing of the PPS.
		 */
		intel_lvds_init(dev_priv);
		intel_crt_init(dev_priv);

		dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D);

		if (ilk_has_edp_a(dev_priv))
			intel_dp_init(dev_priv, DP_A, PORT_A);

		if (intel_de_read(dev_priv, PCH_HDMIB) & SDVO_DETECTED) {
			/* PCH SDVOB multiplex with HDMIB */
			found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B);
			if (!found)
				intel_hdmi_init(dev_priv, PCH_HDMIB, PORT_B);
			if (!found && (intel_de_read(dev_priv, PCH_DP_B) & DP_DETECTED))
				intel_dp_init(dev_priv, PCH_DP_B, PORT_B);
		}

		if (intel_de_read(dev_priv, PCH_HDMIC) & SDVO_DETECTED)
			intel_hdmi_init(dev_priv, PCH_HDMIC, PORT_C);

		if (!dpd_is_edp && intel_de_read(dev_priv, PCH_HDMID) & SDVO_DETECTED)
			intel_hdmi_init(dev_priv, PCH_HDMID, PORT_D);

		if (intel_de_read(dev_priv, PCH_DP_C) & DP_DETECTED)
			intel_dp_init(dev_priv, PCH_DP_C, PORT_C);

		if (intel_de_read(dev_priv, PCH_DP_D) & DP_DETECTED)
			intel_dp_init(dev_priv, PCH_DP_D, PORT_D);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		bool has_edp, has_port;

		if (IS_VALLEYVIEW(dev_priv) && dev_priv->vbt.int_crt_support)
			intel_crt_init(dev_priv);

		/*
		 * The DP_DETECTED bit is the latched state of the DDC
		 * SDA pin at boot. However since eDP doesn't require DDC
		 * (no way to plug in a DP->HDMI dongle) the DDC pins for
		 * eDP ports may have been muxed to an alternate function.
		 * Thus we can't rely on the DP_DETECTED bit alone to detect
		 * eDP ports. Consult the VBT as well as DP_DETECTED to
		 * detect eDP ports.
		 *
		 * Sadly the straps seem to be missing sometimes even for HDMI
		 * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap
		 * and VBT for the presence of the port. Additionally we can't
		 * trust the port type the VBT declares as we've seen at least
		 * HDMI ports that the VBT claim are DP or eDP.
		 */
		has_edp = intel_dp_is_port_edp(dev_priv, PORT_B);
		has_port = intel_bios_is_port_present(dev_priv, PORT_B);
		if (intel_de_read(dev_priv, VLV_DP_B) & DP_DETECTED || has_port)
			has_edp &= intel_dp_init(dev_priv, VLV_DP_B, PORT_B);
		if ((intel_de_read(dev_priv, VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
			intel_hdmi_init(dev_priv, VLV_HDMIB, PORT_B);

		has_edp = intel_dp_is_port_edp(dev_priv, PORT_C);
		has_port = intel_bios_is_port_present(dev_priv, PORT_C);
		if (intel_de_read(dev_priv, VLV_DP_C) & DP_DETECTED || has_port)
			has_edp &= intel_dp_init(dev_priv, VLV_DP_C, PORT_C);
		if ((intel_de_read(dev_priv, VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
			intel_hdmi_init(dev_priv, VLV_HDMIC, PORT_C);

		if (IS_CHERRYVIEW(dev_priv)) {
			/*
			 * eDP not supported on port D,
			 * so no need to worry about it
			 */
			has_port = intel_bios_is_port_present(dev_priv, PORT_D);
			if (intel_de_read(dev_priv, CHV_DP_D) & DP_DETECTED || has_port)
				intel_dp_init(dev_priv, CHV_DP_D, PORT_D);
			if (intel_de_read(dev_priv, CHV_HDMID) & SDVO_DETECTED || has_port)
				intel_hdmi_init(dev_priv, CHV_HDMID, PORT_D);
		}

		vlv_dsi_init(dev_priv);
	} else if (IS_PINEVIEW(dev_priv)) {
		intel_lvds_init(dev_priv);
		intel_crt_init(dev_priv);
	} else if (IS_GEN_RANGE(dev_priv, 3, 4)) {
		bool found = false;

		if (IS_MOBILE(dev_priv))
			intel_lvds_init(dev_priv);

		intel_crt_init(dev_priv);

		if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
			drm_dbg_kms(&dev_priv->drm, "probing SDVOB\n");
			found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B);
			if (!found && IS_G4X(dev_priv)) {
				drm_dbg_kms(&dev_priv->drm,
					    "probing HDMI on SDVOB\n");
				intel_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B);
			}

			if (!found && IS_G4X(dev_priv))
				intel_dp_init(dev_priv, DP_B, PORT_B);
		}

		/* Before G4X SDVOC doesn't have its own detect register */

		if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
			drm_dbg_kms(&dev_priv->drm, "probing SDVOC\n");
			found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C);
		}

		if (!found && (intel_de_read(dev_priv, GEN3_SDVOC) & SDVO_DETECTED)) {

			if (IS_G4X(dev_priv)) {
				drm_dbg_kms(&dev_priv->drm,
					    "probing HDMI on SDVOC\n");
				intel_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C);
			}
			if (IS_G4X(dev_priv))
				intel_dp_init(dev_priv, DP_C, PORT_C);
		}

		if (IS_G4X(dev_priv) && (intel_de_read(dev_priv, DP_D) & DP_DETECTED))
			intel_dp_init(dev_priv, DP_D, PORT_D);

		if (SUPPORTS_TV(dev_priv))
			intel_tv_init(dev_priv);
	} else if (IS_GEN(dev_priv, 2)) {
		if (IS_I85X(dev_priv))
			intel_lvds_init(dev_priv);

		intel_crt_init(dev_priv);
		intel_dvo_init(dev_priv);
	}

	intel_psr_init(dev_priv);

	/* Now that all encoders exist, fill in their crtc/clone masks */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		encoder->base.possible_crtcs =
			intel_encoder_possible_crtcs(encoder);
		encoder->base.possible_clones =
			intel_encoder_possible_clones(encoder);
	}

	intel_init_pch_refclk(dev_priv);

	drm_helper_move_panel_connectors_to_head(&dev_priv->drm);
}
17130
/*
 * drm_framebuffer_funcs.destroy: unregister the fb, drop the
 * frontbuffer reference and free the wrapper.
 */
static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);

	drm_framebuffer_cleanup(fb);
	intel_frontbuffer_put(intel_fb->frontbuffer);

	kfree(intel_fb);
}
17140
/*
 * drm_framebuffer_funcs.create_handle: export a GEM handle for the
 * fb's backing object. Denied (-EINVAL) for userptr backed objects.
 */
static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
						struct drm_file *file,
						unsigned int *handle)
{
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct drm_i915_private *i915 = to_i915(obj->base.dev);

	if (obj->userptr.mm) {
		drm_dbg(&i915->drm,
			"attempting to use a userptr for a framebuffer, denied\n");
		return -EINVAL;
	}

	return drm_gem_handle_create(file, &obj->base, handle);
}
17156
/*
 * drm_framebuffer_funcs.dirty: DIRTYFB ioctl hook. Flushes frontbuffer
 * rendering for the whole fb; the clip rects are not used.
 */
static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
					struct drm_file *file,
					unsigned flags, unsigned color,
					struct drm_clip_rect *clips,
					unsigned num_clips)
{
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);

	i915_gem_object_flush_if_display(obj);
	intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);

	return 0;
}
17170
/* Framebuffer vfuncs for user-created (addfb) framebuffers */
static const struct drm_framebuffer_funcs intel_fb_funcs = {
	.destroy = intel_user_framebuffer_destroy,
	.create_handle = intel_user_framebuffer_create_handle,
	.dirty = intel_user_framebuffer_dirty,
};
17176
/*
 * Validate an addfb(2) request and initialize the intel_framebuffer.
 *
 * Checks the modifier against the object's fence tiling mode, the pixel
 * format/modifier combo against the planes, the pitches against the
 * per-platform stride limits and alignment, and the per-plane handles
 * and offsets, before registering the framebuffer with the DRM core.
 *
 * Returns 0 on success or a negative error code. On failure the
 * frontbuffer reference acquired here is dropped again.
 */
static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
				  struct drm_i915_gem_object *obj,
				  struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct drm_framebuffer *fb = &intel_fb->base;
	u32 max_stride;
	unsigned int tiling, stride;
	int ret = -EINVAL;
	int i;

	/* Frontbuffer tracking structure is mandatory for an fb. */
	intel_fb->frontbuffer = intel_frontbuffer_get(obj);
	if (!intel_fb->frontbuffer)
		return -ENOMEM;

	/* Snapshot tiling/stride under the object lock for consistency. */
	i915_gem_object_lock(obj, NULL);
	tiling = i915_gem_object_get_tiling(obj);
	stride = i915_gem_object_get_stride(obj);
	i915_gem_object_unlock(obj);

	if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
		/*
		 * If there's a fence, enforce that
		 * the fb modifier and tiling mode match.
		 */
		if (tiling != I915_TILING_NONE &&
		    tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
			drm_dbg_kms(&dev_priv->drm,
				    "tiling_mode doesn't match fb modifier\n");
			goto err;
		}
	} else {
		/* Legacy addfb: derive the modifier from the tiling mode. */
		if (tiling == I915_TILING_X) {
			mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
		} else if (tiling == I915_TILING_Y) {
			drm_dbg_kms(&dev_priv->drm,
				    "No Y tiling for legacy addfb\n");
			goto err;
		}
	}

	/* Reject format/modifier combos no plane can scan out. */
	if (!drm_any_plane_has_format(&dev_priv->drm,
				      mode_cmd->pixel_format,
				      mode_cmd->modifier[0])) {
		struct drm_format_name_buf format_name;

		drm_dbg_kms(&dev_priv->drm,
			    "unsupported pixel format %s / modifier 0x%llx\n",
			    drm_get_format_name(mode_cmd->pixel_format,
						&format_name),
			    mode_cmd->modifier[0]);
		goto err;
	}

	/*
	 * gen2/3 display engine uses the fence if present,
	 * so the tiling mode must match the fb modifier exactly.
	 */
	if (INTEL_GEN(dev_priv) < 4 &&
	    tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
		drm_dbg_kms(&dev_priv->drm,
			    "tiling_mode must match fb modifier exactly on gen2/3\n");
		goto err;
	}

	max_stride = intel_fb_max_stride(dev_priv, mode_cmd->pixel_format,
					 mode_cmd->modifier[0]);
	if (mode_cmd->pitches[0] > max_stride) {
		drm_dbg_kms(&dev_priv->drm,
			    "%s pitch (%u) must be at most %d\n",
			    mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ?
			    "tiled" : "linear",
			    mode_cmd->pitches[0], max_stride);
		goto err;
	}

	/*
	 * If there's a fence, enforce that
	 * the fb pitch and fence stride match.
	 */
	if (tiling != I915_TILING_NONE && mode_cmd->pitches[0] != stride) {
		drm_dbg_kms(&dev_priv->drm,
			    "pitch (%d) must match tiling stride (%d)\n",
			    mode_cmd->pitches[0], stride);
		goto err;
	}

	/* FIXME need to adjust LINOFF/TILEOFF accordingly. */
	if (mode_cmd->offsets[0] != 0) {
		drm_dbg_kms(&dev_priv->drm,
			    "plane 0 offset (0x%08x) must be 0\n",
			    mode_cmd->offsets[0]);
		goto err;
	}

	drm_helper_mode_fill_fb_struct(&dev_priv->drm, fb, mode_cmd);

	for (i = 0; i < fb->format->num_planes; i++) {
		u32 stride_alignment;

		/* All planes must come from the single GEM object. */
		if (mode_cmd->handles[i] != mode_cmd->handles[0]) {
			drm_dbg_kms(&dev_priv->drm, "bad plane %d handle\n",
				    i);
			goto err;
		}

		stride_alignment = intel_fb_stride_alignment(fb, i);
		/* Alignment is a power of two, so mask off the low bits. */
		if (fb->pitches[i] & (stride_alignment - 1)) {
			drm_dbg_kms(&dev_priv->drm,
				    "plane %d pitch (%d) must be at least %u byte aligned\n",
				    i, fb->pitches[i], stride_alignment);
			goto err;
		}

		/* Gen12 CCS aux planes require one exact pitch value. */
		if (is_gen12_ccs_plane(fb, i)) {
			int ccs_aux_stride = gen12_ccs_aux_stride(fb, i);

			if (fb->pitches[i] != ccs_aux_stride) {
				drm_dbg_kms(&dev_priv->drm,
					    "ccs aux plane %d pitch (%d) must be %d\n",
					    i,
					    fb->pitches[i], ccs_aux_stride);
				goto err;
			}
		}

		fb->obj[i] = &obj->base;
	}

	ret = intel_fill_fb_info(dev_priv, fb);
	if (ret)
		goto err;

	ret = drm_framebuffer_init(&dev_priv->drm, fb, &intel_fb_funcs);
	if (ret) {
		drm_err(&dev_priv->drm, "framebuffer init failed %d\n", ret);
		goto err;
	}

	return 0;

err:
	/* Balance the intel_frontbuffer_get() from above. */
	intel_frontbuffer_put(intel_fb->frontbuffer);
	return ret;
}
17322
17323static struct drm_framebuffer *
17324intel_user_framebuffer_create(struct drm_device *dev,
17325			      struct drm_file *filp,
17326			      const struct drm_mode_fb_cmd2 *user_mode_cmd)
17327{
17328	struct drm_framebuffer *fb;
17329	struct drm_i915_gem_object *obj;
17330	struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;
17331
17332	obj = i915_gem_object_lookup(filp, mode_cmd.handles[0]);
17333	if (!obj)
17334		return ERR_PTR(-ENOENT);
17335
17336	fb = intel_framebuffer_create(obj, &mode_cmd);
17337	i915_gem_object_put(obj);
17338
17339	return fb;
17340}
17341
17342static enum drm_mode_status
17343intel_mode_valid(struct drm_device *dev,
17344		 const struct drm_display_mode *mode)
17345{
17346	struct drm_i915_private *dev_priv = to_i915(dev);
17347	int hdisplay_max, htotal_max;
17348	int vdisplay_max, vtotal_max;
17349
17350	/*
17351	 * Can't reject DBLSCAN here because Xorg ddxen can add piles
17352	 * of DBLSCAN modes to the output's mode list when they detect
17353	 * the scaling mode property on the connector. And they don't
17354	 * ask the kernel to validate those modes in any way until
17355	 * modeset time at which point the client gets a protocol error.
17356	 * So in order to not upset those clients we silently ignore the
17357	 * DBLSCAN flag on such connectors. For other connectors we will
17358	 * reject modes with the DBLSCAN flag in encoder->compute_config().
17359	 * And we always reject DBLSCAN modes in connector->mode_valid()
17360	 * as we never want such modes on the connector's mode list.
17361	 */
17362
17363	if (mode->vscan > 1)
17364		return MODE_NO_VSCAN;
17365
17366	if (mode->flags & DRM_MODE_FLAG_HSKEW)
17367		return MODE_H_ILLEGAL;
17368
17369	if (mode->flags & (DRM_MODE_FLAG_CSYNC |
17370			   DRM_MODE_FLAG_NCSYNC |
17371			   DRM_MODE_FLAG_PCSYNC))
17372		return MODE_HSYNC;
17373
17374	if (mode->flags & (DRM_MODE_FLAG_BCAST |
17375			   DRM_MODE_FLAG_PIXMUX |
17376			   DRM_MODE_FLAG_CLKDIV2))
17377		return MODE_BAD;
17378
17379	/* Transcoder timing limits */
17380	if (INTEL_GEN(dev_priv) >= 11) {
17381		hdisplay_max = 16384;
17382		vdisplay_max = 8192;
17383		htotal_max = 16384;
17384		vtotal_max = 8192;
17385	} else if (INTEL_GEN(dev_priv) >= 9 ||
17386		   IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
17387		hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */
17388		vdisplay_max = 4096;
17389		htotal_max = 8192;
17390		vtotal_max = 8192;
17391	} else if (INTEL_GEN(dev_priv) >= 3) {
17392		hdisplay_max = 4096;
17393		vdisplay_max = 4096;
17394		htotal_max = 8192;
17395		vtotal_max = 8192;
17396	} else {
17397		hdisplay_max = 2048;
17398		vdisplay_max = 2048;
17399		htotal_max = 4096;
17400		vtotal_max = 4096;
17401	}
17402
17403	if (mode->hdisplay > hdisplay_max ||
17404	    mode->hsync_start > htotal_max ||
17405	    mode->hsync_end > htotal_max ||
17406	    mode->htotal > htotal_max)
17407		return MODE_H_ILLEGAL;
17408
17409	if (mode->vdisplay > vdisplay_max ||
17410	    mode->vsync_start > vtotal_max ||
17411	    mode->vsync_end > vtotal_max ||
17412	    mode->vtotal > vtotal_max)
17413		return MODE_V_ILLEGAL;
17414
17415	if (INTEL_GEN(dev_priv) >= 5) {
17416		if (mode->hdisplay < 64 ||
17417		    mode->htotal - mode->hdisplay < 32)
17418			return MODE_H_ILLEGAL;
17419
17420		if (mode->vtotal - mode->vdisplay < 5)
17421			return MODE_V_ILLEGAL;
17422	} else {
17423		if (mode->htotal - mode->hdisplay < 32)
17424			return MODE_H_ILLEGAL;
17425
17426		if (mode->vtotal - mode->vdisplay < 3)
17427			return MODE_V_ILLEGAL;
17428	}
17429
17430	return MODE_OK;
17431}
17432
17433enum drm_mode_status
17434intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv,
17435				const struct drm_display_mode *mode)
17436{
17437	int plane_width_max, plane_height_max;
17438
17439	/*
17440	 * intel_mode_valid() should be
17441	 * sufficient on older platforms.
17442	 */
17443	if (INTEL_GEN(dev_priv) < 9)
17444		return MODE_OK;
17445
17446	/*
17447	 * Most people will probably want a fullscreen
17448	 * plane so let's not advertize modes that are
17449	 * too big for that.
17450	 */
17451	if (INTEL_GEN(dev_priv) >= 11) {
17452		plane_width_max = 5120;
17453		plane_height_max = 4320;
17454	} else {
17455		plane_width_max = 5120;
17456		plane_height_max = 4096;
17457	}
17458
17459	if (mode->hdisplay > plane_width_max)
17460		return MODE_H_ILLEGAL;
17461
17462	if (mode->vdisplay > plane_height_max)
17463		return MODE_V_ILLEGAL;
17464
17465	return MODE_OK;
17466}
17467
/* Mode config vfuncs wiring i915's atomic modeset implementation into DRM. */
static const struct drm_mode_config_funcs intel_mode_funcs = {
	.fb_create = intel_user_framebuffer_create,
	.get_format_info = intel_get_format_info,
	.output_poll_changed = intel_fbdev_output_poll_changed,
	.mode_valid = intel_mode_valid,
	.atomic_check = intel_atomic_check,
	.atomic_commit = intel_atomic_commit,
	.atomic_state_alloc = intel_atomic_state_alloc,
	.atomic_state_clear = intel_atomic_state_clear,
	.atomic_state_free = intel_atomic_state_free,
};
17479
17480/**
17481 * intel_init_display_hooks - initialize the display modesetting hooks
17482 * @dev_priv: device private
17483 */
17484void intel_init_display_hooks(struct drm_i915_private *dev_priv)
17485{
17486	intel_init_cdclk_hooks(dev_priv);
17487
17488	if (INTEL_GEN(dev_priv) >= 9) {
17489		dev_priv->display.get_pipe_config = hsw_get_pipe_config;
17490		dev_priv->display.get_initial_plane_config =
17491			skl_get_initial_plane_config;
17492		dev_priv->display.crtc_compute_clock = hsw_crtc_compute_clock;
17493		dev_priv->display.crtc_enable = hsw_crtc_enable;
17494		dev_priv->display.crtc_disable = hsw_crtc_disable;
17495	} else if (HAS_DDI(dev_priv)) {
17496		dev_priv->display.get_pipe_config = hsw_get_pipe_config;
17497		dev_priv->display.get_initial_plane_config =
17498			i9xx_get_initial_plane_config;
17499		dev_priv->display.crtc_compute_clock =
17500			hsw_crtc_compute_clock;
17501		dev_priv->display.crtc_enable = hsw_crtc_enable;
17502		dev_priv->display.crtc_disable = hsw_crtc_disable;
17503	} else if (HAS_PCH_SPLIT(dev_priv)) {
17504		dev_priv->display.get_pipe_config = ilk_get_pipe_config;
17505		dev_priv->display.get_initial_plane_config =
17506			i9xx_get_initial_plane_config;
17507		dev_priv->display.crtc_compute_clock =
17508			ilk_crtc_compute_clock;
17509		dev_priv->display.crtc_enable = ilk_crtc_enable;
17510		dev_priv->display.crtc_disable = ilk_crtc_disable;
17511	} else if (IS_CHERRYVIEW(dev_priv)) {
17512		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
17513		dev_priv->display.get_initial_plane_config =
17514			i9xx_get_initial_plane_config;
17515		dev_priv->display.crtc_compute_clock = chv_crtc_compute_clock;
17516		dev_priv->display.crtc_enable = valleyview_crtc_enable;
17517		dev_priv->display.crtc_disable = i9xx_crtc_disable;
17518	} else if (IS_VALLEYVIEW(dev_priv)) {
17519		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
17520		dev_priv->display.get_initial_plane_config =
17521			i9xx_get_initial_plane_config;
17522		dev_priv->display.crtc_compute_clock = vlv_crtc_compute_clock;
17523		dev_priv->display.crtc_enable = valleyview_crtc_enable;
17524		dev_priv->display.crtc_disable = i9xx_crtc_disable;
17525	} else if (IS_G4X(dev_priv)) {
17526		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
17527		dev_priv->display.get_initial_plane_config =
17528			i9xx_get_initial_plane_config;
17529		dev_priv->display.crtc_compute_clock = g4x_crtc_compute_clock;
17530		dev_priv->display.crtc_enable = i9xx_crtc_enable;
17531		dev_priv->display.crtc_disable = i9xx_crtc_disable;
17532	} else if (IS_PINEVIEW(dev_priv)) {
17533		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
17534		dev_priv->display.get_initial_plane_config =
17535			i9xx_get_initial_plane_config;
17536		dev_priv->display.crtc_compute_clock = pnv_crtc_compute_clock;
17537		dev_priv->display.crtc_enable = i9xx_crtc_enable;
17538		dev_priv->display.crtc_disable = i9xx_crtc_disable;
17539	} else if (!IS_GEN(dev_priv, 2)) {
17540		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
17541		dev_priv->display.get_initial_plane_config =
17542			i9xx_get_initial_plane_config;
17543		dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
17544		dev_priv->display.crtc_enable = i9xx_crtc_enable;
17545		dev_priv->display.crtc_disable = i9xx_crtc_disable;
17546	} else {
17547		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
17548		dev_priv->display.get_initial_plane_config =
17549			i9xx_get_initial_plane_config;
17550		dev_priv->display.crtc_compute_clock = i8xx_crtc_compute_clock;
17551		dev_priv->display.crtc_enable = i9xx_crtc_enable;
17552		dev_priv->display.crtc_disable = i9xx_crtc_disable;
17553	}
17554
17555	if (IS_GEN(dev_priv, 5)) {
17556		dev_priv->display.fdi_link_train = ilk_fdi_link_train;
17557	} else if (IS_GEN(dev_priv, 6)) {
17558		dev_priv->display.fdi_link_train = gen6_fdi_link_train;
17559	} else if (IS_IVYBRIDGE(dev_priv)) {
17560		/* FIXME: detect B0+ stepping and use auto training */
17561		dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
17562	}
17563
17564	if (INTEL_GEN(dev_priv) >= 9)
17565		dev_priv->display.commit_modeset_enables = skl_commit_modeset_enables;
17566	else
17567		dev_priv->display.commit_modeset_enables = intel_commit_modeset_enables;
17568
17569}
17570
/*
 * Seed the software cdclk and dbuf state from the current hardware
 * configuration so they agree at init time.
 */
void intel_modeset_init_hw(struct drm_i915_private *i915)
{
	struct intel_cdclk_state *cdclk_state =
		to_intel_cdclk_state(i915->cdclk.obj.state);
	struct intel_dbuf_state *dbuf_state =
		to_intel_dbuf_state(i915->dbuf.obj.state);

	intel_update_cdclk(i915);
	intel_dump_cdclk_config(&i915->cdclk.hw, "Current CDCLK");
	/* Both logical and actual start out matching the hw readout. */
	cdclk_state->logical = cdclk_state->actual = i915->cdclk.hw;

	dbuf_state->enabled_slices = i915->dbuf.enabled_slices;
}
17584
/*
 * Pull every crtc and every plane into @state so the subsequent atomic
 * check recomputes watermarks for the whole device.
 *
 * Returns 0 on success or a negative error code (e.g. -EDEADLK from
 * lock contention, which the caller handles by backing off and retrying).
 */
static int sanitize_watermarks_add_affected(struct drm_atomic_state *state)
{
	struct drm_plane *plane;
	struct intel_crtc *crtc;

	for_each_intel_crtc(state->dev, crtc) {
		struct intel_crtc_state *crtc_state;

		crtc_state = intel_atomic_get_crtc_state(state, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		if (crtc_state->hw.active) {
			/*
			 * Preserve the inherited flag to avoid
			 * taking the full modeset path.
			 */
			crtc_state->inherited = true;
		}
	}

	drm_for_each_plane(plane, state->dev) {
		struct drm_plane_state *plane_state;

		plane_state = drm_atomic_get_plane_state(state, plane);
		if (IS_ERR(plane_state))
			return PTR_ERR(plane_state);
	}

	return 0;
}
17616
17617/*
17618 * Calculate what we think the watermarks should be for the state we've read
17619 * out of the hardware and then immediately program those watermarks so that
17620 * we ensure the hardware settings match our internal state.
17621 *
17622 * We can calculate what we think WM's should be by creating a duplicate of the
17623 * current state (which was constructed during hardware readout) and running it
17624 * through the atomic check code to calculate new watermark values in the
17625 * state object.
17626 */
17627static void sanitize_watermarks(struct drm_i915_private *dev_priv)
17628{
17629	struct drm_atomic_state *state;
17630	struct intel_atomic_state *intel_state;
17631	struct intel_crtc *crtc;
17632	struct intel_crtc_state *crtc_state;
17633	struct drm_modeset_acquire_ctx ctx;
17634	int ret;
17635	int i;
17636
17637	/* Only supported on platforms that use atomic watermark design */
17638	if (!dev_priv->display.optimize_watermarks)
17639		return;
17640
17641	state = drm_atomic_state_alloc(&dev_priv->drm);
17642	if (drm_WARN_ON(&dev_priv->drm, !state))
17643		return;
17644
17645	intel_state = to_intel_atomic_state(state);
17646
17647	drm_modeset_acquire_init(&ctx, 0);
17648
17649retry:
17650	state->acquire_ctx = &ctx;
17651
17652	/*
17653	 * Hardware readout is the only time we don't want to calculate
17654	 * intermediate watermarks (since we don't trust the current
17655	 * watermarks).
17656	 */
17657	if (!HAS_GMCH(dev_priv))
17658		intel_state->skip_intermediate_wm = true;
17659
17660	ret = sanitize_watermarks_add_affected(state);
17661	if (ret)
17662		goto fail;
17663
17664	ret = intel_atomic_check(&dev_priv->drm, state);
17665	if (ret)
17666		goto fail;
17667
17668	/* Write calculated watermark values back */
17669	for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
17670		crtc_state->wm.need_postvbl_update = true;
17671		dev_priv->display.optimize_watermarks(intel_state, crtc);
17672
17673		to_intel_crtc_state(crtc->base.state)->wm = crtc_state->wm;
17674	}
17675
17676fail:
17677	if (ret == -EDEADLK) {
17678		drm_atomic_state_clear(state);
17679		drm_modeset_backoff(&ctx);
17680		goto retry;
17681	}
17682
17683	/*
17684	 * If we fail here, it means that the hardware appears to be
17685	 * programmed in a way that shouldn't be possible, given our
17686	 * understanding of watermark requirements.  This might mean a
17687	 * mistake in the hardware readout code or a mistake in the
17688	 * watermark calculations for a given platform.  Raise a WARN
17689	 * so that this is noticeable.
17690	 *
17691	 * If this actually happens, we'll have to just leave the
17692	 * BIOS-programmed watermarks untouched and hope for the best.
17693	 */
17694	drm_WARN(&dev_priv->drm, ret,
17695		 "Could not determine valid watermarks for inherited state\n");
17696
17697	drm_atomic_state_put(state);
17698
17699	drm_modeset_drop_locks(&ctx);
17700	drm_modeset_acquire_fini(&ctx);
17701}
17702
17703static void intel_update_fdi_pll_freq(struct drm_i915_private *dev_priv)
17704{
17705	if (IS_GEN(dev_priv, 5)) {
17706		u32 fdi_pll_clk =
17707			intel_de_read(dev_priv, FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;
17708
17709		dev_priv->fdi_pll_freq = (fdi_pll_clk + 2) * 10000;
17710	} else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv)) {
17711		dev_priv->fdi_pll_freq = 270000;
17712	} else {
17713		return;
17714	}
17715
17716	drm_dbg(&dev_priv->drm, "FDI PLL freq=%d\n", dev_priv->fdi_pll_freq);
17717}
17718
/*
 * Commit the state read out from hardware once at driver load, so all
 * active planes get fully computed states before the first userspace
 * modeset. Uses the standard -EDEADLK backoff/retry dance.
 *
 * Returns 0 on success or a negative error code.
 */
static int intel_initial_commit(struct drm_device *dev)
{
	struct drm_atomic_state *state = NULL;
	struct drm_modeset_acquire_ctx ctx;
	struct intel_crtc *crtc;
	int ret = 0;

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return -ENOMEM;

	drm_modeset_acquire_init(&ctx, 0);

retry:
	state->acquire_ctx = &ctx;

	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			intel_atomic_get_crtc_state(state, crtc);

		if (IS_ERR(crtc_state)) {
			ret = PTR_ERR(crtc_state);
			goto out;
		}

		if (crtc_state->hw.active) {
			/*
			 * We've not yet detected sink capabilities
			 * (audio,infoframes,etc.) and thus we don't want to
			 * force a full state recomputation yet. We want that to
			 * happen only for the first real commit from userspace.
			 * So preserve the inherited flag for the time being.
			 */
			crtc_state->inherited = true;

			ret = drm_atomic_add_affected_planes(state, &crtc->base);
			if (ret)
				goto out;

			/*
			 * FIXME hack to force a LUT update to avoid the
			 * plane update forcing the pipe gamma on without
			 * having a proper LUT loaded. Remove once we
			 * have readout for pipe gamma enable.
			 */
			crtc_state->uapi.color_mgmt_changed = true;

			/*
			 * FIXME hack to force full modeset when DSC is being
			 * used.
			 *
			 * As long as we do not have full state readout and
			 * config comparison of crtc_state->dsc, we have no way
			 * to ensure reliable fastset. Remove once we have
			 * readout for DSC.
			 */
			if (crtc_state->dsc.compression_enable) {
				ret = drm_atomic_add_affected_connectors(state,
									 &crtc->base);
				if (ret)
					goto out;
				crtc_state->uapi.mode_changed = true;
				drm_dbg_kms(dev, "Force full modeset for DSC\n");
			}
		}
	}

	ret = drm_atomic_commit(state);

out:
	/* Lock contention: drop everything and restart the loop. */
	if (ret == -EDEADLK) {
		drm_atomic_state_clear(state);
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	drm_atomic_state_put(state);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}
17802
17803static void intel_mode_config_init(struct drm_i915_private *i915)
17804{
17805	struct drm_mode_config *mode_config = &i915->drm.mode_config;
17806
17807	drm_mode_config_init(&i915->drm);
17808	INIT_LIST_HEAD(&i915->global_obj_list);
17809
17810	mode_config->min_width = 0;
17811	mode_config->min_height = 0;
17812
17813	mode_config->preferred_depth = 24;
17814	mode_config->prefer_shadow = 1;
17815
17816	mode_config->allow_fb_modifiers = true;
17817
17818	mode_config->funcs = &intel_mode_funcs;
17819
17820	/*
17821	 * Maximum framebuffer dimensions, chosen to match
17822	 * the maximum render engine surface size on gen4+.
17823	 */
17824	if (INTEL_GEN(i915) >= 7) {
17825		mode_config->max_width = 16384;
17826		mode_config->max_height = 16384;
17827	} else if (INTEL_GEN(i915) >= 4) {
17828		mode_config->max_width = 8192;
17829		mode_config->max_height = 8192;
17830	} else if (IS_GEN(i915, 3)) {
17831		mode_config->max_width = 4096;
17832		mode_config->max_height = 4096;
17833	} else {
17834		mode_config->max_width = 2048;
17835		mode_config->max_height = 2048;
17836	}
17837
17838	if (IS_I845G(i915) || IS_I865G(i915)) {
17839		mode_config->cursor_width = IS_I845G(i915) ? 64 : 512;
17840		mode_config->cursor_height = 1023;
17841	} else if (IS_I830(i915) || IS_I85X(i915) ||
17842		   IS_I915G(i915) || IS_I915GM(i915)) {
17843		mode_config->cursor_width = 64;
17844		mode_config->cursor_height = 64;
17845	} else {
17846		mode_config->cursor_width = 256;
17847		mode_config->cursor_height = 256;
17848	}
17849}
17850
/* Tear down the global atomic objects, then the DRM mode config itself. */
static void intel_mode_config_cleanup(struct drm_i915_private *i915)
{
	intel_atomic_global_obj_cleanup(i915);
	drm_mode_config_cleanup(&i915->drm);
}
17856
/*
 * Release the resources held by an initial plane config: the (possibly
 * stub) framebuffer and the pinned vma, if any.
 */
static void plane_config_fini(struct intel_initial_plane_config *plane_config)
{
	if (plane_config->fb) {
		struct drm_framebuffer *fb = &plane_config->fb->base;

		/* We may only have the stub and not a full framebuffer */
		if (drm_framebuffer_read_refcount(fb))
			drm_framebuffer_put(fb);
		else
			kfree(fb);
	}

	if (plane_config->vma)
		i915_vma_put(plane_config->vma);
}
17872
17873/* part #1: call before irq install */
17874int intel_modeset_init_noirq(struct drm_i915_private *i915)
17875{
17876	int ret;
17877
17878	if (i915_inject_probe_failure(i915))
17879		return -ENODEV;
17880
17881	if (HAS_DISPLAY(i915)) {
17882		ret = drm_vblank_init(&i915->drm,
17883				      INTEL_NUM_PIPES(i915));
17884		if (ret)
17885			return ret;
17886	}
17887
17888	intel_bios_init(i915);
17889
17890	ret = intel_vga_register(i915);
17891	if (ret)
17892		goto cleanup_bios;
17893
17894	/* FIXME: completely on the wrong abstraction layer */
17895	intel_power_domains_init_hw(i915, false);
17896
17897	intel_csr_ucode_init(i915);
17898
17899	i915->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);
17900	i915->flip_wq = alloc_workqueue("i915_flip", WQ_HIGHPRI |
17901					WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);
17902
17903	intel_mode_config_init(i915);
17904
17905	ret = intel_cdclk_init(i915);
17906	if (ret)
17907		goto cleanup_vga_client_pw_domain_csr;
17908
17909	ret = intel_dbuf_init(i915);
17910	if (ret)
17911		goto cleanup_vga_client_pw_domain_csr;
17912
17913	ret = intel_bw_init(i915);
17914	if (ret)
17915		goto cleanup_vga_client_pw_domain_csr;
17916
17917	init_llist_head(&i915->atomic_helper.free_list);
17918	INIT_WORK(&i915->atomic_helper.free_work,
17919		  intel_atomic_helper_free_state_worker);
17920
17921	intel_init_quirks(i915);
17922
17923	intel_fbc_init(i915);
17924
17925	return 0;
17926
17927cleanup_vga_client_pw_domain_csr:
17928	intel_csr_ucode_fini(i915);
17929	intel_power_domains_driver_remove(i915);
17930	intel_vga_unregister(i915);
17931cleanup_bios:
17932	intel_bios_driver_remove(i915);
17933
17934	return ret;
17935}
17936
/*
 * part #2: call after irq install, but before gem init
 *
 * Second stage of display init: PM/watermarks, GMBUS, crtc creation,
 * shared DPLLs, output setup, hardware state readout and BIOS
 * framebuffer takeover.
 *
 * Returns 0 on success or a negative error code.
 */
int intel_modeset_init_nogem(struct drm_i915_private *i915)
{
	struct drm_device *dev = &i915->drm;
	enum pipe pipe;
	struct intel_crtc *crtc;
	int ret;

	intel_init_pm(i915);

	intel_panel_sanitize_ssc(i915);

	intel_gmbus_setup(i915);

	drm_dbg_kms(&i915->drm, "%d display pipe%s available.\n",
		    INTEL_NUM_PIPES(i915),
		    INTEL_NUM_PIPES(i915) > 1 ? "s" : "");

	if (HAS_DISPLAY(i915)) {
		for_each_pipe(i915, pipe) {
			ret = intel_crtc_init(i915, pipe);
			if (ret) {
				/* Tears down any crtcs created so far too. */
				intel_mode_config_cleanup(i915);
				return ret;
			}
		}
	}

	intel_plane_possible_crtcs_init(i915);
	intel_shared_dpll_init(dev);
	intel_update_fdi_pll_freq(i915);

	intel_update_czclk(i915);
	intel_modeset_init_hw(i915);

	intel_hdcp_component_init(i915);

	if (i915->max_cdclk_freq == 0)
		intel_update_max_cdclk(i915);

	/*
	 * If the platform has HTI, we need to find out whether it has reserved
	 * any display resources before we create our display outputs.
	 */
	if (INTEL_INFO(i915)->display.has_hti)
		i915->hti_state = intel_de_read(i915, HDPORT_STATE);

	/* Just disable it once at startup */
	intel_vga_disable(i915);
	intel_setup_outputs(i915);

	/* Read out the current hardware modeset state under all locks. */
	drm_modeset_lock_all(dev);
	intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx);
	drm_modeset_unlock_all(dev);

	for_each_intel_crtc(dev, crtc) {
		struct intel_initial_plane_config plane_config = {};

		if (!crtc->active)
			continue;

		/*
		 * Note that reserving the BIOS fb up front prevents us
		 * from stuffing other stolen allocations like the ring
		 * on top.  This prevents some ugliness at boot time, and
		 * can even allow for smooth boot transitions if the BIOS
		 * fb is large enough for the active pipe configuration.
		 */
		i915->display.get_initial_plane_config(crtc, &plane_config);

		/*
		 * If the fb is shared between multiple heads, we'll
		 * just get the first one.
		 */
		intel_find_initial_plane_obj(crtc, &plane_config);

		plane_config_fini(&plane_config);
	}

	/*
	 * Make sure hardware watermarks really match the state we read out.
	 * Note that we need to do this after reconstructing the BIOS fb's
	 * since the watermark calculation done here will use pstate->fb.
	 */
	if (!HAS_GMCH(i915))
		sanitize_watermarks(i915);

	return 0;
}
18026
/*
 * part #3: call after gem init
 *
 * Final stage of display init: initial commit of the read-out state,
 * overlay, fbdev, hotplug, IPC and PSR setup.
 *
 * Returns 0 on success or a negative error code.
 */
int intel_modeset_init(struct drm_i915_private *i915)
{
	int ret;

	if (!HAS_DISPLAY(i915))
		return 0;

	/*
	 * Force all active planes to recompute their states. So that on
	 * mode_setcrtc after probe, all the intel_plane_state variables
	 * are already calculated and there is no assert_plane warnings
	 * during bootup.
	 */
	ret = intel_initial_commit(&i915->drm);
	if (ret)
		/* Non-fatal: log and continue bringing the display up. */
		drm_dbg_kms(&i915->drm, "Initial modeset failed, %d\n", ret);

	intel_overlay_setup(i915);

	ret = intel_fbdev_init(&i915->drm);
	if (ret)
		return ret;

	/* Only enable hotplug handling once the fbdev is fully set up. */
	intel_hpd_init(i915);

	intel_init_ipc(i915);

	intel_psr_set_force_mode_changed(i915->psr.dp);

	return 0;
}
18060
/*
 * Force-enable @pipe with a fixed 640x480@60Hz mode. Used by the i830
 * "force quirk" (see the debug message below) which requires the pipe
 * to be running.
 */
void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
	/* 640x480@60Hz, ~25175 kHz */
	struct dpll clock = {
		.m1 = 18,
		.m2 = 7,
		.p1 = 13,
		.p2 = 4,
		.n = 2,
	};
	u32 dpll, fp;
	int i;

	/* Sanity check that the divider choice produces the expected dotclock. */
	drm_WARN_ON(&dev_priv->drm,
		    i9xx_calc_dpll_params(48000, &clock) != 25154);

	drm_dbg_kms(&dev_priv->drm,
		    "enabling pipe %c due to force quirk (vco=%d dot=%d)\n",
		    pipe_name(pipe), clock.vco, clock.dot);

	fp = i9xx_dpll_compute_fp(&clock);
	dpll = DPLL_DVO_2X_MODE |
		DPLL_VGA_MODE_DIS |
		((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) |
		PLL_P2_DIVIDE_BY_4 |
		PLL_REF_INPUT_DREFCLK |
		DPLL_VCO_ENABLE;

	intel_de_write(dev_priv, FP0(pipe), fp);
	intel_de_write(dev_priv, FP1(pipe), fp);

	/* Program the 640x480 timings (registers hold value - 1). */
	intel_de_write(dev_priv, HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16));
	intel_de_write(dev_priv, HBLANK(pipe), (640 - 1) | ((800 - 1) << 16));
	intel_de_write(dev_priv, HSYNC(pipe), (656 - 1) | ((752 - 1) << 16));
	intel_de_write(dev_priv, VTOTAL(pipe), (480 - 1) | ((525 - 1) << 16));
	intel_de_write(dev_priv, VBLANK(pipe), (480 - 1) | ((525 - 1) << 16));
	intel_de_write(dev_priv, VSYNC(pipe), (490 - 1) | ((492 - 1) << 16));
	intel_de_write(dev_priv, PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1));

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	intel_de_write(dev_priv, DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS);
	intel_de_write(dev_priv, DPLL(pipe), dpll);

	/* Wait for the clocks to stabilize. */
	intel_de_posting_read(dev_priv, DPLL(pipe));
	udelay(150);

	/* The pixel multiplier can only be updated once the
	 * DPLL is enabled and the clocks are stable.
	 *
	 * So write it again.
	 */
	intel_de_write(dev_priv, DPLL(pipe), dpll);

	/* We do this three times for luck */
	for (i = 0; i < 3 ; i++) {
		intel_de_write(dev_priv, DPLL(pipe), dpll);
		intel_de_posting_read(dev_priv, DPLL(pipe));
		udelay(150); /* wait for warmup */
	}

	intel_de_write(dev_priv, PIPECONF(pipe),
		       PIPECONF_ENABLE | PIPECONF_PROGRESSIVE);
	intel_de_posting_read(dev_priv, PIPECONF(pipe));

	/* Confirm the pipe actually started scanning out. */
	intel_wait_for_pipe_scanline_moving(crtc);
}
18133
/*
 * Force-disable @pipe (counterpart to i830_enable_pipe()). Warns if any
 * plane or cursor is still enabled, then shuts down the pipe and its DPLL.
 */
void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);

	drm_dbg_kms(&dev_priv->drm, "disabling pipe %c due to force quirk\n",
		    pipe_name(pipe));

	/* No plane or cursor may still be scanning out of the pipe. */
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, DSPCNTR(PLANE_A)) &
		    DISPLAY_PLANE_ENABLE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, DSPCNTR(PLANE_B)) &
		    DISPLAY_PLANE_ENABLE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, DSPCNTR(PLANE_C)) &
		    DISPLAY_PLANE_ENABLE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, CURCNTR(PIPE_A)) & MCURSOR_MODE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, CURCNTR(PIPE_B)) & MCURSOR_MODE);

	intel_de_write(dev_priv, PIPECONF(pipe), 0);
	intel_de_posting_read(dev_priv, PIPECONF(pipe));

	/* Only kill the DPLL once the pipe has stopped scanning out. */
	intel_wait_for_pipe_scanline_stopped(crtc);

	intel_de_write(dev_priv, DPLL(pipe), DPLL_VGA_MODE_DIS);
	intel_de_posting_read(dev_priv, DPLL(pipe));
}
18163
/*
 * On pre-gen4 a primary plane can be attached to a pipe other than its
 * crtc's; detect that during state sanitization and disable such planes.
 */
static void
intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv)
{
	struct intel_crtc *crtc;

	/* Gen4+ is not affected; nothing to sanitize. */
	if (INTEL_GEN(dev_priv) >= 4)
		return;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_plane *plane =
			to_intel_plane(crtc->base.primary);
		struct intel_crtc *plane_crtc;
		enum pipe pipe;

		if (!plane->get_hw_state(plane, &pipe))
			continue;

		if (pipe == crtc->pipe)
			continue;

		drm_dbg_kms(&dev_priv->drm,
			    "[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n",
			    plane->base.base.id, plane->base.name);

		/* Disable via the crtc the plane is actually attached to. */
		plane_crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
		intel_plane_disable_noatomic(plane_crtc, plane);
	}
}
18192
/* Whether any encoder is currently attached to @crtc. */
static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct intel_encoder *encoder;

	/* Return on the first match; we only care whether one exists. */
	for_each_encoder_on_crtc(dev, &crtc->base, encoder)
		return true;

	return false;
}
18203
/* Return the first connector attached to @encoder, or NULL if none. */
static struct intel_connector *intel_encoder_find_connector(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct intel_connector *connector;

	/* Return on the first match; multiple connectors are not expected here. */
	for_each_connector_on_encoder(dev, &encoder->base, connector)
		return connector;

	return NULL;
}
18214
18215static bool has_pch_trancoder(struct drm_i915_private *dev_priv,
18216			      enum pipe pch_transcoder)
18217{
18218	return HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
18219		(HAS_PCH_LPT_H(dev_priv) && pch_transcoder == PIPE_A);
18220}
18221
/*
 * Reset any non-zero frame start delay left behind by the BIOS (a debug
 * feature) back to 0, on both the CPU transcoder and, when a PCH encoder
 * is in use, the PCH transcoder. The field lives in different registers
 * depending on platform generation.
 */
static void intel_sanitize_frame_start_delay(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	if (INTEL_GEN(dev_priv) >= 9 ||
	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
		/* HSW+: the delay is in the per-transcoder CHICKEN_TRANS register */
		i915_reg_t reg = CHICKEN_TRANS(cpu_transcoder);
		u32 val;

		/* DSI transcoders have no CHICKEN_TRANS register */
		if (transcoder_is_dsi(cpu_transcoder))
			return;

		val = intel_de_read(dev_priv, reg);
		val &= ~HSW_FRAME_START_DELAY_MASK;
		val |= HSW_FRAME_START_DELAY(0);
		intel_de_write(dev_priv, reg, val);
	} else {
		/* older platforms: the delay is in PIPECONF */
		i915_reg_t reg = PIPECONF(cpu_transcoder);
		u32 val;

		val = intel_de_read(dev_priv, reg);
		val &= ~PIPECONF_FRAME_START_DELAY_MASK;
		val |= PIPECONF_FRAME_START_DELAY(0);
		intel_de_write(dev_priv, reg, val);
	}

	/* The PCH transcoder carries its own frame start delay field */
	if (!crtc_state->has_pch_encoder)
		return;

	if (HAS_PCH_IBX(dev_priv)) {
		/* IBX: field lives in the PCH transcoder config register */
		i915_reg_t reg = PCH_TRANSCONF(crtc->pipe);
		u32 val;

		val = intel_de_read(dev_priv, reg);
		val &= ~TRANS_FRAME_START_DELAY_MASK;
		val |= TRANS_FRAME_START_DELAY(0);
		intel_de_write(dev_priv, reg, val);
	} else {
		/* CPT+: field lives in TRANS_CHICKEN2 of the PCH transcoder */
		enum pipe pch_transcoder = intel_crtc_pch_transcoder(crtc);
		i915_reg_t reg = TRANS_CHICKEN2(pch_transcoder);
		u32 val;

		val = intel_de_read(dev_priv, reg);
		val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
		val |= TRANS_CHICKEN2_FRAME_START_DELAY(0);
		intel_de_write(dev_priv, reg, val);
	}
}
18272
/*
 * Sanitize one crtc after hw state readout: undo BIOS leftovers (frame
 * start delays, non-primary planes, background color), turn the pipe off
 * entirely if it's active without any encoder, and initialize the fifo
 * underrun reporting bookkeeping.
 */
static void intel_sanitize_crtc(struct intel_crtc *crtc,
				struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);

	if (crtc_state->hw.active) {
		struct intel_plane *plane;

		/* Clear any frame start delays used for debugging left by the BIOS */
		intel_sanitize_frame_start_delay(crtc_state);

		/* Disable everything but the primary plane */
		for_each_intel_plane_on_crtc(dev, crtc, plane) {
			const struct intel_plane_state *plane_state =
				to_intel_plane_state(plane->base.state);

			if (plane_state->uapi.visible &&
			    plane->base.type != DRM_PLANE_TYPE_PRIMARY)
				intel_plane_disable_noatomic(crtc, plane);
		}

		/*
		 * Disable any background color set by the BIOS, but enable the
		 * gamma and CSC to match how we program our planes.
		 */
		if (INTEL_GEN(dev_priv) >= 9)
			intel_de_write(dev_priv, SKL_BOTTOM_COLOR(crtc->pipe),
				       SKL_BOTTOM_COLOR_GAMMA_ENABLE | SKL_BOTTOM_COLOR_CSC_ENABLE);
	}

	/* Adjust the state of the output pipe according to whether we
	 * have active connectors/encoders. */
	if (crtc_state->hw.active && !intel_crtc_has_encoders(crtc))
		intel_crtc_disable_noatomic(crtc, ctx);

	if (crtc_state->hw.active || HAS_GMCH(dev_priv)) {
		/*
		 * We start out with underrun reporting disabled to avoid races.
		 * For correct bookkeeping mark this on active crtcs.
		 *
		 * Also on gmch platforms we dont have any hardware bits to
		 * disable the underrun reporting. Which means we need to start
		 * out with underrun reporting disabled also on inactive pipes,
		 * since otherwise we'll complain about the garbage we read when
		 * e.g. coming up after runtime pm.
		 *
		 * No protection against concurrent access is required - at
		 * worst a fifo underrun happens which also sets this to false.
		 */
		crtc->cpu_fifo_underrun_disabled = true;
		/*
		 * We track the PCH trancoder underrun reporting state
		 * within the crtc. With crtc for pipe A housing the underrun
		 * reporting state for PCH transcoder A, crtc for pipe B housing
		 * it for PCH transcoder B, etc. LPT-H has only PCH transcoder A,
		 * and marking underrun reporting as disabled for the non-existing
		 * PCH transcoders B and C would prevent enabling the south
		 * error interrupt (see cpt_can_enable_serr_int()).
		 */
		if (has_pch_trancoder(dev_priv, crtc->pipe))
			crtc->pch_fifo_underrun_disabled = true;
	}
}
18338
18339static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
18340{
18341	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
18342
18343	/*
18344	 * Some SNB BIOSen (eg. ASUS K53SV) are known to misprogram
18345	 * the hardware when a high res displays plugged in. DPLL P
18346	 * divider is zero, and the pipe timings are bonkers. We'll
18347	 * try to disable everything in that case.
18348	 *
18349	 * FIXME would be nice to be able to sanitize this state
18350	 * without several WARNs, but for now let's take the easy
18351	 * road.
18352	 */
18353	return IS_GEN(dev_priv, 6) &&
18354		crtc_state->hw.active &&
18355		crtc_state->shared_dpll &&
18356		crtc_state->port_clock == 0;
18357}
18358
/*
 * Sanitize one encoder after hw state readout: if the encoder drives
 * active connectors without an active pipe (resume register-restore
 * fallout, or a bogus SNB BIOS DPLL config), disable it manually and
 * clamp the connector state to off. Finally notify opregion of the
 * resulting state.
 */
static void intel_sanitize_encoder(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_connector *connector;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	struct intel_crtc_state *crtc_state = crtc ?
		to_intel_crtc_state(crtc->base.state) : NULL;

	/* We need to check both for a crtc link (meaning that the
	 * encoder is active and trying to read from a pipe) and the
	 * pipe itself being active. */
	bool has_active_crtc = crtc_state &&
		crtc_state->hw.active;

	if (crtc_state && has_bogus_dpll_config(crtc_state)) {
		drm_dbg_kms(&dev_priv->drm,
			    "BIOS has misprogrammed the hardware. Disabling pipe %c\n",
			    pipe_name(crtc->pipe));
		/* treat the pipe as inactive so the encoder gets disabled below */
		has_active_crtc = false;
	}

	connector = intel_encoder_find_connector(encoder);
	if (connector && !has_active_crtc) {
		drm_dbg_kms(&dev_priv->drm,
			    "[ENCODER:%d:%s] has active connectors but no active pipe!\n",
			    encoder->base.base.id,
			    encoder->base.name);

		/* Connector is active, but has no active pipe. This is
		 * fallout from our resume register restoring. Disable
		 * the encoder manually again. */
		if (crtc_state) {
			struct drm_encoder *best_encoder;

			drm_dbg_kms(&dev_priv->drm,
				    "[ENCODER:%d:%s] manually disabled\n",
				    encoder->base.base.id,
				    encoder->base.name);

			/* avoid oopsing in case the hooks consult best_encoder */
			best_encoder = connector->base.state->best_encoder;
			connector->base.state->best_encoder = &encoder->base;

			/* FIXME NULL atomic state passed! */
			if (encoder->disable)
				encoder->disable(NULL, encoder, crtc_state,
						 connector->base.state);
			if (encoder->post_disable)
				encoder->post_disable(NULL, encoder, crtc_state,
						      connector->base.state);

			/* restore the original best_encoder */
			connector->base.state->best_encoder = best_encoder;
		}
		encoder->base.crtc = NULL;

		/* Inconsistent output/port/pipe state happens presumably due to
		 * a bug in one of the get_hw_state functions. Or someplace else
		 * in our code, like the register restore mess on resume. Clamp
		 * things to off as a safer default. */

		connector->base.dpms = DRM_MODE_DPMS_OFF;
		connector->base.encoder = NULL;
	}

	/* notify opregion of the sanitized encoder state */
	intel_opregion_notify_encoder(encoder, connector && has_active_crtc);

	/* gen11+ additionally need their DDI clock/PLL mapping sanitized */
	if (INTEL_GEN(dev_priv) >= 11)
		icl_sanitize_encoder_pll_mapping(encoder);
}
18429
18430/* FIXME read out full plane state for all planes */
18431static void readout_plane_state(struct drm_i915_private *dev_priv)
18432{
18433	struct intel_plane *plane;
18434	struct intel_crtc *crtc;
18435
18436	for_each_intel_plane(&dev_priv->drm, plane) {
18437		struct intel_plane_state *plane_state =
18438			to_intel_plane_state(plane->base.state);
18439		struct intel_crtc_state *crtc_state;
18440		enum pipe pipe = PIPE_A;
18441		bool visible;
18442
18443		visible = plane->get_hw_state(plane, &pipe);
18444
18445		crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
18446		crtc_state = to_intel_crtc_state(crtc->base.state);
18447
18448		intel_set_plane_visible(crtc_state, plane_state, visible);
18449
18450		drm_dbg_kms(&dev_priv->drm,
18451			    "[PLANE:%d:%s] hw state readout: %s, pipe %c\n",
18452			    plane->base.base.id, plane->base.name,
18453			    enableddisabled(visible), pipe_name(pipe));
18454	}
18455
18456	for_each_intel_crtc(&dev_priv->drm, crtc) {
18457		struct intel_crtc_state *crtc_state =
18458			to_intel_crtc_state(crtc->base.state);
18459
18460		fixup_active_planes(crtc_state);
18461	}
18462}
18463
/*
 * Read the current display hardware state (crtcs, planes, dplls,
 * encoders, connectors) into the corresponding atomic state objects,
 * then derive the software state (modes, pixel rate, cdclk/bandwidth
 * requirements) that the atomic core and later commits depend on.
 */
static void intel_modeset_readout_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_cdclk_state *cdclk_state =
		to_intel_cdclk_state(dev_priv->cdclk.obj.state);
	struct intel_dbuf_state *dbuf_state =
		to_intel_dbuf_state(dev_priv->dbuf.obj.state);
	enum pipe pipe;
	struct intel_crtc *crtc;
	struct intel_encoder *encoder;
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;
	u8 active_pipes = 0;

	/* Reset each crtc state and read out the pipe config */
	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		__drm_atomic_helper_crtc_destroy_state(&crtc_state->uapi);
		intel_crtc_free_hw_state(crtc_state);
		intel_crtc_state_reset(crtc_state, crtc);

		crtc_state->hw.active = crtc_state->hw.enable =
			dev_priv->display.get_pipe_config(crtc, crtc_state);

		crtc->base.enabled = crtc_state->hw.enable;
		crtc->active = crtc_state->hw.active;

		if (crtc_state->hw.active)
			active_pipes |= BIT(crtc->pipe);

		drm_dbg_kms(&dev_priv->drm,
			    "[CRTC:%d:%s] hw state readout: %s\n",
			    crtc->base.base.id, crtc->base.name,
			    enableddisabled(crtc_state->hw.active));
	}

	/* Propagate the active pipe mask to all interested state objects */
	dev_priv->active_pipes = cdclk_state->active_pipes =
		dbuf_state->active_pipes = active_pipes;

	readout_plane_state(dev_priv);

	intel_dpll_readout_hw_state(dev_priv);

	/* Read out the encoder -> pipe mapping and the encoder config */
	for_each_intel_encoder(dev, encoder) {
		pipe = 0;

		if (encoder->get_hw_state(encoder, &pipe)) {
			struct intel_crtc_state *crtc_state;

			crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
			crtc_state = to_intel_crtc_state(crtc->base.state);

			encoder->base.crtc = &crtc->base;
			encoder->get_config(encoder, crtc_state);
		} else {
			encoder->base.crtc = NULL;
		}

		drm_dbg_kms(&dev_priv->drm,
			    "[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
			    encoder->base.base.id, encoder->base.name,
			    enableddisabled(encoder->base.crtc),
			    pipe_name(pipe));
	}

	/* Read out the connector state and build the connector/encoder masks */
	drm_connector_list_iter_begin(dev, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		if (connector->get_hw_state(connector)) {
			struct intel_crtc_state *crtc_state;
			struct intel_crtc *crtc;

			connector->base.dpms = DRM_MODE_DPMS_ON;

			encoder = intel_attached_encoder(connector);
			connector->base.encoder = &encoder->base;

			crtc = to_intel_crtc(encoder->base.crtc);
			crtc_state = crtc ? to_intel_crtc_state(crtc->base.state) : NULL;

			if (crtc_state && crtc_state->hw.active) {
				/*
				 * This has to be done during hardware readout
				 * because anything calling .crtc_disable may
				 * rely on the connector_mask being accurate.
				 */
				crtc_state->uapi.connector_mask |=
					drm_connector_mask(&connector->base);
				crtc_state->uapi.encoder_mask |=
					drm_encoder_mask(&encoder->base);
			}
		} else {
			connector->base.dpms = DRM_MODE_DPMS_OFF;
			connector->base.encoder = NULL;
		}
		drm_dbg_kms(&dev_priv->drm,
			    "[CONNECTOR:%d:%s] hw state readout: %s\n",
			    connector->base.base.id, connector->base.name,
			    enableddisabled(connector->base.encoder));
	}
	drm_connector_list_iter_end(&conn_iter);

	/* Derive the software state that depends on the full readout */
	for_each_intel_crtc(dev, crtc) {
		struct intel_bw_state *bw_state =
			to_intel_bw_state(dev_priv->bw_obj.state);
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		struct intel_plane *plane;
		int min_cdclk = 0;

		if (crtc_state->hw.active) {
			struct drm_display_mode *mode = &crtc_state->hw.mode;

			intel_mode_from_pipe_config(&crtc_state->hw.adjusted_mode,
						    crtc_state);

			*mode = crtc_state->hw.adjusted_mode;
			mode->hdisplay = crtc_state->pipe_src_w;
			mode->vdisplay = crtc_state->pipe_src_h;

			/*
			 * The initial mode needs to be set in order to keep
			 * the atomic core happy. It wants a valid mode if the
			 * crtc's enabled, so we do the above call.
			 *
			 * But we don't set all the derived state fully, hence
			 * set a flag to indicate that a full recalculation is
			 * needed on the next commit.
			 */
			crtc_state->inherited = true;

			intel_crtc_compute_pixel_rate(crtc_state);

			intel_crtc_update_active_timings(crtc_state);

			intel_crtc_copy_hw_to_uapi_state(crtc_state);
		}

		for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
			const struct intel_plane_state *plane_state =
				to_intel_plane_state(plane->base.state);

			/*
			 * FIXME don't have the fb yet, so can't
			 * use intel_plane_data_rate() :(
			 */
			if (plane_state->uapi.visible)
				crtc_state->data_rate[plane->id] =
					4 * crtc_state->pixel_rate;
			/*
			 * FIXME don't have the fb yet, so can't
			 * use plane->min_cdclk() :(
			 */
			if (plane_state->uapi.visible && plane->min_cdclk) {
				if (crtc_state->double_wide ||
				    INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
					crtc_state->min_cdclk[plane->id] =
						DIV_ROUND_UP(crtc_state->pixel_rate, 2);
				else
					crtc_state->min_cdclk[plane->id] =
						crtc_state->pixel_rate;
			}
			drm_dbg_kms(&dev_priv->drm,
				    "[PLANE:%d:%s] min_cdclk %d kHz\n",
				    plane->base.base.id, plane->base.name,
				    crtc_state->min_cdclk[plane->id]);
		}

		if (crtc_state->hw.active) {
			min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
			if (drm_WARN_ON(dev, min_cdclk < 0))
				min_cdclk = 0;
		}

		cdclk_state->min_cdclk[crtc->pipe] = min_cdclk;
		cdclk_state->min_voltage_level[crtc->pipe] =
			crtc_state->min_voltage_level;

		intel_bw_crtc_update(bw_state, crtc_state);

		intel_pipe_config_sanity_check(dev_priv, crtc_state);
	}
}
18647
18648static void
18649get_encoder_power_domains(struct drm_i915_private *dev_priv)
18650{
18651	struct intel_encoder *encoder;
18652
18653	for_each_intel_encoder(&dev_priv->drm, encoder) {
18654		struct intel_crtc_state *crtc_state;
18655
18656		if (!encoder->get_power_domains)
18657			continue;
18658
18659		/*
18660		 * MST-primary and inactive encoders don't have a crtc state
18661		 * and neither of these require any power domain references.
18662		 */
18663		if (!encoder->base.crtc)
18664			continue;
18665
18666		crtc_state = to_intel_crtc_state(encoder->base.crtc->state);
18667		encoder->get_power_domains(encoder, crtc_state);
18668	}
18669}
18670
18671static void intel_early_display_was(struct drm_i915_private *dev_priv)
18672{
18673	/*
18674	 * Display WA #1185 WaDisableDARBFClkGating:cnl,glk,icl,ehl,tgl
18675	 * Also known as Wa_14010480278.
18676	 */
18677	if (IS_GEN_RANGE(dev_priv, 10, 12) || IS_GEMINILAKE(dev_priv))
18678		intel_de_write(dev_priv, GEN9_CLKGATE_DIS_0,
18679			       intel_de_read(dev_priv, GEN9_CLKGATE_DIS_0) | DARBF_GATING_DIS);
18680
18681	if (IS_HASWELL(dev_priv)) {
18682		/*
18683		 * WaRsPkgCStateDisplayPMReq:hsw
18684		 * System hang if this isn't done before disabling all planes!
18685		 */
18686		intel_de_write(dev_priv, CHICKEN_PAR1_1,
18687			       intel_de_read(dev_priv, CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
18688	}
18689}
18690
18691static void ibx_sanitize_pch_hdmi_port(struct drm_i915_private *dev_priv,
18692				       enum port port, i915_reg_t hdmi_reg)
18693{
18694	u32 val = intel_de_read(dev_priv, hdmi_reg);
18695
18696	if (val & SDVO_ENABLE ||
18697	    (val & SDVO_PIPE_SEL_MASK) == SDVO_PIPE_SEL(PIPE_A))
18698		return;
18699
18700	drm_dbg_kms(&dev_priv->drm,
18701		    "Sanitizing transcoder select for HDMI %c\n",
18702		    port_name(port));
18703
18704	val &= ~SDVO_PIPE_SEL_MASK;
18705	val |= SDVO_PIPE_SEL(PIPE_A);
18706
18707	intel_de_write(dev_priv, hdmi_reg, val);
18708}
18709
18710static void ibx_sanitize_pch_dp_port(struct drm_i915_private *dev_priv,
18711				     enum port port, i915_reg_t dp_reg)
18712{
18713	u32 val = intel_de_read(dev_priv, dp_reg);
18714
18715	if (val & DP_PORT_EN ||
18716	    (val & DP_PIPE_SEL_MASK) == DP_PIPE_SEL(PIPE_A))
18717		return;
18718
18719	drm_dbg_kms(&dev_priv->drm,
18720		    "Sanitizing transcoder select for DP %c\n",
18721		    port_name(port));
18722
18723	val &= ~DP_PIPE_SEL_MASK;
18724	val |= DP_PIPE_SEL(PIPE_A);
18725
18726	intel_de_write(dev_priv, dp_reg, val);
18727}
18728
18729static void ibx_sanitize_pch_ports(struct drm_i915_private *dev_priv)
18730{
18731	/*
18732	 * The BIOS may select transcoder B on some of the PCH
18733	 * ports even it doesn't enable the port. This would trip
18734	 * assert_pch_dp_disabled() and assert_pch_hdmi_disabled().
18735	 * Sanitize the transcoder select bits to prevent that. We
18736	 * assume that the BIOS never actually enabled the port,
18737	 * because if it did we'd actually have to toggle the port
18738	 * on and back off to make the transcoder A select stick
18739	 * (see. intel_dp_link_down(), intel_disable_hdmi(),
18740	 * intel_disable_sdvo()).
18741	 */
18742	ibx_sanitize_pch_dp_port(dev_priv, PORT_B, PCH_DP_B);
18743	ibx_sanitize_pch_dp_port(dev_priv, PORT_C, PCH_DP_C);
18744	ibx_sanitize_pch_dp_port(dev_priv, PORT_D, PCH_DP_D);
18745
18746	/* PCH SDVOB multiplex with HDMIB */
18747	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_B, PCH_HDMIB);
18748	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_C, PCH_HDMIC);
18749	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_D, PCH_HDMID);
18750}
18751
/*
 * Scan out the current hw modeset state and sanitize it: everything the
 * BIOS (or a previous driver instance) left enabled is either adopted
 * into the atomic state or forced off. The ordering of the sanitize
 * steps below is significant.
 */
static void
intel_modeset_setup_hw_state(struct drm_device *dev,
			     struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_encoder *encoder;
	struct intel_crtc *crtc;
	intel_wakeref_t wakeref;

	/* keep the display powered across the whole readout + sanitize */
	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

	intel_early_display_was(dev_priv);
	intel_modeset_readout_hw_state(dev);

	/* HW state is read out, now we need to sanitize this mess. */

	/* Sanitize the TypeC port mode upfront, encoders depend on this */
	for_each_intel_encoder(dev, encoder) {
		enum phy phy = intel_port_to_phy(dev_priv, encoder->port);

		/* We need to sanitize only the MST primary port. */
		if (encoder->type != INTEL_OUTPUT_DP_MST &&
		    intel_phy_is_tc(dev_priv, phy))
			intel_tc_port_sanitize(enc_to_dig_port(encoder));
	}

	get_encoder_power_domains(dev_priv);

	if (HAS_PCH_IBX(dev_priv))
		ibx_sanitize_pch_ports(dev_priv);

	/*
	 * intel_sanitize_plane_mapping() may need to do vblank
	 * waits, so we need vblank interrupts restored beforehand.
	 */
	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		drm_crtc_vblank_reset(&crtc->base);

		if (crtc_state->hw.active)
			intel_crtc_vblank_on(crtc_state);
	}

	intel_sanitize_plane_mapping(dev_priv);

	for_each_intel_encoder(dev, encoder)
		intel_sanitize_encoder(encoder);

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		intel_sanitize_crtc(crtc, ctx);
		intel_dump_pipe_config(crtc_state, NULL, "[setup_hw_state]");
	}

	intel_modeset_update_connector_atomic_state(dev);

	intel_dpll_sanitize_state(dev_priv);

	/* Read out (and where supported, sanitize) the watermark state */
	if (IS_G4X(dev_priv)) {
		g4x_wm_get_hw_state(dev_priv);
		g4x_wm_sanitize(dev_priv);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		vlv_wm_get_hw_state(dev_priv);
		vlv_wm_sanitize(dev_priv);
	} else if (INTEL_GEN(dev_priv) >= 9) {
		skl_wm_get_hw_state(dev_priv);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		ilk_wm_get_hw_state(dev_priv);
	}

	/* After sanitizing, no crtc should still hold extra power domains */
	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		u64 put_domains;

		put_domains = modeset_get_crtc_power_domains(crtc_state);
		if (drm_WARN_ON(dev, put_domains))
			modeset_put_power_domains(dev_priv, put_domains);
	}

	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
}
18841
/*
 * Restore the display state saved before suspend (if any) under the full
 * set of modeset locks, then drop our reference to the saved state.
 */
void intel_display_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state = dev_priv->modeset_restore_state;
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	/* take ownership of the saved state */
	dev_priv->modeset_restore_state = NULL;
	if (state)
		state->acquire_ctx = &ctx;

	drm_modeset_acquire_init(&ctx, 0);

	/* retry on ww-mutex deadlock until all modeset locks are held */
	while (1) {
		ret = drm_modeset_lock_all_ctx(dev, &ctx);
		if (ret != -EDEADLK)
			break;

		drm_modeset_backoff(&ctx);
	}

	if (!ret)
		ret = __intel_display_resume(dev, state, &ctx);

	intel_enable_ipc(dev_priv);
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	if (ret)
		drm_err(&dev_priv->drm,
			"Restoring old state failed with %i\n", ret);
	if (state)
		drm_atomic_state_put(state);
}
18876
/* Cancel all hpd-triggered work (modeset retries, HDCP) on every connector. */
static void intel_hpd_poll_fini(struct drm_i915_private *i915)
{
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;

	/* Kill all the work that may have been queued by hpd. */
	drm_connector_list_iter_begin(&i915->drm, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		/* only connectors that ever initialized the retry work */
		if (connector->modeset_retry_work.func)
			cancel_work_sync(&connector->modeset_retry_work);
		/* HDCP work only exists when a shim was registered */
		if (connector->hdcp.shim) {
			cancel_delayed_work_sync(&connector->hdcp.check_work);
			cancel_work_sync(&connector->hdcp.prop_work);
		}
	}
	drm_connector_list_iter_end(&conn_iter);
}
18894
/* part #1: call before irq uninstall */
void intel_modeset_driver_remove(struct drm_i915_private *i915)
{
	/* let pending page flips and modesets finish */
	flush_workqueue(i915->flip_wq);
	flush_workqueue(i915->modeset_wq);

	/* all committed-state freeing must have completed by now */
	flush_work(&i915->atomic_helper.free_work);
	drm_WARN_ON(&i915->drm, !llist_empty(&i915->atomic_helper.free_list));
}
18904
/* part #2: call after irq uninstall */
void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915)
{
	/*
	 * Due to the hpd irq storm handling the hotplug work can re-arm the
	 * poll handlers. Hence disable polling after hpd handling is shut down.
	 */
	intel_hpd_poll_fini(i915);

	/*
	 * MST topology needs to be suspended so we don't have any calls to
	 * fbdev after it's finalized. MST will be destroyed later as part of
	 * drm_mode_config_cleanup()
	 */
	intel_dp_mst_suspend(i915);

	/* poll work can call into fbdev, hence clean that up afterwards */
	intel_fbdev_fini(i915);

	intel_unregister_dsm_handler();

	intel_fbc_global_disable(i915);

	/* flush any delayed tasks or pending work */
	flush_scheduled_work();

	intel_hdcp_component_fini(i915);

	/* tears down the remaining crtcs/planes/connectors/encoders */
	intel_mode_config_cleanup(i915);

	intel_overlay_cleanup(i915);

	intel_gmbus_teardown(i915);

	/* safe only after mode config cleanup: no more commits in flight */
	destroy_workqueue(i915->flip_wq);
	destroy_workqueue(i915->modeset_wq);

	intel_fbc_cleanup_cfb(i915);
}
18944
/* part #3: call after gem init */
void intel_modeset_driver_remove_nogem(struct drm_i915_private *i915)
{
	/* release the CSR/DMC firmware */
	intel_csr_ucode_fini(i915);

	intel_power_domains_driver_remove(i915);

	intel_vga_unregister(i915);

	intel_bios_driver_remove(i915);
}
18956
18957#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
18958
/*
 * Snapshot of display registers captured at GPU error time, filled in by
 * intel_display_capture_error_state() and dumped by
 * intel_display_print_error_state().
 */
struct intel_display_error_state {

	u32 power_well_driver;		/* HSW_PWR_WELL_CTL2 (HSW/BDW only) */

	struct intel_cursor_error_state {
		u32 control;		/* CURCNTR */
		u32 position;		/* CURPOS */
		u32 base;		/* CURBASE */
		u32 size;		/* NOTE(review): never written by the capture code in this file */
	} cursor[I915_MAX_PIPES];

	struct intel_pipe_error_state {
		bool power_domain_on;	/* pipe power domain was up at capture time */
		u32 source;		/* PIPESRC */
		u32 stat;		/* PIPESTAT (GMCH platforms only) */
	} pipe[I915_MAX_PIPES];

	struct intel_plane_error_state {
		u32 control;		/* DSPCNTR */
		u32 stride;		/* DSPSTRIDE */
		u32 size;		/* DSPSIZE (gen <= 3) */
		u32 pos;		/* DSPPOS (gen <= 3) */
		u32 addr;		/* DSPADDR (gen <= 7, not HSW) */
		u32 surface;		/* DSPSURF (gen >= 4) */
		u32 tile_offset;	/* DSPTILEOFF (gen >= 4) */
	} plane[I915_MAX_PIPES];

	struct intel_transcoder_error_state {
		bool available;		/* transcoder exists on this platform */
		bool power_domain_on;	/* transcoder power domain was up at capture time */
		enum transcoder cpu_transcoder;

		u32 conf;		/* PIPECONF */

		u32 htotal;
		u32 hblank;
		u32 hsync;
		u32 vtotal;
		u32 vblank;
		u32 vsync;
	} transcoder[5];		/* must match transcoders[] in the capture function */
};
19001
/*
 * Capture a snapshot of display registers for the GPU error state.
 * Runs in error-capture context, hence GFP_ATOMIC. Registers belonging
 * to powered-down domains are skipped rather than read.
 *
 * Returns NULL if there is no display or allocation fails; the caller
 * owns (and must kfree) the returned snapshot.
 */
struct intel_display_error_state *
intel_display_capture_error_state(struct drm_i915_private *dev_priv)
{
	struct intel_display_error_state *error;
	int transcoders[] = {
		TRANSCODER_A,
		TRANSCODER_B,
		TRANSCODER_C,
		TRANSCODER_D,
		TRANSCODER_EDP,
	};
	int i;

	/* keep the snapshot array in sync with the list above */
	BUILD_BUG_ON(ARRAY_SIZE(transcoders) != ARRAY_SIZE(error->transcoder));

	if (!HAS_DISPLAY(dev_priv))
		return NULL;

	error = kzalloc(sizeof(*error), GFP_ATOMIC);
	if (error == NULL)
		return NULL;

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		error->power_well_driver = intel_de_read(dev_priv,
							 HSW_PWR_WELL_CTL2);

	for_each_pipe(dev_priv, i) {
		error->pipe[i].power_domain_on =
			__intel_display_power_is_enabled(dev_priv,
							 POWER_DOMAIN_PIPE(i));
		/* don't touch registers in a powered-down domain */
		if (!error->pipe[i].power_domain_on)
			continue;

		error->cursor[i].control = intel_de_read(dev_priv, CURCNTR(i));
		error->cursor[i].position = intel_de_read(dev_priv, CURPOS(i));
		error->cursor[i].base = intel_de_read(dev_priv, CURBASE(i));

		error->plane[i].control = intel_de_read(dev_priv, DSPCNTR(i));
		error->plane[i].stride = intel_de_read(dev_priv, DSPSTRIDE(i));
		if (INTEL_GEN(dev_priv) <= 3) {
			error->plane[i].size = intel_de_read(dev_priv,
							     DSPSIZE(i));
			error->plane[i].pos = intel_de_read(dev_priv,
							    DSPPOS(i));
		}
		if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
			error->plane[i].addr = intel_de_read(dev_priv,
							     DSPADDR(i));
		if (INTEL_GEN(dev_priv) >= 4) {
			error->plane[i].surface = intel_de_read(dev_priv,
								DSPSURF(i));
			error->plane[i].tile_offset = intel_de_read(dev_priv,
								    DSPTILEOFF(i));
		}

		error->pipe[i].source = intel_de_read(dev_priv, PIPESRC(i));

		if (HAS_GMCH(dev_priv))
			error->pipe[i].stat = intel_de_read(dev_priv,
							    PIPESTAT(i));
	}

	for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
		enum transcoder cpu_transcoder = transcoders[i];

		if (!HAS_TRANSCODER(dev_priv, cpu_transcoder))
			continue;

		error->transcoder[i].available = true;
		error->transcoder[i].power_domain_on =
			__intel_display_power_is_enabled(dev_priv,
				POWER_DOMAIN_TRANSCODER(cpu_transcoder));
		/* don't touch registers in a powered-down domain */
		if (!error->transcoder[i].power_domain_on)
			continue;

		error->transcoder[i].cpu_transcoder = cpu_transcoder;

		error->transcoder[i].conf = intel_de_read(dev_priv,
							  PIPECONF(cpu_transcoder));
		error->transcoder[i].htotal = intel_de_read(dev_priv,
							    HTOTAL(cpu_transcoder));
		error->transcoder[i].hblank = intel_de_read(dev_priv,
							    HBLANK(cpu_transcoder));
		error->transcoder[i].hsync = intel_de_read(dev_priv,
							   HSYNC(cpu_transcoder));
		error->transcoder[i].vtotal = intel_de_read(dev_priv,
							    VTOTAL(cpu_transcoder));
		error->transcoder[i].vblank = intel_de_read(dev_priv,
							    VBLANK(cpu_transcoder));
		error->transcoder[i].vsync = intel_de_read(dev_priv,
							   VSYNC(cpu_transcoder));
	}

	return error;
}
19097
19098#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
19099
/*
 * Dump a display error snapshot (captured above) into the error state
 * buffer. A NULL @error (no display / capture failed) is a no-op.
 */
void
intel_display_print_error_state(struct drm_i915_error_state_buf *m,
				struct intel_display_error_state *error)
{
	struct drm_i915_private *dev_priv = m->i915;
	int i;

	if (!error)
		return;

	err_printf(m, "Num Pipes: %d\n", INTEL_NUM_PIPES(dev_priv));
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		err_printf(m, "PWR_WELL_CTL2: %08x\n",
			   error->power_well_driver);
	for_each_pipe(dev_priv, i) {
		err_printf(m, "Pipe [%d]:\n", i);
		err_printf(m, "  Power: %s\n",
			   onoff(error->pipe[i].power_domain_on));
		err_printf(m, "  SRC: %08x\n", error->pipe[i].source);
		err_printf(m, "  STAT: %08x\n", error->pipe[i].stat);

		err_printf(m, "Plane [%d]:\n", i);
		err_printf(m, "  CNTR: %08x\n", error->plane[i].control);
		err_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
		/* gen-conditional fields mirror the capture code above */
		if (INTEL_GEN(dev_priv) <= 3) {
			err_printf(m, "  SIZE: %08x\n", error->plane[i].size);
			err_printf(m, "  POS: %08x\n", error->plane[i].pos);
		}
		if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
			err_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
		if (INTEL_GEN(dev_priv) >= 4) {
			err_printf(m, "  SURF: %08x\n", error->plane[i].surface);
			err_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
		}

		err_printf(m, "Cursor [%d]:\n", i);
		err_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
		err_printf(m, "  POS: %08x\n", error->cursor[i].position);
		err_printf(m, "  BASE: %08x\n", error->cursor[i].base);
	}

	for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
		/* skip transcoders that don't exist on this platform */
		if (!error->transcoder[i].available)
			continue;

		err_printf(m, "CPU transcoder: %s\n",
			   transcoder_name(error->transcoder[i].cpu_transcoder));
		err_printf(m, "  Power: %s\n",
			   onoff(error->transcoder[i].power_domain_on));
		err_printf(m, "  CONF: %08x\n", error->transcoder[i].conf);
		err_printf(m, "  HTOTAL: %08x\n", error->transcoder[i].htotal);
		err_printf(m, "  HBLANK: %08x\n", error->transcoder[i].hblank);
		err_printf(m, "  HSYNC: %08x\n", error->transcoder[i].hsync);
		err_printf(m, "  VTOTAL: %08x\n", error->transcoder[i].vtotal);
		err_printf(m, "  VBLANK: %08x\n", error->transcoder[i].vblank);
		err_printf(m, "  VSYNC: %08x\n", error->transcoder[i].vsync);
	}
}
19158
19159#endif
19160