1/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
2 */
3/*
4 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
5 * All Rights Reserved.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation the rights to use, copy, modify, merge, publish,
11 * distribute, sub license, and/or sell copies of the Software, and to
12 * permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the
16 * next paragraph) shall be included in all copies or substantial portions
17 * of the Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 *
27 */
28
29#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30
31#include <linux/circ_buf.h>
32#include <linux/slab.h>
33#include <linux/sysrq.h>
34
35#include <drm/drm_drv.h>
36#include <drm/drm_irq.h>
37
38#include "display/intel_display_types.h"
39#include "display/intel_fifo_underrun.h"
40#include "display/intel_hotplug.h"
41#include "display/intel_lpe_audio.h"
42#include "display/intel_psr.h"
43
44#include "gt/intel_breadcrumbs.h"
45#include "gt/intel_gt.h"
46#include "gt/intel_gt_irq.h"
47#include "gt/intel_gt_pm_irq.h"
48#include "gt/intel_rps.h"
49
50#include "i915_drv.h"
51#include "i915_irq.h"
52#include "i915_trace.h"
53#include "intel_pm.h"
54
55/**
56 * DOC: interrupt handling
57 *
58 * These functions provide the basic support for enabling and disabling the
59 * interrupt handling support. There's a lot more functionality in i915_irq.c
60 * and related files, but that will be described in separate chapters.
61 */
62
63typedef bool (*long_pulse_detect_func)(enum hpd_pin pin, u32 val);
64
65static const u32 hpd_ilk[HPD_NUM_PINS] = {
66	[HPD_PORT_A] = DE_DP_A_HOTPLUG,
67};
68
69static const u32 hpd_ivb[HPD_NUM_PINS] = {
70	[HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
71};
72
73static const u32 hpd_bdw[HPD_NUM_PINS] = {
74	[HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG,
75};
76
77static const u32 hpd_ibx[HPD_NUM_PINS] = {
78	[HPD_CRT] = SDE_CRT_HOTPLUG,
79	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
80	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
81	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
82	[HPD_PORT_D] = SDE_PORTD_HOTPLUG,
83};
84
85static const u32 hpd_cpt[HPD_NUM_PINS] = {
86	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
87	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
88	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
89	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
90	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
91};
92
93static const u32 hpd_spt[HPD_NUM_PINS] = {
94	[HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
95	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
96	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
97	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
98	[HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT,
99};
100
101static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
102	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
103	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
104	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
105	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
106	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
107	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN,
108};
109
110static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
111	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
112	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
113	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
114	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
115	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
116	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS,
117};
118
119static const u32 hpd_status_i915[HPD_NUM_PINS] = {
120	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
121	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
122	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
123	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
124	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
125	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS,
126};
127
128static const u32 hpd_bxt[HPD_NUM_PINS] = {
129	[HPD_PORT_A] = BXT_DE_PORT_HP_DDIA,
130	[HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
131	[HPD_PORT_C] = BXT_DE_PORT_HP_DDIC,
132};
133
134static const u32 hpd_gen11[HPD_NUM_PINS] = {
135	[HPD_PORT_TC1] = GEN11_TC_HOTPLUG(PORT_TC1) | GEN11_TBT_HOTPLUG(PORT_TC1),
136	[HPD_PORT_TC2] = GEN11_TC_HOTPLUG(PORT_TC2) | GEN11_TBT_HOTPLUG(PORT_TC2),
137	[HPD_PORT_TC3] = GEN11_TC_HOTPLUG(PORT_TC3) | GEN11_TBT_HOTPLUG(PORT_TC3),
138	[HPD_PORT_TC4] = GEN11_TC_HOTPLUG(PORT_TC4) | GEN11_TBT_HOTPLUG(PORT_TC4),
139	[HPD_PORT_TC5] = GEN11_TC_HOTPLUG(PORT_TC5) | GEN11_TBT_HOTPLUG(PORT_TC5),
140	[HPD_PORT_TC6] = GEN11_TC_HOTPLUG(PORT_TC6) | GEN11_TBT_HOTPLUG(PORT_TC6),
141};
142
143static const u32 hpd_icp[HPD_NUM_PINS] = {
144	[HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(PORT_A),
145	[HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(PORT_B),
146	[HPD_PORT_C] = SDE_DDI_HOTPLUG_ICP(PORT_C),
147	[HPD_PORT_TC1] = SDE_TC_HOTPLUG_ICP(PORT_TC1),
148	[HPD_PORT_TC2] = SDE_TC_HOTPLUG_ICP(PORT_TC2),
149	[HPD_PORT_TC3] = SDE_TC_HOTPLUG_ICP(PORT_TC3),
150	[HPD_PORT_TC4] = SDE_TC_HOTPLUG_ICP(PORT_TC4),
151	[HPD_PORT_TC5] = SDE_TC_HOTPLUG_ICP(PORT_TC5),
152	[HPD_PORT_TC6] = SDE_TC_HOTPLUG_ICP(PORT_TC6),
153};
154
155static void intel_hpd_init_pins(struct drm_i915_private *dev_priv)
156{
157	struct i915_hotplug *hpd = &dev_priv->hotplug;
158
159	if (HAS_GMCH(dev_priv)) {
160		if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
161		    IS_CHERRYVIEW(dev_priv))
162			hpd->hpd = hpd_status_g4x;
163		else
164			hpd->hpd = hpd_status_i915;
165		return;
166	}
167
168	if (INTEL_GEN(dev_priv) >= 11)
169		hpd->hpd = hpd_gen11;
170	else if (IS_GEN9_LP(dev_priv))
171		hpd->hpd = hpd_bxt;
172	else if (INTEL_GEN(dev_priv) >= 8)
173		hpd->hpd = hpd_bdw;
174	else if (INTEL_GEN(dev_priv) >= 7)
175		hpd->hpd = hpd_ivb;
176	else
177		hpd->hpd = hpd_ilk;
178
179	if (!HAS_PCH_SPLIT(dev_priv) || HAS_PCH_NOP(dev_priv))
180		return;
181
182	if (HAS_PCH_TGP(dev_priv) || HAS_PCH_JSP(dev_priv) ||
183	    HAS_PCH_ICP(dev_priv) || HAS_PCH_MCC(dev_priv))
184		hpd->pch_hpd = hpd_icp;
185	else if (HAS_PCH_CNP(dev_priv) || HAS_PCH_SPT(dev_priv))
186		hpd->pch_hpd = hpd_spt;
187	else if (HAS_PCH_LPT(dev_priv) || HAS_PCH_CPT(dev_priv))
188		hpd->pch_hpd = hpd_cpt;
189	else if (HAS_PCH_IBX(dev_priv))
190		hpd->pch_hpd = hpd_ibx;
191	else
192		MISSING_CASE(INTEL_PCH_TYPE(dev_priv));
193}
194
195static void
196intel_handle_vblank(struct drm_i915_private *dev_priv, enum pipe pipe)
197{
198	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
199
200	drm_crtc_handle_vblank(&crtc->base);
201}
202
203void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr,
204		    i915_reg_t iir, i915_reg_t ier)
205{
206	intel_uncore_write(uncore, imr, 0xffffffff);
207	intel_uncore_posting_read(uncore, imr);
208
209	intel_uncore_write(uncore, ier, 0);
210
211	/* IIR can theoretically queue up two events. Be paranoid. */
212	intel_uncore_write(uncore, iir, 0xffffffff);
213	intel_uncore_posting_read(uncore, iir);
214	intel_uncore_write(uncore, iir, 0xffffffff);
215	intel_uncore_posting_read(uncore, iir);
216}
217
218void gen2_irq_reset(struct intel_uncore *uncore)
219{
220	intel_uncore_write16(uncore, GEN2_IMR, 0xffff);
221	intel_uncore_posting_read16(uncore, GEN2_IMR);
222
223	intel_uncore_write16(uncore, GEN2_IER, 0);
224
225	/* IIR can theoretically queue up two events. Be paranoid. */
226	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
227	intel_uncore_posting_read16(uncore, GEN2_IIR);
228	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
229	intel_uncore_posting_read16(uncore, GEN2_IIR);
230}
231
232/*
233 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
234 */
235static void gen3_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg)
236{
237	u32 val = intel_uncore_read(uncore, reg);
238
239	if (val == 0)
240		return;
241
242	drm_WARN(&uncore->i915->drm, 1,
243		 "Interrupt register 0x%x is not zero: 0x%08x\n",
244		 i915_mmio_reg_offset(reg), val);
245	intel_uncore_write(uncore, reg, 0xffffffff);
246	intel_uncore_posting_read(uncore, reg);
247	intel_uncore_write(uncore, reg, 0xffffffff);
248	intel_uncore_posting_read(uncore, reg);
249}
250
251static void gen2_assert_iir_is_zero(struct intel_uncore *uncore)
252{
253	u16 val = intel_uncore_read16(uncore, GEN2_IIR);
254
255	if (val == 0)
256		return;
257
258	drm_WARN(&uncore->i915->drm, 1,
259		 "Interrupt register 0x%x is not zero: 0x%08x\n",
260		 i915_mmio_reg_offset(GEN2_IIR), val);
261	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
262	intel_uncore_posting_read16(uncore, GEN2_IIR);
263	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
264	intel_uncore_posting_read16(uncore, GEN2_IIR);
265}
266
267void gen3_irq_init(struct intel_uncore *uncore,
268		   i915_reg_t imr, u32 imr_val,
269		   i915_reg_t ier, u32 ier_val,
270		   i915_reg_t iir)
271{
272	gen3_assert_iir_is_zero(uncore, iir);
273
274	intel_uncore_write(uncore, ier, ier_val);
275	intel_uncore_write(uncore, imr, imr_val);
276	intel_uncore_posting_read(uncore, imr);
277}
278
279void gen2_irq_init(struct intel_uncore *uncore,
280		   u32 imr_val, u32 ier_val)
281{
282	gen2_assert_iir_is_zero(uncore);
283
284	intel_uncore_write16(uncore, GEN2_IER, ier_val);
285	intel_uncore_write16(uncore, GEN2_IMR, imr_val);
286	intel_uncore_posting_read16(uncore, GEN2_IMR);
287}
288
289/* For display hotplug interrupt */
290static inline void
291i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
292				     u32 mask,
293				     u32 bits)
294{
295	u32 val;
296
297	lockdep_assert_held(&dev_priv->irq_lock);
298	drm_WARN_ON(&dev_priv->drm, bits & ~mask);
299
300	val = I915_READ(PORT_HOTPLUG_EN);
301	val &= ~mask;
302	val |= bits;
303	I915_WRITE(PORT_HOTPLUG_EN, val);
304}
305
306/**
307 * i915_hotplug_interrupt_update - update hotplug interrupt enable
308 * @dev_priv: driver private
309 * @mask: bits to update
310 * @bits: bits to enable
311 * NOTE: the HPD enable bits are modified both inside and outside
312 * of an interrupt context. To avoid that read-modify-write cycles
313 * interfer, these bits are protected by a spinlock. Since this
314 * function is usually not called from a context where the lock is
315 * held already, this function acquires the lock itself. A non-locking
316 * version is also available.
317 */
318void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
319				   u32 mask,
320				   u32 bits)
321{
322	spin_lock_irq(&dev_priv->irq_lock);
323	i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
324	spin_unlock_irq(&dev_priv->irq_lock);
325}
326
327/**
328 * ilk_update_display_irq - update DEIMR
329 * @dev_priv: driver private
330 * @interrupt_mask: mask of interrupt bits to update
331 * @enabled_irq_mask: mask of interrupt bits to enable
332 */
333void ilk_update_display_irq(struct drm_i915_private *dev_priv,
334			    u32 interrupt_mask,
335			    u32 enabled_irq_mask)
336{
337	u32 new_val;
338
339	lockdep_assert_held(&dev_priv->irq_lock);
340
341	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);
342
343	if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
344		return;
345
346	new_val = dev_priv->irq_mask;
347	new_val &= ~interrupt_mask;
348	new_val |= (~enabled_irq_mask & interrupt_mask);
349
350	if (new_val != dev_priv->irq_mask) {
351		dev_priv->irq_mask = new_val;
352		I915_WRITE(DEIMR, dev_priv->irq_mask);
353		POSTING_READ(DEIMR);
354	}
355}
356
357/**
358 * bdw_update_port_irq - update DE port interrupt
359 * @dev_priv: driver private
360 * @interrupt_mask: mask of interrupt bits to update
361 * @enabled_irq_mask: mask of interrupt bits to enable
362 */
363static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
364				u32 interrupt_mask,
365				u32 enabled_irq_mask)
366{
367	u32 new_val;
368	u32 old_val;
369
370	lockdep_assert_held(&dev_priv->irq_lock);
371
372	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);
373
374	if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
375		return;
376
377	old_val = I915_READ(GEN8_DE_PORT_IMR);
378
379	new_val = old_val;
380	new_val &= ~interrupt_mask;
381	new_val |= (~enabled_irq_mask & interrupt_mask);
382
383	if (new_val != old_val) {
384		I915_WRITE(GEN8_DE_PORT_IMR, new_val);
385		POSTING_READ(GEN8_DE_PORT_IMR);
386	}
387}
388
389/**
390 * bdw_update_pipe_irq - update DE pipe interrupt
391 * @dev_priv: driver private
392 * @pipe: pipe whose interrupt to update
393 * @interrupt_mask: mask of interrupt bits to update
394 * @enabled_irq_mask: mask of interrupt bits to enable
395 */
396void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
397			 enum pipe pipe,
398			 u32 interrupt_mask,
399			 u32 enabled_irq_mask)
400{
401	u32 new_val;
402
403	lockdep_assert_held(&dev_priv->irq_lock);
404
405	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);
406
407	if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
408		return;
409
410	new_val = dev_priv->de_irq_mask[pipe];
411	new_val &= ~interrupt_mask;
412	new_val |= (~enabled_irq_mask & interrupt_mask);
413
414	if (new_val != dev_priv->de_irq_mask[pipe]) {
415		dev_priv->de_irq_mask[pipe] = new_val;
416		I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
417		POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
418	}
419}
420
421/**
422 * ibx_display_interrupt_update - update SDEIMR
423 * @dev_priv: driver private
424 * @interrupt_mask: mask of interrupt bits to update
425 * @enabled_irq_mask: mask of interrupt bits to enable
426 */
427void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
428				  u32 interrupt_mask,
429				  u32 enabled_irq_mask)
430{
431	u32 sdeimr = I915_READ(SDEIMR);
432	sdeimr &= ~interrupt_mask;
433	sdeimr |= (~enabled_irq_mask & interrupt_mask);
434
435	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);
436
437	lockdep_assert_held(&dev_priv->irq_lock);
438
439	if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
440		return;
441
442	I915_WRITE(SDEIMR, sdeimr);
443	POSTING_READ(SDEIMR);
444}
445
446u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
447			      enum pipe pipe)
448{
449	u32 status_mask = dev_priv->pipestat_irq_mask[pipe];
450	u32 enable_mask = status_mask << 16;
451
452	lockdep_assert_held(&dev_priv->irq_lock);
453
454	if (INTEL_GEN(dev_priv) < 5)
455		goto out;
456
457	/*
458	 * On pipe A we don't support the PSR interrupt yet,
459	 * on pipe B and C the same bit MBZ.
460	 */
461	if (drm_WARN_ON_ONCE(&dev_priv->drm,
462			     status_mask & PIPE_A_PSR_STATUS_VLV))
463		return 0;
464	/*
465	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
466	 * A the same bit is for perf counters which we don't use either.
467	 */
468	if (drm_WARN_ON_ONCE(&dev_priv->drm,
469			     status_mask & PIPE_B_PSR_STATUS_VLV))
470		return 0;
471
472	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
473			 SPRITE0_FLIP_DONE_INT_EN_VLV |
474			 SPRITE1_FLIP_DONE_INT_EN_VLV);
475	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
476		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
477	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
478		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;
479
480out:
481	drm_WARN_ONCE(&dev_priv->drm,
482		      enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
483		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
484		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
485		      pipe_name(pipe), enable_mask, status_mask);
486
487	return enable_mask;
488}
489
490void i915_enable_pipestat(struct drm_i915_private *dev_priv,
491			  enum pipe pipe, u32 status_mask)
492{
493	i915_reg_t reg = PIPESTAT(pipe);
494	u32 enable_mask;
495
496	drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
497		      "pipe %c: status_mask=0x%x\n",
498		      pipe_name(pipe), status_mask);
499
500	lockdep_assert_held(&dev_priv->irq_lock);
501	drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));
502
503	if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == status_mask)
504		return;
505
506	dev_priv->pipestat_irq_mask[pipe] |= status_mask;
507	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
508
509	I915_WRITE(reg, enable_mask | status_mask);
510	POSTING_READ(reg);
511}
512
513void i915_disable_pipestat(struct drm_i915_private *dev_priv,
514			   enum pipe pipe, u32 status_mask)
515{
516	i915_reg_t reg = PIPESTAT(pipe);
517	u32 enable_mask;
518
519	drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
520		      "pipe %c: status_mask=0x%x\n",
521		      pipe_name(pipe), status_mask);
522
523	lockdep_assert_held(&dev_priv->irq_lock);
524	drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));
525
526	if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == 0)
527		return;
528
529	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
530	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
531
532	I915_WRITE(reg, enable_mask | status_mask);
533	POSTING_READ(reg);
534}
535
536static bool i915_has_asle(struct drm_i915_private *dev_priv)
537{
538	if (!dev_priv->opregion.asle)
539		return false;
540
541	return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
542}
543
544/**
545 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
546 * @dev_priv: i915 device private
547 */
548static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
549{
550	if (!i915_has_asle(dev_priv))
551		return;
552
553	spin_lock_irq(&dev_priv->irq_lock);
554
555	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
556	if (INTEL_GEN(dev_priv) >= 4)
557		i915_enable_pipestat(dev_priv, PIPE_A,
558				     PIPE_LEGACY_BLC_EVENT_STATUS);
559
560	spin_unlock_irq(&dev_priv->irq_lock);
561}
562
563/*
564 * This timing diagram depicts the video signal in and
565 * around the vertical blanking period.
566 *
567 * Assumptions about the fictitious mode used in this example:
568 *  vblank_start >= 3
569 *  vsync_start = vblank_start + 1
570 *  vsync_end = vblank_start + 2
571 *  vtotal = vblank_start + 3
572 *
573 *           start of vblank:
574 *           latch double buffered registers
575 *           increment frame counter (ctg+)
576 *           generate start of vblank interrupt (gen4+)
577 *           |
578 *           |          frame start:
579 *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
580 *           |          may be shifted forward 1-3 extra lines via PIPECONF
581 *           |          |
582 *           |          |  start of vsync:
583 *           |          |  generate vsync interrupt
584 *           |          |  |
585 * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
586 *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
587 * ----va---> <-----------------vb--------------------> <--------va-------------
588 *       |          |       <----vs----->                     |
589 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
590 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
591 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
592 *       |          |                                         |
593 *       last visible pixel                                   first visible pixel
594 *                  |                                         increment frame counter (gen3/4)
595 *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
596 *
597 * x  = horizontal active
598 * _  = horizontal blanking
599 * hs = horizontal sync
600 * va = vertical active
601 * vb = vertical blanking
602 * vs = vertical sync
603 * vbs = vblank_start (number)
604 *
605 * Summary:
606 * - most events happen at the start of horizontal sync
607 * - frame start happens at the start of horizontal blank, 1-4 lines
608 *   (depending on PIPECONF settings) after the start of vblank
609 * - gen3/4 pixel and frame counter are synchronized with the start
610 *   of horizontal active on the first line of vertical active
611 */
612
613/* Called from drm generic code, passed a 'crtc', which
614 * we use as a pipe index
615 */
616u32 i915_get_vblank_counter(struct drm_crtc *crtc)
617{
618	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
619	struct drm_vblank_crtc *vblank = &dev_priv->drm.vblank[drm_crtc_index(crtc)];
620	const struct drm_display_mode *mode = &vblank->hwmode;
621	enum pipe pipe = to_intel_crtc(crtc)->pipe;
622	i915_reg_t high_frame, low_frame;
623	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
624	unsigned long irqflags;
625
626	/*
627	 * On i965gm TV output the frame counter only works up to
628	 * the point when we enable the TV encoder. After that the
629	 * frame counter ceases to work and reads zero. We need a
630	 * vblank wait before enabling the TV encoder and so we
631	 * have to enable vblank interrupts while the frame counter
632	 * is still in a working state. However the core vblank code
633	 * does not like us returning non-zero frame counter values
634	 * when we've told it that we don't have a working frame
635	 * counter. Thus we must stop non-zero values leaking out.
636	 */
637	if (!vblank->max_vblank_count)
638		return 0;
639
640	htotal = mode->crtc_htotal;
641	hsync_start = mode->crtc_hsync_start;
642	vbl_start = mode->crtc_vblank_start;
643	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
644		vbl_start = DIV_ROUND_UP(vbl_start, 2);
645
646	/* Convert to pixel count */
647	vbl_start *= htotal;
648
649	/* Start of vblank event occurs at start of hsync */
650	vbl_start -= htotal - hsync_start;
651
652	high_frame = PIPEFRAME(pipe);
653	low_frame = PIPEFRAMEPIXEL(pipe);
654
655	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
656
657	/*
658	 * High & low register fields aren't synchronized, so make sure
659	 * we get a low value that's stable across two reads of the high
660	 * register.
661	 */
662	do {
663		high1 = intel_de_read_fw(dev_priv, high_frame) & PIPE_FRAME_HIGH_MASK;
664		low   = intel_de_read_fw(dev_priv, low_frame);
665		high2 = intel_de_read_fw(dev_priv, high_frame) & PIPE_FRAME_HIGH_MASK;
666	} while (high1 != high2);
667
668	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
669
670	high1 >>= PIPE_FRAME_HIGH_SHIFT;
671	pixel = low & PIPE_PIXEL_MASK;
672	low >>= PIPE_FRAME_LOW_SHIFT;
673
674	/*
675	 * The frame counter increments at beginning of active.
676	 * Cook up a vblank counter by also checking the pixel
677	 * counter against vblank start.
678	 */
679	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
680}
681
682u32 g4x_get_vblank_counter(struct drm_crtc *crtc)
683{
684	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
685	enum pipe pipe = to_intel_crtc(crtc)->pipe;
686
687	return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
688}
689
690/*
691 * On certain encoders on certain platforms, pipe
692 * scanline register will not work to get the scanline,
693 * since the timings are driven from the PORT or issues
694 * with scanline register updates.
695 * This function will use Framestamp and current
696 * timestamp registers to calculate the scanline.
697 */
698static u32 __intel_get_crtc_scanline_from_timestamp(struct intel_crtc *crtc)
699{
700	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
701	struct drm_vblank_crtc *vblank =
702		&crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
703	const struct drm_display_mode *mode = &vblank->hwmode;
704	u32 vblank_start = mode->crtc_vblank_start;
705	u32 vtotal = mode->crtc_vtotal;
706	u32 htotal = mode->crtc_htotal;
707	u32 clock = mode->crtc_clock;
708	u32 scanline, scan_prev_time, scan_curr_time, scan_post_time;
709
710	/*
711	 * To avoid the race condition where we might cross into the
712	 * next vblank just between the PIPE_FRMTMSTMP and TIMESTAMP_CTR
713	 * reads. We make sure we read PIPE_FRMTMSTMP and TIMESTAMP_CTR
714	 * during the same frame.
715	 */
716	do {
717		/*
718		 * This field provides read back of the display
719		 * pipe frame time stamp. The time stamp value
720		 * is sampled at every start of vertical blank.
721		 */
722		scan_prev_time = intel_de_read_fw(dev_priv,
723						  PIPE_FRMTMSTMP(crtc->pipe));
724
725		/*
726		 * The TIMESTAMP_CTR register has the current
727		 * time stamp value.
728		 */
729		scan_curr_time = intel_de_read_fw(dev_priv, IVB_TIMESTAMP_CTR);
730
731		scan_post_time = intel_de_read_fw(dev_priv,
732						  PIPE_FRMTMSTMP(crtc->pipe));
733	} while (scan_post_time != scan_prev_time);
734
735	scanline = div_u64(mul_u32_u32(scan_curr_time - scan_prev_time,
736					clock), 1000 * htotal);
737	scanline = min(scanline, vtotal - 1);
738	scanline = (scanline + vblank_start) % vtotal;
739
740	return scanline;
741}
742
743/*
744 * intel_de_read_fw(), only for fast reads of display block, no need for
745 * forcewake etc.
746 */
747static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
748{
749	struct drm_device *dev = crtc->base.dev;
750	struct drm_i915_private *dev_priv = to_i915(dev);
751	const struct drm_display_mode *mode;
752	struct drm_vblank_crtc *vblank;
753	enum pipe pipe = crtc->pipe;
754	int position, vtotal;
755
756	if (!crtc->active)
757		return -1;
758
759	vblank = &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
760	mode = &vblank->hwmode;
761
762	if (crtc->mode_flags & I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP)
763		return __intel_get_crtc_scanline_from_timestamp(crtc);
764
765	vtotal = mode->crtc_vtotal;
766	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
767		vtotal /= 2;
768
769	if (IS_GEN(dev_priv, 2))
770		position = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
771	else
772		position = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
773
774	/*
775	 * On HSW, the DSL reg (0x70000) appears to return 0 if we
776	 * read it just before the start of vblank.  So try it again
777	 * so we don't accidentally end up spanning a vblank frame
778	 * increment, causing the pipe_update_end() code to squak at us.
779	 *
780	 * The nature of this problem means we can't simply check the ISR
781	 * bit and return the vblank start value; nor can we use the scanline
782	 * debug register in the transcoder as it appears to have the same
783	 * problem.  We may need to extend this to include other platforms,
784	 * but so far testing only shows the problem on HSW.
785	 */
786	if (HAS_DDI(dev_priv) && !position) {
787		int i, temp;
788
789		for (i = 0; i < 100; i++) {
790			udelay(1);
791			temp = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
792			if (temp != position) {
793				position = temp;
794				break;
795			}
796		}
797	}
798
799	/*
800	 * See update_scanline_offset() for the details on the
801	 * scanline_offset adjustment.
802	 */
803	return (position + crtc->scanline_offset) % vtotal;
804}
805
806static bool i915_get_crtc_scanoutpos(struct drm_crtc *_crtc,
807				     bool in_vblank_irq,
808				     int *vpos, int *hpos,
809				     ktime_t *stime, ktime_t *etime,
810				     const struct drm_display_mode *mode)
811{
812	struct drm_device *dev = _crtc->dev;
813	struct drm_i915_private *dev_priv = to_i915(dev);
814	struct intel_crtc *crtc = to_intel_crtc(_crtc);
815	enum pipe pipe = crtc->pipe;
816	int position;
817	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
818	unsigned long irqflags;
819	bool use_scanline_counter = INTEL_GEN(dev_priv) >= 5 ||
820		IS_G4X(dev_priv) || IS_GEN(dev_priv, 2) ||
821		crtc->mode_flags & I915_MODE_FLAG_USE_SCANLINE_COUNTER;
822
823	if (drm_WARN_ON(&dev_priv->drm, !mode->crtc_clock)) {
824		drm_dbg(&dev_priv->drm,
825			"trying to get scanoutpos for disabled "
826			"pipe %c\n", pipe_name(pipe));
827		return false;
828	}
829
830	htotal = mode->crtc_htotal;
831	hsync_start = mode->crtc_hsync_start;
832	vtotal = mode->crtc_vtotal;
833	vbl_start = mode->crtc_vblank_start;
834	vbl_end = mode->crtc_vblank_end;
835
836	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
837		vbl_start = DIV_ROUND_UP(vbl_start, 2);
838		vbl_end /= 2;
839		vtotal /= 2;
840	}
841
842	/*
843	 * Lock uncore.lock, as we will do multiple timing critical raw
844	 * register reads, potentially with preemption disabled, so the
845	 * following code must not block on uncore.lock.
846	 */
847	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
848
849	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
850
851	/* Get optional system timestamp before query. */
852	if (stime)
853		*stime = ktime_get();
854
855	if (use_scanline_counter) {
856		/* No obvious pixelcount register. Only query vertical
857		 * scanout position from Display scan line register.
858		 */
859		position = __intel_get_crtc_scanline(crtc);
860	} else {
861		/* Have access to pixelcount since start of frame.
862		 * We can split this into vertical and horizontal
863		 * scanout position.
864		 */
865		position = (intel_de_read_fw(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
866
867		/* convert to pixel counts */
868		vbl_start *= htotal;
869		vbl_end *= htotal;
870		vtotal *= htotal;
871
872		/*
873		 * In interlaced modes, the pixel counter counts all pixels,
874		 * so one field will have htotal more pixels. In order to avoid
875		 * the reported position from jumping backwards when the pixel
876		 * counter is beyond the length of the shorter field, just
877		 * clamp the position the length of the shorter field. This
878		 * matches how the scanline counter based position works since
879		 * the scanline counter doesn't count the two half lines.
880		 */
881		if (position >= vtotal)
882			position = vtotal - 1;
883
884		/*
885		 * Start of vblank interrupt is triggered at start of hsync,
886		 * just prior to the first active line of vblank. However we
887		 * consider lines to start at the leading edge of horizontal
888		 * active. So, should we get here before we've crossed into
889		 * the horizontal active of the first line in vblank, we would
890		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
891		 * always add htotal-hsync_start to the current pixel position.
892		 */
893		position = (position + htotal - hsync_start) % vtotal;
894	}
895
896	/* Get optional system timestamp after query. */
897	if (etime)
898		*etime = ktime_get();
899
900	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
901
902	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
903
904	/*
905	 * While in vblank, position will be negative
906	 * counting up towards 0 at vbl_end. And outside
907	 * vblank, position will be positive counting
908	 * up since vbl_end.
909	 */
910	if (position >= vbl_start)
911		position -= vbl_end;
912	else
913		position += vtotal - vbl_end;
914
915	if (use_scanline_counter) {
916		*vpos = position;
917		*hpos = 0;
918	} else {
919		*vpos = position / htotal;
920		*hpos = position - (*vpos * htotal);
921	}
922
923	return true;
924}
925
926bool intel_crtc_get_vblank_timestamp(struct drm_crtc *crtc, int *max_error,
927				     ktime_t *vblank_time, bool in_vblank_irq)
928{
929	return drm_crtc_vblank_helper_get_vblank_timestamp_internal(
930		crtc, max_error, vblank_time, in_vblank_irq,
931		i915_get_crtc_scanoutpos);
932}
933
934int intel_get_crtc_scanline(struct intel_crtc *crtc)
935{
936	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
937	unsigned long irqflags;
938	int position;
939
940	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
941	position = __intel_get_crtc_scanline(crtc);
942	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
943
944	return position;
945}
946
947/**
948 * ivb_parity_work - Workqueue called when a parity error interrupt
949 * occurred.
950 * @work: workqueue struct
951 *
952 * Doesn't actually do anything except notify userspace. As a consequence of
953 * this event, userspace should try to remap the bad rows since statistically
954 * it is likely the same row is more likely to go bad again.
955 */
956static void ivb_parity_work(struct work_struct *work)
957{
958	struct drm_i915_private *dev_priv =
959		container_of(work, typeof(*dev_priv), l3_parity.error_work);
960	struct intel_gt *gt = &dev_priv->gt;
961	u32 error_status, row, bank, subbank;
962	char *parity_event[6];
963	u32 misccpctl;
964	u8 slice = 0;
965
966	/* We must turn off DOP level clock gating to access the L3 registers.
967	 * In order to prevent a get/put style interface, acquire struct mutex
968	 * any time we access those registers.
969	 */
970	mutex_lock(&dev_priv->drm.struct_mutex);
971
972	/* If we've screwed up tracking, just let the interrupt fire again */
973	if (drm_WARN_ON(&dev_priv->drm, !dev_priv->l3_parity.which_slice))
974		goto out;
975
976	misccpctl = I915_READ(GEN7_MISCCPCTL);
977	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
978	POSTING_READ(GEN7_MISCCPCTL);
979
980	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
981		i915_reg_t reg;
982
983		slice--;
984		if (drm_WARN_ON_ONCE(&dev_priv->drm,
985				     slice >= NUM_L3_SLICES(dev_priv)))
986			break;
987
988		dev_priv->l3_parity.which_slice &= ~(1<<slice);
989
990		reg = GEN7_L3CDERRST1(slice);
991
992		error_status = I915_READ(reg);
993		row = GEN7_PARITY_ERROR_ROW(error_status);
994		bank = GEN7_PARITY_ERROR_BANK(error_status);
995		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
996
997		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
998		POSTING_READ(reg);
999
1000		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
1001		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
1002		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
1003		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
1004		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
1005		parity_event[5] = NULL;
1006
1007		kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj,
1008				   KOBJ_CHANGE, parity_event);
1009
1010		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
1011			  slice, row, bank, subbank);
1012
1013		kfree(parity_event[4]);
1014		kfree(parity_event[3]);
1015		kfree(parity_event[2]);
1016		kfree(parity_event[1]);
1017	}
1018
1019	I915_WRITE(GEN7_MISCCPCTL, misccpctl);
1020
1021out:
1022	drm_WARN_ON(&dev_priv->drm, dev_priv->l3_parity.which_slice);
1023	spin_lock_irq(&gt->irq_lock);
1024	gen5_gt_enable_irq(gt, GT_PARITY_ERROR(dev_priv));
1025	spin_unlock_irq(&gt->irq_lock);
1026
1027	mutex_unlock(&dev_priv->drm.struct_mutex);
1028}
1029
1030static bool gen11_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1031{
1032	switch (pin) {
1033	case HPD_PORT_TC1:
1034		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC1);
1035	case HPD_PORT_TC2:
1036		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC2);
1037	case HPD_PORT_TC3:
1038		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC3);
1039	case HPD_PORT_TC4:
1040		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC4);
1041	case HPD_PORT_TC5:
1042		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC5);
1043	case HPD_PORT_TC6:
1044		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC6);
1045	default:
1046		return false;
1047	}
1048}
1049
1050static bool bxt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1051{
1052	switch (pin) {
1053	case HPD_PORT_A:
1054		return val & PORTA_HOTPLUG_LONG_DETECT;
1055	case HPD_PORT_B:
1056		return val & PORTB_HOTPLUG_LONG_DETECT;
1057	case HPD_PORT_C:
1058		return val & PORTC_HOTPLUG_LONG_DETECT;
1059	default:
1060		return false;
1061	}
1062}
1063
1064static bool icp_ddi_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1065{
1066	switch (pin) {
1067	case HPD_PORT_A:
1068		return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(PORT_A);
1069	case HPD_PORT_B:
1070		return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(PORT_B);
1071	case HPD_PORT_C:
1072		return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(PORT_C);
1073	default:
1074		return false;
1075	}
1076}
1077
1078static bool icp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1079{
1080	switch (pin) {
1081	case HPD_PORT_TC1:
1082		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC1);
1083	case HPD_PORT_TC2:
1084		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC2);
1085	case HPD_PORT_TC3:
1086		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC3);
1087	case HPD_PORT_TC4:
1088		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC4);
1089	case HPD_PORT_TC5:
1090		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC5);
1091	case HPD_PORT_TC6:
1092		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC6);
1093	default:
1094		return false;
1095	}
1096}
1097
1098static bool spt_port_hotplug2_long_detect(enum hpd_pin pin, u32 val)
1099{
1100	switch (pin) {
1101	case HPD_PORT_E:
1102		return val & PORTE_HOTPLUG_LONG_DETECT;
1103	default:
1104		return false;
1105	}
1106}
1107
1108static bool spt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1109{
1110	switch (pin) {
1111	case HPD_PORT_A:
1112		return val & PORTA_HOTPLUG_LONG_DETECT;
1113	case HPD_PORT_B:
1114		return val & PORTB_HOTPLUG_LONG_DETECT;
1115	case HPD_PORT_C:
1116		return val & PORTC_HOTPLUG_LONG_DETECT;
1117	case HPD_PORT_D:
1118		return val & PORTD_HOTPLUG_LONG_DETECT;
1119	default:
1120		return false;
1121	}
1122}
1123
1124static bool ilk_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1125{
1126	switch (pin) {
1127	case HPD_PORT_A:
1128		return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
1129	default:
1130		return false;
1131	}
1132}
1133
1134static bool pch_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1135{
1136	switch (pin) {
1137	case HPD_PORT_B:
1138		return val & PORTB_HOTPLUG_LONG_DETECT;
1139	case HPD_PORT_C:
1140		return val & PORTC_HOTPLUG_LONG_DETECT;
1141	case HPD_PORT_D:
1142		return val & PORTD_HOTPLUG_LONG_DETECT;
1143	default:
1144		return false;
1145	}
1146}
1147
1148static bool i9xx_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1149{
1150	switch (pin) {
1151	case HPD_PORT_B:
1152		return val & PORTB_HOTPLUG_INT_LONG_PULSE;
1153	case HPD_PORT_C:
1154		return val & PORTC_HOTPLUG_INT_LONG_PULSE;
1155	case HPD_PORT_D:
1156		return val & PORTD_HOTPLUG_INT_LONG_PULSE;
1157	default:
1158		return false;
1159	}
1160}
1161
1162/*
1163 * Get a bit mask of pins that have triggered, and which ones may be long.
1164 * This can be called multiple times with the same masks to accumulate
1165 * hotplug detection results from several registers.
1166 *
1167 * Note that the caller is expected to zero out the masks initially.
1168 */
1169static void intel_get_hpd_pins(struct drm_i915_private *dev_priv,
1170			       u32 *pin_mask, u32 *long_mask,
1171			       u32 hotplug_trigger, u32 dig_hotplug_reg,
1172			       const u32 hpd[HPD_NUM_PINS],
1173			       bool long_pulse_detect(enum hpd_pin pin, u32 val))
1174{
1175	enum hpd_pin pin;
1176
1177	BUILD_BUG_ON(BITS_PER_TYPE(*pin_mask) < HPD_NUM_PINS);
1178
1179	for_each_hpd_pin(pin) {
1180		if ((hpd[pin] & hotplug_trigger) == 0)
1181			continue;
1182
1183		*pin_mask |= BIT(pin);
1184
1185		if (long_pulse_detect(pin, dig_hotplug_reg))
1186			*long_mask |= BIT(pin);
1187	}
1188
1189	drm_dbg(&dev_priv->drm,
1190		"hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x, long 0x%08x\n",
1191		hotplug_trigger, dig_hotplug_reg, *pin_mask, *long_mask);
1192
1193}
1194
1195static void gmbus_irq_handler(struct drm_i915_private *dev_priv)
1196{
1197	wake_up_all(&dev_priv->gmbus_wait_queue);
1198}
1199
1200static void dp_aux_irq_handler(struct drm_i915_private *dev_priv)
1201{
1202	wake_up_all(&dev_priv->gmbus_wait_queue);
1203}
1204
1205#if defined(CONFIG_DEBUG_FS)
1206static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1207					 enum pipe pipe,
1208					 u32 crc0, u32 crc1,
1209					 u32 crc2, u32 crc3,
1210					 u32 crc4)
1211{
1212	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
1213	struct intel_pipe_crc *pipe_crc = &crtc->pipe_crc;
1214	u32 crcs[5] = { crc0, crc1, crc2, crc3, crc4 };
1215
1216	trace_intel_pipe_crc(crtc, crcs);
1217
1218	spin_lock(&pipe_crc->lock);
1219	/*
1220	 * For some not yet identified reason, the first CRC is
1221	 * bonkers. So let's just wait for the next vblank and read
1222	 * out the buggy result.
1223	 *
1224	 * On GEN8+ sometimes the second CRC is bonkers as well, so
1225	 * don't trust that one either.
1226	 */
1227	if (pipe_crc->skipped <= 0 ||
1228	    (INTEL_GEN(dev_priv) >= 8 && pipe_crc->skipped == 1)) {
1229		pipe_crc->skipped++;
1230		spin_unlock(&pipe_crc->lock);
1231		return;
1232	}
1233	spin_unlock(&pipe_crc->lock);
1234
1235	drm_crtc_add_crc_entry(&crtc->base, true,
1236				drm_crtc_accurate_vblank_count(&crtc->base),
1237				crcs);
1238}
1239#else
1240static inline void
1241display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1242			     enum pipe pipe,
1243			     u32 crc0, u32 crc1,
1244			     u32 crc2, u32 crc3,
1245			     u32 crc4) {}
1246#endif
1247
1248
1249static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1250				     enum pipe pipe)
1251{
1252	display_pipe_crc_irq_handler(dev_priv, pipe,
1253				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1254				     0, 0, 0, 0);
1255}
1256
1257static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1258				     enum pipe pipe)
1259{
1260	display_pipe_crc_irq_handler(dev_priv, pipe,
1261				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1262				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
1263				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
1264				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
1265				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
1266}
1267
1268static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1269				      enum pipe pipe)
1270{
1271	u32 res1, res2;
1272
1273	if (INTEL_GEN(dev_priv) >= 3)
1274		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
1275	else
1276		res1 = 0;
1277
1278	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
1279		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
1280	else
1281		res2 = 0;
1282
1283	display_pipe_crc_irq_handler(dev_priv, pipe,
1284				     I915_READ(PIPE_CRC_RES_RED(pipe)),
1285				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
1286				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
1287				     res1, res2);
1288}
1289
1290static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
1291{
1292	enum pipe pipe;
1293
1294	for_each_pipe(dev_priv, pipe) {
1295		I915_WRITE(PIPESTAT(pipe),
1296			   PIPESTAT_INT_STATUS_MASK |
1297			   PIPE_FIFO_UNDERRUN_STATUS);
1298
1299		dev_priv->pipestat_irq_mask[pipe] = 0;
1300	}
1301}
1302
1303static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
1304				  u32 iir, u32 pipe_stats[I915_MAX_PIPES])
1305{
1306	enum pipe pipe;
1307
1308	spin_lock(&dev_priv->irq_lock);
1309
1310	if (!dev_priv->display_irqs_enabled) {
1311		spin_unlock(&dev_priv->irq_lock);
1312		return;
1313	}
1314
1315	for_each_pipe(dev_priv, pipe) {
1316		i915_reg_t reg;
1317		u32 status_mask, enable_mask, iir_bit = 0;
1318
1319		/*
1320		 * PIPESTAT bits get signalled even when the interrupt is
1321		 * disabled with the mask bits, and some of the status bits do
1322		 * not generate interrupts at all (like the underrun bit). Hence
1323		 * we need to be careful that we only handle what we want to
1324		 * handle.
1325		 */
1326
1327		/* fifo underruns are filterered in the underrun handler. */
1328		status_mask = PIPE_FIFO_UNDERRUN_STATUS;
1329
1330		switch (pipe) {
1331		default:
1332		case PIPE_A:
1333			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
1334			break;
1335		case PIPE_B:
1336			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
1337			break;
1338		case PIPE_C:
1339			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
1340			break;
1341		}
1342		if (iir & iir_bit)
1343			status_mask |= dev_priv->pipestat_irq_mask[pipe];
1344
1345		if (!status_mask)
1346			continue;
1347
1348		reg = PIPESTAT(pipe);
1349		pipe_stats[pipe] = I915_READ(reg) & status_mask;
1350		enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
1351
1352		/*
1353		 * Clear the PIPE*STAT regs before the IIR
1354		 *
1355		 * Toggle the enable bits to make sure we get an
1356		 * edge in the ISR pipe event bit if we don't clear
1357		 * all the enabled status bits. Otherwise the edge
1358		 * triggered IIR on i965/g4x wouldn't notice that
1359		 * an interrupt is still pending.
1360		 */
1361		if (pipe_stats[pipe]) {
1362			I915_WRITE(reg, pipe_stats[pipe]);
1363			I915_WRITE(reg, enable_mask);
1364		}
1365	}
1366	spin_unlock(&dev_priv->irq_lock);
1367}
1368
1369static void i8xx_pipestat_irq_handler(struct drm_i915_private *dev_priv,
1370				      u16 iir, u32 pipe_stats[I915_MAX_PIPES])
1371{
1372	enum pipe pipe;
1373
1374	for_each_pipe(dev_priv, pipe) {
1375		if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
1376			intel_handle_vblank(dev_priv, pipe);
1377
1378		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1379			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1380
1381		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1382			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1383	}
1384}
1385
1386static void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv,
1387				      u32 iir, u32 pipe_stats[I915_MAX_PIPES])
1388{
1389	bool blc_event = false;
1390	enum pipe pipe;
1391
1392	for_each_pipe(dev_priv, pipe) {
1393		if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
1394			intel_handle_vblank(dev_priv, pipe);
1395
1396		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
1397			blc_event = true;
1398
1399		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1400			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1401
1402		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1403			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1404	}
1405
1406	if (blc_event || (iir & I915_ASLE_INTERRUPT))
1407		intel_opregion_asle_intr(dev_priv);
1408}
1409
1410static void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv,
1411				      u32 iir, u32 pipe_stats[I915_MAX_PIPES])
1412{
1413	bool blc_event = false;
1414	enum pipe pipe;
1415
1416	for_each_pipe(dev_priv, pipe) {
1417		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
1418			intel_handle_vblank(dev_priv, pipe);
1419
1420		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
1421			blc_event = true;
1422
1423		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1424			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1425
1426		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1427			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1428	}
1429
1430	if (blc_event || (iir & I915_ASLE_INTERRUPT))
1431		intel_opregion_asle_intr(dev_priv);
1432
1433	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
1434		gmbus_irq_handler(dev_priv);
1435}
1436
1437static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
1438					    u32 pipe_stats[I915_MAX_PIPES])
1439{
1440	enum pipe pipe;
1441
1442	for_each_pipe(dev_priv, pipe) {
1443		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
1444			intel_handle_vblank(dev_priv, pipe);
1445
1446		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1447			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1448
1449		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1450			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1451	}
1452
1453	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
1454		gmbus_irq_handler(dev_priv);
1455}
1456
1457static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
1458{
1459	u32 hotplug_status = 0, hotplug_status_mask;
1460	int i;
1461
1462	if (IS_G4X(dev_priv) ||
1463	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1464		hotplug_status_mask = HOTPLUG_INT_STATUS_G4X |
1465			DP_AUX_CHANNEL_MASK_INT_STATUS_G4X;
1466	else
1467		hotplug_status_mask = HOTPLUG_INT_STATUS_I915;
1468
1469	/*
1470	 * We absolutely have to clear all the pending interrupt
1471	 * bits in PORT_HOTPLUG_STAT. Otherwise the ISR port
1472	 * interrupt bit won't have an edge, and the i965/g4x
1473	 * edge triggered IIR will not notice that an interrupt
1474	 * is still pending. We can't use PORT_HOTPLUG_EN to
1475	 * guarantee the edge as the act of toggling the enable
1476	 * bits can itself generate a new hotplug interrupt :(
1477	 */
1478	for (i = 0; i < 10; i++) {
1479		u32 tmp = I915_READ(PORT_HOTPLUG_STAT) & hotplug_status_mask;
1480
1481		if (tmp == 0)
1482			return hotplug_status;
1483
1484		hotplug_status |= tmp;
1485		I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
1486	}
1487
1488	drm_WARN_ONCE(&dev_priv->drm, 1,
1489		      "PORT_HOTPLUG_STAT did not clear (0x%08x)\n",
1490		      I915_READ(PORT_HOTPLUG_STAT));
1491
1492	return hotplug_status;
1493}
1494
1495static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv,
1496				 u32 hotplug_status)
1497{
1498	u32 pin_mask = 0, long_mask = 0;
1499	u32 hotplug_trigger;
1500
1501	if (IS_G4X(dev_priv) ||
1502	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1503		hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
1504	else
1505		hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
1506
1507	if (hotplug_trigger) {
1508		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1509				   hotplug_trigger, hotplug_trigger,
1510				   dev_priv->hotplug.hpd,
1511				   i9xx_port_hotplug_long_detect);
1512
1513		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
1514	}
1515
1516	if ((IS_G4X(dev_priv) ||
1517	     IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
1518	    hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
1519		dp_aux_irq_handler(dev_priv);
1520}
1521
1522static irqreturn_t valleyview_irq_handler(int irq, void *arg)
1523{
1524	struct drm_i915_private *dev_priv = arg;
1525	irqreturn_t ret = IRQ_NONE;
1526
1527	if (!intel_irqs_enabled(dev_priv))
1528		return IRQ_NONE;
1529
1530	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
1531	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1532
1533	do {
1534		u32 iir, gt_iir, pm_iir;
1535		u32 pipe_stats[I915_MAX_PIPES] = {};
1536		u32 hotplug_status = 0;
1537		u32 ier = 0;
1538
1539		gt_iir = I915_READ(GTIIR);
1540		pm_iir = I915_READ(GEN6_PMIIR);
1541		iir = I915_READ(VLV_IIR);
1542
1543		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
1544			break;
1545
1546		ret = IRQ_HANDLED;
1547
1548		/*
1549		 * Theory on interrupt generation, based on empirical evidence:
1550		 *
1551		 * x = ((VLV_IIR & VLV_IER) ||
1552		 *      (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) &&
1553		 *       (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE)));
1554		 *
1555		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
1556		 * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to
1557		 * guarantee the CPU interrupt will be raised again even if we
1558		 * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR
1559		 * bits this time around.
1560		 */
1561		I915_WRITE(VLV_MASTER_IER, 0);
1562		ier = I915_READ(VLV_IER);
1563		I915_WRITE(VLV_IER, 0);
1564
1565		if (gt_iir)
1566			I915_WRITE(GTIIR, gt_iir);
1567		if (pm_iir)
1568			I915_WRITE(GEN6_PMIIR, pm_iir);
1569
1570		if (iir & I915_DISPLAY_PORT_INTERRUPT)
1571			hotplug_status = i9xx_hpd_irq_ack(dev_priv);
1572
1573		/* Call regardless, as some status bits might not be
1574		 * signalled in iir */
1575		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
1576
1577		if (iir & (I915_LPE_PIPE_A_INTERRUPT |
1578			   I915_LPE_PIPE_B_INTERRUPT))
1579			intel_lpe_audio_irq_handler(dev_priv);
1580
1581		/*
1582		 * VLV_IIR is single buffered, and reflects the level
1583		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
1584		 */
1585		if (iir)
1586			I915_WRITE(VLV_IIR, iir);
1587
1588		I915_WRITE(VLV_IER, ier);
1589		I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
1590
1591		if (gt_iir)
1592			gen6_gt_irq_handler(&dev_priv->gt, gt_iir);
1593		if (pm_iir)
1594			gen6_rps_irq_handler(&dev_priv->gt.rps, pm_iir);
1595
1596		if (hotplug_status)
1597			i9xx_hpd_irq_handler(dev_priv, hotplug_status);
1598
1599		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
1600	} while (0);
1601
1602	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1603
1604	return ret;
1605}
1606
1607static irqreturn_t cherryview_irq_handler(int irq, void *arg)
1608{
1609	struct drm_i915_private *dev_priv = arg;
1610	irqreturn_t ret = IRQ_NONE;
1611
1612	if (!intel_irqs_enabled(dev_priv))
1613		return IRQ_NONE;
1614
1615	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
1616	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1617
1618	do {
1619		u32 master_ctl, iir;
1620		u32 pipe_stats[I915_MAX_PIPES] = {};
1621		u32 hotplug_status = 0;
1622		u32 ier = 0;
1623
1624		master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
1625		iir = I915_READ(VLV_IIR);
1626
1627		if (master_ctl == 0 && iir == 0)
1628			break;
1629
1630		ret = IRQ_HANDLED;
1631
1632		/*
1633		 * Theory on interrupt generation, based on empirical evidence:
1634		 *
1635		 * x = ((VLV_IIR & VLV_IER) ||
1636		 *      ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) &&
1637		 *       (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL)));
1638		 *
1639		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
1640		 * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to
1641		 * guarantee the CPU interrupt will be raised again even if we
1642		 * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL
1643		 * bits this time around.
1644		 */
1645		I915_WRITE(GEN8_MASTER_IRQ, 0);
1646		ier = I915_READ(VLV_IER);
1647		I915_WRITE(VLV_IER, 0);
1648
1649		gen8_gt_irq_handler(&dev_priv->gt, master_ctl);
1650
1651		if (iir & I915_DISPLAY_PORT_INTERRUPT)
1652			hotplug_status = i9xx_hpd_irq_ack(dev_priv);
1653
1654		/* Call regardless, as some status bits might not be
1655		 * signalled in iir */
1656		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
1657
1658		if (iir & (I915_LPE_PIPE_A_INTERRUPT |
1659			   I915_LPE_PIPE_B_INTERRUPT |
1660			   I915_LPE_PIPE_C_INTERRUPT))
1661			intel_lpe_audio_irq_handler(dev_priv);
1662
1663		/*
1664		 * VLV_IIR is single buffered, and reflects the level
1665		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
1666		 */
1667		if (iir)
1668			I915_WRITE(VLV_IIR, iir);
1669
1670		I915_WRITE(VLV_IER, ier);
1671		I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
1672
1673		if (hotplug_status)
1674			i9xx_hpd_irq_handler(dev_priv, hotplug_status);
1675
1676		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
1677	} while (0);
1678
1679	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1680
1681	return ret;
1682}
1683
1684static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
1685				u32 hotplug_trigger)
1686{
1687	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
1688
1689	/*
1690	 * Somehow the PCH doesn't seem to really ack the interrupt to the CPU
1691	 * unless we touch the hotplug register, even if hotplug_trigger is
1692	 * zero. Not acking leads to "The master control interrupt lied (SDE)!"
1693	 * errors.
1694	 */
1695	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
1696	if (!hotplug_trigger) {
1697		u32 mask = PORTA_HOTPLUG_STATUS_MASK |
1698			PORTD_HOTPLUG_STATUS_MASK |
1699			PORTC_HOTPLUG_STATUS_MASK |
1700			PORTB_HOTPLUG_STATUS_MASK;
1701		dig_hotplug_reg &= ~mask;
1702	}
1703
1704	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
1705	if (!hotplug_trigger)
1706		return;
1707
1708	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1709			   hotplug_trigger, dig_hotplug_reg,
1710			   dev_priv->hotplug.pch_hpd,
1711			   pch_port_hotplug_long_detect);
1712
1713	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
1714}
1715
1716static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
1717{
1718	enum pipe pipe;
1719	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
1720
1721	ibx_hpd_irq_handler(dev_priv, hotplug_trigger);
1722
1723	if (pch_iir & SDE_AUDIO_POWER_MASK) {
1724		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
1725			       SDE_AUDIO_POWER_SHIFT);
1726		drm_dbg(&dev_priv->drm, "PCH audio power change on port %d\n",
1727			port_name(port));
1728	}
1729
1730	if (pch_iir & SDE_AUX_MASK)
1731		dp_aux_irq_handler(dev_priv);
1732
1733	if (pch_iir & SDE_GMBUS)
1734		gmbus_irq_handler(dev_priv);
1735
1736	if (pch_iir & SDE_AUDIO_HDCP_MASK)
1737		drm_dbg(&dev_priv->drm, "PCH HDCP audio interrupt\n");
1738
1739	if (pch_iir & SDE_AUDIO_TRANS_MASK)
1740		drm_dbg(&dev_priv->drm, "PCH transcoder audio interrupt\n");
1741
1742	if (pch_iir & SDE_POISON)
1743		drm_err(&dev_priv->drm, "PCH poison interrupt\n");
1744
1745	if (pch_iir & SDE_FDI_MASK) {
1746		for_each_pipe(dev_priv, pipe)
1747			drm_dbg(&dev_priv->drm, "  pipe %c FDI IIR: 0x%08x\n",
1748				pipe_name(pipe),
1749				I915_READ(FDI_RX_IIR(pipe)));
1750	}
1751
1752	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
1753		drm_dbg(&dev_priv->drm, "PCH transcoder CRC done interrupt\n");
1754
1755	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
1756		drm_dbg(&dev_priv->drm,
1757			"PCH transcoder CRC error interrupt\n");
1758
1759	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
1760		intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_A);
1761
1762	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
1763		intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_B);
1764}
1765
1766static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
1767{
1768	u32 err_int = I915_READ(GEN7_ERR_INT);
1769	enum pipe pipe;
1770
1771	if (err_int & ERR_INT_POISON)
1772		drm_err(&dev_priv->drm, "Poison interrupt\n");
1773
1774	for_each_pipe(dev_priv, pipe) {
1775		if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
1776			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1777
1778		if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
1779			if (IS_IVYBRIDGE(dev_priv))
1780				ivb_pipe_crc_irq_handler(dev_priv, pipe);
1781			else
1782				hsw_pipe_crc_irq_handler(dev_priv, pipe);
1783		}
1784	}
1785
1786	I915_WRITE(GEN7_ERR_INT, err_int);
1787}
1788
1789static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
1790{
1791	u32 serr_int = I915_READ(SERR_INT);
1792	enum pipe pipe;
1793
1794	if (serr_int & SERR_INT_POISON)
1795		drm_err(&dev_priv->drm, "PCH poison interrupt\n");
1796
1797	for_each_pipe(dev_priv, pipe)
1798		if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe))
1799			intel_pch_fifo_underrun_irq_handler(dev_priv, pipe);
1800
1801	I915_WRITE(SERR_INT, serr_int);
1802}
1803
1804static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
1805{
1806	enum pipe pipe;
1807	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
1808
1809	ibx_hpd_irq_handler(dev_priv, hotplug_trigger);
1810
1811	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
1812		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
1813			       SDE_AUDIO_POWER_SHIFT_CPT);
1814		drm_dbg(&dev_priv->drm, "PCH audio power change on port %c\n",
1815			port_name(port));
1816	}
1817
1818	if (pch_iir & SDE_AUX_MASK_CPT)
1819		dp_aux_irq_handler(dev_priv);
1820
1821	if (pch_iir & SDE_GMBUS_CPT)
1822		gmbus_irq_handler(dev_priv);
1823
1824	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
1825		drm_dbg(&dev_priv->drm, "Audio CP request interrupt\n");
1826
1827	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
1828		drm_dbg(&dev_priv->drm, "Audio CP change interrupt\n");
1829
1830	if (pch_iir & SDE_FDI_MASK_CPT) {
1831		for_each_pipe(dev_priv, pipe)
1832			drm_dbg(&dev_priv->drm, "  pipe %c FDI IIR: 0x%08x\n",
1833				pipe_name(pipe),
1834				I915_READ(FDI_RX_IIR(pipe)));
1835	}
1836
1837	if (pch_iir & SDE_ERROR_CPT)
1838		cpt_serr_int_handler(dev_priv);
1839}
1840
1841static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
1842{
1843	u32 ddi_hotplug_trigger, tc_hotplug_trigger;
1844	u32 pin_mask = 0, long_mask = 0;
1845
1846	if (HAS_PCH_TGP(dev_priv)) {
1847		ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_TGP;
1848		tc_hotplug_trigger = pch_iir & SDE_TC_MASK_TGP;
1849	} else if (HAS_PCH_JSP(dev_priv)) {
1850		ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_TGP;
1851		tc_hotplug_trigger = 0;
1852	} else if (HAS_PCH_MCC(dev_priv)) {
1853		ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_ICP;
1854		tc_hotplug_trigger = pch_iir & SDE_TC_HOTPLUG_ICP(PORT_TC1);
1855	} else {
1856		drm_WARN(&dev_priv->drm, !HAS_PCH_ICP(dev_priv),
1857			 "Unrecognized PCH type 0x%x\n",
1858			 INTEL_PCH_TYPE(dev_priv));
1859
1860		ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_ICP;
1861		tc_hotplug_trigger = pch_iir & SDE_TC_MASK_ICP;
1862	}
1863
1864	if (ddi_hotplug_trigger) {
1865		u32 dig_hotplug_reg;
1866
1867		dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_DDI);
1868		I915_WRITE(SHOTPLUG_CTL_DDI, dig_hotplug_reg);
1869
1870		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1871				   ddi_hotplug_trigger, dig_hotplug_reg,
1872				   dev_priv->hotplug.pch_hpd,
1873				   icp_ddi_port_hotplug_long_detect);
1874	}
1875
1876	if (tc_hotplug_trigger) {
1877		u32 dig_hotplug_reg;
1878
1879		dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_TC);
1880		I915_WRITE(SHOTPLUG_CTL_TC, dig_hotplug_reg);
1881
1882		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1883				   tc_hotplug_trigger, dig_hotplug_reg,
1884				   dev_priv->hotplug.pch_hpd,
1885				   icp_tc_port_hotplug_long_detect);
1886	}
1887
1888	if (pin_mask)
1889		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
1890
1891	if (pch_iir & SDE_GMBUS_ICP)
1892		gmbus_irq_handler(dev_priv);
1893}
1894
1895static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
1896{
1897	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
1898		~SDE_PORTE_HOTPLUG_SPT;
1899	u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
1900	u32 pin_mask = 0, long_mask = 0;
1901
1902	if (hotplug_trigger) {
1903		u32 dig_hotplug_reg;
1904
1905		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
1906		I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
1907
1908		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1909				   hotplug_trigger, dig_hotplug_reg,
1910				   dev_priv->hotplug.pch_hpd,
1911				   spt_port_hotplug_long_detect);
1912	}
1913
1914	if (hotplug2_trigger) {
1915		u32 dig_hotplug_reg;
1916
1917		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2);
1918		I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg);
1919
1920		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1921				   hotplug2_trigger, dig_hotplug_reg,
1922				   dev_priv->hotplug.pch_hpd,
1923				   spt_port_hotplug2_long_detect);
1924	}
1925
1926	if (pin_mask)
1927		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
1928
1929	if (pch_iir & SDE_GMBUS_CPT)
1930		gmbus_irq_handler(dev_priv);
1931}
1932
1933static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv,
1934				u32 hotplug_trigger)
1935{
1936	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
1937
1938	dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
1939	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);
1940
1941	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1942			   hotplug_trigger, dig_hotplug_reg,
1943			   dev_priv->hotplug.hpd,
1944			   ilk_port_hotplug_long_detect);
1945
1946	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
1947}
1948
1949static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
1950				    u32 de_iir)
1951{
1952	enum pipe pipe;
1953	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;
1954
1955	if (hotplug_trigger)
1956		ilk_hpd_irq_handler(dev_priv, hotplug_trigger);
1957
1958	if (de_iir & DE_AUX_CHANNEL_A)
1959		dp_aux_irq_handler(dev_priv);
1960
1961	if (de_iir & DE_GSE)
1962		intel_opregion_asle_intr(dev_priv);
1963
1964	if (de_iir & DE_POISON)
1965		drm_err(&dev_priv->drm, "Poison interrupt\n");
1966
1967	for_each_pipe(dev_priv, pipe) {
1968		if (de_iir & DE_PIPE_VBLANK(pipe))
1969			intel_handle_vblank(dev_priv, pipe);
1970
1971		if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
1972			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1973
1974		if (de_iir & DE_PIPE_CRC_DONE(pipe))
1975			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1976	}
1977
1978	/* check event from PCH */
1979	if (de_iir & DE_PCH_EVENT) {
1980		u32 pch_iir = I915_READ(SDEIIR);
1981
1982		if (HAS_PCH_CPT(dev_priv))
1983			cpt_irq_handler(dev_priv, pch_iir);
1984		else
1985			ibx_irq_handler(dev_priv, pch_iir);
1986
1987		/* should clear PCH hotplug event before clear CPU irq */
1988		I915_WRITE(SDEIIR, pch_iir);
1989	}
1990
1991	if (IS_GEN(dev_priv, 5) && de_iir & DE_PCU_EVENT)
1992		gen5_rps_irq_handler(&dev_priv->gt.rps);
1993}
1994
1995static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
1996				    u32 de_iir)
1997{
1998	enum pipe pipe;
1999	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;
2000
2001	if (hotplug_trigger)
2002		ilk_hpd_irq_handler(dev_priv, hotplug_trigger);
2003
2004	if (de_iir & DE_ERR_INT_IVB)
2005		ivb_err_int_handler(dev_priv);
2006
2007	if (de_iir & DE_EDP_PSR_INT_HSW) {
2008		u32 psr_iir = I915_READ(EDP_PSR_IIR);
2009
2010		intel_psr_irq_handler(dev_priv, psr_iir);
2011		I915_WRITE(EDP_PSR_IIR, psr_iir);
2012	}
2013
2014	if (de_iir & DE_AUX_CHANNEL_A_IVB)
2015		dp_aux_irq_handler(dev_priv);
2016
2017	if (de_iir & DE_GSE_IVB)
2018		intel_opregion_asle_intr(dev_priv);
2019
2020	for_each_pipe(dev_priv, pipe) {
2021		if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)))
2022			intel_handle_vblank(dev_priv, pipe);
2023	}
2024
2025	/* check event from PCH */
2026	if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) {
2027		u32 pch_iir = I915_READ(SDEIIR);
2028
2029		cpt_irq_handler(dev_priv, pch_iir);
2030
2031		/* clear PCH hotplug event before clear CPU irq */
2032		I915_WRITE(SDEIIR, pch_iir);
2033	}
2034}
2035
2036/*
2037 * To handle irqs with the minimum potential races with fresh interrupts, we:
2038 * 1 - Disable Master Interrupt Control.
2039 * 2 - Find the source(s) of the interrupt.
2040 * 3 - Clear the Interrupt Identity bits (IIR).
2041 * 4 - Process the interrupt(s) that had bits set in the IIRs.
2042 * 5 - Re-enable Master Interrupt Control.
2043 */
2044static irqreturn_t ilk_irq_handler(int irq, void *arg)
2045{
2046	struct drm_i915_private *i915 = arg;
2047	void __iomem * const regs = i915->uncore.regs;
2048	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
2049	irqreturn_t ret = IRQ_NONE;
2050
2051	if (unlikely(!intel_irqs_enabled(i915)))
2052		return IRQ_NONE;
2053
2054	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
2055	disable_rpm_wakeref_asserts(&i915->runtime_pm);
2056
2057	/* disable master interrupt before clearing iir  */
2058	de_ier = raw_reg_read(regs, DEIER);
2059	raw_reg_write(regs, DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
2060
2061	/* Disable south interrupts. We'll only write to SDEIIR once, so further
2062	 * interrupts will will be stored on its back queue, and then we'll be
2063	 * able to process them after we restore SDEIER (as soon as we restore
2064	 * it, we'll get an interrupt if SDEIIR still has something to process
2065	 * due to its back queue). */
2066	if (!HAS_PCH_NOP(i915)) {
2067		sde_ier = raw_reg_read(regs, SDEIER);
2068		raw_reg_write(regs, SDEIER, 0);
2069	}
2070
2071	/* Find, clear, then process each source of interrupt */
2072
2073	gt_iir = raw_reg_read(regs, GTIIR);
2074	if (gt_iir) {
2075		raw_reg_write(regs, GTIIR, gt_iir);
2076		if (INTEL_GEN(i915) >= 6)
2077			gen6_gt_irq_handler(&i915->gt, gt_iir);
2078		else
2079			gen5_gt_irq_handler(&i915->gt, gt_iir);
2080		ret = IRQ_HANDLED;
2081	}
2082
2083	de_iir = raw_reg_read(regs, DEIIR);
2084	if (de_iir) {
2085		raw_reg_write(regs, DEIIR, de_iir);
2086		if (INTEL_GEN(i915) >= 7)
2087			ivb_display_irq_handler(i915, de_iir);
2088		else
2089			ilk_display_irq_handler(i915, de_iir);
2090		ret = IRQ_HANDLED;
2091	}
2092
2093	if (INTEL_GEN(i915) >= 6) {
2094		u32 pm_iir = raw_reg_read(regs, GEN6_PMIIR);
2095		if (pm_iir) {
2096			raw_reg_write(regs, GEN6_PMIIR, pm_iir);
2097			gen6_rps_irq_handler(&i915->gt.rps, pm_iir);
2098			ret = IRQ_HANDLED;
2099		}
2100	}
2101
2102	raw_reg_write(regs, DEIER, de_ier);
2103	if (sde_ier)
2104		raw_reg_write(regs, SDEIER, sde_ier);
2105
2106	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
2107	enable_rpm_wakeref_asserts(&i915->runtime_pm);
2108
2109	return ret;
2110}
2111
2112static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv,
2113				u32 hotplug_trigger)
2114{
2115	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2116
2117	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2118	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2119
2120	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2121			   hotplug_trigger, dig_hotplug_reg,
2122			   dev_priv->hotplug.hpd,
2123			   bxt_port_hotplug_long_detect);
2124
2125	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2126}
2127
2128static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
2129{
2130	u32 pin_mask = 0, long_mask = 0;
2131	u32 trigger_tc = iir & GEN11_DE_TC_HOTPLUG_MASK;
2132	u32 trigger_tbt = iir & GEN11_DE_TBT_HOTPLUG_MASK;
2133
2134	if (trigger_tc) {
2135		u32 dig_hotplug_reg;
2136
2137		dig_hotplug_reg = I915_READ(GEN11_TC_HOTPLUG_CTL);
2138		I915_WRITE(GEN11_TC_HOTPLUG_CTL, dig_hotplug_reg);
2139
2140		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2141				   trigger_tc, dig_hotplug_reg,
2142				   dev_priv->hotplug.hpd,
2143				   gen11_port_hotplug_long_detect);
2144	}
2145
2146	if (trigger_tbt) {
2147		u32 dig_hotplug_reg;
2148
2149		dig_hotplug_reg = I915_READ(GEN11_TBT_HOTPLUG_CTL);
2150		I915_WRITE(GEN11_TBT_HOTPLUG_CTL, dig_hotplug_reg);
2151
2152		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2153				   trigger_tbt, dig_hotplug_reg,
2154				   dev_priv->hotplug.hpd,
2155				   gen11_port_hotplug_long_detect);
2156	}
2157
2158	if (pin_mask)
2159		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2160	else
2161		drm_err(&dev_priv->drm,
2162			"Unexpected DE HPD interrupt 0x%08x\n", iir);
2163}
2164
2165static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
2166{
2167	u32 mask;
2168
2169	if (INTEL_GEN(dev_priv) >= 12)
2170		return TGL_DE_PORT_AUX_DDIA |
2171			TGL_DE_PORT_AUX_DDIB |
2172			TGL_DE_PORT_AUX_DDIC |
2173			TGL_DE_PORT_AUX_USBC1 |
2174			TGL_DE_PORT_AUX_USBC2 |
2175			TGL_DE_PORT_AUX_USBC3 |
2176			TGL_DE_PORT_AUX_USBC4 |
2177			TGL_DE_PORT_AUX_USBC5 |
2178			TGL_DE_PORT_AUX_USBC6;
2179
2180
2181	mask = GEN8_AUX_CHANNEL_A;
2182	if (INTEL_GEN(dev_priv) >= 9)
2183		mask |= GEN9_AUX_CHANNEL_B |
2184			GEN9_AUX_CHANNEL_C |
2185			GEN9_AUX_CHANNEL_D;
2186
2187	if (IS_CNL_WITH_PORT_F(dev_priv) || IS_GEN(dev_priv, 11))
2188		mask |= CNL_AUX_CHANNEL_F;
2189
2190	if (IS_GEN(dev_priv, 11))
2191		mask |= ICL_AUX_CHANNEL_E;
2192
2193	return mask;
2194}
2195
2196static u32 gen8_de_pipe_fault_mask(struct drm_i915_private *dev_priv)
2197{
2198	if (IS_ROCKETLAKE(dev_priv))
2199		return RKL_DE_PIPE_IRQ_FAULT_ERRORS;
2200	else if (INTEL_GEN(dev_priv) >= 11)
2201		return GEN11_DE_PIPE_IRQ_FAULT_ERRORS;
2202	else if (INTEL_GEN(dev_priv) >= 9)
2203		return GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
2204	else
2205		return GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
2206}
2207
2208static void
2209gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
2210{
2211	bool found = false;
2212
2213	if (iir & GEN8_DE_MISC_GSE) {
2214		intel_opregion_asle_intr(dev_priv);
2215		found = true;
2216	}
2217
2218	if (iir & GEN8_DE_EDP_PSR) {
2219		u32 psr_iir;
2220		i915_reg_t iir_reg;
2221
2222		if (INTEL_GEN(dev_priv) >= 12)
2223			iir_reg = TRANS_PSR_IIR(dev_priv->psr.transcoder);
2224		else
2225			iir_reg = EDP_PSR_IIR;
2226
2227		psr_iir = I915_READ(iir_reg);
2228		I915_WRITE(iir_reg, psr_iir);
2229
2230		if (psr_iir)
2231			found = true;
2232
2233		intel_psr_irq_handler(dev_priv, psr_iir);
2234	}
2235
2236	if (!found)
2237		drm_err(&dev_priv->drm, "Unexpected DE Misc interrupt\n");
2238}
2239
2240static irqreturn_t
2241gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
2242{
2243	irqreturn_t ret = IRQ_NONE;
2244	u32 iir;
2245	enum pipe pipe;
2246
2247	if (master_ctl & GEN8_DE_MISC_IRQ) {
2248		iir = I915_READ(GEN8_DE_MISC_IIR);
2249		if (iir) {
2250			I915_WRITE(GEN8_DE_MISC_IIR, iir);
2251			ret = IRQ_HANDLED;
2252			gen8_de_misc_irq_handler(dev_priv, iir);
2253		} else {
2254			drm_err(&dev_priv->drm,
2255				"The master control interrupt lied (DE MISC)!\n");
2256		}
2257	}
2258
2259	if (INTEL_GEN(dev_priv) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) {
2260		iir = I915_READ(GEN11_DE_HPD_IIR);
2261		if (iir) {
2262			I915_WRITE(GEN11_DE_HPD_IIR, iir);
2263			ret = IRQ_HANDLED;
2264			gen11_hpd_irq_handler(dev_priv, iir);
2265		} else {
2266			drm_err(&dev_priv->drm,
2267				"The master control interrupt lied, (DE HPD)!\n");
2268		}
2269	}
2270
2271	if (master_ctl & GEN8_DE_PORT_IRQ) {
2272		iir = I915_READ(GEN8_DE_PORT_IIR);
2273		if (iir) {
2274			u32 tmp_mask;
2275			bool found = false;
2276
2277			I915_WRITE(GEN8_DE_PORT_IIR, iir);
2278			ret = IRQ_HANDLED;
2279
2280			if (iir & gen8_de_port_aux_mask(dev_priv)) {
2281				dp_aux_irq_handler(dev_priv);
2282				found = true;
2283			}
2284
2285			if (IS_GEN9_LP(dev_priv)) {
2286				tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK;
2287				if (tmp_mask) {
2288					bxt_hpd_irq_handler(dev_priv, tmp_mask);
2289					found = true;
2290				}
2291			} else if (IS_BROADWELL(dev_priv)) {
2292				tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG;
2293				if (tmp_mask) {
2294					ilk_hpd_irq_handler(dev_priv, tmp_mask);
2295					found = true;
2296				}
2297			}
2298
2299			if (IS_GEN9_LP(dev_priv) && (iir & BXT_DE_PORT_GMBUS)) {
2300				gmbus_irq_handler(dev_priv);
2301				found = true;
2302			}
2303
2304			if (!found)
2305				drm_err(&dev_priv->drm,
2306					"Unexpected DE Port interrupt\n");
2307		}
2308		else
2309			drm_err(&dev_priv->drm,
2310				"The master control interrupt lied (DE PORT)!\n");
2311	}
2312
2313	for_each_pipe(dev_priv, pipe) {
2314		u32 fault_errors;
2315
2316		if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
2317			continue;
2318
2319		iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
2320		if (!iir) {
2321			drm_err(&dev_priv->drm,
2322				"The master control interrupt lied (DE PIPE)!\n");
2323			continue;
2324		}
2325
2326		ret = IRQ_HANDLED;
2327		I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir);
2328
2329		if (iir & GEN8_PIPE_VBLANK)
2330			intel_handle_vblank(dev_priv, pipe);
2331
2332		if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
2333			hsw_pipe_crc_irq_handler(dev_priv, pipe);
2334
2335		if (iir & GEN8_PIPE_FIFO_UNDERRUN)
2336			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2337
2338		fault_errors = iir & gen8_de_pipe_fault_mask(dev_priv);
2339		if (fault_errors)
2340			drm_err(&dev_priv->drm,
2341				"Fault errors on pipe %c: 0x%08x\n",
2342				pipe_name(pipe),
2343				fault_errors);
2344	}
2345
2346	if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) &&
2347	    master_ctl & GEN8_DE_PCH_IRQ) {
2348		/*
2349		 * FIXME(BDW): Assume for now that the new interrupt handling
2350		 * scheme also closed the SDE interrupt handling race we've seen
2351		 * on older pch-split platforms. But this needs testing.
2352		 */
2353		iir = I915_READ(SDEIIR);
2354		if (iir) {
2355			I915_WRITE(SDEIIR, iir);
2356			ret = IRQ_HANDLED;
2357
2358			if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
2359				icp_irq_handler(dev_priv, iir);
2360			else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
2361				spt_irq_handler(dev_priv, iir);
2362			else
2363				cpt_irq_handler(dev_priv, iir);
2364		} else {
2365			/*
2366			 * Like on previous PCH there seems to be something
2367			 * fishy going on with forwarding PCH interrupts.
2368			 */
2369			drm_dbg(&dev_priv->drm,
2370				"The master control interrupt lied (SDE)!\n");
2371		}
2372	}
2373
2374	return ret;
2375}
2376
2377static inline u32 gen8_master_intr_disable(void __iomem * const regs)
2378{
2379	raw_reg_write(regs, GEN8_MASTER_IRQ, 0);
2380
2381	/*
2382	 * Now with master disabled, get a sample of level indications
2383	 * for this interrupt. Indications will be cleared on related acks.
2384	 * New indications can and will light up during processing,
2385	 * and will generate new interrupt after enabling master.
2386	 */
2387	return raw_reg_read(regs, GEN8_MASTER_IRQ);
2388}
2389
2390static inline void gen8_master_intr_enable(void __iomem * const regs)
2391{
2392	raw_reg_write(regs, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
2393}
2394
2395static irqreturn_t gen8_irq_handler(int irq, void *arg)
2396{
2397	struct drm_i915_private *dev_priv = arg;
2398	void __iomem * const regs = dev_priv->uncore.regs;
2399	u32 master_ctl;
2400
2401	if (!intel_irqs_enabled(dev_priv))
2402		return IRQ_NONE;
2403
2404	master_ctl = gen8_master_intr_disable(regs);
2405	if (!master_ctl) {
2406		gen8_master_intr_enable(regs);
2407		return IRQ_NONE;
2408	}
2409
2410	/* Find, queue (onto bottom-halves), then clear each source */
2411	gen8_gt_irq_handler(&dev_priv->gt, master_ctl);
2412
2413	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
2414	if (master_ctl & ~GEN8_GT_IRQS) {
2415		disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
2416		gen8_de_irq_handler(dev_priv, master_ctl);
2417		enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
2418	}
2419
2420	gen8_master_intr_enable(regs);
2421
2422	return IRQ_HANDLED;
2423}
2424
2425static u32
2426gen11_gu_misc_irq_ack(struct intel_gt *gt, const u32 master_ctl)
2427{
2428	void __iomem * const regs = gt->uncore->regs;
2429	u32 iir;
2430
2431	if (!(master_ctl & GEN11_GU_MISC_IRQ))
2432		return 0;
2433
2434	iir = raw_reg_read(regs, GEN11_GU_MISC_IIR);
2435	if (likely(iir))
2436		raw_reg_write(regs, GEN11_GU_MISC_IIR, iir);
2437
2438	return iir;
2439}
2440
2441static void
2442gen11_gu_misc_irq_handler(struct intel_gt *gt, const u32 iir)
2443{
2444	if (iir & GEN11_GU_MISC_GSE)
2445		intel_opregion_asle_intr(gt->i915);
2446}
2447
2448static inline u32 gen11_master_intr_disable(void __iomem * const regs)
2449{
2450	raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, 0);
2451
2452	/*
2453	 * Now with master disabled, get a sample of level indications
2454	 * for this interrupt. Indications will be cleared on related acks.
2455	 * New indications can and will light up during processing,
2456	 * and will generate new interrupt after enabling master.
2457	 */
2458	return raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
2459}
2460
2461static inline void gen11_master_intr_enable(void __iomem * const regs)
2462{
2463	raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ);
2464}
2465
2466static void
2467gen11_display_irq_handler(struct drm_i915_private *i915)
2468{
2469	void __iomem * const regs = i915->uncore.regs;
2470	const u32 disp_ctl = raw_reg_read(regs, GEN11_DISPLAY_INT_CTL);
2471
2472	disable_rpm_wakeref_asserts(&i915->runtime_pm);
2473	/*
2474	 * GEN11_DISPLAY_INT_CTL has same format as GEN8_MASTER_IRQ
2475	 * for the display related bits.
2476	 */
2477	raw_reg_write(regs, GEN11_DISPLAY_INT_CTL, 0x0);
2478	gen8_de_irq_handler(i915, disp_ctl);
2479	raw_reg_write(regs, GEN11_DISPLAY_INT_CTL,
2480		      GEN11_DISPLAY_IRQ_ENABLE);
2481
2482	enable_rpm_wakeref_asserts(&i915->runtime_pm);
2483}
2484
2485static __always_inline irqreturn_t
2486__gen11_irq_handler(struct drm_i915_private * const i915,
2487		    u32 (*intr_disable)(void __iomem * const regs),
2488		    void (*intr_enable)(void __iomem * const regs))
2489{
2490	void __iomem * const regs = i915->uncore.regs;
2491	struct intel_gt *gt = &i915->gt;
2492	u32 master_ctl;
2493	u32 gu_misc_iir;
2494
2495	if (!intel_irqs_enabled(i915))
2496		return IRQ_NONE;
2497
2498	master_ctl = intr_disable(regs);
2499	if (!master_ctl) {
2500		intr_enable(regs);
2501		return IRQ_NONE;
2502	}
2503
2504	/* Find, queue (onto bottom-halves), then clear each source */
2505	gen11_gt_irq_handler(gt, master_ctl);
2506
2507	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
2508	if (master_ctl & GEN11_DISPLAY_IRQ)
2509		gen11_display_irq_handler(i915);
2510
2511	gu_misc_iir = gen11_gu_misc_irq_ack(gt, master_ctl);
2512
2513	intr_enable(regs);
2514
2515	gen11_gu_misc_irq_handler(gt, gu_misc_iir);
2516
2517	return IRQ_HANDLED;
2518}
2519
2520static irqreturn_t gen11_irq_handler(int irq, void *arg)
2521{
2522	return __gen11_irq_handler(arg,
2523				   gen11_master_intr_disable,
2524				   gen11_master_intr_enable);
2525}
2526
2527static u32 dg1_master_intr_disable_and_ack(void __iomem * const regs)
2528{
2529	u32 val;
2530
2531	/* First disable interrupts */
2532	raw_reg_write(regs, DG1_MSTR_UNIT_INTR, 0);
2533
2534	/* Get the indication levels and ack the master unit */
2535	val = raw_reg_read(regs, DG1_MSTR_UNIT_INTR);
2536	if (unlikely(!val))
2537		return 0;
2538
2539	raw_reg_write(regs, DG1_MSTR_UNIT_INTR, val);
2540
2541	/*
2542	 * Now with master disabled, get a sample of level indications
2543	 * for this interrupt and ack them right away - we keep GEN11_MASTER_IRQ
2544	 * out as this bit doesn't exist anymore for DG1
2545	 */
2546	val = raw_reg_read(regs, GEN11_GFX_MSTR_IRQ) & ~GEN11_MASTER_IRQ;
2547	if (unlikely(!val))
2548		return 0;
2549
2550	raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, val);
2551
2552	return val;
2553}
2554
2555static inline void dg1_master_intr_enable(void __iomem * const regs)
2556{
2557	raw_reg_write(regs, DG1_MSTR_UNIT_INTR, DG1_MSTR_IRQ);
2558}
2559
2560static irqreturn_t dg1_irq_handler(int irq, void *arg)
2561{
2562	return __gen11_irq_handler(arg,
2563				   dg1_master_intr_disable_and_ack,
2564				   dg1_master_intr_enable);
2565}
2566
2567/* Called from drm generic code, passed 'crtc' which
2568 * we use as a pipe index
2569 */
2570int i8xx_enable_vblank(struct drm_crtc *crtc)
2571{
2572	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2573	enum pipe pipe = to_intel_crtc(crtc)->pipe;
2574	unsigned long irqflags;
2575
2576	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2577	i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
2578	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2579
2580	return 0;
2581}
2582
2583int i915gm_enable_vblank(struct drm_crtc *crtc)
2584{
2585	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2586
2587	/*
2588	 * Vblank interrupts fail to wake the device up from C2+.
2589	 * Disabling render clock gating during C-states avoids
2590	 * the problem. There is a small power cost so we do this
2591	 * only when vblank interrupts are actually enabled.
2592	 */
2593	if (dev_priv->vblank_enabled++ == 0)
2594		I915_WRITE(SCPD0, _MASKED_BIT_ENABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
2595
2596	return i8xx_enable_vblank(crtc);
2597}
2598
2599int i965_enable_vblank(struct drm_crtc *crtc)
2600{
2601	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2602	enum pipe pipe = to_intel_crtc(crtc)->pipe;
2603	unsigned long irqflags;
2604
2605	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2606	i915_enable_pipestat(dev_priv, pipe,
2607			     PIPE_START_VBLANK_INTERRUPT_STATUS);
2608	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2609
2610	return 0;
2611}
2612
2613int ilk_enable_vblank(struct drm_crtc *crtc)
2614{
2615	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2616	enum pipe pipe = to_intel_crtc(crtc)->pipe;
2617	unsigned long irqflags;
2618	u32 bit = INTEL_GEN(dev_priv) >= 7 ?
2619		DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
2620
2621	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2622	ilk_enable_display_irq(dev_priv, bit);
2623	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2624
2625	/* Even though there is no DMC, frame counter can get stuck when
2626	 * PSR is active as no frames are generated.
2627	 */
2628	if (HAS_PSR(dev_priv))
2629		drm_crtc_vblank_restore(crtc);
2630
2631	return 0;
2632}
2633
2634int bdw_enable_vblank(struct drm_crtc *crtc)
2635{
2636	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2637	enum pipe pipe = to_intel_crtc(crtc)->pipe;
2638	unsigned long irqflags;
2639
2640	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2641	bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
2642	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2643
2644	/* Even if there is no DMC, frame counter can get stuck when
2645	 * PSR is active as no frames are generated, so check only for PSR.
2646	 */
2647	if (HAS_PSR(dev_priv))
2648		drm_crtc_vblank_restore(crtc);
2649
2650	return 0;
2651}
2652
2653/* Called from drm generic code, passed 'crtc' which
2654 * we use as a pipe index
2655 */
2656void i8xx_disable_vblank(struct drm_crtc *crtc)
2657{
2658	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2659	enum pipe pipe = to_intel_crtc(crtc)->pipe;
2660	unsigned long irqflags;
2661
2662	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2663	i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
2664	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2665}
2666
2667void i915gm_disable_vblank(struct drm_crtc *crtc)
2668{
2669	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2670
2671	i8xx_disable_vblank(crtc);
2672
2673	if (--dev_priv->vblank_enabled == 0)
2674		I915_WRITE(SCPD0, _MASKED_BIT_DISABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
2675}
2676
2677void i965_disable_vblank(struct drm_crtc *crtc)
2678{
2679	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2680	enum pipe pipe = to_intel_crtc(crtc)->pipe;
2681	unsigned long irqflags;
2682
2683	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2684	i915_disable_pipestat(dev_priv, pipe,
2685			      PIPE_START_VBLANK_INTERRUPT_STATUS);
2686	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2687}
2688
2689void ilk_disable_vblank(struct drm_crtc *crtc)
2690{
2691	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2692	enum pipe pipe = to_intel_crtc(crtc)->pipe;
2693	unsigned long irqflags;
2694	u32 bit = INTEL_GEN(dev_priv) >= 7 ?
2695		DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
2696
2697	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2698	ilk_disable_display_irq(dev_priv, bit);
2699	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2700}
2701
2702void bdw_disable_vblank(struct drm_crtc *crtc)
2703{
2704	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2705	enum pipe pipe = to_intel_crtc(crtc)->pipe;
2706	unsigned long irqflags;
2707
2708	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2709	bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
2710	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2711}
2712
2713static void ibx_irq_reset(struct drm_i915_private *dev_priv)
2714{
2715	struct intel_uncore *uncore = &dev_priv->uncore;
2716
2717	if (HAS_PCH_NOP(dev_priv))
2718		return;
2719
2720	GEN3_IRQ_RESET(uncore, SDE);
2721
2722	if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
2723		I915_WRITE(SERR_INT, 0xffffffff);
2724}
2725
2726/*
2727 * SDEIER is also touched by the interrupt handler to work around missed PCH
2728 * interrupts. Hence we can't update it after the interrupt handler is enabled -
2729 * instead we unconditionally enable all PCH interrupt sources here, but then
2730 * only unmask them as needed with SDEIMR.
2731 *
2732 * This function needs to be called before interrupts are enabled.
2733 */
2734static void ibx_irq_pre_postinstall(struct drm_i915_private *dev_priv)
2735{
2736	if (HAS_PCH_NOP(dev_priv))
2737		return;
2738
2739	drm_WARN_ON(&dev_priv->drm, I915_READ(SDEIER) != 0);
2740	I915_WRITE(SDEIER, 0xffffffff);
2741	POSTING_READ(SDEIER);
2742}
2743
2744static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
2745{
2746	struct intel_uncore *uncore = &dev_priv->uncore;
2747
2748	if (IS_CHERRYVIEW(dev_priv))
2749		intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
2750	else
2751		intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK);
2752
2753	i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
2754	intel_uncore_write(uncore, PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2755
2756	i9xx_pipestat_irq_reset(dev_priv);
2757
2758	GEN3_IRQ_RESET(uncore, VLV_);
2759	dev_priv->irq_mask = ~0u;
2760}
2761
2762static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
2763{
2764	struct intel_uncore *uncore = &dev_priv->uncore;
2765
2766	u32 pipestat_mask;
2767	u32 enable_mask;
2768	enum pipe pipe;
2769
2770	pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS;
2771
2772	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
2773	for_each_pipe(dev_priv, pipe)
2774		i915_enable_pipestat(dev_priv, pipe, pipestat_mask);
2775
2776	enable_mask = I915_DISPLAY_PORT_INTERRUPT |
2777		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2778		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2779		I915_LPE_PIPE_A_INTERRUPT |
2780		I915_LPE_PIPE_B_INTERRUPT;
2781
2782	if (IS_CHERRYVIEW(dev_priv))
2783		enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT |
2784			I915_LPE_PIPE_C_INTERRUPT;
2785
2786	drm_WARN_ON(&dev_priv->drm, dev_priv->irq_mask != ~0u);
2787
2788	dev_priv->irq_mask = ~enable_mask;
2789
2790	GEN3_IRQ_INIT(uncore, VLV_, dev_priv->irq_mask, enable_mask);
2791}
2792
2793/* drm_dma.h hooks
2794*/
2795static void ilk_irq_reset(struct drm_i915_private *dev_priv)
2796{
2797	struct intel_uncore *uncore = &dev_priv->uncore;
2798
2799	GEN3_IRQ_RESET(uncore, DE);
2800	if (IS_GEN(dev_priv, 7))
2801		intel_uncore_write(uncore, GEN7_ERR_INT, 0xffffffff);
2802
2803	if (IS_HASWELL(dev_priv)) {
2804		intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
2805		intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
2806	}
2807
2808	gen5_gt_irq_reset(&dev_priv->gt);
2809
2810	ibx_irq_reset(dev_priv);
2811}
2812
2813static void valleyview_irq_reset(struct drm_i915_private *dev_priv)
2814{
2815	I915_WRITE(VLV_MASTER_IER, 0);
2816	POSTING_READ(VLV_MASTER_IER);
2817
2818	gen5_gt_irq_reset(&dev_priv->gt);
2819
2820	spin_lock_irq(&dev_priv->irq_lock);
2821	if (dev_priv->display_irqs_enabled)
2822		vlv_display_irq_reset(dev_priv);
2823	spin_unlock_irq(&dev_priv->irq_lock);
2824}
2825
2826static void gen8_irq_reset(struct drm_i915_private *dev_priv)
2827{
2828	struct intel_uncore *uncore = &dev_priv->uncore;
2829	enum pipe pipe;
2830
2831	gen8_master_intr_disable(dev_priv->uncore.regs);
2832
2833	gen8_gt_irq_reset(&dev_priv->gt);
2834
2835	intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
2836	intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
2837
2838	for_each_pipe(dev_priv, pipe)
2839		if (intel_display_power_is_enabled(dev_priv,
2840						   POWER_DOMAIN_PIPE(pipe)))
2841			GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
2842
2843	GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_);
2844	GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_);
2845	GEN3_IRQ_RESET(uncore, GEN8_PCU_);
2846
2847	if (HAS_PCH_SPLIT(dev_priv))
2848		ibx_irq_reset(dev_priv);
2849}
2850
2851static void gen11_display_irq_reset(struct drm_i915_private *dev_priv)
2852{
2853	struct intel_uncore *uncore = &dev_priv->uncore;
2854	enum pipe pipe;
2855	u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
2856		BIT(TRANSCODER_C) | BIT(TRANSCODER_D);
2857
2858	intel_uncore_write(uncore, GEN11_DISPLAY_INT_CTL, 0);
2859
2860	if (INTEL_GEN(dev_priv) >= 12) {
2861		enum transcoder trans;
2862
2863		for_each_cpu_transcoder_masked(dev_priv, trans, trans_mask) {
2864			enum intel_display_power_domain domain;
2865
2866			domain = POWER_DOMAIN_TRANSCODER(trans);
2867			if (!intel_display_power_is_enabled(dev_priv, domain))
2868				continue;
2869
2870			intel_uncore_write(uncore, TRANS_PSR_IMR(trans), 0xffffffff);
2871			intel_uncore_write(uncore, TRANS_PSR_IIR(trans), 0xffffffff);
2872		}
2873	} else {
2874		intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
2875		intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
2876	}
2877
2878	for_each_pipe(dev_priv, pipe)
2879		if (intel_display_power_is_enabled(dev_priv,
2880						   POWER_DOMAIN_PIPE(pipe)))
2881			GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
2882
2883	GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_);
2884	GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_);
2885	GEN3_IRQ_RESET(uncore, GEN11_DE_HPD_);
2886
2887	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
2888		GEN3_IRQ_RESET(uncore, SDE);
2889
2890	/* Wa_14010685332:icl,jsl,ehl,tgl,rkl */
2891	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) {
2892		intel_uncore_rmw(uncore, SOUTH_CHICKEN1,
2893				 SBCLK_RUN_REFCLK_DIS, SBCLK_RUN_REFCLK_DIS);
2894		intel_uncore_rmw(uncore, SOUTH_CHICKEN1,
2895				 SBCLK_RUN_REFCLK_DIS, 0);
2896	}
2897}
2898
2899static void gen11_irq_reset(struct drm_i915_private *dev_priv)
2900{
2901	struct intel_uncore *uncore = &dev_priv->uncore;
2902
2903	if (HAS_MASTER_UNIT_IRQ(dev_priv))
2904		dg1_master_intr_disable_and_ack(dev_priv->uncore.regs);
2905	else
2906		gen11_master_intr_disable(dev_priv->uncore.regs);
2907
2908	gen11_gt_irq_reset(&dev_priv->gt);
2909	gen11_display_irq_reset(dev_priv);
2910
2911	GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_);
2912	GEN3_IRQ_RESET(uncore, GEN8_PCU_);
2913}
2914
2915void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
2916				     u8 pipe_mask)
2917{
2918	struct intel_uncore *uncore = &dev_priv->uncore;
2919
2920	u32 extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
2921	enum pipe pipe;
2922
2923	spin_lock_irq(&dev_priv->irq_lock);
2924
2925	if (!intel_irqs_enabled(dev_priv)) {
2926		spin_unlock_irq(&dev_priv->irq_lock);
2927		return;
2928	}
2929
2930	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
2931		GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
2932				  dev_priv->de_irq_mask[pipe],
2933				  ~dev_priv->de_irq_mask[pipe] | extra_ier);
2934
2935	spin_unlock_irq(&dev_priv->irq_lock);
2936}
2937
2938void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
2939				     u8 pipe_mask)
2940{
2941	struct intel_uncore *uncore = &dev_priv->uncore;
2942	enum pipe pipe;
2943
2944	spin_lock_irq(&dev_priv->irq_lock);
2945
2946	if (!intel_irqs_enabled(dev_priv)) {
2947		spin_unlock_irq(&dev_priv->irq_lock);
2948		return;
2949	}
2950
2951	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
2952		GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
2953
2954	spin_unlock_irq(&dev_priv->irq_lock);
2955
2956	/* make sure we're done processing display irqs */
2957	intel_synchronize_irq(dev_priv);
2958}
2959
2960static void cherryview_irq_reset(struct drm_i915_private *dev_priv)
2961{
2962	struct intel_uncore *uncore = &dev_priv->uncore;
2963
2964	I915_WRITE(GEN8_MASTER_IRQ, 0);
2965	POSTING_READ(GEN8_MASTER_IRQ);
2966
2967	gen8_gt_irq_reset(&dev_priv->gt);
2968
2969	GEN3_IRQ_RESET(uncore, GEN8_PCU_);
2970
2971	spin_lock_irq(&dev_priv->irq_lock);
2972	if (dev_priv->display_irqs_enabled)
2973		vlv_display_irq_reset(dev_priv);
2974	spin_unlock_irq(&dev_priv->irq_lock);
2975}
2976
2977static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv,
2978				  const u32 hpd[HPD_NUM_PINS])
2979{
2980	struct intel_encoder *encoder;
2981	u32 enabled_irqs = 0;
2982
2983	for_each_intel_encoder(&dev_priv->drm, encoder)
2984		if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
2985			enabled_irqs |= hpd[encoder->hpd_pin];
2986
2987	return enabled_irqs;
2988}
2989
2990static u32 intel_hpd_hotplug_irqs(struct drm_i915_private *dev_priv,
2991				  const u32 hpd[HPD_NUM_PINS])
2992{
2993	struct intel_encoder *encoder;
2994	u32 hotplug_irqs = 0;
2995
2996	for_each_intel_encoder(&dev_priv->drm, encoder)
2997		hotplug_irqs |= hpd[encoder->hpd_pin];
2998
2999	return hotplug_irqs;
3000}
3001
3002static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv)
3003{
3004	u32 hotplug;
3005
3006	/*
3007	 * Enable digital hotplug on the PCH, and configure the DP short pulse
3008	 * duration to 2ms (which is the minimum in the Display Port spec).
3009	 * The pulse duration bits are reserved on LPT+.
3010	 */
3011	hotplug = I915_READ(PCH_PORT_HOTPLUG);
3012	hotplug &= ~(PORTB_PULSE_DURATION_MASK |
3013		     PORTC_PULSE_DURATION_MASK |
3014		     PORTD_PULSE_DURATION_MASK);
3015	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
3016	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
3017	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
3018	/*
3019	 * When CPU and PCH are on the same package, port A
3020	 * HPD must be enabled in both north and south.
3021	 */
3022	if (HAS_PCH_LPT_LP(dev_priv))
3023		hotplug |= PORTA_HOTPLUG_ENABLE;
3024	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3025}
3026
3027static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
3028{
3029	u32 hotplug_irqs, enabled_irqs;
3030
3031	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
3032	hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
3033
3034	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3035
3036	ibx_hpd_detection_setup(dev_priv);
3037}
3038
3039static void icp_ddi_hpd_detection_setup(struct drm_i915_private *dev_priv,
3040					u32 enable_mask)
3041{
3042	u32 hotplug;
3043
3044	hotplug = I915_READ(SHOTPLUG_CTL_DDI);
3045	hotplug |= enable_mask;
3046	I915_WRITE(SHOTPLUG_CTL_DDI, hotplug);
3047}
3048
3049static void icp_tc_hpd_detection_setup(struct drm_i915_private *dev_priv,
3050				       u32 enable_mask)
3051{
3052	u32 hotplug;
3053
3054	hotplug = I915_READ(SHOTPLUG_CTL_TC);
3055	hotplug |= enable_mask;
3056	I915_WRITE(SHOTPLUG_CTL_TC, hotplug);
3057}
3058
3059static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv,
3060			      u32 ddi_enable_mask, u32 tc_enable_mask)
3061{
3062	u32 hotplug_irqs, enabled_irqs;
3063
3064	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
3065	hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
3066
3067	if (INTEL_PCH_TYPE(dev_priv) <= PCH_TGP)
3068		I915_WRITE(SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);
3069
3070	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3071
3072	icp_ddi_hpd_detection_setup(dev_priv, ddi_enable_mask);
3073	if (tc_enable_mask)
3074		icp_tc_hpd_detection_setup(dev_priv, tc_enable_mask);
3075}
3076
3077/*
3078 * EHL doesn't need most of gen11_hpd_irq_setup, it's handling only the
3079 * equivalent of SDE.
3080 */
3081static void mcc_hpd_irq_setup(struct drm_i915_private *dev_priv)
3082{
3083	icp_hpd_irq_setup(dev_priv,
3084			  ICP_DDI_HPD_ENABLE_MASK, ICP_TC_HPD_ENABLE(PORT_TC1));
3085}
3086
3087/*
3088 * JSP behaves exactly the same as MCC above except that port C is mapped to
3089 * the DDI-C pins instead of the TC1 pins.  This means we should follow TGP's
3090 * masks & tables rather than ICP's masks & tables.
3091 */
3092static void jsp_hpd_irq_setup(struct drm_i915_private *dev_priv)
3093{
3094	icp_hpd_irq_setup(dev_priv,
3095			  TGP_DDI_HPD_ENABLE_MASK, 0);
3096}
3097
3098static void gen11_hpd_detection_setup(struct drm_i915_private *dev_priv)
3099{
3100	u32 hotplug;
3101
3102	hotplug = I915_READ(GEN11_TC_HOTPLUG_CTL);
3103	hotplug |= GEN11_HOTPLUG_CTL_ENABLE(PORT_TC1) |
3104		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC2) |
3105		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC3) |
3106		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4) |
3107		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC5) |
3108		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC6);
3109	I915_WRITE(GEN11_TC_HOTPLUG_CTL, hotplug);
3110
3111	hotplug = I915_READ(GEN11_TBT_HOTPLUG_CTL);
3112	hotplug |= GEN11_HOTPLUG_CTL_ENABLE(PORT_TC1) |
3113		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC2) |
3114		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC3) |
3115		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4) |
3116		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC5) |
3117		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC6);
3118	I915_WRITE(GEN11_TBT_HOTPLUG_CTL, hotplug);
3119}
3120
3121static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv)
3122{
3123	u32 hotplug_irqs, enabled_irqs;
3124	u32 val;
3125
3126	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd);
3127	hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.hpd);
3128
3129	val = I915_READ(GEN11_DE_HPD_IMR);
3130	val &= ~hotplug_irqs;
3131	val |= ~enabled_irqs & hotplug_irqs;
3132	I915_WRITE(GEN11_DE_HPD_IMR, val);
3133	POSTING_READ(GEN11_DE_HPD_IMR);
3134
3135	gen11_hpd_detection_setup(dev_priv);
3136
3137	if (INTEL_PCH_TYPE(dev_priv) >= PCH_TGP)
3138		icp_hpd_irq_setup(dev_priv,
3139				  TGP_DDI_HPD_ENABLE_MASK, TGP_TC_HPD_ENABLE_MASK);
3140	else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
3141		icp_hpd_irq_setup(dev_priv,
3142				  ICP_DDI_HPD_ENABLE_MASK, ICP_TC_HPD_ENABLE_MASK);
3143}
3144
3145static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv)
3146{
3147	u32 val, hotplug;
3148
3149	/* Display WA #1179 WaHardHangonHotPlug: cnp */
3150	if (HAS_PCH_CNP(dev_priv)) {
3151		val = I915_READ(SOUTH_CHICKEN1);
3152		val &= ~CHASSIS_CLK_REQ_DURATION_MASK;
3153		val |= CHASSIS_CLK_REQ_DURATION(0xf);
3154		I915_WRITE(SOUTH_CHICKEN1, val);
3155	}
3156
3157	/* Enable digital hotplug on the PCH */
3158	hotplug = I915_READ(PCH_PORT_HOTPLUG);
3159	hotplug |= PORTA_HOTPLUG_ENABLE |
3160		   PORTB_HOTPLUG_ENABLE |
3161		   PORTC_HOTPLUG_ENABLE |
3162		   PORTD_HOTPLUG_ENABLE;
3163	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3164
3165	hotplug = I915_READ(PCH_PORT_HOTPLUG2);
3166	hotplug |= PORTE_HOTPLUG_ENABLE;
3167	I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
3168}
3169
3170static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
3171{
3172	u32 hotplug_irqs, enabled_irqs;
3173
3174	if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
3175		I915_WRITE(SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);
3176
3177	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
3178	hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
3179
3180	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3181
3182	spt_hpd_detection_setup(dev_priv);
3183}
3184
3185static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv)
3186{
3187	u32 hotplug;
3188
3189	/*
3190	 * Enable digital hotplug on the CPU, and configure the DP short pulse
3191	 * duration to 2ms (which is the minimum in the Display Port spec)
3192	 * The pulse duration bits are reserved on HSW+.
3193	 */
3194	hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
3195	hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK;
3196	hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE |
3197		   DIGITAL_PORTA_PULSE_DURATION_2ms;
3198	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);
3199}
3200
3201static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
3202{
3203	u32 hotplug_irqs, enabled_irqs;
3204
3205	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd);
3206	hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.hpd);
3207
3208	if (INTEL_GEN(dev_priv) >= 8)
3209		bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3210	else
3211		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
3212
3213	ilk_hpd_detection_setup(dev_priv);
3214
3215	ibx_hpd_irq_setup(dev_priv);
3216}
3217
3218static void __bxt_hpd_detection_setup(struct drm_i915_private *dev_priv,
3219				      u32 enabled_irqs)
3220{
3221	u32 hotplug;
3222
3223	hotplug = I915_READ(PCH_PORT_HOTPLUG);
3224	hotplug |= PORTA_HOTPLUG_ENABLE |
3225		   PORTB_HOTPLUG_ENABLE |
3226		   PORTC_HOTPLUG_ENABLE;
3227
3228	drm_dbg_kms(&dev_priv->drm,
3229		    "Invert bit setting: hp_ctl:%x hp_port:%x\n",
3230		    hotplug, enabled_irqs);
3231	hotplug &= ~BXT_DDI_HPD_INVERT_MASK;
3232
3233	/*
3234	 * For BXT invert bit has to be set based on AOB design
3235	 * for HPD detection logic, update it based on VBT fields.
3236	 */
3237	if ((enabled_irqs & BXT_DE_PORT_HP_DDIA) &&
3238	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_A))
3239		hotplug |= BXT_DDIA_HPD_INVERT;
3240	if ((enabled_irqs & BXT_DE_PORT_HP_DDIB) &&
3241	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_B))
3242		hotplug |= BXT_DDIB_HPD_INVERT;
3243	if ((enabled_irqs & BXT_DE_PORT_HP_DDIC) &&
3244	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_C))
3245		hotplug |= BXT_DDIC_HPD_INVERT;
3246
3247	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3248}
3249
3250static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv)
3251{
3252	__bxt_hpd_detection_setup(dev_priv, BXT_DE_PORT_HOTPLUG_MASK);
3253}
3254
3255static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
3256{
3257	u32 hotplug_irqs, enabled_irqs;
3258
3259	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd);
3260	hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.hpd);
3261
3262	bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3263
3264	__bxt_hpd_detection_setup(dev_priv, enabled_irqs);
3265}
3266
3267static void ibx_irq_postinstall(struct drm_i915_private *dev_priv)
3268{
3269	u32 mask;
3270
3271	if (HAS_PCH_NOP(dev_priv))
3272		return;
3273
3274	if (HAS_PCH_IBX(dev_priv))
3275		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
3276	else if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
3277		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
3278	else
3279		mask = SDE_GMBUS_CPT;
3280
3281	gen3_assert_iir_is_zero(&dev_priv->uncore, SDEIIR);
3282	I915_WRITE(SDEIMR, ~mask);
3283
3284	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
3285	    HAS_PCH_LPT(dev_priv))
3286		ibx_hpd_detection_setup(dev_priv);
3287	else
3288		spt_hpd_detection_setup(dev_priv);
3289}
3290
3291static void ilk_irq_postinstall(struct drm_i915_private *dev_priv)
3292{
3293	struct intel_uncore *uncore = &dev_priv->uncore;
3294	u32 display_mask, extra_mask;
3295
3296	if (INTEL_GEN(dev_priv) >= 7) {
3297		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
3298				DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB);
3299		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
3300			      DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
3301			      DE_DP_A_HOTPLUG_IVB);
3302	} else {
3303		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
3304				DE_AUX_CHANNEL_A | DE_PIPEB_CRC_DONE |
3305				DE_PIPEA_CRC_DONE | DE_POISON);
3306		extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
3307			      DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
3308			      DE_DP_A_HOTPLUG);
3309	}
3310
3311	if (IS_HASWELL(dev_priv)) {
3312		gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
3313		display_mask |= DE_EDP_PSR_INT_HSW;
3314	}
3315
3316	dev_priv->irq_mask = ~display_mask;
3317
3318	ibx_irq_pre_postinstall(dev_priv);
3319
3320	GEN3_IRQ_INIT(uncore, DE, dev_priv->irq_mask,
3321		      display_mask | extra_mask);
3322
3323	gen5_gt_irq_postinstall(&dev_priv->gt);
3324
3325	ilk_hpd_detection_setup(dev_priv);
3326
3327	ibx_irq_postinstall(dev_priv);
3328
3329	if (IS_IRONLAKE_M(dev_priv)) {
3330		/* Enable PCU event interrupts
3331		 *
3332		 * spinlocking not required here for correctness since interrupt
3333		 * setup is guaranteed to run in single-threaded context. But we
3334		 * need it to make the assert_spin_locked happy. */
3335		spin_lock_irq(&dev_priv->irq_lock);
3336		ilk_enable_display_irq(dev_priv, DE_PCU_EVENT);
3337		spin_unlock_irq(&dev_priv->irq_lock);
3338	}
3339}
3340
3341void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
3342{
3343	lockdep_assert_held(&dev_priv->irq_lock);
3344
3345	if (dev_priv->display_irqs_enabled)
3346		return;
3347
3348	dev_priv->display_irqs_enabled = true;
3349
3350	if (intel_irqs_enabled(dev_priv)) {
3351		vlv_display_irq_reset(dev_priv);
3352		vlv_display_irq_postinstall(dev_priv);
3353	}
3354}
3355
3356void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
3357{
3358	lockdep_assert_held(&dev_priv->irq_lock);
3359
3360	if (!dev_priv->display_irqs_enabled)
3361		return;
3362
3363	dev_priv->display_irqs_enabled = false;
3364
3365	if (intel_irqs_enabled(dev_priv))
3366		vlv_display_irq_reset(dev_priv);
3367}
3368
3369
3370static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv)
3371{
3372	gen5_gt_irq_postinstall(&dev_priv->gt);
3373
3374	spin_lock_irq(&dev_priv->irq_lock);
3375	if (dev_priv->display_irqs_enabled)
3376		vlv_display_irq_postinstall(dev_priv);
3377	spin_unlock_irq(&dev_priv->irq_lock);
3378
3379	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
3380	POSTING_READ(VLV_MASTER_IER);
3381}
3382
3383static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
3384{
3385	struct intel_uncore *uncore = &dev_priv->uncore;
3386
3387	u32 de_pipe_masked = gen8_de_pipe_fault_mask(dev_priv) |
3388		GEN8_PIPE_CDCLK_CRC_DONE;
3389	u32 de_pipe_enables;
3390	u32 de_port_masked = gen8_de_port_aux_mask(dev_priv);
3391	u32 de_port_enables;
3392	u32 de_misc_masked = GEN8_DE_EDP_PSR;
3393	u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
3394		BIT(TRANSCODER_C) | BIT(TRANSCODER_D);
3395	enum pipe pipe;
3396
3397	if (INTEL_GEN(dev_priv) <= 10)
3398		de_misc_masked |= GEN8_DE_MISC_GSE;
3399
3400	if (IS_GEN9_LP(dev_priv))
3401		de_port_masked |= BXT_DE_PORT_GMBUS;
3402
3403	de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
3404					   GEN8_PIPE_FIFO_UNDERRUN;
3405
3406	de_port_enables = de_port_masked;
3407	if (IS_GEN9_LP(dev_priv))
3408		de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
3409	else if (IS_BROADWELL(dev_priv))
3410		de_port_enables |= GEN8_PORT_DP_A_HOTPLUG;
3411
3412	if (INTEL_GEN(dev_priv) >= 12) {
3413		enum transcoder trans;
3414
3415		for_each_cpu_transcoder_masked(dev_priv, trans, trans_mask) {
3416			enum intel_display_power_domain domain;
3417
3418			domain = POWER_DOMAIN_TRANSCODER(trans);
3419			if (!intel_display_power_is_enabled(dev_priv, domain))
3420				continue;
3421
3422			gen3_assert_iir_is_zero(uncore, TRANS_PSR_IIR(trans));
3423		}
3424	} else {
3425		gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
3426	}
3427
3428	for_each_pipe(dev_priv, pipe) {
3429		dev_priv->de_irq_mask[pipe] = ~de_pipe_masked;
3430
3431		if (intel_display_power_is_enabled(dev_priv,
3432				POWER_DOMAIN_PIPE(pipe)))
3433			GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
3434					  dev_priv->de_irq_mask[pipe],
3435					  de_pipe_enables);
3436	}
3437
3438	GEN3_IRQ_INIT(uncore, GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
3439	GEN3_IRQ_INIT(uncore, GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);
3440
3441	if (INTEL_GEN(dev_priv) >= 11) {
3442		u32 de_hpd_masked = 0;
3443		u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK |
3444				     GEN11_DE_TBT_HOTPLUG_MASK;
3445
3446		GEN3_IRQ_INIT(uncore, GEN11_DE_HPD_, ~de_hpd_masked,
3447			      de_hpd_enables);
3448		gen11_hpd_detection_setup(dev_priv);
3449	} else if (IS_GEN9_LP(dev_priv)) {
3450		bxt_hpd_detection_setup(dev_priv);
3451	} else if (IS_BROADWELL(dev_priv)) {
3452		ilk_hpd_detection_setup(dev_priv);
3453	}
3454}
3455
3456static void gen8_irq_postinstall(struct drm_i915_private *dev_priv)
3457{
3458	if (HAS_PCH_SPLIT(dev_priv))
3459		ibx_irq_pre_postinstall(dev_priv);
3460
3461	gen8_gt_irq_postinstall(&dev_priv->gt);
3462	gen8_de_irq_postinstall(dev_priv);
3463
3464	if (HAS_PCH_SPLIT(dev_priv))
3465		ibx_irq_postinstall(dev_priv);
3466
3467	gen8_master_intr_enable(dev_priv->uncore.regs);
3468}
3469
3470static void icp_irq_postinstall(struct drm_i915_private *dev_priv)
3471{
3472	u32 mask = SDE_GMBUS_ICP;
3473
3474	drm_WARN_ON(&dev_priv->drm, I915_READ(SDEIER) != 0);
3475	I915_WRITE(SDEIER, 0xffffffff);
3476	POSTING_READ(SDEIER);
3477
3478	gen3_assert_iir_is_zero(&dev_priv->uncore, SDEIIR);
3479	I915_WRITE(SDEIMR, ~mask);
3480
3481	if (HAS_PCH_TGP(dev_priv)) {
3482		icp_ddi_hpd_detection_setup(dev_priv, TGP_DDI_HPD_ENABLE_MASK);
3483		icp_tc_hpd_detection_setup(dev_priv, TGP_TC_HPD_ENABLE_MASK);
3484	} else if (HAS_PCH_JSP(dev_priv)) {
3485		icp_ddi_hpd_detection_setup(dev_priv, TGP_DDI_HPD_ENABLE_MASK);
3486	} else if (HAS_PCH_MCC(dev_priv)) {
3487		icp_ddi_hpd_detection_setup(dev_priv, ICP_DDI_HPD_ENABLE_MASK);
3488		icp_tc_hpd_detection_setup(dev_priv, ICP_TC_HPD_ENABLE(PORT_TC1));
3489	} else {
3490		icp_ddi_hpd_detection_setup(dev_priv, ICP_DDI_HPD_ENABLE_MASK);
3491		icp_tc_hpd_detection_setup(dev_priv, ICP_TC_HPD_ENABLE_MASK);
3492	}
3493}
3494
3495static void gen11_irq_postinstall(struct drm_i915_private *dev_priv)
3496{
3497	struct intel_uncore *uncore = &dev_priv->uncore;
3498	u32 gu_misc_masked = GEN11_GU_MISC_GSE;
3499
3500	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
3501		icp_irq_postinstall(dev_priv);
3502
3503	gen11_gt_irq_postinstall(&dev_priv->gt);
3504	gen8_de_irq_postinstall(dev_priv);
3505
3506	GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);
3507
3508	I915_WRITE(GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
3509
3510	if (HAS_MASTER_UNIT_IRQ(dev_priv)) {
3511		dg1_master_intr_enable(uncore->regs);
3512		POSTING_READ(DG1_MSTR_UNIT_INTR);
3513	} else {
3514		gen11_master_intr_enable(uncore->regs);
3515		POSTING_READ(GEN11_GFX_MSTR_IRQ);
3516	}
3517}
3518
3519static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv)
3520{
3521	gen8_gt_irq_postinstall(&dev_priv->gt);
3522
3523	spin_lock_irq(&dev_priv->irq_lock);
3524	if (dev_priv->display_irqs_enabled)
3525		vlv_display_irq_postinstall(dev_priv);
3526	spin_unlock_irq(&dev_priv->irq_lock);
3527
3528	I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
3529	POSTING_READ(GEN8_MASTER_IRQ);
3530}
3531
3532static void i8xx_irq_reset(struct drm_i915_private *dev_priv)
3533{
3534	struct intel_uncore *uncore = &dev_priv->uncore;
3535
3536	i9xx_pipestat_irq_reset(dev_priv);
3537
3538	GEN2_IRQ_RESET(uncore);
3539}
3540
3541static void i8xx_irq_postinstall(struct drm_i915_private *dev_priv)
3542{
3543	struct intel_uncore *uncore = &dev_priv->uncore;
3544	u16 enable_mask;
3545
3546	intel_uncore_write16(uncore,
3547			     EMR,
3548			     ~(I915_ERROR_PAGE_TABLE |
3549			       I915_ERROR_MEMORY_REFRESH));
3550
3551	/* Unmask the interrupts that we always want on. */
3552	dev_priv->irq_mask =
3553		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3554		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3555		  I915_MASTER_ERROR_INTERRUPT);
3556
3557	enable_mask =
3558		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3559		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3560		I915_MASTER_ERROR_INTERRUPT |
3561		I915_USER_INTERRUPT;
3562
3563	GEN2_IRQ_INIT(uncore, dev_priv->irq_mask, enable_mask);
3564
3565	/* Interrupt setup is already guaranteed to be single-threaded, this is
3566	 * just to make the assert_spin_locked check happy. */
3567	spin_lock_irq(&dev_priv->irq_lock);
3568	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3569	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
3570	spin_unlock_irq(&dev_priv->irq_lock);
3571}
3572
3573static void i8xx_error_irq_ack(struct drm_i915_private *i915,
3574			       u16 *eir, u16 *eir_stuck)
3575{
3576	struct intel_uncore *uncore = &i915->uncore;
3577	u16 emr;
3578
3579	*eir = intel_uncore_read16(uncore, EIR);
3580
3581	if (*eir)
3582		intel_uncore_write16(uncore, EIR, *eir);
3583
3584	*eir_stuck = intel_uncore_read16(uncore, EIR);
3585	if (*eir_stuck == 0)
3586		return;
3587
3588	/*
3589	 * Toggle all EMR bits to make sure we get an edge
3590	 * in the ISR master error bit if we don't clear
3591	 * all the EIR bits. Otherwise the edge triggered
3592	 * IIR on i965/g4x wouldn't notice that an interrupt
3593	 * is still pending. Also some EIR bits can't be
3594	 * cleared except by handling the underlying error
3595	 * (or by a GPU reset) so we mask any bit that
3596	 * remains set.
3597	 */
3598	emr = intel_uncore_read16(uncore, EMR);
3599	intel_uncore_write16(uncore, EMR, 0xffff);
3600	intel_uncore_write16(uncore, EMR, emr | *eir_stuck);
3601}
3602
3603static void i8xx_error_irq_handler(struct drm_i915_private *dev_priv,
3604				   u16 eir, u16 eir_stuck)
3605{
3606	DRM_DEBUG("Master Error: EIR 0x%04x\n", eir);
3607
3608	if (eir_stuck)
3609		drm_dbg(&dev_priv->drm, "EIR stuck: 0x%04x, masked\n",
3610			eir_stuck);
3611}
3612
3613static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv,
3614			       u32 *eir, u32 *eir_stuck)
3615{
3616	u32 emr;
3617
3618	*eir = I915_READ(EIR);
3619
3620	I915_WRITE(EIR, *eir);
3621
3622	*eir_stuck = I915_READ(EIR);
3623	if (*eir_stuck == 0)
3624		return;
3625
3626	/*
3627	 * Toggle all EMR bits to make sure we get an edge
3628	 * in the ISR master error bit if we don't clear
3629	 * all the EIR bits. Otherwise the edge triggered
3630	 * IIR on i965/g4x wouldn't notice that an interrupt
3631	 * is still pending. Also some EIR bits can't be
3632	 * cleared except by handling the underlying error
3633	 * (or by a GPU reset) so we mask any bit that
3634	 * remains set.
3635	 */
3636	emr = I915_READ(EMR);
3637	I915_WRITE(EMR, 0xffffffff);
3638	I915_WRITE(EMR, emr | *eir_stuck);
3639}
3640
3641static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv,
3642				   u32 eir, u32 eir_stuck)
3643{
3644	DRM_DEBUG("Master Error, EIR 0x%08x\n", eir);
3645
3646	if (eir_stuck)
3647		drm_dbg(&dev_priv->drm, "EIR stuck: 0x%08x, masked\n",
3648			eir_stuck);
3649}
3650
3651static irqreturn_t i8xx_irq_handler(int irq, void *arg)
3652{
3653	struct drm_i915_private *dev_priv = arg;
3654	irqreturn_t ret = IRQ_NONE;
3655
3656	if (!intel_irqs_enabled(dev_priv))
3657		return IRQ_NONE;
3658
3659	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
3660	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
3661
3662	do {
3663		u32 pipe_stats[I915_MAX_PIPES] = {};
3664		u16 eir = 0, eir_stuck = 0;
3665		u16 iir;
3666
3667		iir = intel_uncore_read16(&dev_priv->uncore, GEN2_IIR);
3668		if (iir == 0)
3669			break;
3670
3671		ret = IRQ_HANDLED;
3672
3673		/* Call regardless, as some status bits might not be
3674		 * signalled in iir */
3675		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
3676
3677		if (iir & I915_MASTER_ERROR_INTERRUPT)
3678			i8xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
3679
3680		intel_uncore_write16(&dev_priv->uncore, GEN2_IIR, iir);
3681
3682		if (iir & I915_USER_INTERRUPT)
3683			intel_engine_signal_breadcrumbs(dev_priv->gt.engine[RCS0]);
3684
3685		if (iir & I915_MASTER_ERROR_INTERRUPT)
3686			i8xx_error_irq_handler(dev_priv, eir, eir_stuck);
3687
3688		i8xx_pipestat_irq_handler(dev_priv, iir, pipe_stats);
3689	} while (0);
3690
3691	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
3692
3693	return ret;
3694}
3695
3696static void i915_irq_reset(struct drm_i915_private *dev_priv)
3697{
3698	struct intel_uncore *uncore = &dev_priv->uncore;
3699
3700	if (I915_HAS_HOTPLUG(dev_priv)) {
3701		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
3702		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3703	}
3704
3705	i9xx_pipestat_irq_reset(dev_priv);
3706
3707	GEN3_IRQ_RESET(uncore, GEN2_);
3708}
3709
3710static void i915_irq_postinstall(struct drm_i915_private *dev_priv)
3711{
3712	struct intel_uncore *uncore = &dev_priv->uncore;
3713	u32 enable_mask;
3714
3715	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE |
3716			  I915_ERROR_MEMORY_REFRESH));
3717
3718	/* Unmask the interrupts that we always want on. */
3719	dev_priv->irq_mask =
3720		~(I915_ASLE_INTERRUPT |
3721		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3722		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3723		  I915_MASTER_ERROR_INTERRUPT);
3724
3725	enable_mask =
3726		I915_ASLE_INTERRUPT |
3727		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3728		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3729		I915_MASTER_ERROR_INTERRUPT |
3730		I915_USER_INTERRUPT;
3731
3732	if (I915_HAS_HOTPLUG(dev_priv)) {
3733		/* Enable in IER... */
3734		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
3735		/* and unmask in IMR */
3736		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
3737	}
3738
3739	GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);
3740
3741	/* Interrupt setup is already guaranteed to be single-threaded, this is
3742	 * just to make the assert_spin_locked check happy. */
3743	spin_lock_irq(&dev_priv->irq_lock);
3744	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3745	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
3746	spin_unlock_irq(&dev_priv->irq_lock);
3747
3748	i915_enable_asle_pipestat(dev_priv);
3749}
3750
3751static irqreturn_t i915_irq_handler(int irq, void *arg)
3752{
3753	struct drm_i915_private *dev_priv = arg;
3754	irqreturn_t ret = IRQ_NONE;
3755
3756	if (!intel_irqs_enabled(dev_priv))
3757		return IRQ_NONE;
3758
3759	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
3760	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
3761
3762	do {
3763		u32 pipe_stats[I915_MAX_PIPES] = {};
3764		u32 eir = 0, eir_stuck = 0;
3765		u32 hotplug_status = 0;
3766		u32 iir;
3767
3768		iir = I915_READ(GEN2_IIR);
3769		if (iir == 0)
3770			break;
3771
3772		ret = IRQ_HANDLED;
3773
3774		if (I915_HAS_HOTPLUG(dev_priv) &&
3775		    iir & I915_DISPLAY_PORT_INTERRUPT)
3776			hotplug_status = i9xx_hpd_irq_ack(dev_priv);
3777
3778		/* Call regardless, as some status bits might not be
3779		 * signalled in iir */
3780		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
3781
3782		if (iir & I915_MASTER_ERROR_INTERRUPT)
3783			i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
3784
3785		I915_WRITE(GEN2_IIR, iir);
3786
3787		if (iir & I915_USER_INTERRUPT)
3788			intel_engine_signal_breadcrumbs(dev_priv->gt.engine[RCS0]);
3789
3790		if (iir & I915_MASTER_ERROR_INTERRUPT)
3791			i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
3792
3793		if (hotplug_status)
3794			i9xx_hpd_irq_handler(dev_priv, hotplug_status);
3795
3796		i915_pipestat_irq_handler(dev_priv, iir, pipe_stats);
3797	} while (0);
3798
3799	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
3800
3801	return ret;
3802}
3803
3804static void i965_irq_reset(struct drm_i915_private *dev_priv)
3805{
3806	struct intel_uncore *uncore = &dev_priv->uncore;
3807
3808	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
3809	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3810
3811	i9xx_pipestat_irq_reset(dev_priv);
3812
3813	GEN3_IRQ_RESET(uncore, GEN2_);
3814}
3815
3816static void i965_irq_postinstall(struct drm_i915_private *dev_priv)
3817{
3818	struct intel_uncore *uncore = &dev_priv->uncore;
3819	u32 enable_mask;
3820	u32 error_mask;
3821
3822	/*
3823	 * Enable some error detection, note the instruction error mask
3824	 * bit is reserved, so we leave it masked.
3825	 */
3826	if (IS_G4X(dev_priv)) {
3827		error_mask = ~(GM45_ERROR_PAGE_TABLE |
3828			       GM45_ERROR_MEM_PRIV |
3829			       GM45_ERROR_CP_PRIV |
3830			       I915_ERROR_MEMORY_REFRESH);
3831	} else {
3832		error_mask = ~(I915_ERROR_PAGE_TABLE |
3833			       I915_ERROR_MEMORY_REFRESH);
3834	}
3835	I915_WRITE(EMR, error_mask);
3836
3837	/* Unmask the interrupts that we always want on. */
3838	dev_priv->irq_mask =
3839		~(I915_ASLE_INTERRUPT |
3840		  I915_DISPLAY_PORT_INTERRUPT |
3841		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3842		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3843		  I915_MASTER_ERROR_INTERRUPT);
3844
3845	enable_mask =
3846		I915_ASLE_INTERRUPT |
3847		I915_DISPLAY_PORT_INTERRUPT |
3848		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3849		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3850		I915_MASTER_ERROR_INTERRUPT |
3851		I915_USER_INTERRUPT;
3852
3853	if (IS_G4X(dev_priv))
3854		enable_mask |= I915_BSD_USER_INTERRUPT;
3855
3856	GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);
3857
3858	/* Interrupt setup is already guaranteed to be single-threaded, this is
3859	 * just to make the assert_spin_locked check happy. */
3860	spin_lock_irq(&dev_priv->irq_lock);
3861	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3862	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3863	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
3864	spin_unlock_irq(&dev_priv->irq_lock);
3865
3866	i915_enable_asle_pipestat(dev_priv);
3867}
3868
3869static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
3870{
3871	u32 hotplug_en;
3872
3873	lockdep_assert_held(&dev_priv->irq_lock);
3874
3875	/* Note HDMI and DP share hotplug bits */
3876	/* enable bits are the same for all generations */
3877	hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915);
3878	/* Programming the CRT detection parameters tends
3879	   to generate a spurious hotplug event about three
3880	   seconds later.  So just do it once.
3881	*/
3882	if (IS_G4X(dev_priv))
3883		hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
3884	hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
3885
3886	/* Ignore TV since it's buggy */
3887	i915_hotplug_interrupt_update_locked(dev_priv,
3888					     HOTPLUG_INT_EN_MASK |
3889					     CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
3890					     CRT_HOTPLUG_ACTIVATION_PERIOD_64,
3891					     hotplug_en);
3892}
3893
3894static irqreturn_t i965_irq_handler(int irq, void *arg)
3895{
3896	struct drm_i915_private *dev_priv = arg;
3897	irqreturn_t ret = IRQ_NONE;
3898
3899	if (!intel_irqs_enabled(dev_priv))
3900		return IRQ_NONE;
3901
3902	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
3903	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
3904
3905	do {
3906		u32 pipe_stats[I915_MAX_PIPES] = {};
3907		u32 eir = 0, eir_stuck = 0;
3908		u32 hotplug_status = 0;
3909		u32 iir;
3910
3911		iir = I915_READ(GEN2_IIR);
3912		if (iir == 0)
3913			break;
3914
3915		ret = IRQ_HANDLED;
3916
3917		if (iir & I915_DISPLAY_PORT_INTERRUPT)
3918			hotplug_status = i9xx_hpd_irq_ack(dev_priv);
3919
3920		/* Call regardless, as some status bits might not be
3921		 * signalled in iir */
3922		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
3923
3924		if (iir & I915_MASTER_ERROR_INTERRUPT)
3925			i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
3926
3927		I915_WRITE(GEN2_IIR, iir);
3928
3929		if (iir & I915_USER_INTERRUPT)
3930			intel_engine_signal_breadcrumbs(dev_priv->gt.engine[RCS0]);
3931
3932		if (iir & I915_BSD_USER_INTERRUPT)
3933			intel_engine_signal_breadcrumbs(dev_priv->gt.engine[VCS0]);
3934
3935		if (iir & I915_MASTER_ERROR_INTERRUPT)
3936			i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
3937
3938		if (hotplug_status)
3939			i9xx_hpd_irq_handler(dev_priv, hotplug_status);
3940
3941		i965_pipestat_irq_handler(dev_priv, iir, pipe_stats);
3942	} while (0);
3943
3944	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
3945
3946	return ret;
3947}
3948
3949/**
3950 * intel_irq_init - initializes irq support
3951 * @dev_priv: i915 device instance
3952 *
3953 * This function initializes all the irq support including work items, timers
3954 * and all the vtables. It does not setup the interrupt itself though.
3955 */
3956void intel_irq_init(struct drm_i915_private *dev_priv)
3957{
3958	struct drm_device *dev = &dev_priv->drm;
3959	int i;
3960
3961	intel_hpd_init_pins(dev_priv);
3962
3963	intel_hpd_init_work(dev_priv);
3964
3965	INIT_WORK(&dev_priv->l3_parity.error_work, ivb_parity_work);
3966	for (i = 0; i < MAX_L3_SLICES; ++i)
3967		dev_priv->l3_parity.remap_info[i] = NULL;
3968
3969	/* pre-gen11 the guc irqs bits are in the upper 16 bits of the pm reg */
3970	if (HAS_GT_UC(dev_priv) && INTEL_GEN(dev_priv) < 11)
3971		dev_priv->gt.pm_guc_events = GUC_INTR_GUC2HOST << 16;
3972
3973	dev->vblank_disable_immediate = true;
3974
3975	/* Most platforms treat the display irq block as an always-on
3976	 * power domain. vlv/chv can disable it at runtime and need
3977	 * special care to avoid writing any of the display block registers
3978	 * outside of the power domain. We defer setting up the display irqs
3979	 * in this case to the runtime pm.
3980	 */
3981	dev_priv->display_irqs_enabled = true;
3982	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
3983		dev_priv->display_irqs_enabled = false;
3984
3985	dev_priv->hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD;
3986	/* If we have MST support, we want to avoid doing short HPD IRQ storm
3987	 * detection, as short HPD storms will occur as a natural part of
3988	 * sideband messaging with MST.
3989	 * On older platforms however, IRQ storms can occur with both long and
3990	 * short pulses, as seen on some G4x systems.
3991	 */
3992	dev_priv->hotplug.hpd_short_storm_enabled = !HAS_DP_MST(dev_priv);
3993
3994	if (HAS_GMCH(dev_priv)) {
3995		if (I915_HAS_HOTPLUG(dev_priv))
3996			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
3997	} else {
3998		if (HAS_PCH_JSP(dev_priv))
3999			dev_priv->display.hpd_irq_setup = jsp_hpd_irq_setup;
4000		else if (HAS_PCH_MCC(dev_priv))
4001			dev_priv->display.hpd_irq_setup = mcc_hpd_irq_setup;
4002		else if (INTEL_GEN(dev_priv) >= 11)
4003			dev_priv->display.hpd_irq_setup = gen11_hpd_irq_setup;
4004		else if (IS_GEN9_LP(dev_priv))
4005			dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
4006		else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
4007			dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
4008		else
4009			dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
4010	}
4011}
4012
4013/**
4014 * intel_irq_fini - deinitializes IRQ support
4015 * @i915: i915 device instance
4016 *
4017 * This function deinitializes all the IRQ support.
4018 */
4019void intel_irq_fini(struct drm_i915_private *i915)
4020{
4021	int i;
4022
4023	for (i = 0; i < MAX_L3_SLICES; ++i)
4024		kfree(i915->l3_parity.remap_info[i]);
4025}
4026
4027static irq_handler_t intel_irq_handler(struct drm_i915_private *dev_priv)
4028{
4029	if (HAS_GMCH(dev_priv)) {
4030		if (IS_CHERRYVIEW(dev_priv))
4031			return cherryview_irq_handler;
4032		else if (IS_VALLEYVIEW(dev_priv))
4033			return valleyview_irq_handler;
4034		else if (IS_GEN(dev_priv, 4))
4035			return i965_irq_handler;
4036		else if (IS_GEN(dev_priv, 3))
4037			return i915_irq_handler;
4038		else
4039			return i8xx_irq_handler;
4040	} else {
4041		if (HAS_MASTER_UNIT_IRQ(dev_priv))
4042			return dg1_irq_handler;
4043		if (INTEL_GEN(dev_priv) >= 11)
4044			return gen11_irq_handler;
4045		else if (INTEL_GEN(dev_priv) >= 8)
4046			return gen8_irq_handler;
4047		else
4048			return ilk_irq_handler;
4049	}
4050}
4051
4052static void intel_irq_reset(struct drm_i915_private *dev_priv)
4053{
4054	if (HAS_GMCH(dev_priv)) {
4055		if (IS_CHERRYVIEW(dev_priv))
4056			cherryview_irq_reset(dev_priv);
4057		else if (IS_VALLEYVIEW(dev_priv))
4058			valleyview_irq_reset(dev_priv);
4059		else if (IS_GEN(dev_priv, 4))
4060			i965_irq_reset(dev_priv);
4061		else if (IS_GEN(dev_priv, 3))
4062			i915_irq_reset(dev_priv);
4063		else
4064			i8xx_irq_reset(dev_priv);
4065	} else {
4066		if (INTEL_GEN(dev_priv) >= 11)
4067			gen11_irq_reset(dev_priv);
4068		else if (INTEL_GEN(dev_priv) >= 8)
4069			gen8_irq_reset(dev_priv);
4070		else
4071			ilk_irq_reset(dev_priv);
4072	}
4073}
4074
4075static void intel_irq_postinstall(struct drm_i915_private *dev_priv)
4076{
4077	if (HAS_GMCH(dev_priv)) {
4078		if (IS_CHERRYVIEW(dev_priv))
4079			cherryview_irq_postinstall(dev_priv);
4080		else if (IS_VALLEYVIEW(dev_priv))
4081			valleyview_irq_postinstall(dev_priv);
4082		else if (IS_GEN(dev_priv, 4))
4083			i965_irq_postinstall(dev_priv);
4084		else if (IS_GEN(dev_priv, 3))
4085			i915_irq_postinstall(dev_priv);
4086		else
4087			i8xx_irq_postinstall(dev_priv);
4088	} else {
4089		if (INTEL_GEN(dev_priv) >= 11)
4090			gen11_irq_postinstall(dev_priv);
4091		else if (INTEL_GEN(dev_priv) >= 8)
4092			gen8_irq_postinstall(dev_priv);
4093		else
4094			ilk_irq_postinstall(dev_priv);
4095	}
4096}
4097
4098/**
4099 * intel_irq_install - enables the hardware interrupt
4100 * @dev_priv: i915 device instance
4101 *
4102 * This function enables the hardware interrupt handling, but leaves the hotplug
4103 * handling still disabled. It is called after intel_irq_init().
4104 *
4105 * In the driver load and resume code we need working interrupts in a few places
4106 * but don't want to deal with the hassle of concurrent probe and hotplug
4107 * workers. Hence the split into this two-stage approach.
4108 */
4109int intel_irq_install(struct drm_i915_private *dev_priv)
4110{
4111	int irq = dev_priv->drm.pdev->irq;
4112	int ret;
4113
4114	/*
4115	 * We enable some interrupt sources in our postinstall hooks, so mark
4116	 * interrupts as enabled _before_ actually enabling them to avoid
4117	 * special cases in our ordering checks.
4118	 */
4119	dev_priv->runtime_pm.irqs_enabled = true;
4120
4121	dev_priv->drm.irq_enabled = true;
4122
4123	intel_irq_reset(dev_priv);
4124
4125	ret = request_irq(irq, intel_irq_handler(dev_priv),
4126			  IRQF_SHARED, DRIVER_NAME, dev_priv);
4127	if (ret < 0) {
4128		dev_priv->drm.irq_enabled = false;
4129		return ret;
4130	}
4131
4132	intel_irq_postinstall(dev_priv);
4133
4134	return ret;
4135}
4136
4137/**
4138 * intel_irq_uninstall - finilizes all irq handling
4139 * @dev_priv: i915 device instance
4140 *
4141 * This stops interrupt and hotplug handling and unregisters and frees all
4142 * resources acquired in the init functions.
4143 */
4144void intel_irq_uninstall(struct drm_i915_private *dev_priv)
4145{
4146	int irq = dev_priv->drm.pdev->irq;
4147
4148	/*
4149	 * FIXME we can get called twice during driver probe
4150	 * error handling as well as during driver remove due to
4151	 * intel_modeset_driver_remove() calling us out of sequence.
4152	 * Would be nice if it didn't do that...
4153	 */
4154	if (!dev_priv->drm.irq_enabled)
4155		return;
4156
4157	dev_priv->drm.irq_enabled = false;
4158
4159	intel_irq_reset(dev_priv);
4160
4161	free_irq(irq, dev_priv);
4162
4163	intel_hpd_cancel_work(dev_priv);
4164	dev_priv->runtime_pm.irqs_enabled = false;
4165}
4166
4167/**
4168 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
4169 * @dev_priv: i915 device instance
4170 *
4171 * This function is used to disable interrupts at runtime, both in the runtime
4172 * pm and the system suspend/resume code.
4173 */
4174void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
4175{
4176	intel_irq_reset(dev_priv);
4177	dev_priv->runtime_pm.irqs_enabled = false;
4178	intel_synchronize_irq(dev_priv);
4179}
4180
4181/**
4182 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
4183 * @dev_priv: i915 device instance
4184 *
4185 * This function is used to enable interrupts at runtime, both in the runtime
4186 * pm and the system suspend/resume code.
4187 */
4188void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
4189{
4190	dev_priv->runtime_pm.irqs_enabled = true;
4191	intel_irq_reset(dev_priv);
4192	intel_irq_postinstall(dev_priv);
4193}
4194
4195bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
4196{
4197	/*
4198	 * We only use drm_irq_uninstall() at unload and VT switch, so
4199	 * this is the only thing we need to check.
4200	 */
4201	return dev_priv->runtime_pm.irqs_enabled;
4202}
4203
4204void intel_synchronize_irq(struct drm_i915_private *i915)
4205{
4206	synchronize_irq(i915->drm.pdev->irq);
4207}
4208