/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/kernel.h>

#include "i915_drv.h"
#include "i915_irq.h"
#include "intel_display_types.h"
#include "intel_hotplug.h"
#include "intel_hotplug_irq.h"

/**
 * DOC: Hotplug
 *
 * Simply put, hotplug occurs when a display is connected to or disconnected
 * from the system. However, there may be adapters and docking stations and
 * Display Port short pulses and MST devices involved, complicating matters.
 *
 * Hotplug in i915 is handled in many different levels of abstraction.
 *
 * The platform dependent interrupt handling code in i915_irq.c enables,
 * disables, and does preliminary handling of the interrupts. The interrupt
 * handlers gather the hotplug detect (HPD) information from relevant registers
 * into a platform independent mask of hotplug pins that have fired.
 *
 * The platform independent interrupt handler intel_hpd_irq_handler() in
 * intel_hotplug.c does hotplug irq storm detection and mitigation, and passes
 * further processing to appropriate bottom halves (Display Port specific and
 * regular hotplug).
 *
 * The Display Port work function i915_digport_work_func() calls into
 * intel_dp_hpd_pulse() via hooks, which handles DP short pulses and DP MST long
 * pulses, with failures and non-MST long pulses triggering regular hotplug
 * processing on the connector.
 *
 * The regular hotplug work function i915_hotplug_work_func() calls connector
 * detect hooks, and, if connector status changes, triggers sending of hotplug
 * uevent to userspace via drm_kms_helper_hotplug_event().
 *
 * Finally, the userspace is responsible for triggering a modeset upon receiving
 * the hotplug uevent, disabling or enabling the crtc as needed.
 *
 * The hotplug interrupt storm detection and mitigation code keeps track of the
 * number of interrupts per hotplug pin over a period of time, and if the number
 * of interrupts exceeds a certain threshold, the interrupt is disabled for a
 * while before being re-enabled. The intention is to mitigate issues arising
 * from broken hardware triggering massive amounts of interrupts and grinding
 * the system to a halt.
 *
 * The current implementation expects that a hotplug interrupt storm will not
 * be seen when a Display Port sink is connected. Hence, on platforms where the
 * DP callback is handled by i915_digport_work_func(), re-enabling of HPD is
 * not performed (it was never expected to be disabled in the first place).
 * This is specific to DP sinks handled by that routine; any other display such
 * as HDMI or DVI enabled on the same port will have proper logic, since it
 * goes through i915_hotplug_work_func(), where this logic is handled.
 */
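
/*
 * To make the layering above concrete, a platform interrupt handler would
 * decode its hotplug registers into the platform independent pin and long
 * pulse masks and hand them to intel_hpd_irq_handler(). A minimal sketch,
 * where FOO_HPD_STATUS, foo_hpd_bit() and foo_hpd_long_bit() are made-up
 * names standing in for platform specific registers and decode helpers:
 *
 *	static void foo_hpd_irq_handler(struct drm_i915_private *i915)
 *	{
 *		u32 status = intel_uncore_read(&i915->uncore, FOO_HPD_STATUS);
 *		u32 pin_mask = 0, long_mask = 0;
 *		enum hpd_pin pin;
 *
 *		for_each_hpd_pin(pin) {
 *			if (!(status & foo_hpd_bit(pin)))
 *				continue;
 *
 *			pin_mask |= BIT(pin);
 *			if (status & foo_hpd_long_bit(pin))
 *				long_mask |= BIT(pin);
 *		}
 *
 *		intel_hpd_irq_handler(i915, pin_mask, long_mask);
 *	}
 */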

/**
 * intel_hpd_pin_default - return the default pin associated with a certain port.
 * @dev_priv: private driver data pointer
 * @port: the hpd port to get associated pin
 *
 * This mapping is only valid for and used by digital port encoders.
 *
 * Return: the hpd pin associated with @port.
 */
enum hpd_pin intel_hpd_pin_default(struct drm_i915_private *dev_priv,
				   enum port port)
{
	return HPD_PORT_A + port - PORT_A;
}
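
/*
 * For example, with the linear mapping above PORT_B resolves to HPD_PORT_B:
 *
 *	enum hpd_pin pin = intel_hpd_pin_default(dev_priv, PORT_B);
 *
 * pin is now HPD_PORT_B. Encoders whose HPD pin is not a simple linear
 * function of the port are assigned their hpd_pin by platform specific
 * encoder setup code rather than this default helper.
 */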

/* Threshold == 5 for long IRQs, 50 for short */
#define HPD_STORM_DEFAULT_THRESHOLD	50

#define HPD_STORM_DETECT_PERIOD		1000
#define HPD_STORM_REENABLE_DELAY	(2 * 60 * 1000)
#define HPD_RETRY_DELAY			1000

static enum hpd_pin
intel_connector_hpd_pin(struct intel_connector *connector)
{
	struct intel_encoder *encoder = intel_attached_encoder(connector);

	/*
	 * MST connectors get their encoder attached dynamically
	 * so we need to make sure we have an encoder here. But since
	 * MST encoders have their hpd_pin set to HPD_NONE we don't
	 * have to special case them beyond that.
	 */
	return encoder ? encoder->hpd_pin : HPD_NONE;
}

/**
 * intel_hpd_irq_storm_detect - gather stats and detect HPD IRQ storm on a pin
 * @dev_priv: private driver data pointer
 * @pin: the pin to gather stats on
 * @long_hpd: whether the HPD IRQ was long or short
 *
 * Gather stats about HPD IRQs from the specified @pin, and detect IRQ
 * storms. Only the pin specific stats and state are changed, the caller is
 * responsible for further action.
 *
 * The number of IRQs that are allowed within @HPD_STORM_DETECT_PERIOD is
 * stored in @dev_priv->display.hotplug.hpd_storm_threshold which defaults to
 * @HPD_STORM_DEFAULT_THRESHOLD. Long IRQs count as +10 to this threshold, and
 * short IRQs count as +1. If this threshold is exceeded, it's considered an
 * IRQ storm and the IRQ state is set to @HPD_MARK_DISABLED.
 *
 * By default, most systems will only count long IRQs towards
 * &dev_priv->display.hotplug.hpd_storm_threshold. However, some older systems also
 * suffer from short IRQ storms and must also track these. Because short IRQ
 * storms are naturally caused by sideband interactions with DP MST devices,
 * short IRQ detection is only enabled for systems without DP MST support.
 * Systems which are new enough to support DP MST are far less likely to
 * suffer from IRQ storms at all, so this is fine.
 *
 * The HPD threshold can be controlled through i915_hpd_storm_ctl in debugfs,
 * and should only be adjusted for automated hotplug testing.
 *
 * Return: true if an IRQ storm was detected on @pin.
 */
static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv,
				       enum hpd_pin pin, bool long_hpd)
{
	struct intel_hotplug *hpd = &dev_priv->display.hotplug;
	unsigned long start = hpd->stats[pin].last_jiffies;
	unsigned long end = start + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD);
	const int increment = long_hpd ? 10 : 1;
	const int threshold = hpd->hpd_storm_threshold;
	bool storm = false;

	if (!threshold ||
	    (!long_hpd && !dev_priv->display.hotplug.hpd_short_storm_enabled))
		return false;

	if (!time_in_range(jiffies, start, end)) {
		hpd->stats[pin].last_jiffies = jiffies;
		hpd->stats[pin].count = 0;
	}

	hpd->stats[pin].count += increment;
	if (hpd->stats[pin].count > threshold) {
		hpd->stats[pin].state = HPD_MARK_DISABLED;
		drm_dbg_kms(&dev_priv->drm,
			    "HPD interrupt storm detected on PIN %d\n", pin);
		storm = true;
	} else {
		drm_dbg_kms(&dev_priv->drm,
			    "Received HPD interrupt on PIN %d - cnt: %d\n",
			    pin,
			    hpd->stats[pin].count);
	}

	return storm;
}
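
/*
 * Worked example of the accounting above, assuming the default threshold of
 * 50: within one HPD_STORM_DETECT_PERIOD (1000 ms) window each long IRQ adds
 * 10 to the pin's count and each short IRQ adds 1, so
 *
 *	6 long IRQs   -> count 60 > 50 -> storm detected
 *	51 short IRQs -> count 51 > 50 -> storm detected (only if
 *			 hpd_short_storm_enabled, i.e. no DP MST support)
 *
 * Once jiffies falls outside the current window, the count is reset and a
 * new window starts.
 */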

static void
intel_hpd_irq_storm_switch_to_polling(struct drm_i915_private *dev_priv)
{
	struct drm_connector_list_iter conn_iter;
	struct intel_connector *connector;
	bool hpd_disabled = false;

	lockdep_assert_held(&dev_priv->irq_lock);

	drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		enum hpd_pin pin;

		if (connector->base.polled != DRM_CONNECTOR_POLL_HPD)
			continue;

		pin = intel_connector_hpd_pin(connector);
		if (pin == HPD_NONE ||
		    dev_priv->display.hotplug.stats[pin].state != HPD_MARK_DISABLED)
			continue;

		drm_info(&dev_priv->drm,
			 "HPD interrupt storm detected on connector %s: "
			 "switching from hotplug detection to polling\n",
			 connector->base.name);

		dev_priv->display.hotplug.stats[pin].state = HPD_DISABLED;
		connector->base.polled = DRM_CONNECTOR_POLL_CONNECT |
			DRM_CONNECTOR_POLL_DISCONNECT;
		hpd_disabled = true;
	}
	drm_connector_list_iter_end(&conn_iter);

	/* Enable polling and queue hotplug re-enabling. */
	if (hpd_disabled) {
		drm_kms_helper_poll_reschedule(&dev_priv->drm);
		mod_delayed_work(dev_priv->unordered_wq,
				 &dev_priv->display.hotplug.reenable_work,
				 msecs_to_jiffies(HPD_STORM_REENABLE_DELAY));
	}
}

static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv),
			     display.hotplug.reenable_work.work);
	struct drm_connector_list_iter conn_iter;
	struct intel_connector *connector;
	intel_wakeref_t wakeref;
	enum hpd_pin pin;

	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	spin_lock_irq(&dev_priv->irq_lock);

	drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		pin = intel_connector_hpd_pin(connector);
		if (pin == HPD_NONE ||
		    dev_priv->display.hotplug.stats[pin].state != HPD_DISABLED)
			continue;

		if (connector->base.polled != connector->polled)
			drm_dbg(&dev_priv->drm,
				"Reenabling HPD on connector %s\n",
				connector->base.name);
		connector->base.polled = connector->polled;
	}
	drm_connector_list_iter_end(&conn_iter);

	for_each_hpd_pin(pin) {
		if (dev_priv->display.hotplug.stats[pin].state == HPD_DISABLED)
			dev_priv->display.hotplug.stats[pin].state = HPD_ENABLED;
	}

	intel_hpd_irq_setup(dev_priv);

	spin_unlock_irq(&dev_priv->irq_lock);

	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
}

enum intel_hotplug_state
intel_encoder_hotplug(struct intel_encoder *encoder,
		      struct intel_connector *connector)
{
	struct drm_device *dev = connector->base.dev;
	enum drm_connector_status old_status;
	u64 old_epoch_counter;
	bool ret = false;

	drm_WARN_ON(dev, !mutex_is_locked(&dev->mode_config.mutex));
	old_status = connector->base.status;
	old_epoch_counter = connector->base.epoch_counter;

	connector->base.status =
		drm_helper_probe_detect(&connector->base, NULL, false);

	if (old_epoch_counter != connector->base.epoch_counter)
		ret = true;

	if (ret) {
		drm_dbg_kms(dev, "[CONNECTOR:%d:%s] status updated from %s to %s (epoch counter %llu->%llu)\n",
			    connector->base.base.id,
			    connector->base.name,
			    drm_get_connector_status_name(old_status),
			    drm_get_connector_status_name(connector->base.status),
			    old_epoch_counter,
			    connector->base.epoch_counter);
		return INTEL_HOTPLUG_CHANGED;
	}
	return INTEL_HOTPLUG_UNCHANGED;
}
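
/*
 * Encoder types that need extra handling wrap intel_encoder_hotplug() in
 * their own ->hotplug() hook. A DP-style hook might look roughly like the
 * sketch below; the retry condition is illustrative only, not the actual
 * DP detection logic:
 *
 *	static enum intel_hotplug_state
 *	foo_dp_hotplug(struct intel_encoder *encoder,
 *		       struct intel_connector *connector)
 *	{
 *		enum intel_hotplug_state state;
 *
 *		state = intel_encoder_hotplug(encoder, connector);
 *
 *		// Have i915_hotplug_work_func() call us again later if the
 *		// sink wasn't ready yet; retries are spaced out by
 *		// HPD_RETRY_DELAY.
 *		if (state == INTEL_HOTPLUG_UNCHANGED &&
 *		    !connector->hotplug_retries)
 *			state = INTEL_HOTPLUG_RETRY;
 *
 *		return state;
 *	}
 */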

static bool intel_encoder_has_hpd_pulse(struct intel_encoder *encoder)
{
	return intel_encoder_is_dig_port(encoder) &&
		enc_to_dig_port(encoder)->hpd_pulse != NULL;
}

static void i915_digport_work_func(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, display.hotplug.dig_port_work);
	u32 long_port_mask, short_port_mask;
	struct intel_encoder *encoder;
	u32 old_bits = 0;

	spin_lock_irq(&dev_priv->irq_lock);
	long_port_mask = dev_priv->display.hotplug.long_port_mask;
	dev_priv->display.hotplug.long_port_mask = 0;
	short_port_mask = dev_priv->display.hotplug.short_port_mask;
	dev_priv->display.hotplug.short_port_mask = 0;
	spin_unlock_irq(&dev_priv->irq_lock);

	for_each_intel_encoder(&dev_priv->drm, encoder) {
		struct intel_digital_port *dig_port;
		enum port port = encoder->port;
		bool long_hpd, short_hpd;
		enum irqreturn ret;

		if (!intel_encoder_has_hpd_pulse(encoder))
			continue;

		long_hpd = long_port_mask & BIT(port);
		short_hpd = short_port_mask & BIT(port);

		if (!long_hpd && !short_hpd)
			continue;

		dig_port = enc_to_dig_port(encoder);

		ret = dig_port->hpd_pulse(dig_port, long_hpd);
		if (ret == IRQ_NONE) {
			/* fall back to old school hpd */
			old_bits |= BIT(encoder->hpd_pin);
		}
	}

	if (old_bits) {
		spin_lock_irq(&dev_priv->irq_lock);
		dev_priv->display.hotplug.event_bits |= old_bits;
		spin_unlock_irq(&dev_priv->irq_lock);
		queue_delayed_work(dev_priv->unordered_wq,
				   &dev_priv->display.hotplug.hotplug_work, 0);
	}
}

/**
 * intel_hpd_trigger_irq - trigger an hpd irq event for a port
 * @dig_port: digital port
 *
 * Trigger an HPD interrupt event for the given port, emulating a short pulse
 * generated by the sink, and schedule the dig port work to handle it.
 */
void intel_hpd_trigger_irq(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);

	spin_lock_irq(&i915->irq_lock);
	i915->display.hotplug.short_port_mask |= BIT(dig_port->base.port);
	spin_unlock_irq(&i915->irq_lock);

	queue_work(i915->display.hotplug.dp_wq, &i915->display.hotplug.dig_port_work);
}
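
/*
 * Example use, assuming code that has already decided it needs the short
 * pulse handling (e.g. DP sideband processing) to run for intel_dp's port:
 *
 *	intel_hpd_trigger_irq(dp_to_dig_port(intel_dp));
 *
 * This queues i915_digport_work_func() exactly as if the sink had pulsed
 * HPD itself.
 */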

/*
 * Handle hotplug events outside the interrupt handler proper.
 */
static void i915_hotplug_work_func(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private,
			     display.hotplug.hotplug_work.work);
	struct drm_connector_list_iter conn_iter;
	struct intel_connector *connector;
	u32 changed = 0, retry = 0;
	u32 hpd_event_bits;
	u32 hpd_retry_bits;
	struct drm_connector *first_changed_connector = NULL;
	int changed_connectors = 0;

	mutex_lock(&dev_priv->drm.mode_config.mutex);
	drm_dbg_kms(&dev_priv->drm, "running encoder hotplug functions\n");

	spin_lock_irq(&dev_priv->irq_lock);

	hpd_event_bits = dev_priv->display.hotplug.event_bits;
	dev_priv->display.hotplug.event_bits = 0;
	hpd_retry_bits = dev_priv->display.hotplug.retry_bits;
	dev_priv->display.hotplug.retry_bits = 0;

	/* Enable polling for connectors which had HPD IRQ storms */
	intel_hpd_irq_storm_switch_to_polling(dev_priv);

	spin_unlock_irq(&dev_priv->irq_lock);
	/* Skip calling encoder hotplug handlers if the ignore long HPD flag is set */
	if (dev_priv->display.hotplug.ignore_long_hpd) {
		drm_dbg_kms(&dev_priv->drm, "Ignore HPD flag on - skip encoder hotplug handlers\n");
		mutex_unlock(&dev_priv->drm.mode_config.mutex);
		return;
	}

	drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		enum hpd_pin pin;
		u32 hpd_bit;

		pin = intel_connector_hpd_pin(connector);
		if (pin == HPD_NONE)
			continue;

		hpd_bit = BIT(pin);
		if ((hpd_event_bits | hpd_retry_bits) & hpd_bit) {
			struct intel_encoder *encoder =
				intel_attached_encoder(connector);

			if (hpd_event_bits & hpd_bit)
				connector->hotplug_retries = 0;
			else
				connector->hotplug_retries++;

			drm_dbg_kms(&dev_priv->drm,
				    "Connector %s (pin %i) received hotplug event. (retry %d)\n",
				    connector->base.name, pin,
				    connector->hotplug_retries);

			switch (encoder->hotplug(encoder, connector)) {
			case INTEL_HOTPLUG_UNCHANGED:
				break;
			case INTEL_HOTPLUG_CHANGED:
				changed |= hpd_bit;
				changed_connectors++;
				if (!first_changed_connector) {
					drm_connector_get(&connector->base);
					first_changed_connector = &connector->base;
				}
				break;
			case INTEL_HOTPLUG_RETRY:
				retry |= hpd_bit;
				break;
			}
		}
	}
	drm_connector_list_iter_end(&conn_iter);
	mutex_unlock(&dev_priv->drm.mode_config.mutex);

	if (changed_connectors == 1)
		drm_kms_helper_connector_hotplug_event(first_changed_connector);
	else if (changed_connectors > 0)
		drm_kms_helper_hotplug_event(&dev_priv->drm);

	if (first_changed_connector)
		drm_connector_put(first_changed_connector);

	/* Remove shared HPD pins that have changed */
	retry &= ~changed;
	if (retry) {
		spin_lock_irq(&dev_priv->irq_lock);
		dev_priv->display.hotplug.retry_bits |= retry;
		spin_unlock_irq(&dev_priv->irq_lock);

		mod_delayed_work(dev_priv->unordered_wq,
				 &dev_priv->display.hotplug.hotplug_work,
				 msecs_to_jiffies(HPD_RETRY_DELAY));
	}
}

/**
 * intel_hpd_irq_handler - main hotplug irq handler
 * @dev_priv: drm_i915_private
 * @pin_mask: a mask of hpd pins that have triggered the irq
 * @long_mask: a mask of hpd pins that may be long hpd pulses
 *
 * This is the main hotplug irq handler for all platforms. The platform specific
 * irq handlers call the platform specific hotplug irq handlers, which read and
 * decode the appropriate registers into bitmasks about hpd pins that have
 * triggered (@pin_mask), and which of those pins may be long pulses
 * (@long_mask). The @long_mask is ignored if the port corresponding to the pin
 * is not a digital port.
 *
 * Here, we do hotplug irq storm detection and mitigation, and pass further
 * processing to appropriate bottom halves.
 */
void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
			   u32 pin_mask, u32 long_mask)
{
	struct intel_encoder *encoder;
	bool storm_detected = false;
	bool queue_dig = false, queue_hp = false;
	u32 long_hpd_pulse_mask = 0;
	u32 short_hpd_pulse_mask = 0;
	enum hpd_pin pin;

	if (!pin_mask)
		return;

	spin_lock(&dev_priv->irq_lock);

	/*
	 * Determine whether ->hpd_pulse() exists for each pin, and
	 * whether we have a short or a long pulse. This is needed
	 * as each pin may have up to two encoders (HDMI and DP) and
	 * only one of them (DP) will have ->hpd_pulse().
	 */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		enum port port = encoder->port;
		bool long_hpd;

		pin = encoder->hpd_pin;
		if (!(BIT(pin) & pin_mask))
			continue;

		if (!intel_encoder_has_hpd_pulse(encoder))
			continue;

		long_hpd = long_mask & BIT(pin);

		drm_dbg(&dev_priv->drm,
			"digital hpd on [ENCODER:%d:%s] - %s\n",
			encoder->base.base.id, encoder->base.name,
			long_hpd ? "long" : "short");
		queue_dig = true;

		if (long_hpd) {
			long_hpd_pulse_mask |= BIT(pin);
			dev_priv->display.hotplug.long_port_mask |= BIT(port);
		} else {
			short_hpd_pulse_mask |= BIT(pin);
			dev_priv->display.hotplug.short_port_mask |= BIT(port);
		}
	}

	/* Now process each pin just once */
	for_each_hpd_pin(pin) {
		bool long_hpd;

		if (!(BIT(pin) & pin_mask))
			continue;

		if (dev_priv->display.hotplug.stats[pin].state == HPD_DISABLED) {
			/*
			 * On GMCH platforms the interrupt mask bits only
			 * prevent irq generation, not the setting of the
			 * hotplug bits itself. So only WARN about unexpected
			 * interrupts on saner platforms.
			 */
			drm_WARN_ONCE(&dev_priv->drm, !HAS_GMCH(dev_priv),
				      "Received HPD interrupt on pin %d although disabled\n",
				      pin);
			continue;
		}

		if (dev_priv->display.hotplug.stats[pin].state != HPD_ENABLED)
			continue;

		/*
		 * Delegate to ->hpd_pulse() if one of the encoders for this
		 * pin has it, otherwise let the hotplug_work deal with this
		 * pin directly.
		 */
		if (((short_hpd_pulse_mask | long_hpd_pulse_mask) & BIT(pin))) {
			long_hpd = long_hpd_pulse_mask & BIT(pin);
		} else {
			dev_priv->display.hotplug.event_bits |= BIT(pin);
			long_hpd = true;
			queue_hp = true;
		}

		if (intel_hpd_irq_storm_detect(dev_priv, pin, long_hpd)) {
			dev_priv->display.hotplug.event_bits &= ~BIT(pin);
			storm_detected = true;
			queue_hp = true;
		}
	}

	/*
	 * Disable any IRQs that storms were detected on. Polling enablement
	 * happens later in our hotplug work.
	 */
	if (storm_detected)
		intel_hpd_irq_setup(dev_priv);
	spin_unlock(&dev_priv->irq_lock);

	/*
	 * Our hotplug handler can grab modeset locks (by calling down into the
	 * fb helpers). Hence it must not be run on our own dev_priv->wq work
	 * queue for otherwise the flush_work in the pageflip code will
	 * deadlock.
	 */
	if (queue_dig)
		queue_work(dev_priv->display.hotplug.dp_wq, &dev_priv->display.hotplug.dig_port_work);
	if (queue_hp)
		queue_delayed_work(dev_priv->unordered_wq,
				   &dev_priv->display.hotplug.hotplug_work, 0);
}
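
/*
 * The ->hpd_pulse() delegation above relies on digital ports having set the
 * hook during encoder setup; the DP encoder init path does roughly the
 * following (a sketch, details elided):
 *
 *	dig_port->hpd_pulse = intel_dp_hpd_pulse;
 *
 * Ports without the hook (e.g. HDMI-only ones) are handled entirely through
 * the hotplug_work path instead.
 */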

/**
 * intel_hpd_init - initializes and enables hpd support
 * @dev_priv: i915 device instance
 *
 * This function enables the hotplug support. It requires that interrupts have
 * already been enabled with intel_irq_init_hw(). From this point on hotplug and
 * poll requests can run concurrently with other code, so locking rules must be
 * obeyed.
 *
 * This is a separate step from interrupt enabling to simplify the locking rules
 * in the driver load and resume code.
 *
 * Also see: intel_hpd_poll_enable() and intel_hpd_poll_disable().
 */
void intel_hpd_init(struct drm_i915_private *dev_priv)
{
	int i;

	if (!HAS_DISPLAY(dev_priv))
		return;

	for_each_hpd_pin(i) {
		dev_priv->display.hotplug.stats[i].count = 0;
		dev_priv->display.hotplug.stats[i].state = HPD_ENABLED;
	}

	/*
	 * Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked checks happy.
	 */
	spin_lock_irq(&dev_priv->irq_lock);
	intel_hpd_irq_setup(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);
}
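
/*
 * The expected ordering in the driver load and resume paths is thus roughly
 * (a sketch, surrounding steps elided):
 *
 *	intel_irq_init_hw(dev_priv);		<- interrupts first
 *	intel_hpd_init(dev_priv);		<- then hotplug support
 *	intel_hpd_poll_disable(dev_priv);	<- init connector->polled
 */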

static void i915_hpd_poll_init_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private,
			     display.hotplug.poll_init_work);
	struct drm_connector_list_iter conn_iter;
	struct intel_connector *connector;
	bool enabled;

	mutex_lock(&dev_priv->drm.mode_config.mutex);

	enabled = READ_ONCE(dev_priv->display.hotplug.poll_enabled);

	drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		enum hpd_pin pin;

		pin = intel_connector_hpd_pin(connector);
		if (pin == HPD_NONE)
			continue;

		connector->base.polled = connector->polled;

		if (enabled && connector->base.polled == DRM_CONNECTOR_POLL_HPD)
			connector->base.polled = DRM_CONNECTOR_POLL_CONNECT |
				DRM_CONNECTOR_POLL_DISCONNECT;
	}
	drm_connector_list_iter_end(&conn_iter);

	if (enabled)
		drm_kms_helper_poll_reschedule(&dev_priv->drm);

	mutex_unlock(&dev_priv->drm.mode_config.mutex);

	/*
	 * We might have missed hotplug events that happened while we were
	 * in the middle of disabling polling.
	 */
	if (!enabled)
		drm_helper_hpd_irq_event(&dev_priv->drm);
}

/**
 * intel_hpd_poll_enable - enable polling for connectors with hpd
 * @dev_priv: i915 device instance
 *
 * This function enables polling for all connectors which support HPD.
 * Under certain conditions HPD may not be functional. On most Intel GPUs,
 * this happens when we enter runtime suspend.
 * On Valleyview and Cherryview systems, this also happens when we shut off all
 * of the powerwells.
 *
 * Since this function can get called in contexts where we're already holding
 * dev->mode_config.mutex, we do the actual hotplug enabling in a separate
 * worker.
 *
 * Also see: intel_hpd_init() and intel_hpd_poll_disable().
 */
void intel_hpd_poll_enable(struct drm_i915_private *dev_priv)
{
	if (!HAS_DISPLAY(dev_priv) ||
	    !INTEL_DISPLAY_ENABLED(dev_priv))
		return;

	WRITE_ONCE(dev_priv->display.hotplug.poll_enabled, true);

	/*
	 * We might already be holding dev->mode_config.mutex, so do this in a
	 * separate worker.
	 * As well, there's no issue if we race here since we always reschedule
	 * this worker anyway.
	 */
	queue_work(dev_priv->unordered_wq,
		   &dev_priv->display.hotplug.poll_init_work);
}
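
/*
 * A typical caller is the runtime suspend path, where HPD interrupts stop
 * working and polling takes over until resume (a sketch of the flow):
 *
 *	// after HPD interrupts have been shut off for runtime suspend
 *	intel_hpd_poll_enable(dev_priv);
 *
 * The resume path symmetrically calls intel_hpd_init() followed by
 * intel_hpd_poll_disable().
 */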

/**
 * intel_hpd_poll_disable - disable polling for connectors with hpd
 * @dev_priv: i915 device instance
 *
 * This function disables polling for all connectors which support HPD.
 * Under certain conditions HPD may not be functional. On most Intel GPUs,
 * this happens when we enter runtime suspend.
 * On Valleyview and Cherryview systems, this also happens when we shut off all
 * of the powerwells.
 *
 * Since this function can get called in contexts where we're already holding
 * dev->mode_config.mutex, we do the actual hotplug disabling in a separate
 * worker.
 *
 * Also used during driver init to initialize connector->polled
 * appropriately for all connectors.
 *
 * Also see: intel_hpd_init() and intel_hpd_poll_enable().
 */
void intel_hpd_poll_disable(struct drm_i915_private *dev_priv)
{
	if (!HAS_DISPLAY(dev_priv))
		return;

	WRITE_ONCE(dev_priv->display.hotplug.poll_enabled, false);
	queue_work(dev_priv->unordered_wq,
		   &dev_priv->display.hotplug.poll_init_work);
}

void intel_hpd_init_early(struct drm_i915_private *i915)
{
	INIT_DELAYED_WORK(&i915->display.hotplug.hotplug_work,
			  i915_hotplug_work_func);
	INIT_WORK(&i915->display.hotplug.dig_port_work, i915_digport_work_func);
	INIT_WORK(&i915->display.hotplug.poll_init_work, i915_hpd_poll_init_work);
	INIT_DELAYED_WORK(&i915->display.hotplug.reenable_work,
			  intel_hpd_irq_storm_reenable_work);

	i915->display.hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD;
	/*
	 * If we have MST support, we want to avoid doing short HPD IRQ storm
	 * detection, as short HPD storms will occur as a natural part of
	 * sideband messaging with MST.
	 * On older platforms however, IRQ storms can occur with both long and
	 * short pulses, as seen on some G4x systems.
	 */
	i915->display.hotplug.hpd_short_storm_enabled = !HAS_DP_MST(i915);
}

void intel_hpd_cancel_work(struct drm_i915_private *dev_priv)
{
	if (!HAS_DISPLAY(dev_priv))
		return;

	spin_lock_irq(&dev_priv->irq_lock);

	dev_priv->display.hotplug.long_port_mask = 0;
	dev_priv->display.hotplug.short_port_mask = 0;
	dev_priv->display.hotplug.event_bits = 0;
	dev_priv->display.hotplug.retry_bits = 0;

	spin_unlock_irq(&dev_priv->irq_lock);

	cancel_work_sync(&dev_priv->display.hotplug.dig_port_work);
	cancel_delayed_work_sync(&dev_priv->display.hotplug.hotplug_work);
	cancel_work_sync(&dev_priv->display.hotplug.poll_init_work);
	cancel_delayed_work_sync(&dev_priv->display.hotplug.reenable_work);
}

bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin)
{
	bool ret = false;

	if (pin == HPD_NONE)
		return false;

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display.hotplug.stats[pin].state == HPD_ENABLED) {
		dev_priv->display.hotplug.stats[pin].state = HPD_DISABLED;
		ret = true;
	}
	spin_unlock_irq(&dev_priv->irq_lock);

	return ret;
}

void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin)
{
	if (pin == HPD_NONE)
		return;

	spin_lock_irq(&dev_priv->irq_lock);
	dev_priv->display.hotplug.stats[pin].state = HPD_ENABLED;
	spin_unlock_irq(&dev_priv->irq_lock);
}

static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	struct intel_hotplug *hotplug = &dev_priv->display.hotplug;

	/*
	 * Synchronize with everything first in case there's been an HPD
	 * storm, but we haven't finished handling it in the kernel yet.
	 */
	intel_synchronize_irq(dev_priv);
	flush_work(&dev_priv->display.hotplug.dig_port_work);
	flush_delayed_work(&dev_priv->display.hotplug.hotplug_work);

	seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
	seq_printf(m, "Detected: %s\n",
		   str_yes_no(delayed_work_pending(&hotplug->reenable_work)));

	return 0;
}

static ssize_t i915_hpd_storm_ctl_write(struct file *file,
					const char __user *ubuf, size_t len,
					loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	struct intel_hotplug *hotplug = &dev_priv->display.hotplug;
	unsigned int new_threshold;
	int i;
	char *newline;
	char tmp[16];

	if (len >= sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(tmp, ubuf, len))
		return -EFAULT;

	tmp[len] = '\0';

	/* Strip newline, if any */
	newline = strchr(tmp, '\n');
	if (newline)
		*newline = '\0';

	if (strcmp(tmp, "reset") == 0)
		new_threshold = HPD_STORM_DEFAULT_THRESHOLD;
	else if (kstrtouint(tmp, 10, &new_threshold) != 0)
		return -EINVAL;

	if (new_threshold > 0)
		drm_dbg_kms(&dev_priv->drm,
			    "Setting HPD storm detection threshold to %d\n",
			    new_threshold);
	else
		drm_dbg_kms(&dev_priv->drm, "Disabling HPD storm detection\n");

	spin_lock_irq(&dev_priv->irq_lock);
	hotplug->hpd_storm_threshold = new_threshold;
	/* Reset the HPD storm stats so we don't accidentally trigger a storm */
	for_each_hpd_pin(i)
		hotplug->stats[i].count = 0;
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Re-enable hpd immediately if we were in an irq storm */
	flush_delayed_work(&dev_priv->display.hotplug.reenable_work);

	return len;
}

static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file)
{
	return single_open(file, i915_hpd_storm_ctl_show, inode->i_private);
}

static const struct file_operations i915_hpd_storm_ctl_fops = {
	.owner = THIS_MODULE,
	.open = i915_hpd_storm_ctl_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_hpd_storm_ctl_write,
};
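
/*
 * With this file registered, the threshold can be inspected and tuned from
 * userspace through debugfs, e.g. (the dri minor number may vary):
 *
 *	# cat /sys/kernel/debug/dri/0/i915_hpd_storm_ctl
 *	# echo 10 > /sys/kernel/debug/dri/0/i915_hpd_storm_ctl
 *	# echo reset > /sys/kernel/debug/dri/0/i915_hpd_storm_ctl
 *
 * Writing 0 disables storm detection entirely.
 */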

static int i915_hpd_short_storm_ctl_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;

	seq_printf(m, "Enabled: %s\n",
		   str_yes_no(dev_priv->display.hotplug.hpd_short_storm_enabled));

	return 0;
}

static int
i915_hpd_short_storm_ctl_open(struct inode *inode, struct file *file)
{
	return single_open(file, i915_hpd_short_storm_ctl_show,
			   inode->i_private);
}

static ssize_t i915_hpd_short_storm_ctl_write(struct file *file,
					      const char __user *ubuf,
					      size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	struct intel_hotplug *hotplug = &dev_priv->display.hotplug;
	char *newline;
	char tmp[16];
	int i;
	bool new_state;

	if (len >= sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(tmp, ubuf, len))
		return -EFAULT;

	tmp[len] = '\0';

	/* Strip newline, if any */
	newline = strchr(tmp, '\n');
	if (newline)
		*newline = '\0';

	/* Reset to the "default" state for this system */
	if (strcmp(tmp, "reset") == 0)
		new_state = !HAS_DP_MST(dev_priv);
	else if (kstrtobool(tmp, &new_state) != 0)
		return -EINVAL;

	drm_dbg_kms(&dev_priv->drm, "%sabling HPD short storm detection\n",
		    new_state ? "En" : "Dis");

	spin_lock_irq(&dev_priv->irq_lock);
	hotplug->hpd_short_storm_enabled = new_state;
	/* Reset the HPD storm stats so we don't accidentally trigger a storm */
	for_each_hpd_pin(i)
		hotplug->stats[i].count = 0;
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Re-enable hpd immediately if we were in an irq storm */
	flush_delayed_work(&dev_priv->display.hotplug.reenable_work);

	return len;
}

static const struct file_operations i915_hpd_short_storm_ctl_fops = {
	.owner = THIS_MODULE,
	.open = i915_hpd_short_storm_ctl_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_hpd_short_storm_ctl_write,
};

void intel_hpd_debugfs_register(struct drm_i915_private *i915)
{
	struct drm_minor *minor = i915->drm.primary;

	debugfs_create_file("i915_hpd_storm_ctl", 0644, minor->debugfs_root,
			    i915, &i915_hpd_storm_ctl_fops);
	debugfs_create_file("i915_hpd_short_storm_ctl", 0644, minor->debugfs_root,
			    i915, &i915_hpd_short_storm_ctl_fops);
	debugfs_create_bool("i915_ignore_long_hpd", 0644, minor->debugfs_root,
			    &i915->display.hotplug.ignore_long_hpd);
}