/* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
 */
/*
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/oom.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/string_helpers.h>
#include <linux/vga_switcheroo.h>
#include <linux/vt.h>

#include <drm/drm_aperture.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_managed.h>
#include <drm/drm_probe_helper.h>

#include "display/intel_acpi.h"
#include "display/intel_bw.h"
#include "display/intel_cdclk.h"
#include "display/intel_display_driver.h"
#include "display/intel_display_types.h"
#include "display/intel_dmc.h"
#include "display/intel_dp.h"
#include "display/intel_dpt.h"
#include "display/intel_fbdev.h"
#include "display/intel_hotplug.h"
#include "display/intel_overlay.h"
#include "display/intel_pch_refclk.h"
#include "display/intel_pipe_crc.h"
#include "display/intel_pps.h"
#include "display/intel_sprite.h"
#include "display/intel_vga.h"
#include "display/skl_watermark.h"

#include "gem/i915_gem_context.h"
#include "gem/i915_gem_create.h"
#include "gem/i915_gem_dmabuf.h"
#include "gem/i915_gem_ioctls.h"
#include "gem/i915_gem_mman.h"
#include "gem/i915_gem_pm.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_rc6.h"

#include "pxp/intel_pxp.h"
#include "pxp/intel_pxp_debugfs.h"
#include "pxp/intel_pxp_pm.h"

#include "soc/intel_dram.h"
#include "soc/intel_gmch.h"

#include "i915_debugfs.h"
#include "i915_driver.h"
#include "i915_drm_client.h"
#include "i915_drv.h"
#include "i915_file_private.h"
#include "i915_getparam.h"
#include "i915_hwmon.h"
#include "i915_ioc32.h"
#include "i915_ioctl.h"
#include "i915_irq.h"
#include "i915_memcpy.h"
#include "i915_perf.h"
#include "i915_query.h"
#include "i915_suspend.h"
#include "i915_switcheroo.h"
#include "i915_sysfs.h"
#include "i915_utils.h"
#include "i915_vgpu.h"
#include "intel_clock_gating.h"
#include "intel_gvt.h"
#include "intel_memory_region.h"
#include "intel_pci_config.h"
#include "intel_pcode.h"
#include "intel_region_ttm.h"
#include "vlv_suspend.h"

static const struct drm_driver i915_drm_driver;

static int i915_workqueues_init(struct drm_i915_private *dev_priv)
{
	/*
	 * The i915 workqueue is primarily used for batched retirement of
	 * requests (and thus managing bo) once the task has been completed
	 * by the GPU. i915_retire_requests() is called directly when we
	 * need high-priority retirement, such as waiting for an explicit
	 * bo.
	 *
	 * It is also used for periodic low-priority events, such as
	 * idle-timers and recording error state.
	 *
	 * All tasks on the workqueue are expected to acquire the dev mutex
	 * so there is no point in running more than one instance of the
	 * workqueue at any time.  Use an ordered one.
	 */
	dev_priv->wq = alloc_ordered_workqueue("i915", 0);
	if (dev_priv->wq == NULL)
		goto out_err;

	dev_priv->display.hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0);
	if (dev_priv->display.hotplug.dp_wq == NULL)
		goto out_free_wq;

	/*
	 * The unordered i915 workqueue should be used for all work
	 * scheduling that does not need to run in order, work which used
	 * to be scheduled on the system_wq before moving to a driver
	 * instance due to the deprecation of flush_scheduled_work().
	 */
	dev_priv->unordered_wq = alloc_workqueue("i915-unordered", 0, 0);
	if (dev_priv->unordered_wq == NULL)
		goto out_free_dp_wq;

	return 0;

out_free_dp_wq:
	destroy_workqueue(dev_priv->display.hotplug.dp_wq);
out_free_wq:
	destroy_workqueue(dev_priv->wq);
out_err:
	drm_err(&dev_priv->drm, "Failed to allocate workqueues.\n");

	return -ENOMEM;
}

static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv)
{
	destroy_workqueue(dev_priv->unordered_wq);
	destroy_workqueue(dev_priv->display.hotplug.dp_wq);
	destroy_workqueue(dev_priv->wq);
}

/*
 * We don't keep the workarounds for pre-production hardware, so we expect our
 * driver to fail on these machines in one way or another. A little warning on
 * dmesg may help both the user and the bug triagers.
 *
 * Our policy for removing pre-production workarounds is to keep the
 * current gen workarounds as a guide to the bring-up of the next gen
 * (workarounds have a habit of persisting!). Anything older than that
 * should be removed along with the complications they introduce.
 */
static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv)
{
	bool pre = false;

	pre |= IS_HASWELL_EARLY_SDV(dev_priv);
	pre |= IS_SKYLAKE(dev_priv) && INTEL_REVID(dev_priv) < 0x6;
	pre |= IS_BROXTON(dev_priv) && INTEL_REVID(dev_priv) < 0xA;
	pre |= IS_KABYLAKE(dev_priv) && INTEL_REVID(dev_priv) < 0x1;
	pre |= IS_GEMINILAKE(dev_priv) && INTEL_REVID(dev_priv) < 0x3;
	pre |= IS_ICELAKE(dev_priv) && INTEL_REVID(dev_priv) < 0x7;
	pre |= IS_TIGERLAKE(dev_priv) && INTEL_REVID(dev_priv) < 0x1;
	pre |= IS_DG1(dev_priv) && INTEL_REVID(dev_priv) < 0x1;

	if (pre) {
		drm_err(&dev_priv->drm, "This is a pre-production stepping. "
			  "It may not be fully functional.\n");
		add_taint(TAINT_MACHINE_CHECK, LOCKDEP_STILL_OK);
	}
}

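/*
 * Scrub any GPU state left behind by the BIOS or a previous kernel by
 * resetting all GTs, but only on platforms where a full GPU reset does
 * not also clobber the display state we may still be relying on here.
 */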
static void sanitize_gpu(struct drm_i915_private *i915)
{
	if (!INTEL_INFO(i915)->gpu_reset_clobbers_display) {
		struct intel_gt *gt;
		unsigned int i;

		for_each_gt(gt, i915, i)
			__intel_gt_reset(gt, ALL_ENGINES);
	}
}

/**
 * i915_driver_early_probe - setup state not requiring device access
 * @dev_priv: device private
 *
 * Initialize everything that is "SW-only" state, that is, state not
 * requiring access to the device or exposing the driver via kernel internal
 * or userspace interfaces. Example steps belonging here: lock initialization,
 * system memory allocation, setting up device specific attributes and
 * function hooks not requiring accessing the device.
 */
static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
{
	int ret = 0;

	if (i915_inject_probe_failure(dev_priv))
		return -ENODEV;

	intel_device_info_runtime_init_early(dev_priv);

	intel_step_init(dev_priv);

	intel_uncore_mmio_debug_init_early(dev_priv);

	spin_lock_init(&dev_priv->irq_lock);
	spin_lock_init(&dev_priv->gpu_error.lock);
	mutex_init(&dev_priv->display.backlight.lock);

	mutex_init(&dev_priv->sb_lock);
	cpu_latency_qos_add_request(&dev_priv->sb_qos, PM_QOS_DEFAULT_VALUE);

	mutex_init(&dev_priv->display.audio.mutex);
	mutex_init(&dev_priv->display.wm.wm_mutex);
	mutex_init(&dev_priv->display.pps.mutex);
	mutex_init(&dev_priv->display.hdcp.hdcp_mutex);

	i915_memcpy_init_early(dev_priv);
	intel_runtime_pm_init_early(&dev_priv->runtime_pm);

	ret = i915_workqueues_init(dev_priv);
	if (ret < 0)
		return ret;

	ret = vlv_suspend_init(dev_priv);
	if (ret < 0)
		goto err_workqueues;

	ret = intel_region_ttm_device_init(dev_priv);
	if (ret)
		goto err_ttm;

	ret = intel_root_gt_init_early(dev_priv);
	if (ret < 0)
		goto err_rootgt;

	i915_gem_init_early(dev_priv);

	/* This must be called before any calls to HAS_PCH_* */
	intel_detect_pch(dev_priv);

	intel_irq_init(dev_priv);
	intel_display_driver_early_probe(dev_priv);
	intel_clock_gating_hooks_init(dev_priv);

	intel_detect_preproduction_hw(dev_priv);

	return 0;

err_rootgt:
	intel_region_ttm_device_fini(dev_priv);
err_ttm:
	vlv_suspend_cleanup(dev_priv);
err_workqueues:
	i915_workqueues_cleanup(dev_priv);
	return ret;
}

/**
 * i915_driver_late_release - cleanup the setup done in
 *			       i915_driver_early_probe()
 * @dev_priv: device private
 */
static void i915_driver_late_release(struct drm_i915_private *dev_priv)
{
	intel_irq_fini(dev_priv);
	intel_power_domains_cleanup(dev_priv);
	i915_gem_cleanup_early(dev_priv);
	intel_gt_driver_late_release_all(dev_priv);
	intel_region_ttm_device_fini(dev_priv);
	vlv_suspend_cleanup(dev_priv);
	i915_workqueues_cleanup(dev_priv);

	cpu_latency_qos_remove_request(&dev_priv->sb_qos);
	mutex_destroy(&dev_priv->sb_lock);

	i915_params_free(&dev_priv->params);
}

/**
 * i915_driver_mmio_probe - setup device MMIO
 * @dev_priv: device private
 *
 * Setup minimal device state necessary for MMIO accesses later in the
 * initialization sequence. The setup here should avoid any other device-wide
 * side effects or exposing the driver via kernel internal or user space
 * interfaces.
 */
static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv)
{
	struct intel_gt *gt;
	int ret, i;

	if (i915_inject_probe_failure(dev_priv))
		return -ENODEV;

	ret = intel_gmch_bridge_setup(dev_priv);
	if (ret < 0)
		return ret;

	for_each_gt(gt, dev_priv, i) {
		ret = intel_uncore_init_mmio(gt->uncore);
		if (ret)
			return ret;

		ret = drmm_add_action_or_reset(&dev_priv->drm,
					       intel_uncore_fini_mmio,
					       gt->uncore);
		if (ret)
			return ret;
	}

	/* Try to make sure MCHBAR is enabled before poking at it */
	intel_gmch_bar_setup(dev_priv);
	intel_device_info_runtime_init(dev_priv);

	for_each_gt(gt, dev_priv, i) {
		ret = intel_gt_init_mmio(gt);
		if (ret)
			goto err_uncore;
	}

	/* As early as possible, scrub existing GPU state before clobbering */
	sanitize_gpu(dev_priv);

	return 0;

err_uncore:
	intel_gmch_bar_teardown(dev_priv);

	return ret;
}

/**
 * i915_driver_mmio_release - cleanup the setup done in i915_driver_mmio_probe()
 * @dev_priv: device private
 */
static void i915_driver_mmio_release(struct drm_i915_private *dev_priv)
{
	intel_gmch_bar_teardown(dev_priv);
}

/**
 * i915_set_dma_info - set all relevant PCI dma info as configured for the
 * platform
 * @i915: valid i915 instance
 *
 * Set the dma max segment size, device and coherent masks.  The dma mask set
 * needs to occur before i915_ggtt_probe_hw.
 *
 * A couple of platforms have special needs.  Address them as well.
 */
static int i915_set_dma_info(struct drm_i915_private *i915)
{
	unsigned int mask_size = INTEL_INFO(i915)->dma_mask_size;
	int ret;

	GEM_BUG_ON(!mask_size);

	/*
	 * We don't have a max segment size, so set it to the max so sg's
	 * debugging layer doesn't complain
	 */
	dma_set_max_seg_size(i915->drm.dev, UINT_MAX);

	ret = dma_set_mask(i915->drm.dev, DMA_BIT_MASK(mask_size));
	if (ret)
		goto mask_err;

	/* overlay on gen2 is broken and can't address above 1G */
	if (GRAPHICS_VER(i915) == 2)
		mask_size = 30;

	/*
	 * 965GM sometimes incorrectly writes to hardware status page (HWS)
	 * using 32bit addressing, overwriting memory if HWS is located
	 * above 4GB.
	 *
	 * The documentation also mentions an issue with undefined
	 * behaviour if any general state is accessed within a page above 4GB,
	 * which also needs to be handled carefully.
	 */
	if (IS_I965G(i915) || IS_I965GM(i915))
		mask_size = 32;

	ret = dma_set_coherent_mask(i915->drm.dev, DMA_BIT_MASK(mask_size));
	if (ret)
		goto mask_err;

	return 0;

mask_err:
	drm_err(&i915->drm, "Can't set DMA mask/consistent mask (%d)\n", ret);
	return ret;
}

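/* Initialize the pcode (power management firmware) interface on each GT. */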
static int i915_pcode_init(struct drm_i915_private *i915)
{
	struct intel_gt *gt;
	int id, ret;

	for_each_gt(gt, i915, id) {
		ret = intel_pcode_init(gt->uncore);
		if (ret) {
			drm_err(&gt->i915->drm, "gt%d: intel_pcode_init failed %d\n", id, ret);
			return ret;
		}
	}

	return 0;
}

/**
 * i915_driver_hw_probe - setup state requiring device access
 * @dev_priv: device private
 *
 * Setup state that requires accessing the device, but doesn't require
 * exposing the driver via kernel internal or userspace interfaces.
 */
static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
	int ret;

	if (i915_inject_probe_failure(dev_priv))
		return -ENODEV;

	if (HAS_PPGTT(dev_priv)) {
		if (intel_vgpu_active(dev_priv) &&
		    !intel_vgpu_has_full_ppgtt(dev_priv)) {
			i915_report_error(dev_priv,
					  "incompatible vGPU found, support for isolated ppGTT required\n");
			return -ENXIO;
		}
	}

	if (HAS_EXECLISTS(dev_priv)) {
		/*
		 * Older GVT emulation depends upon intercepting CSB mmio,
		 * which we no longer use, preferring to use the HWSP cache
		 * instead.
		 */
		if (intel_vgpu_active(dev_priv) &&
		    !intel_vgpu_has_hwsp_emulation(dev_priv)) {
			i915_report_error(dev_priv,
					  "old vGPU host found, support for HWSP emulation required\n");
			return -ENXIO;
		}
	}

	/* needs to be done before ggtt probe */
	intel_dram_edram_detect(dev_priv);

	ret = i915_set_dma_info(dev_priv);
	if (ret)
		return ret;

	ret = i915_perf_init(dev_priv);
	if (ret)
		return ret;

	ret = i915_ggtt_probe_hw(dev_priv);
	if (ret)
		goto err_perf;

	ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, dev_priv->drm.driver);
	if (ret)
		goto err_ggtt;

	ret = i915_ggtt_init_hw(dev_priv);
	if (ret)
		goto err_ggtt;

	/*
	 * Make sure we probe lmem before we probe stolen-lmem. The BAR size
	 * might be different due to BAR resizing.
	 */
	ret = intel_gt_tiles_init(dev_priv);
	if (ret)
		goto err_ggtt;

	ret = intel_memory_regions_hw_probe(dev_priv);
	if (ret)
		goto err_ggtt;

	ret = i915_ggtt_enable_hw(dev_priv);
	if (ret) {
		drm_err(&dev_priv->drm, "failed to enable GGTT\n");
		goto err_mem_regions;
	}

	pci_set_master(pdev);

	/*
	 * On the 945G/GM, the chipset reports the MSI capability on the
	 * integrated graphics even though the support isn't actually there
	 * according to the published specs.  It doesn't appear to function
	 * correctly in testing on 945G.
	 * This may be a side effect of MSI having been made available for PEG
	 * and the registers being closely associated.
	 *
	 * According to chipset errata, on the 965GM, MSI interrupts may
	 * be lost or delayed, and MSI support was defeatured. MSI interrupts
	 * seem to get lost on g4x as well, and interrupt delivery seems to
	 * stay properly dead afterwards. So we'll just disable them for all
	 * pre-gen5 chipsets.
	 *
	 * DP AUX and gmbus irq on gen4 seem to be able to generate legacy
	 * interrupts even when in MSI mode. This results in spurious
	 * interrupt warnings if the legacy irq no. is shared with another
	 * device. The kernel then disables that interrupt source and so
	 * prevents the other device from working properly.
	 */
	if (GRAPHICS_VER(dev_priv) >= 5) {
		if (pci_enable_msi(pdev) < 0)
			drm_dbg(&dev_priv->drm, "can't enable MSI");
	}

	ret = intel_gvt_init(dev_priv);
	if (ret)
		goto err_msi;

	intel_opregion_setup(dev_priv);

	ret = i915_pcode_init(dev_priv);
	if (ret)
		goto err_opregion;

	/*
	 * Fill the dram structure to get the system dram info. This will be
	 * used for memory latency calculation.
	 */
	intel_dram_detect(dev_priv);

	intel_bw_init_hw(dev_priv);

	return 0;

err_opregion:
	intel_opregion_cleanup(dev_priv);
err_msi:
	if (pdev->msi_enabled)
		pci_disable_msi(pdev);
err_mem_regions:
	intel_memory_regions_driver_release(dev_priv);
err_ggtt:
	i915_ggtt_driver_release(dev_priv);
	i915_gem_drain_freed_objects(dev_priv);
	i915_ggtt_driver_late_release(dev_priv);
err_perf:
	i915_perf_fini(dev_priv);
	return ret;
}

/**
 * i915_driver_hw_remove - cleanup the setup done in i915_driver_hw_probe()
 * @dev_priv: device private
 */
static void i915_driver_hw_remove(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);

	i915_perf_fini(dev_priv);

	intel_opregion_cleanup(dev_priv);

	if (pdev->msi_enabled)
		pci_disable_msi(pdev);
}

/**
 * i915_driver_register - register the driver with the rest of the system
 * @dev_priv: device private
 *
 * Perform any steps necessary to make the driver available via kernel
 * internal or userspace interfaces.
 */
static void i915_driver_register(struct drm_i915_private *dev_priv)
{
	struct intel_gt *gt;
	unsigned int i;

	i915_gem_driver_register(dev_priv);
	i915_pmu_register(dev_priv);

	intel_vgpu_register(dev_priv);

	/* Reveal our presence to userspace */
	if (drm_dev_register(&dev_priv->drm, 0)) {
		drm_err(&dev_priv->drm,
			"Failed to register driver for userspace access!\n");
		return;
	}

	i915_debugfs_register(dev_priv);
	i915_setup_sysfs(dev_priv);

	/* Depends on sysfs having been initialized */
	i915_perf_register(dev_priv);

	for_each_gt(gt, dev_priv, i)
		intel_gt_driver_register(gt);

	intel_pxp_debugfs_register(dev_priv->pxp);

	i915_hwmon_register(dev_priv);

	intel_display_driver_register(dev_priv);

	intel_power_domains_enable(dev_priv);
	intel_runtime_pm_enable(&dev_priv->runtime_pm);

	intel_register_dsm_handler();

	if (i915_switcheroo_register(dev_priv))
		drm_err(&dev_priv->drm, "Failed to register vga switcheroo!\n");
}

/**
 * i915_driver_unregister - cleanup the registration done in i915_driver_register()
 * @dev_priv: device private
 */
static void i915_driver_unregister(struct drm_i915_private *dev_priv)
{
	struct intel_gt *gt;
	unsigned int i;

	i915_switcheroo_unregister(dev_priv);

	intel_unregister_dsm_handler();

	intel_runtime_pm_disable(&dev_priv->runtime_pm);
	intel_power_domains_disable(dev_priv);

	intel_display_driver_unregister(dev_priv);

	intel_pxp_fini(dev_priv);

	for_each_gt(gt, dev_priv, i)
		intel_gt_driver_unregister(gt);

	i915_hwmon_unregister(dev_priv);

	i915_perf_unregister(dev_priv);
	i915_pmu_unregister(dev_priv);

	i915_teardown_sysfs(dev_priv);
	drm_dev_unplug(&dev_priv->drm);

	i915_gem_driver_unregister(dev_priv);
}

void
i915_print_iommu_status(struct drm_i915_private *i915, struct drm_printer *p)
{
	drm_printf(p, "iommu: %s\n",
		   str_enabled_disabled(i915_vtd_active(i915)));
}

static void i915_welcome_messages(struct drm_i915_private *dev_priv)
{
	if (drm_debug_enabled(DRM_UT_DRIVER)) {
		struct drm_printer p = drm_debug_printer("i915 device info:");
		struct intel_gt *gt;
		unsigned int i;

		drm_printf(&p, "pciid=0x%04x rev=0x%02x platform=%s (subplatform=0x%x) gen=%i\n",
			   INTEL_DEVID(dev_priv),
			   INTEL_REVID(dev_priv),
			   intel_platform_name(INTEL_INFO(dev_priv)->platform),
			   intel_subplatform(RUNTIME_INFO(dev_priv),
					     INTEL_INFO(dev_priv)->platform),
			   GRAPHICS_VER(dev_priv));

		intel_device_info_print(INTEL_INFO(dev_priv),
					RUNTIME_INFO(dev_priv), &p);
		intel_display_device_info_print(DISPLAY_INFO(dev_priv),
						DISPLAY_RUNTIME_INFO(dev_priv), &p);
		i915_print_iommu_status(dev_priv, &p);
		for_each_gt(gt, dev_priv, i)
			intel_gt_info_print(&gt->info, &p);
	}

	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG))
		drm_info(&dev_priv->drm, "DRM_I915_DEBUG enabled\n");
	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
		drm_info(&dev_priv->drm, "DRM_I915_DEBUG_GEM enabled\n");
	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM))
		drm_info(&dev_priv->drm,
			 "DRM_I915_DEBUG_RUNTIME_PM enabled\n");
}

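/*
 * Allocate the drm_i915_private together with its embedded drm_device,
 * attach it to the PCI device, and seed the device parameters (a copy of
 * the module parameters) and the static/initial runtime device info.
 */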
static struct drm_i915_private *
i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	const struct intel_device_info *match_info =
		(struct intel_device_info *)ent->driver_data;
	struct drm_i915_private *i915;

	i915 = devm_drm_dev_alloc(&pdev->dev, &i915_drm_driver,
				  struct drm_i915_private, drm);
	if (IS_ERR(i915))
		return i915;

	pci_set_drvdata(pdev, i915);

	/* Device parameters start as a copy of module parameters. */
	i915_params_copy(&i915->params, &i915_modparams);

	/* Set up device info and initial runtime info. */
	intel_device_info_driver_create(i915, pdev->device, match_info);

	return i915;
}

/**
 * i915_driver_probe - setup chip and create an initial config
 * @pdev: PCI device
 * @ent: matching PCI ID entry
 *
 * The driver probe routine has to do several things:
 *   - drive output discovery via intel_display_driver_probe()
 *   - initialize the memory manager
 *   - allocate initial config memory
 *   - setup the DRM framebuffer with the allocated memory
 */
int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct drm_i915_private *i915;
	int ret;

	ret = pci_enable_device(pdev);
	if (ret) {
		pr_err("Failed to enable graphics device: %pe\n", ERR_PTR(ret));
		return ret;
	}

	i915 = i915_driver_create(pdev, ent);
	if (IS_ERR(i915)) {
		pci_disable_device(pdev);
		return PTR_ERR(i915);
	}

	ret = i915_driver_early_probe(i915);
	if (ret < 0)
		goto out_pci_disable;

	disable_rpm_wakeref_asserts(&i915->runtime_pm);

	intel_vgpu_detect(i915);

	ret = intel_gt_probe_all(i915);
	if (ret < 0)
		goto out_runtime_pm_put;

	ret = i915_driver_mmio_probe(i915);
	if (ret < 0)
		goto out_runtime_pm_put;

	ret = i915_driver_hw_probe(i915);
	if (ret < 0)
		goto out_cleanup_mmio;

	ret = intel_display_driver_probe_noirq(i915);
	if (ret < 0)
		goto out_cleanup_hw;

	ret = intel_irq_install(i915);
	if (ret)
		goto out_cleanup_modeset;

	ret = intel_display_driver_probe_nogem(i915);
	if (ret)
		goto out_cleanup_irq;

	ret = i915_gem_init(i915);
	if (ret)
		goto out_cleanup_modeset2;

	intel_pxp_init(i915);

	ret = intel_display_driver_probe(i915);
	if (ret)
		goto out_cleanup_gem;

	i915_driver_register(i915);

	enable_rpm_wakeref_asserts(&i915->runtime_pm);

	i915_welcome_messages(i915);

	i915->do_release = true;

	return 0;

out_cleanup_gem:
	i915_gem_suspend(i915);
	i915_gem_driver_remove(i915);
	i915_gem_driver_release(i915);
out_cleanup_modeset2:
	/* FIXME clean up the error path */
	intel_display_driver_remove(i915);
	intel_irq_uninstall(i915);
	intel_display_driver_remove_noirq(i915);
	goto out_cleanup_modeset;
out_cleanup_irq:
	intel_irq_uninstall(i915);
out_cleanup_modeset:
	intel_display_driver_remove_nogem(i915);
out_cleanup_hw:
	i915_driver_hw_remove(i915);
	intel_memory_regions_driver_release(i915);
	i915_ggtt_driver_release(i915);
	i915_gem_drain_freed_objects(i915);
	i915_ggtt_driver_late_release(i915);
out_cleanup_mmio:
	i915_driver_mmio_release(i915);
out_runtime_pm_put:
	enable_rpm_wakeref_asserts(&i915->runtime_pm);
	i915_driver_late_release(i915);
out_pci_disable:
	pci_disable_device(pdev);
	i915_probe_error(i915, "Device initialization failed (%d)\n", ret);
	return ret;
}

void i915_driver_remove(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref;

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	i915_driver_unregister(i915);

	/* Flush any external code that still may be under the RCU lock */
	synchronize_rcu();

	i915_gem_suspend(i915);

	intel_gvt_driver_remove(i915);

	intel_display_driver_remove(i915);

	intel_irq_uninstall(i915);

	intel_display_driver_remove_noirq(i915);

	i915_reset_error_state(i915);
	i915_gem_driver_remove(i915);

	intel_display_driver_remove_nogem(i915);

	i915_driver_hw_remove(i915);

	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}

static void i915_driver_release(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
	intel_wakeref_t wakeref;

	if (!dev_priv->do_release)
		return;

	wakeref = intel_runtime_pm_get(rpm);

	i915_gem_driver_release(dev_priv);

	intel_memory_regions_driver_release(dev_priv);
	i915_ggtt_driver_release(dev_priv);
	i915_gem_drain_freed_objects(dev_priv);
	i915_ggtt_driver_late_release(dev_priv);

	i915_driver_mmio_release(dev_priv);

	intel_runtime_pm_put(rpm, wakeref);

	intel_runtime_pm_driver_release(rpm);

	i915_driver_late_release(dev_priv);
}

static int i915_driver_open(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	int ret;

	ret = i915_gem_open(i915, file);
	if (ret)
		return ret;

	return 0;
}

/**
 * i915_driver_lastclose - clean up after all DRM clients have exited
 * @dev: DRM device
 *
 * Take care of cleaning up after all DRM clients have exited.  In the
 * mode setting case, we want to restore the kernel's initial mode (just
 * in case the last client left us in a bad state).
 *
 * Additionally, in the non-mode setting case, we'll tear down the GTT
 * and DMA structures, since the kernel won't be using them, and clean
 * up any GEM state.
 */
static void i915_driver_lastclose(struct drm_device *dev)
{
	struct drm_i915_private *i915 = to_i915(dev);

	intel_fbdev_restore_mode(i915);

	vga_switcheroo_process_delayed_switch();
}

static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	i915_gem_context_close(file);
	i915_drm_client_put(file_priv->client);

	kfree_rcu(file_priv, rcu);

	/* Catch up with all the deferred frees from "this" client */
	i915_gem_flush_free_objects(to_i915(dev));
}

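/*
 * Invoke the ->suspend() hook of each display encoder (under the modeset
 * locks), followed by the lock-free ->suspend_complete() hook; see the
 * analogous intel_shutdown_encoders() below.
 */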
static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;

	if (!HAS_DISPLAY(dev_priv))
		return;

	/*
	 * TODO: check and remove holding the modeset locks if none of
	 * the encoders depends on this.
	 */
	drm_modeset_lock_all(&dev_priv->drm);
	for_each_intel_encoder(&dev_priv->drm, encoder)
		if (encoder->suspend)
			encoder->suspend(encoder);
	drm_modeset_unlock_all(&dev_priv->drm);

	for_each_intel_encoder(&dev_priv->drm, encoder)
		if (encoder->suspend_complete)
			encoder->suspend_complete(encoder);
}

static void intel_shutdown_encoders(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;

	if (!HAS_DISPLAY(dev_priv))
		return;

	/*
	 * TODO: check and remove holding the modeset locks if none of
	 * the encoders depends on this.
	 */
	drm_modeset_lock_all(&dev_priv->drm);
	for_each_intel_encoder(&dev_priv->drm, encoder)
		if (encoder->shutdown)
			encoder->shutdown(encoder);
	drm_modeset_unlock_all(&dev_priv->drm);

	for_each_intel_encoder(&dev_priv->drm, encoder)
		if (encoder->shutdown_complete)
			encoder->shutdown_complete(encoder);
}

void i915_driver_shutdown(struct drm_i915_private *i915)
{
	disable_rpm_wakeref_asserts(&i915->runtime_pm);
	intel_runtime_pm_disable(&i915->runtime_pm);
	intel_power_domains_disable(i915);

	if (HAS_DISPLAY(i915)) {
		drm_kms_helper_poll_disable(&i915->drm);

		drm_atomic_helper_shutdown(&i915->drm);
	}

	intel_dp_mst_suspend(i915);

	intel_runtime_pm_disable_interrupts(i915);
	intel_hpd_cancel_work(i915);

	intel_suspend_encoders(i915);
	intel_shutdown_encoders(i915);

	intel_dmc_suspend(i915);

	i915_gem_suspend(i915);

	/*
	 * The only requirement is to reboot with display DC states disabled,
	 * for now leaving all display power wells in the INIT power domain
	 * enabled.
	 *
	 * TODO:
	 * - unify the pci_driver::shutdown sequence here with the
	 *   pci_driver.driver.pm.poweroff,poweroff_late sequence.
	 * - unify the driver remove and system/runtime suspend sequences with
	 *   the above unified shutdown/poweroff sequence.
	 */
	intel_power_domains_driver_remove(i915);
	enable_rpm_wakeref_asserts(&i915->runtime_pm);

	intel_runtime_pm_driver_release(&i915->runtime_pm);
}

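/*
 * Report whether the target system sleep state is shallower than S3,
 * i.e. we are suspending to idle (s2idle) rather than entering S3 proper.
 */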
static bool suspend_to_idle(struct drm_i915_private *dev_priv)
{
#if IS_ENABLED(CONFIG_ACPI_SLEEP)
	if (acpi_target_system_state() < ACPI_STATE_S3)
		return true;
#endif
	return false;
}

static void i915_drm_complete(struct drm_device *dev)
{
	struct drm_i915_private *i915 = to_i915(dev);

	intel_pxp_resume_complete(i915->pxp);
}

static int i915_drm_prepare(struct drm_device *dev)
{
	struct drm_i915_private *i915 = to_i915(dev);

	intel_pxp_suspend_prepare(i915->pxp);

	/*
	 * NB intel_display_driver_suspend() may issue new requests after we've
	 * ostensibly marked the GPU as ready-to-sleep here. We need to
	 * split out that work and pull it forward so that after this point,
	 * the GPU is not woken again.
	 */
	return i915_gem_backup_suspend(i915);
}

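/*
 * First phase of system suspend: stop the display (modeset, MST, HPD and
 * encoders), suspend the DPT and GGTT mappings, save display state and
 * tell the opregion which power state we are headed for.
 */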
static int i915_drm_suspend(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
	pci_power_t opregion_target_state;

	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	/*
	 * We do a lot of poking in a lot of registers; make sure they work
	 * properly.
	 */
	intel_power_domains_disable(dev_priv);
	if (HAS_DISPLAY(dev_priv))
		drm_kms_helper_poll_disable(dev);

	pci_save_state(pdev);

	intel_display_driver_suspend(dev_priv);

	intel_dp_mst_suspend(dev_priv);

	intel_runtime_pm_disable_interrupts(dev_priv);
	intel_hpd_cancel_work(dev_priv);

	intel_suspend_encoders(dev_priv);

	/* Must be called before GGTT is suspended. */
	intel_dpt_suspend(dev_priv);
	i915_ggtt_suspend(to_gt(dev_priv)->ggtt);

	i915_save_display(dev_priv);

	opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold;
	intel_opregion_suspend(dev_priv, opregion_target_state);

	intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);

	dev_priv->suspend_count++;

	intel_dmc_suspend(dev_priv);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	i915_gem_drain_freed_objects(dev_priv);

	return 0;
}

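/*
 * Second phase of system suspend: quiesce the GPU and uncore, suspend the
 * power domains and finally put the PCI device into D3hot (with a pre-gen6
 * exception for hibernation, see the comment below).
 */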
static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
	struct intel_gt *gt;
	int ret, i;
	bool s2idle = !hibernation && suspend_to_idle(dev_priv);

	disable_rpm_wakeref_asserts(rpm);

	intel_pxp_suspend(dev_priv->pxp);

	i915_gem_suspend_late(dev_priv);

	for_each_gt(gt, dev_priv, i)
		intel_uncore_suspend(gt->uncore);

	intel_power_domains_suspend(dev_priv, s2idle);

	intel_display_power_suspend_late(dev_priv);

	ret = vlv_suspend_complete(dev_priv);
	if (ret) {
		drm_err(&dev_priv->drm, "Suspend complete failed: %d\n", ret);
		intel_power_domains_resume(dev_priv);

		goto out;
	}

	pci_disable_device(pdev);
	/*
	 * During hibernation on some platforms the BIOS may try to access
	 * the device even though it's already in D3 and hang the machine. So
	 * leave the device in D0 on those platforms and hope the BIOS will
	 * power down the device properly. The issue was seen on multiple old
	 * GENs with different BIOS vendors, so having an explicit blacklist
	 * is impractical; apply the workaround on everything pre GEN6. The
	 * platforms where the issue was seen:
	 * Lenovo Thinkpad X301, X61s, X60, T60, X41
	 * Fujitsu FSC S7110
	 * Acer Aspire 1830T
	 */
	if (!(hibernation && GRAPHICS_VER(dev_priv) < 6))
		pci_set_power_state(pdev, PCI_D3hot);

out:
	enable_rpm_wakeref_asserts(rpm);
	if (!dev_priv->uncore.user_forcewake_count)
		intel_runtime_pm_driver_release(rpm);

	return ret;
}

int i915_driver_suspend_switcheroo(struct drm_i915_private *i915,
				   pm_message_t state)
{
	int error;

	if (drm_WARN_ON_ONCE(&i915->drm, state.event != PM_EVENT_SUSPEND &&
			     state.event != PM_EVENT_FREEZE))
		return -EINVAL;

	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	error = i915_drm_suspend(&i915->drm);
	if (error)
		return error;

	return i915_drm_suspend_late(&i915->drm, false);
}

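/*
 * Main resume path: restore the GGTT/DPT and display state, re-enable
 * interrupts and bring the modeset, MST and HPD machinery back up.
 */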
static int i915_drm_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_gt *gt;
	int ret, i;

	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	ret = i915_pcode_init(dev_priv);
	if (ret)
		return ret;

	sanitize_gpu(dev_priv);

	ret = i915_ggtt_enable_hw(dev_priv);
	if (ret)
		drm_err(&dev_priv->drm, "failed to re-enable GGTT\n");

	i915_ggtt_resume(to_gt(dev_priv)->ggtt);

	for_each_gt(gt, dev_priv, i)
		if (GRAPHICS_VER(gt->i915) >= 8)
			setup_private_pat(gt);

	/* Must be called after GGTT is resumed. */
	intel_dpt_resume(dev_priv);

	intel_dmc_resume(dev_priv);

	i915_restore_display(dev_priv);
	intel_pps_unlock_regs_wa(dev_priv);

	intel_init_pch_refclk(dev_priv);

	/*
	 * Interrupts have to be enabled before any batches are run. If not the
	 * GPU will hang. i915_gem_init_hw() will initiate batches to
	 * update/restore the context.
	 *
	 * drm_mode_config_reset() needs AUX interrupts.
	 *
	 * Modeset enabling in intel_display_driver_init_hw() also needs working
	 * interrupts.
	 */
	intel_runtime_pm_enable_interrupts(dev_priv);

	if (HAS_DISPLAY(dev_priv))
		drm_mode_config_reset(dev);

	i915_gem_resume(dev_priv);

	intel_display_driver_init_hw(dev_priv);

	intel_clock_gating_init(dev_priv);
	intel_hpd_init(dev_priv);

	/* MST sideband requires HPD interrupts enabled */
	intel_dp_mst_resume(dev_priv);
	intel_display_driver_resume(dev_priv);

	intel_hpd_poll_disable(dev_priv);
	if (HAS_DISPLAY(dev_priv))
		drm_kms_helper_poll_enable(dev);

	intel_opregion_resume(dev_priv);

	intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);

	intel_power_domains_enable(dev_priv);

	intel_gvt_resume(dev_priv);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return 0;
}

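/*
 * Early resume: bring the PCI device back up to D0 and re-enable it
 * before anything touches the hardware, then resume the uncore and the
 * display power state.
 */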
static int i915_drm_resume_early(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
	struct intel_gt *gt;
	int ret, i;

	/*
	 * We have a resume ordering issue with the snd-hda driver also
	 * requiring our device to be powered up. Due to the lack of a
	 * parent/child relationship we currently solve this with an early
	 * resume hook.
	 *
	 * FIXME: This should be solved with a special hdmi sink device or
	 * similar so that power domains can be employed.
	 */

	/*
	 * Note that we need to set the power state explicitly, since we
	 * powered off the device during freeze and the PCI core won't power
	 * it back up for us during thaw. Powering off the device during
	 * freeze is not a hard requirement though, and during the
	 * suspend/resume phases the PCI core makes sure we get here with the
	 * device powered on. So in case we change our freeze logic and keep
	 * the device powered we can also remove the following set power state
	 * call.
	 */
	ret = pci_set_power_state(pdev, PCI_D0);
	if (ret) {
		drm_err(&dev_priv->drm,
			"failed to set PCI D0 power state (%d)\n", ret);
		return ret;
	}

	/*
	 * Note that pci_enable_device() first enables any parent bridge
	 * device and only then sets the power state for this device. The
	 * bridge enabling is a nop though, since bridge devices are resumed
	 * first. The order of enabling power and enabling the device is
	 * imposed by the PCI core as described above, so here we preserve the
	 * same order for the freeze/thaw phases.
	 *
	 * TODO: eventually we should remove pci_disable_device() /
	 * pci_enable_device() from suspend/resume. Due to how they
	 * depend on the device enable refcount we can't anyway depend on them
	 * disabling/enabling the device.
	 */
	if (pci_enable_device(pdev))
		return -EIO;

	pci_set_master(pdev);

	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	ret = vlv_resume_prepare(dev_priv, false);
	if (ret)
		drm_err(&dev_priv->drm,
			"Resume prepare failed: %d, continuing anyway\n", ret);

	for_each_gt(gt, dev_priv, i) {
		intel_uncore_resume_early(gt->uncore);
		intel_gt_check_and_clear_faults(gt);
	}

	intel_display_power_resume_early(dev_priv);

	intel_power_domains_resume(dev_priv);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}

int i915_driver_resume_switcheroo(struct drm_i915_private *i915)
{
	int ret;

	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	ret = i915_drm_resume_early(&i915->drm);
	if (ret)
		return ret;

	return i915_drm_resume(&i915->drm);
}

static int i915_pm_prepare(struct device *kdev)
{
	struct drm_i915_private *i915 = kdev_to_i915(kdev);

	if (!i915) {
		dev_err(kdev, "DRM not initialized, aborting suspend.\n");
		return -ENODEV;
	}

	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_prepare(&i915->drm);
}

static int i915_pm_suspend(struct device *kdev)
{
	struct drm_i915_private *i915 = kdev_to_i915(kdev);

	if (!i915) {
		dev_err(kdev, "DRM not initialized, aborting suspend.\n");
		return -ENODEV;
	}

	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_suspend(&i915->drm);
}

static int i915_pm_suspend_late(struct device *kdev)
{
	struct drm_i915_private *i915 = kdev_to_i915(kdev);

	/*
	 * We have a suspend ordering issue with the snd-hda driver also
	 * requiring our device to be powered up. Due to the lack of a
	 * parent/child relationship we currently solve this with a late
	 * suspend hook.
	 *
	 * FIXME: This should be solved with a special hdmi sink device or
	 * similar so that power domains can be employed.
	 */
	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_suspend_late(&i915->drm, false);
}

static int i915_pm_poweroff_late(struct device *kdev)
{
	struct drm_i915_private *i915 = kdev_to_i915(kdev);

	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_suspend_late(&i915->drm, true);
}

static int i915_pm_resume_early(struct device *kdev)
{
	struct drm_i915_private *i915 = kdev_to_i915(kdev);

	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_resume_early(&i915->drm);
}

static int i915_pm_resume(struct device *kdev)
{
	struct drm_i915_private *i915 = kdev_to_i915(kdev);

	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_resume(&i915->drm);
}

static void i915_pm_complete(struct device *kdev)
{
	struct drm_i915_private *i915 = kdev_to_i915(kdev);

	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
		return;

	i915_drm_complete(&i915->drm);
}

/* freeze: before creating the hibernation_image */
static int i915_pm_freeze(struct device *kdev)
{
	struct drm_i915_private *i915 = kdev_to_i915(kdev);
	int ret;

	if (i915->drm.switch_power_state != DRM_SWITCH_POWER_OFF) {
		ret = i915_drm_suspend(&i915->drm);
		if (ret)
			return ret;
	}

	ret = i915_gem_freeze(i915);
	if (ret)
		return ret;

	return 0;
}

static int i915_pm_freeze_late(struct device *kdev)
{
	struct drm_i915_private *i915 = kdev_to_i915(kdev);
	int ret;

	if (i915->drm.switch_power_state != DRM_SWITCH_POWER_OFF) {
		ret = i915_drm_suspend_late(&i915->drm, true);
		if (ret)
			return ret;
	}

	ret = i915_gem_freeze_late(i915);
	if (ret)
		return ret;

	return 0;
}

/* thaw: called after creating the hibernation image, but before turning off. */
static int i915_pm_thaw_early(struct device *kdev)
{
	return i915_pm_resume_early(kdev);
}

static int i915_pm_thaw(struct device *kdev)
{
	return i915_pm_resume(kdev);
}

/* restore: called after loading the hibernation image. */
static int i915_pm_restore_early(struct device *kdev)
{
	return i915_pm_resume_early(kdev);
}

static int i915_pm_restore(struct device *kdev)
{
	return i915_pm_resume(kdev);
}

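/*
 * Runtime (D3) suspend path: quiesce GEM and the GTs, tear down
 * interrupts, suspend the uncore and display power, then notify the
 * opregion of the new device power state.
 */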
static int intel_runtime_suspend(struct device *kdev)
{
	struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
	struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
	struct pci_dev *root_pdev;
	struct intel_gt *gt;
	int ret, i;

	if (drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_RUNTIME_PM(dev_priv)))
		return -ENODEV;

	drm_dbg(&dev_priv->drm, "Suspending device\n");

	disable_rpm_wakeref_asserts(rpm);

	/*
	 * We are safe here against re-faults, since the fault handler takes
	 * an RPM reference.
	 */
	i915_gem_runtime_suspend(dev_priv);

	intel_pxp_runtime_suspend(dev_priv->pxp);

	for_each_gt(gt, dev_priv, i)
		intel_gt_runtime_suspend(gt);

	intel_runtime_pm_disable_interrupts(dev_priv);

	for_each_gt(gt, dev_priv, i)
		intel_uncore_suspend(gt->uncore);

	intel_display_power_suspend(dev_priv);

	ret = vlv_suspend_complete(dev_priv);
	if (ret) {
		drm_err(&dev_priv->drm,
			"Runtime suspend failed, disabling it (%d)\n", ret);
		intel_uncore_runtime_resume(&dev_priv->uncore);

		intel_runtime_pm_enable_interrupts(dev_priv);

		for_each_gt(gt, dev_priv, i)
			intel_gt_runtime_resume(gt);

		enable_rpm_wakeref_asserts(rpm);

		return ret;
	}

	enable_rpm_wakeref_asserts(rpm);
	intel_runtime_pm_driver_release(rpm);

	if (intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore))
		drm_err(&dev_priv->drm,
			"Unclaimed access detected prior to suspending\n");

	/*
	 * FIXME: Temporary hammer to avoid freezing the machine on our DGFX.
	 * This should be totally removed when we handle the PCI states
	 * properly on runtime PM.
	 */
	root_pdev = pcie_find_root_port(pdev);
	if (root_pdev)
		pci_d3cold_disable(root_pdev);

	rpm->suspended = true;

	/*
	 * FIXME: We really should find a document that references the arguments
	 * used below!
	 */
	if (IS_BROADWELL(dev_priv)) {
		/*
		 * On Broadwell, if we use PCI_D1 the PCH DDI ports will stop
		 * being detected, and the call we do at intel_runtime_resume()
		 * won't be able to restore them. Since PCI_D3hot matches the
		 * actual specification and appears to be working, use it.
		 */
		intel_opregion_notify_adapter(dev_priv, PCI_D3hot);
	} else {
		/*
		 * Current versions of firmware which depend on this opregion
		 * notification have repurposed the D1 definition to mean
		 * "runtime suspended" vs. what you would normally expect (D3)
		 * to distinguish it from notifications that might be sent via
		 * the suspend path.
		 */
		intel_opregion_notify_adapter(dev_priv, PCI_D1);
	}

	assert_forcewakes_inactive(&dev_priv->uncore);

	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
		intel_hpd_poll_enable(dev_priv);

	drm_dbg(&dev_priv->drm, "Device suspended\n");
	return 0;
}

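/*
 * Runtime resume path: the inverse of intel_runtime_suspend(), bringing
 * power, interrupts, the GTs and HPD back up.
 */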
static int intel_runtime_resume(struct device *kdev)
{
	struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
	struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
	struct pci_dev *root_pdev;
	struct intel_gt *gt;
	int ret, i;

	if (drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_RUNTIME_PM(dev_priv)))
		return -ENODEV;

	drm_dbg(&dev_priv->drm, "Resuming device\n");

	drm_WARN_ON_ONCE(&dev_priv->drm, atomic_read(&rpm->wakeref_count));
	disable_rpm_wakeref_asserts(rpm);

	intel_opregion_notify_adapter(dev_priv, PCI_D0);
	rpm->suspended = false;

	root_pdev = pcie_find_root_port(pdev);
	if (root_pdev)
		pci_d3cold_enable(root_pdev);

	if (intel_uncore_unclaimed_mmio(&dev_priv->uncore))
		drm_dbg(&dev_priv->drm,
			"Unclaimed access during suspend, bios?\n");

	intel_display_power_resume(dev_priv);

	ret = vlv_resume_prepare(dev_priv, true);

	for_each_gt(gt, dev_priv, i)
		intel_uncore_runtime_resume(gt->uncore);

	intel_runtime_pm_enable_interrupts(dev_priv);

	/*
	 * No point in rolling back things in case of an error, as the best
	 * we can do is to hope that things will still work (and disable RPM).
	 */
	for_each_gt(gt, dev_priv, i)
		intel_gt_runtime_resume(gt);

	intel_pxp_runtime_resume(dev_priv->pxp);

	/*
	 * On VLV/CHV display interrupts are part of the display
	 * power well, so hpd is reinitialized from there. For
	 * everyone else do it here.
	 */
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
		intel_hpd_init(dev_priv);
		intel_hpd_poll_disable(dev_priv);
	}

	skl_watermark_ipc_update(dev_priv);

	enable_rpm_wakeref_asserts(rpm);

	if (ret)
		drm_err(&dev_priv->drm,
			"Runtime resume failed, disabling it (%d)\n", ret);
	else
		drm_dbg(&dev_priv->drm, "Device resumed\n");

	return ret;
}

const struct dev_pm_ops i915_pm_ops = {
	/*
	 * S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND,
	 * PMSG_RESUME]
	 */
	.prepare = i915_pm_prepare,
	.suspend = i915_pm_suspend,
	.suspend_late = i915_pm_suspend_late,
	.resume_early = i915_pm_resume_early,
	.resume = i915_pm_resume,
	.complete = i915_pm_complete,

	/*
	 * S4 event handlers
	 * @freeze, @freeze_late    : called (1) before creating the
	 *                            hibernation image [PMSG_FREEZE] and
	 *                            (2) after rebooting, before restoring
	 *                            the image [PMSG_QUIESCE]
	 * @thaw, @thaw_early       : called (1) after creating the hibernation
	 *                            image, before writing it [PMSG_THAW]
	 *                            and (2) after failing to create or
	 *                            restore the image [PMSG_RECOVER]
	 * @poweroff, @poweroff_late: called after writing the hibernation
	 *                            image, before rebooting [PMSG_HIBERNATE]
	 * @restore, @restore_early : called after rebooting and restoring the
	 *                            hibernation image [PMSG_RESTORE]
	 */
	.freeze = i915_pm_freeze,
	.freeze_late = i915_pm_freeze_late,
	.thaw_early = i915_pm_thaw_early,
	.thaw = i915_pm_thaw,
	.poweroff = i915_pm_suspend,
	.poweroff_late = i915_pm_poweroff_late,
	.restore_early = i915_pm_restore_early,
	.restore = i915_pm_restore,

	/* S0ix (via runtime suspend) event handlers */
	.runtime_suspend = intel_runtime_suspend,
	.runtime_resume = intel_runtime_resume,
};

static const struct file_operations i915_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release_noglobal,
	.unlocked_ioctl = drm_ioctl,
	.mmap = i915_gem_mmap,
	.poll = drm_poll,
	.read = drm_read,
	.compat_ioctl = i915_ioc32_compat_ioctl,
	.llseek = noop_llseek,
#ifdef CONFIG_PROC_FS
	.show_fdinfo = drm_show_fdinfo,
#endif
};

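/* The legacy GEM pin/unpin ioctls are no longer supported. */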
static int
i915_gem_reject_pin_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file)
{
	return -ENODEV;
}

static const struct drm_ioctl_desc i915_ioctls[] = {
	DRM_IOCTL_DEF_DRV(I915_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_FLUSH, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_FLIP, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_SETPARAM, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP,  drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE,  drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE,  drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2_WR, i915_gem_execbuffer2_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CREATE_EXT, i915_gem_create_ext_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_OFFSET, i915_gem_mmap_offset_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id_ioctl, 0),
	DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image_ioctl, DRM_MASTER),
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs_ioctl, DRM_MASTER),
	DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey_ioctl, DRM_MASTER),
	DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER),
	DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE_EXT, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_gem_context_reset_stats_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_PERF_OPEN, i915_perf_open_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_PERF_ADD_CONFIG, i915_perf_add_config_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_PERF_REMOVE_CONFIG, i915_perf_remove_config_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_QUERY, i915_query_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_VM_CREATE, i915_gem_vm_create_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_VM_DESTROY, i915_gem_vm_destroy_ioctl, DRM_RENDER_ALLOW),
};

/*
 * Interface history:
 *
 * 1.1: Original.
 * 1.2: Add Power Management
 * 1.3: Add vblank support
 * 1.4: Fix cmdbuffer path, add heap destroy
 * 1.5: Add vblank pipe configuration
 * 1.6: - New ioctl for scheduling buffer swaps on vertical blank
 *      - Support vertical blank on secondary display pipe
 */
#define DRIVER_MAJOR		1
#define DRIVER_MINOR		6
#define DRIVER_PATCHLEVEL	0

static const struct drm_driver i915_drm_driver = {
	/*
	 * Don't use MTRRs here; the Xserver or userspace app should
	 * deal with them for Intel hardware.
	 */
	.driver_features =
	    DRIVER_GEM |
	    DRIVER_RENDER | DRIVER_MODESET | DRIVER_ATOMIC | DRIVER_SYNCOBJ |
	    DRIVER_SYNCOBJ_TIMELINE,
	.release = i915_driver_release,
	.open = i915_driver_open,
	.lastclose = i915_driver_lastclose,
	.postclose = i915_driver_postclose,
	.show_fdinfo = PTR_IF(IS_ENABLED(CONFIG_PROC_FS), i915_drm_client_fdinfo),

	.gem_prime_import = i915_gem_prime_import,

	.dumb_create = i915_gem_dumb_create,
	.dumb_map_offset = i915_gem_dumb_mmap_offset,

	.ioctls = i915_ioctls,
	.num_ioctls = ARRAY_SIZE(i915_ioctls),
	.fops = &i915_driver_fops,
	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};