/* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
 */
/*
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/oom.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/pnp.h>
#include <linux/slab.h>
#include <linux/vga_switcheroo.h>
#include <linux/vt.h>
#include <acpi/video.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_irq.h>
#include <drm/drm_managed.h>
#include <drm/drm_probe_helper.h>

#include "display/intel_acpi.h"
#include "display/intel_audio.h"
#include "display/intel_bw.h"
#include "display/intel_cdclk.h"
#include "display/intel_csr.h"
#include "display/intel_display_debugfs.h"
#include "display/intel_display_types.h"
#include "display/intel_dp.h"
#include "display/intel_fbdev.h"
#include "display/intel_hotplug.h"
#include "display/intel_overlay.h"
#include "display/intel_pipe_crc.h"
#include "display/intel_sprite.h"
#include "display/intel_vga.h"

#include "gem/i915_gem_context.h"
#include "gem/i915_gem_ioctls.h"
#include "gem/i915_gem_mman.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_rc6.h"

#include "i915_debugfs.h"
#include "i915_drv.h"
#include "i915_ioc32.h"
#include "i915_irq.h"
#include "i915_memcpy.h"
#include "i915_perf.h"
#include "i915_query.h"
#include "i915_suspend.h"
#include "i915_switcheroo.h"
#include "i915_sysfs.h"
#include "i915_trace.h"
#include "i915_vgpu.h"
#include "intel_dram.h"
#include "intel_gvt.h"
#include "intel_memory_region.h"
#include "intel_pm.h"
#include "intel_sideband.h"
#include "vlv_suspend.h"

static struct drm_driver driver;

static int i915_get_bridge_dev(struct drm_i915_private *dev_priv)
{
	int domain = pci_domain_nr(dev_priv->drm.pdev->bus);

	dev_priv->bridge_dev =
		pci_get_domain_bus_and_slot(domain, 0, PCI_DEVFN(0, 0));
	if (!dev_priv->bridge_dev) {
		drm_err(&dev_priv->drm, "bridge device not found\n");
		return -1;
	}
	return 0;
}
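
/*
 * For reference (illustrative sketch, not driver code): PCI_DEVFN(slot, fn)
 * packs a device/function pair into a single devfn byte, so the lookup
 * above resolves the host bridge at <domain>:00:00.0:
 *
 *	PCI_DEVFN(0, 0) == (((0 & 0x1f) << 3) | (0 & 0x07)) == 0
 */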

/* Allocate space for the MCH regs if needed, return nonzero on error */
static int
intel_alloc_mchbar_resource(struct drm_i915_private *dev_priv)
{
	int reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp_lo, temp_hi = 0;
	u64 mchbar_addr;
	int ret;

	if (INTEL_GEN(dev_priv) >= 4)
		pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
	pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
	mchbar_addr = ((u64)temp_hi << 32) | temp_lo;

	/* If ACPI doesn't have it, assume we need to allocate it ourselves */
#ifdef CONFIG_PNP
	if (mchbar_addr &&
	    pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
		return 0;
#endif

	/* Get some space for it */
	dev_priv->mch_res.name = "i915 MCHBAR";
	dev_priv->mch_res.flags = IORESOURCE_MEM;
	ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus,
				     &dev_priv->mch_res,
				     MCHBAR_SIZE, MCHBAR_SIZE,
				     PCIBIOS_MIN_MEM,
				     0, pcibios_align_resource,
				     dev_priv->bridge_dev);
	if (ret) {
		drm_dbg(&dev_priv->drm, "failed bus alloc: %d\n", ret);
		dev_priv->mch_res.start = 0;
		return ret;
	}

	if (INTEL_GEN(dev_priv) >= 4)
		pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
				       upper_32_bits(dev_priv->mch_res.start));

	pci_write_config_dword(dev_priv->bridge_dev, reg,
			       lower_32_bits(dev_priv->mch_res.start));
	return 0;
}
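
/*
 * Register layout assumed above (sketch): on gen4+ MCHBAR is a 64-bit
 * register pair, low dword at MCHBAR_I965 and high dword at MCHBAR_I965 + 4;
 * bit 0 of the low dword is the enable bit tested by intel_setup_mchbar()
 * below, so decoding the raw value looks roughly like:
 *
 *	mchbar_addr = ((u64)temp_hi << 32) | temp_lo;
 *	enabled	    = mchbar_addr & 1;
 *	base	    = mchbar_addr & ~((u64)MCHBAR_SIZE - 1);
 */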

/* Setup MCHBAR if possible; remember whether we need to disable it again */
static void
intel_setup_mchbar(struct drm_i915_private *dev_priv)
{
	int mchbar_reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp;
	bool enabled;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		return;

	dev_priv->mchbar_need_disable = false;

	if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
		pci_read_config_dword(dev_priv->bridge_dev, DEVEN, &temp);
		enabled = !!(temp & DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		enabled = temp & 1;
	}

	/* If it's already enabled, don't have to do anything */
	if (enabled)
		return;

	if (intel_alloc_mchbar_resource(dev_priv))
		return;

	dev_priv->mchbar_need_disable = true;

	/* Space is allocated or reserved, so enable it. */
	if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
		pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
				       temp | DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
	}
}

static void
intel_teardown_mchbar(struct drm_i915_private *dev_priv)
{
	int mchbar_reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;

	if (dev_priv->mchbar_need_disable) {
		if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
			u32 deven_val;

			pci_read_config_dword(dev_priv->bridge_dev, DEVEN,
					      &deven_val);
			deven_val &= ~DEVEN_MCHBAR_EN;
			pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
					       deven_val);
		} else {
			u32 mchbar_val;

			pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg,
					      &mchbar_val);
			mchbar_val &= ~1;
			pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg,
					       mchbar_val);
		}
	}

	if (dev_priv->mch_res.start)
		release_resource(&dev_priv->mch_res);
}

static int i915_workqueues_init(struct drm_i915_private *dev_priv)
{
	/*
	 * The i915 workqueue is primarily used for batched retirement of
	 * requests (and thus managing bo) once the task has been completed
	 * by the GPU. i915_retire_requests() is called directly when we
	 * need high-priority retirement, such as waiting for an explicit
	 * bo.
	 *
	 * It is also used for periodic low-priority events, such as
	 * idle-timers and recording error state.
	 *
	 * All tasks on the workqueue are expected to acquire the dev mutex
	 * so there is no point in running more than one instance of the
	 * workqueue at any time.  Use an ordered one.
	 */
	dev_priv->wq = alloc_ordered_workqueue("i915", 0);
	if (dev_priv->wq == NULL)
		goto out_err;

	dev_priv->hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0);
	if (dev_priv->hotplug.dp_wq == NULL)
		goto out_free_wq;

	return 0;

out_free_wq:
	destroy_workqueue(dev_priv->wq);
out_err:
	drm_err(&dev_priv->drm, "Failed to allocate workqueues.\n");

	return -ENOMEM;
}
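
/*
 * Usage sketch (illustrative, not part of the driver; foo_work_handler is a
 * hypothetical handler): since dev_priv->wq is an ordered workqueue, at most
 * one item executes at a time, so a handler never races with another
 * instance of itself:
 *
 *	INIT_WORK(&foo->work, foo_work_handler);
 *	queue_work(dev_priv->wq, &foo->work);
 */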

static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv)
{
	destroy_workqueue(dev_priv->hotplug.dp_wq);
	destroy_workqueue(dev_priv->wq);
}

/*
 * We don't keep the workarounds for pre-production hardware, so we expect our
 * driver to fail on these machines in one way or another. A little warning on
 * dmesg may help both the user and the bug triagers.
 *
 * Our policy for removing pre-production workarounds is to keep the
 * current gen workarounds as a guide to the bring-up of the next gen
 * (workarounds have a habit of persisting!). Anything older than that
 * should be removed along with the complications they introduce.
 */
static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv)
{
	bool pre = false;

	pre |= IS_HSW_EARLY_SDV(dev_priv);
	pre |= IS_SKL_REVID(dev_priv, 0, SKL_REVID_F0);
	pre |= IS_BXT_REVID(dev_priv, 0, BXT_REVID_B_LAST);
	pre |= IS_KBL_GT_REVID(dev_priv, 0, KBL_REVID_A0);
	pre |= IS_GLK_REVID(dev_priv, 0, GLK_REVID_A2);

	if (pre) {
		drm_err(&dev_priv->drm, "This is a pre-production stepping. "
			  "It may not be fully functional.\n");
		add_taint(TAINT_MACHINE_CHECK, LOCKDEP_STILL_OK);
	}
}
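
/*
 * Pattern sketch (roughly what the IS_*_REVID() helpers above expand to;
 * the exact definitions live in i915_drv.h): an inclusive stepping range
 * checked on the matching platform, e.g.
 *
 *	IS_SKL_REVID(i915, since, until) ~=
 *		IS_SKYLAKE(i915) &&
 *		INTEL_REVID(i915) >= (since) && INTEL_REVID(i915) <= (until)
 */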

static void sanitize_gpu(struct drm_i915_private *i915)
{
	if (!INTEL_INFO(i915)->gpu_reset_clobbers_display)
		__intel_gt_reset(&i915->gt, ALL_ENGINES);
}

/**
 * i915_driver_early_probe - setup state not requiring device access
 * @dev_priv: device private
 *
 * Initialize everything that is a "SW-only" state, that is state not
 * requiring accessing the device or exposing the driver via kernel internal
 * or userspace interfaces. Example steps belonging here: lock initialization,
 * system memory allocation, setting up device specific attributes and
 * function hooks not requiring accessing the device.
 */
static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
{
	int ret = 0;

	if (i915_inject_probe_failure(dev_priv))
		return -ENODEV;

	intel_device_info_subplatform_init(dev_priv);

	intel_uncore_mmio_debug_init_early(&dev_priv->mmio_debug);
	intel_uncore_init_early(&dev_priv->uncore, dev_priv);

	spin_lock_init(&dev_priv->irq_lock);
	spin_lock_init(&dev_priv->gpu_error.lock);
	mutex_init(&dev_priv->backlight_lock);

	mutex_init(&dev_priv->sb_lock);
	cpu_latency_qos_add_request(&dev_priv->sb_qos, PM_QOS_DEFAULT_VALUE);

	mutex_init(&dev_priv->av_mutex);
	mutex_init(&dev_priv->wm.wm_mutex);
	mutex_init(&dev_priv->pps_mutex);
	mutex_init(&dev_priv->hdcp_comp_mutex);

	i915_memcpy_init_early(dev_priv);
	intel_runtime_pm_init_early(&dev_priv->runtime_pm);

	ret = i915_workqueues_init(dev_priv);
	if (ret < 0)
		return ret;

	ret = vlv_suspend_init(dev_priv);
	if (ret < 0)
		goto err_workqueues;

	intel_wopcm_init_early(&dev_priv->wopcm);

	intel_gt_init_early(&dev_priv->gt, dev_priv);

	i915_gem_init_early(dev_priv);

	/* This must be called before any calls to HAS_PCH_* */
	intel_detect_pch(dev_priv);

	intel_pm_setup(dev_priv);
	ret = intel_power_domains_init(dev_priv);
	if (ret < 0)
		goto err_gem;
	intel_irq_init(dev_priv);
	intel_init_display_hooks(dev_priv);
	intel_init_clock_gating_hooks(dev_priv);
	intel_init_audio_hooks(dev_priv);

	intel_detect_preproduction_hw(dev_priv);

	return 0;

err_gem:
	i915_gem_cleanup_early(dev_priv);
	intel_gt_driver_late_release(&dev_priv->gt);
	vlv_suspend_cleanup(dev_priv);
err_workqueues:
	i915_workqueues_cleanup(dev_priv);
	return ret;
}

/**
 * i915_driver_late_release - cleanup the setup done in
 *			       i915_driver_early_probe()
 * @dev_priv: device private
 */
static void i915_driver_late_release(struct drm_i915_private *dev_priv)
{
	intel_irq_fini(dev_priv);
	intel_power_domains_cleanup(dev_priv);
	i915_gem_cleanup_early(dev_priv);
	intel_gt_driver_late_release(&dev_priv->gt);
	vlv_suspend_cleanup(dev_priv);
	i915_workqueues_cleanup(dev_priv);

	cpu_latency_qos_remove_request(&dev_priv->sb_qos);
	mutex_destroy(&dev_priv->sb_lock);

	i915_params_free(&dev_priv->params);
}

/**
 * i915_driver_mmio_probe - setup device MMIO
 * @dev_priv: device private
 *
 * Setup minimal device state necessary for MMIO accesses later in the
 * initialization sequence. The setup here should avoid any other device-wide
 * side effects or exposing the driver via kernel internal or user space
 * interfaces.
 */
static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv)
{
	int ret;

	if (i915_inject_probe_failure(dev_priv))
		return -ENODEV;

	if (i915_get_bridge_dev(dev_priv))
		return -EIO;

	ret = intel_uncore_init_mmio(&dev_priv->uncore);
	if (ret < 0)
		goto err_bridge;

	/* Try to make sure MCHBAR is enabled before poking at it */
	intel_setup_mchbar(dev_priv);

	ret = intel_gt_init_mmio(&dev_priv->gt);
	if (ret)
		goto err_uncore;

	/* As early as possible, scrub existing GPU state before clobbering */
	sanitize_gpu(dev_priv);

	return 0;

err_uncore:
	intel_teardown_mchbar(dev_priv);
	intel_uncore_fini_mmio(&dev_priv->uncore);
err_bridge:
	pci_dev_put(dev_priv->bridge_dev);

	return ret;
}

/**
 * i915_driver_mmio_release - cleanup the setup done in i915_driver_mmio_probe()
 * @dev_priv: device private
 */
static void i915_driver_mmio_release(struct drm_i915_private *dev_priv)
{
	intel_teardown_mchbar(dev_priv);
	intel_uncore_fini_mmio(&dev_priv->uncore);
	pci_dev_put(dev_priv->bridge_dev);
}

static void intel_sanitize_options(struct drm_i915_private *dev_priv)
{
	intel_gvt_sanitize_options(dev_priv);
}

/**
 * i915_set_dma_info - set all relevant PCI dma info as configured for the
 * platform
 * @i915: valid i915 instance
 *
 * Set the dma max segment size, device and coherent masks.  The dma mask set
 * needs to occur before i915_ggtt_probe_hw.
 *
 * A couple of platforms have special needs.  Address them as well.
 *
 */
static int i915_set_dma_info(struct drm_i915_private *i915)
{
	struct pci_dev *pdev = i915->drm.pdev;
	unsigned int mask_size = INTEL_INFO(i915)->dma_mask_size;
	int ret;

	GEM_BUG_ON(!mask_size);

	/*
	 * We don't have a max segment size, so set it to the max so sg's
	 * debugging layer doesn't complain
	 */
	dma_set_max_seg_size(&pdev->dev, UINT_MAX);

	ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(mask_size));
	if (ret)
		goto mask_err;

	/* overlay on gen2 is broken and can't address above 1G */
	if (IS_GEN(i915, 2))
		mask_size = 30;

	/*
	 * 965GM sometimes incorrectly writes to hardware status page (HWS)
	 * using 32bit addressing, overwriting memory if HWS is located
	 * above 4GB.
	 *
	 * The documentation also mentions an issue with undefined
	 * behaviour if any general state is accessed within a page above 4GB,
	 * which also needs to be handled carefully.
	 */
	if (IS_I965G(i915) || IS_I965GM(i915))
		mask_size = 32;

	ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(mask_size));
	if (ret)
		goto mask_err;

	return 0;

mask_err:
	drm_err(&i915->drm, "Can't set DMA mask/consistent mask (%d)\n", ret);
	return ret;
}
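
/*
 * For reference (illustrative, not driver code): DMA_BIT_MASK(n) expands
 * to ((1ULL << (n)) - 1) for n < 64, so the masks chosen above work out to:
 *
 *	DMA_BIT_MASK(30) == 0x3fffffff	(gen2 overlay: below 1 GiB)
 *	DMA_BIT_MASK(32) == 0xffffffff	(965G/GM HWS: below 4 GiB)
 */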

/**
 * i915_driver_hw_probe - setup state requiring device access
 * @dev_priv: device private
 *
 * Setup state that requires accessing the device, but doesn't require
 * exposing the driver via kernel internal or userspace interfaces.
 */
static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	int ret;

	if (i915_inject_probe_failure(dev_priv))
		return -ENODEV;

	intel_device_info_runtime_init(dev_priv);

	if (HAS_PPGTT(dev_priv)) {
		if (intel_vgpu_active(dev_priv) &&
		    !intel_vgpu_has_full_ppgtt(dev_priv)) {
			i915_report_error(dev_priv,
					  "incompatible vGPU found, support for isolated ppGTT required\n");
			return -ENXIO;
		}
	}

	if (HAS_EXECLISTS(dev_priv)) {
		/*
		 * Older GVT emulation depends upon intercepting CSB mmio,
		 * which we no longer use, preferring to use the HWSP cache
		 * instead.
		 */
		if (intel_vgpu_active(dev_priv) &&
		    !intel_vgpu_has_hwsp_emulation(dev_priv)) {
			i915_report_error(dev_priv,
					  "old vGPU host found, support for HWSP emulation required\n");
			return -ENXIO;
		}
	}

	intel_sanitize_options(dev_priv);

	/* needs to be done before ggtt probe */
	intel_dram_edram_detect(dev_priv);

	ret = i915_set_dma_info(dev_priv);
	if (ret)
		return ret;

	i915_perf_init(dev_priv);

	ret = i915_ggtt_probe_hw(dev_priv);
	if (ret)
		goto err_perf;

	ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, "inteldrmfb");
	if (ret)
		goto err_ggtt;

	ret = i915_ggtt_init_hw(dev_priv);
	if (ret)
		goto err_ggtt;

	ret = intel_memory_regions_hw_probe(dev_priv);
	if (ret)
		goto err_ggtt;

	intel_gt_init_hw_early(&dev_priv->gt, &dev_priv->ggtt);

	ret = i915_ggtt_enable_hw(dev_priv);
	if (ret) {
		drm_err(&dev_priv->drm, "failed to enable GGTT\n");
		goto err_mem_regions;
	}

	pci_set_master(pdev);

	intel_gt_init_workarounds(dev_priv);

	/* On the 945G/GM, the chipset reports the MSI capability on the
	 * integrated graphics even though the support isn't actually there
	 * according to the published specs.  It doesn't appear to function
	 * correctly in testing on 945G.
	 * This may be a side effect of MSI having been made available for PEG
	 * and the registers being closely associated.
	 *
	 * According to chipset errata, on the 965GM, MSI interrupts may
	 * be lost or delayed, and were defeatured. MSI interrupts seem to
	 * get lost on g4x as well, and interrupt delivery seems to stay
	 * properly dead afterwards. So we'll just disable them for all
	 * pre-gen5 chipsets.
	 *
	 * dp aux and gmbus irq on gen4 seem to be able to generate legacy
	 * interrupts even when in MSI mode. This results in spurious
	 * interrupt warnings if the legacy irq no. is shared with another
	 * device. The kernel then disables that interrupt source and so
	 * prevents the other device from working properly.
	 */
	if (INTEL_GEN(dev_priv) >= 5) {
		if (pci_enable_msi(pdev) < 0)
			drm_dbg(&dev_priv->drm, "can't enable MSI\n");
	}

	ret = intel_gvt_init(dev_priv);
	if (ret)
		goto err_msi;

	intel_opregion_setup(dev_priv);

	intel_pcode_init(dev_priv);

	/*
	 * Fill the dram structure to get the system raw bandwidth and
	 * dram info. This will be used for memory latency calculation.
	 */
	intel_dram_detect(dev_priv);

	intel_bw_init_hw(dev_priv);

	return 0;

err_msi:
	if (pdev->msi_enabled)
		pci_disable_msi(pdev);
err_mem_regions:
	intel_memory_regions_driver_release(dev_priv);
err_ggtt:
	i915_ggtt_driver_release(dev_priv);
err_perf:
	i915_perf_fini(dev_priv);
	return ret;
}

/**
 * i915_driver_hw_remove - cleanup the setup done in i915_driver_hw_probe()
 * @dev_priv: device private
 */
static void i915_driver_hw_remove(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;

	i915_perf_fini(dev_priv);

	if (pdev->msi_enabled)
		pci_disable_msi(pdev);
}

/**
 * i915_driver_register - register the driver with the rest of the system
 * @dev_priv: device private
 *
 * Perform any steps necessary to make the driver available via kernel
 * internal or userspace interfaces.
 */
static void i915_driver_register(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;

	i915_gem_driver_register(dev_priv);
	i915_pmu_register(dev_priv);

	intel_vgpu_register(dev_priv);

	/* Reveal our presence to userspace */
	if (drm_dev_register(dev, 0) == 0) {
		i915_debugfs_register(dev_priv);
		intel_display_debugfs_register(dev_priv);
		i915_setup_sysfs(dev_priv);

		/* Depends on sysfs having been initialized */
		i915_perf_register(dev_priv);
	} else
		drm_err(&dev_priv->drm,
			"Failed to register driver for userspace access!\n");

	if (HAS_DISPLAY(dev_priv)) {
		/* Must be done after probing outputs */
		intel_opregion_register(dev_priv);
		acpi_video_register();
	}

	intel_gt_driver_register(&dev_priv->gt);

	intel_audio_init(dev_priv);

	/*
	 * Some ports require correctly set-up hpd registers for detection to
	 * work properly (leading to ghost connected connector status), e.g. VGA
	 * on gm45.  Hence we can only set up the initial fbdev config after hpd
	 * irqs are fully enabled. We do it last so that the async config
	 * cannot run before the connectors are registered.
	 */
	intel_fbdev_initial_config_async(dev);

	/*
	 * We need to coordinate the hotplugs with the asynchronous fbdev
	 * configuration, for which we use the fbdev->async_cookie.
	 */
	if (HAS_DISPLAY(dev_priv))
		drm_kms_helper_poll_init(dev);

	intel_power_domains_enable(dev_priv);
	intel_runtime_pm_enable(&dev_priv->runtime_pm);

	intel_register_dsm_handler();

	if (i915_switcheroo_register(dev_priv))
		drm_err(&dev_priv->drm, "Failed to register vga switcheroo!\n");
}

/**
 * i915_driver_unregister - cleanup the registration done in i915_driver_register()
 * @dev_priv: device private
 */
static void i915_driver_unregister(struct drm_i915_private *dev_priv)
{
	i915_switcheroo_unregister(dev_priv);

	intel_unregister_dsm_handler();

	intel_runtime_pm_disable(&dev_priv->runtime_pm);
	intel_power_domains_disable(dev_priv);

	intel_fbdev_unregister(dev_priv);
	intel_audio_deinit(dev_priv);

	/*
	 * After flushing the fbdev (incl. a late async config which will
	 * have delayed queuing of a hotplug event), then flush the hotplug
	 * events.
	 */
	drm_kms_helper_poll_fini(&dev_priv->drm);

	intel_gt_driver_unregister(&dev_priv->gt);
	acpi_video_unregister();
	intel_opregion_unregister(dev_priv);

	i915_perf_unregister(dev_priv);
	i915_pmu_unregister(dev_priv);

	i915_teardown_sysfs(dev_priv);
	drm_dev_unplug(&dev_priv->drm);

	i915_gem_driver_unregister(dev_priv);
}

static void i915_welcome_messages(struct drm_i915_private *dev_priv)
{
	if (drm_debug_enabled(DRM_UT_DRIVER)) {
		struct drm_printer p = drm_debug_printer("i915 device info:");

		drm_printf(&p, "pciid=0x%04x rev=0x%02x platform=%s (subplatform=0x%x) gen=%i\n",
			   INTEL_DEVID(dev_priv),
			   INTEL_REVID(dev_priv),
			   intel_platform_name(INTEL_INFO(dev_priv)->platform),
			   intel_subplatform(RUNTIME_INFO(dev_priv),
					     INTEL_INFO(dev_priv)->platform),
			   INTEL_GEN(dev_priv));

		intel_device_info_print_static(INTEL_INFO(dev_priv), &p);
		intel_device_info_print_runtime(RUNTIME_INFO(dev_priv), &p);
		intel_gt_info_print(&dev_priv->gt.info, &p);
	}

	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG))
		drm_info(&dev_priv->drm, "DRM_I915_DEBUG enabled\n");
	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
		drm_info(&dev_priv->drm, "DRM_I915_DEBUG_GEM enabled\n");
	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM))
		drm_info(&dev_priv->drm,
			 "DRM_I915_DEBUG_RUNTIME_PM enabled\n");
}

static struct drm_i915_private *
i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	const struct intel_device_info *match_info =
		(struct intel_device_info *)ent->driver_data;
	struct intel_device_info *device_info;
	struct drm_i915_private *i915;

	i915 = devm_drm_dev_alloc(&pdev->dev, &driver,
				  struct drm_i915_private, drm);
	if (IS_ERR(i915))
		return i915;

	i915->drm.pdev = pdev;
	pci_set_drvdata(pdev, i915);

	/* Device parameters start as a copy of module parameters. */
	i915_params_copy(&i915->params, &i915_modparams);

	/* Setup the write-once "constant" device info */
	device_info = mkwrite_device_info(i915);
	memcpy(device_info, match_info, sizeof(*device_info));
	RUNTIME_INFO(i915)->device_id = pdev->device;

	BUG_ON(device_info->gen > BITS_PER_TYPE(device_info->gen_mask));

	return i915;
}

/**
 * i915_driver_probe - setup chip and create an initial config
 * @pdev: PCI device
 * @ent: matching PCI ID entry
 *
 * The driver probe routine has to do several things:
 *   - drive output discovery via intel_modeset_init()
 *   - initialize the memory manager
 *   - allocate initial config memory
 *   - setup the DRM framebuffer with the allocated memory
 */
int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	const struct intel_device_info *match_info =
		(struct intel_device_info *)ent->driver_data;
	struct drm_i915_private *i915;
	int ret;

	i915 = i915_driver_create(pdev, ent);
	if (IS_ERR(i915))
		return PTR_ERR(i915);

	/* Disable nuclear pageflip by default on pre-ILK */
	if (!i915->params.nuclear_pageflip && match_info->gen < 5)
		i915->drm.driver_features &= ~DRIVER_ATOMIC;

	/*
	 * Check if we support fake LMEM -- for now we only unleash this for
	 * the live selftests (test-and-exit).
	 */
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
	if (IS_ENABLED(CONFIG_DRM_I915_UNSTABLE_FAKE_LMEM)) {
		if (INTEL_GEN(i915) >= 9 && i915_selftest.live < 0 &&
		    i915->params.fake_lmem_start) {
			mkwrite_device_info(i915)->memory_regions =
				REGION_SMEM | REGION_LMEM | REGION_STOLEN;
			mkwrite_device_info(i915)->is_dgfx = true;
			GEM_BUG_ON(!HAS_LMEM(i915));
			GEM_BUG_ON(!IS_DGFX(i915));
		}
	}
#endif

	ret = pci_enable_device(pdev);
	if (ret)
		goto out_fini;

	ret = i915_driver_early_probe(i915);
	if (ret < 0)
		goto out_pci_disable;

	disable_rpm_wakeref_asserts(&i915->runtime_pm);

	intel_vgpu_detect(i915);

	ret = i915_driver_mmio_probe(i915);
	if (ret < 0)
		goto out_runtime_pm_put;

	ret = i915_driver_hw_probe(i915);
	if (ret < 0)
		goto out_cleanup_mmio;

	ret = intel_modeset_init_noirq(i915);
	if (ret < 0)
		goto out_cleanup_hw;

	ret = intel_irq_install(i915);
	if (ret)
		goto out_cleanup_modeset;

	ret = intel_modeset_init_nogem(i915);
	if (ret)
		goto out_cleanup_irq;

	ret = i915_gem_init(i915);
	if (ret)
		goto out_cleanup_modeset2;

	ret = intel_modeset_init(i915);
	if (ret)
		goto out_cleanup_gem;

	i915_driver_register(i915);

	enable_rpm_wakeref_asserts(&i915->runtime_pm);

	i915_welcome_messages(i915);

	i915->do_release = true;

	return 0;

out_cleanup_gem:
	i915_gem_suspend(i915);
	i915_gem_driver_remove(i915);
	i915_gem_driver_release(i915);
out_cleanup_modeset2:
	/* FIXME clean up the error path */
	intel_modeset_driver_remove(i915);
	intel_irq_uninstall(i915);
	intel_modeset_driver_remove_noirq(i915);
	goto out_cleanup_modeset;
out_cleanup_irq:
	intel_irq_uninstall(i915);
out_cleanup_modeset:
	intel_modeset_driver_remove_nogem(i915);
out_cleanup_hw:
	i915_driver_hw_remove(i915);
	intel_memory_regions_driver_release(i915);
	i915_ggtt_driver_release(i915);
out_cleanup_mmio:
	i915_driver_mmio_release(i915);
out_runtime_pm_put:
	enable_rpm_wakeref_asserts(&i915->runtime_pm);
	i915_driver_late_release(i915);
out_pci_disable:
	pci_disable_device(pdev);
out_fini:
	i915_probe_error(i915, "Device initialization failed (%d)\n", ret);
	return ret;
}

void i915_driver_remove(struct drm_i915_private *i915)
{
	disable_rpm_wakeref_asserts(&i915->runtime_pm);

	i915_driver_unregister(i915);

	/* Flush any external code that still may be under the RCU lock */
	synchronize_rcu();

	i915_gem_suspend(i915);

	drm_atomic_helper_shutdown(&i915->drm);

	intel_gvt_driver_remove(i915);

	intel_modeset_driver_remove(i915);

	intel_irq_uninstall(i915);

	intel_modeset_driver_remove_noirq(i915);

	i915_reset_error_state(i915);
	i915_gem_driver_remove(i915);

	intel_modeset_driver_remove_nogem(i915);

	i915_driver_hw_remove(i915);

	enable_rpm_wakeref_asserts(&i915->runtime_pm);
}

static void i915_driver_release(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;

	if (!dev_priv->do_release)
		return;

	disable_rpm_wakeref_asserts(rpm);

	i915_gem_driver_release(dev_priv);

	intel_memory_regions_driver_release(dev_priv);
	i915_ggtt_driver_release(dev_priv);
	i915_gem_drain_freed_objects(dev_priv);

	i915_driver_mmio_release(dev_priv);

	enable_rpm_wakeref_asserts(rpm);
	intel_runtime_pm_driver_release(rpm);

	i915_driver_late_release(dev_priv);
}

static int i915_driver_open(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	int ret;

	ret = i915_gem_open(i915, file);
	if (ret)
		return ret;

	return 0;
}

/**
 * i915_driver_lastclose - clean up after all DRM clients have exited
 * @dev: DRM device
 *
 * Take care of cleaning up after all DRM clients have exited.  In the
 * mode setting case, we want to restore the kernel's initial mode (just
 * in case the last client left us in a bad state).
 *
 * Additionally, in the non-mode setting case, we'll tear down the GTT
 * and DMA structures, since the kernel won't be using them, and clean
 * up any GEM state.
 */
static void i915_driver_lastclose(struct drm_device *dev)
{
	intel_fbdev_restore_mode(dev);
	vga_switcheroo_process_delayed_switch();
}

static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	i915_gem_context_close(file);

	kfree_rcu(file_priv, rcu);

	/* Catch up with all the deferred frees from "this" client */
	i915_gem_flush_free_objects(to_i915(dev));
}

static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct intel_encoder *encoder;

	drm_modeset_lock_all(dev);
	for_each_intel_encoder(dev, encoder)
		if (encoder->suspend)
			encoder->suspend(encoder);
	drm_modeset_unlock_all(dev);
}

static bool suspend_to_idle(struct drm_i915_private *dev_priv)
{
#if IS_ENABLED(CONFIG_ACPI_SLEEP)
	if (acpi_target_system_state() < ACPI_STATE_S3)
		return true;
#endif
	return false;
}
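
/*
 * Mapping assumed here (sketch): acpi_target_system_state() reports the
 * target sleep state, ACPI_STATE_S0 for suspend-to-idle and ACPI_STATE_S3
 * for classic S3, so the "< ACPI_STATE_S3" test above selects the s2idle
 * path used by get_suspend_mode() and i915_drm_suspend().
 */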

static int i915_drm_prepare(struct drm_device *dev)
{
	struct drm_i915_private *i915 = to_i915(dev);

	/*
	 * NB intel_display_suspend() may issue new requests after we've
	 * ostensibly marked the GPU as ready-to-sleep here. We need to
	 * split out that work and pull it forward so that after point,
	 * the GPU is not woken again.
	 */
	i915_gem_suspend(i915);

	return 0;
}

static int i915_drm_suspend(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct pci_dev *pdev = dev_priv->drm.pdev;
	pci_power_t opregion_target_state;

	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	/* We do a lot of poking in a lot of registers, make sure they work
	 * properly. */
	intel_power_domains_disable(dev_priv);

	drm_kms_helper_poll_disable(dev);

	pci_save_state(pdev);

	intel_display_suspend(dev);

	intel_dp_mst_suspend(dev_priv);

	intel_runtime_pm_disable_interrupts(dev_priv);
	intel_hpd_cancel_work(dev_priv);

	intel_suspend_encoders(dev_priv);

	intel_suspend_hw(dev_priv);

	i915_ggtt_suspend(&dev_priv->ggtt);

	i915_save_state(dev_priv);

	opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold;
	intel_opregion_suspend(dev_priv, opregion_target_state);

	intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);

	dev_priv->suspend_count++;

	intel_csr_ucode_suspend(dev_priv);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return 0;
}

static enum i915_drm_suspend_mode
get_suspend_mode(struct drm_i915_private *dev_priv, bool hibernate)
{
	if (hibernate)
		return I915_DRM_SUSPEND_HIBERNATE;

	if (suspend_to_idle(dev_priv))
		return I915_DRM_SUSPEND_IDLE;

	return I915_DRM_SUSPEND_MEM;
}

static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct pci_dev *pdev = dev_priv->drm.pdev;
	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
	int ret;

	disable_rpm_wakeref_asserts(rpm);

	i915_gem_suspend_late(dev_priv);

	intel_uncore_suspend(&dev_priv->uncore);

	intel_power_domains_suspend(dev_priv,
				    get_suspend_mode(dev_priv, hibernation));

	intel_display_power_suspend_late(dev_priv);

	ret = vlv_suspend_complete(dev_priv);
	if (ret) {
		drm_err(&dev_priv->drm, "Suspend complete failed: %d\n", ret);
		intel_power_domains_resume(dev_priv);

		goto out;
	}

	pci_disable_device(pdev);
	/*
	 * During hibernation on some platforms the BIOS may try to access
	 * the device even though it's already in D3 and hang the machine. So
	 * leave the device in D0 on those platforms and hope the BIOS will
	 * power down the device properly. The issue was seen on multiple old
	 * GENs with different BIOS vendors, so having an explicit blacklist
	 * is impractical; apply the workaround on everything pre GEN6. The
	 * platforms where the issue was seen:
	 * Lenovo Thinkpad X301, X61s, X60, T60, X41
	 * Fujitsu FSC S7110
	 * Acer Aspire 1830T
	 */
	if (!(hibernation && INTEL_GEN(dev_priv) < 6))
		pci_set_power_state(pdev, PCI_D3hot);

out:
	enable_rpm_wakeref_asserts(rpm);
	if (!dev_priv->uncore.user_forcewake_count)
		intel_runtime_pm_driver_release(rpm);

	return ret;
}

int i915_suspend_switcheroo(struct drm_i915_private *i915, pm_message_t state)
{
	int error;

	if (drm_WARN_ON_ONCE(&i915->drm, state.event != PM_EVENT_SUSPEND &&
			     state.event != PM_EVENT_FREEZE))
		return -EINVAL;

	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	error = i915_drm_suspend(&i915->drm);
	if (error)
		return error;

	return i915_drm_suspend_late(&i915->drm, false);
}

static int i915_drm_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret;

	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	sanitize_gpu(dev_priv);

	ret = i915_ggtt_enable_hw(dev_priv);
	if (ret)
		drm_err(&dev_priv->drm, "failed to re-enable GGTT\n");

	i915_ggtt_resume(&dev_priv->ggtt);

	intel_csr_ucode_resume(dev_priv);

	i915_restore_state(dev_priv);
	intel_pps_unlock_regs_wa(dev_priv);

	intel_init_pch_refclk(dev_priv);

	/*
	 * Interrupts have to be enabled before any batches are run. If not the
	 * GPU will hang. i915_gem_init_hw() will initiate batches to
	 * update/restore the context.
	 *
	 * drm_mode_config_reset() needs AUX interrupts.
	 *
	 * Modeset enabling in intel_modeset_init_hw() also needs working
	 * interrupts.
	 */
	intel_runtime_pm_enable_interrupts(dev_priv);

	drm_mode_config_reset(dev);

	i915_gem_resume(dev_priv);

	intel_modeset_init_hw(dev_priv);
	intel_init_clock_gating(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	intel_dp_mst_resume(dev_priv);

	intel_display_resume(dev);

	drm_kms_helper_poll_enable(dev);

	/*
	 * ... but also need to make sure that hotplug processing
	 * doesn't cause havoc. Like in the driver load code we don't
	 * bother with the tiny race here where we might lose hotplug
	 * notifications.
	 */
	intel_hpd_init(dev_priv);

	intel_opregion_resume(dev_priv);

	intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);

	intel_power_domains_enable(dev_priv);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return 0;
}

static int i915_drm_resume_early(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct pci_dev *pdev = dev_priv->drm.pdev;
	int ret;

	/*
	 * We have a resume ordering issue with the snd-hda driver also
	 * requiring our device to be powered up. Due to the lack of a
	 * parent/child relationship we currently solve this with an early
	 * resume hook.
	 *
	 * FIXME: This should be solved with a special hdmi sink device or
	 * similar so that power domains can be employed.
	 */

	/*
	 * Note that we need to set the power state explicitly, since we
	 * powered off the device during freeze and the PCI core won't power
	 * it back up for us during thaw. Powering off the device during
	 * freeze is not a hard requirement though, and during the
	 * suspend/resume phases the PCI core makes sure we get here with the
	 * device powered on. So in case we change our freeze logic and keep
	 * the device powered we can also remove the following set power state
	 * call.
	 */
	ret = pci_set_power_state(pdev, PCI_D0);
	if (ret) {
		drm_err(&dev_priv->drm,
			"failed to set PCI D0 power state (%d)\n", ret);
		return ret;
	}

	/*
	 * Note that pci_enable_device() first enables any parent bridge
	 * device and only then sets the power state for this device. The
	 * bridge enabling is a nop though, since bridge devices are resumed
	 * first. The order of enabling power and enabling the device is
	 * imposed by the PCI core as described above, so here we preserve the
	 * same order for the freeze/thaw phases.
	 *
	 * TODO: eventually we should remove pci_disable_device() /
	 * pci_enable_device() from suspend/resume. Due to how they
	 * depend on the device enable refcount we can't anyway depend on them
	 * disabling/enabling the device.
	 */
	if (pci_enable_device(pdev))
		return -EIO;

	pci_set_master(pdev);

	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	ret = vlv_resume_prepare(dev_priv, false);
	if (ret)
		drm_err(&dev_priv->drm,
			"Resume prepare failed: %d, continuing anyway\n", ret);

	intel_uncore_resume_early(&dev_priv->uncore);

	intel_gt_check_and_clear_faults(&dev_priv->gt);

	intel_display_power_resume_early(dev_priv);

	intel_power_domains_resume(dev_priv);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}

int i915_resume_switcheroo(struct drm_i915_private *i915)
{
	int ret;

	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	ret = i915_drm_resume_early(&i915->drm);
	if (ret)
		return ret;

	return i915_drm_resume(&i915->drm);
}

static int i915_pm_prepare(struct device *kdev)
{
	struct drm_i915_private *i915 = kdev_to_i915(kdev);

	if (!i915) {
		dev_err(kdev, "DRM not initialized, aborting suspend.\n");
		return -ENODEV;
	}

	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_prepare(&i915->drm);
}

static int i915_pm_suspend(struct device *kdev)
{
	struct drm_i915_private *i915 = kdev_to_i915(kdev);

	if (!i915) {
		dev_err(kdev, "DRM not initialized, aborting suspend.\n");
		return -ENODEV;
	}

	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_suspend(&i915->drm);
}

static int i915_pm_suspend_late(struct device *kdev)
{
	struct drm_i915_private *i915 = kdev_to_i915(kdev);

	/*
	 * We have a suspend ordering issue with the snd-hda driver also
	 * requiring our device to be powered up. Due to the lack of a
	 * parent/child relationship we currently solve this with a late
	 * suspend hook.
	 *
	 * FIXME: This should be solved with a special hdmi sink device or
	 * similar so that power domains can be employed.
	 */
	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_suspend_late(&i915->drm, false);
}

static int i915_pm_poweroff_late(struct device *kdev)
{
	struct drm_i915_private *i915 = kdev_to_i915(kdev);

	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_suspend_late(&i915->drm, true);
}

static int i915_pm_resume_early(struct device *kdev)
{
	struct drm_i915_private *i915 = kdev_to_i915(kdev);

	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_resume_early(&i915->drm);
}

static int i915_pm_resume(struct device *kdev)
{
	struct drm_i915_private *i915 = kdev_to_i915(kdev);

	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_resume(&i915->drm);
}

/* freeze: before creating the hibernation_image */
static int i915_pm_freeze(struct device *kdev)
{
	struct drm_i915_private *i915 = kdev_to_i915(kdev);
	int ret;

	if (i915->drm.switch_power_state != DRM_SWITCH_POWER_OFF) {
		ret = i915_drm_suspend(&i915->drm);
		if (ret)
			return ret;
	}

	ret = i915_gem_freeze(i915);
	if (ret)
		return ret;

	return 0;
}

static int i915_pm_freeze_late(struct device *kdev)
{
	struct drm_i915_private *i915 = kdev_to_i915(kdev);
	int ret;

	if (i915->drm.switch_power_state != DRM_SWITCH_POWER_OFF) {
		ret = i915_drm_suspend_late(&i915->drm, true);
		if (ret)
			return ret;
	}

	ret = i915_gem_freeze_late(i915);
	if (ret)
		return ret;

	return 0;
}

/* thaw: called after creating the hibernation image, but before turning off. */
static int i915_pm_thaw_early(struct device *kdev)
{
	return i915_pm_resume_early(kdev);
}

static int i915_pm_thaw(struct device *kdev)
{
	return i915_pm_resume(kdev);
}

/* restore: called after loading the hibernation image. */
static int i915_pm_restore_early(struct device *kdev)
{
	return i915_pm_resume_early(kdev);
}

static int i915_pm_restore(struct device *kdev)
{
	return i915_pm_resume(kdev);
}

static int intel_runtime_suspend(struct device *kdev)
{
	struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
	int ret;

	if (drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_RUNTIME_PM(dev_priv)))
		return -ENODEV;

	drm_dbg_kms(&dev_priv->drm, "Suspending device\n");

	disable_rpm_wakeref_asserts(rpm);

	/*
	 * We are safe here against re-faults, since the fault handler takes
	 * an RPM reference.
	 */
	i915_gem_runtime_suspend(dev_priv);

	intel_gt_runtime_suspend(&dev_priv->gt);

	intel_runtime_pm_disable_interrupts(dev_priv);

	intel_uncore_suspend(&dev_priv->uncore);

	intel_display_power_suspend(dev_priv);

	ret = vlv_suspend_complete(dev_priv);
	if (ret) {
		drm_err(&dev_priv->drm,
			"Runtime suspend failed, disabling it (%d)\n", ret);
		intel_uncore_runtime_resume(&dev_priv->uncore);

		intel_runtime_pm_enable_interrupts(dev_priv);

		intel_gt_runtime_resume(&dev_priv->gt);

		enable_rpm_wakeref_asserts(rpm);

		return ret;
	}

	enable_rpm_wakeref_asserts(rpm);
	intel_runtime_pm_driver_release(rpm);

	if (intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore))
		drm_err(&dev_priv->drm,
			"Unclaimed access detected prior to suspending\n");

	rpm->suspended = true;

	/*
	 * FIXME: We really should find a document that references the arguments
	 * used below!
	 */
	if (IS_BROADWELL(dev_priv)) {
		/*
		 * On Broadwell, if we use PCI_D1 the PCH DDI ports will stop
		 * being detected, and the call we do at intel_runtime_resume()
		 * won't be able to restore them. Since PCI_D3hot matches the
		 * actual specification and appears to be working, use it.
		 */
		intel_opregion_notify_adapter(dev_priv, PCI_D3hot);
	} else {
		/*
		 * current versions of firmware which depend on this opregion
		 * notification have repurposed the D1 definition to mean
		 * "runtime suspended" vs. what you would normally expect (D3)
		 * to distinguish it from notifications that might be sent via
		 * the suspend path.
		 */
		intel_opregion_notify_adapter(dev_priv, PCI_D1);
	}

	assert_forcewakes_inactive(&dev_priv->uncore);

	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
		intel_hpd_poll_init(dev_priv);

	drm_dbg_kms(&dev_priv->drm, "Device suspended\n");
	return 0;
}

static int intel_runtime_resume(struct device *kdev)
{
	struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
	int ret;

	if (drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_RUNTIME_PM(dev_priv)))
		return -ENODEV;

	drm_dbg_kms(&dev_priv->drm, "Resuming device\n");

	drm_WARN_ON_ONCE(&dev_priv->drm, atomic_read(&rpm->wakeref_count));
	disable_rpm_wakeref_asserts(rpm);

	intel_opregion_notify_adapter(dev_priv, PCI_D0);
	rpm->suspended = false;
	if (intel_uncore_unclaimed_mmio(&dev_priv->uncore))
		drm_dbg(&dev_priv->drm,
			"Unclaimed access during suspend, bios?\n");

	intel_display_power_resume(dev_priv);

	ret = vlv_resume_prepare(dev_priv, true);

	intel_uncore_runtime_resume(&dev_priv->uncore);

	intel_runtime_pm_enable_interrupts(dev_priv);

	/*
	 * No point of rolling back things in case of an error, as the best
	 * we can do is to hope that things will still work (and disable RPM).
	 */
	intel_gt_runtime_resume(&dev_priv->gt);

	/*
	 * On VLV/CHV display interrupts are part of the display
	 * power well, so hpd is reinitialized from there. For
	 * everyone else do it here.
	 */
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
		intel_hpd_init(dev_priv);

	intel_enable_ipc(dev_priv);

	enable_rpm_wakeref_asserts(rpm);

	if (ret)
		drm_err(&dev_priv->drm,
			"Runtime resume failed, disabling it (%d)\n", ret);
	else
		drm_dbg_kms(&dev_priv->drm, "Device resumed\n");

	return ret;
}

const struct dev_pm_ops i915_pm_ops = {
	/*
	 * S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND,
	 * PMSG_RESUME]
	 */
	.prepare = i915_pm_prepare,
	.suspend = i915_pm_suspend,
	.suspend_late = i915_pm_suspend_late,
	.resume_early = i915_pm_resume_early,
	.resume = i915_pm_resume,

	/*
	 * S4 event handlers
	 * @freeze, @freeze_late    : called (1) before creating the
	 *                            hibernation image [PMSG_FREEZE] and
	 *                            (2) after rebooting, before restoring
	 *                            the image [PMSG_QUIESCE]
	 * @thaw, @thaw_early       : called (1) after creating the hibernation
	 *                            image, before writing it [PMSG_THAW]
	 *                            and (2) after failing to create or
	 *                            restore the image [PMSG_RECOVER]
	 * @poweroff, @poweroff_late: called after writing the hibernation
	 *                            image, before rebooting [PMSG_HIBERNATE]
	 * @restore, @restore_early : called after rebooting and restoring the
	 *                            hibernation image [PMSG_RESTORE]
	 */
	.freeze = i915_pm_freeze,
	.freeze_late = i915_pm_freeze_late,
	.thaw_early = i915_pm_thaw_early,
	.thaw = i915_pm_thaw,
	.poweroff = i915_pm_suspend,
	.poweroff_late = i915_pm_poweroff_late,
	.restore_early = i915_pm_restore_early,
	.restore = i915_pm_restore,

	/* S0ix (via runtime suspend) event handlers */
	.runtime_suspend = intel_runtime_suspend,
	.runtime_resume = intel_runtime_resume,
};

static const struct file_operations i915_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release_noglobal,
	.unlocked_ioctl = drm_ioctl,
	.mmap = i915_gem_mmap,
	.poll = drm_poll,
	.read = drm_read,
	.compat_ioctl = i915_ioc32_compat_ioctl,
	.llseek = noop_llseek,
};

static int
i915_gem_reject_pin_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file)
{
	return -ENODEV;
}
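
/*
 * Entry pattern (sketch): DRM_IOCTL_DEF_DRV(I915_FOO, handler, flags) binds
 * the DRM_IOCTL_I915_FOO ioctl number to handler, with access gated by the
 * flags, e.g. DRM_RENDER_ALLOW (callable via render nodes), DRM_AUTH
 * (authenticated clients only), DRM_ROOT_ONLY (CAP_SYS_ADMIN required).
 */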

static const struct drm_ioctl_desc i915_ioctls[] = {
	DRM_IOCTL_DEF_DRV(I915_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_FLUSH, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_FLIP, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_SETPARAM, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP,  drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE,  drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE,  drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2_WR, i915_gem_execbuffer2_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_OFFSET, i915_gem_mmap_offset_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id_ioctl, 0),
	DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image_ioctl, DRM_MASTER),
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs_ioctl, DRM_MASTER),
	DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey_ioctl, DRM_MASTER),
	DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER),
	DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE_EXT, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_gem_context_reset_stats_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_PERF_OPEN, i915_perf_open_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_PERF_ADD_CONFIG, i915_perf_add_config_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_PERF_REMOVE_CONFIG, i915_perf_remove_config_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_QUERY, i915_query_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_VM_CREATE, i915_gem_vm_create_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_VM_DESTROY, i915_gem_vm_destroy_ioctl, DRM_RENDER_ALLOW),
};

static struct drm_driver driver = {
	/* Don't use MTRRs here; the Xserver or userspace app should
	 * deal with them for Intel hardware.
	 */
	.driver_features =
	    DRIVER_GEM |
	    DRIVER_RENDER | DRIVER_MODESET | DRIVER_ATOMIC | DRIVER_SYNCOBJ |
	    DRIVER_SYNCOBJ_TIMELINE,
	.release = i915_driver_release,
	.open = i915_driver_open,
	.lastclose = i915_driver_lastclose,
	.postclose = i915_driver_postclose,

	.gem_close_object = i915_gem_close_object,
	.gem_free_object_unlocked = i915_gem_free_object,

	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export = i915_gem_prime_export,
	.gem_prime_import = i915_gem_prime_import,

	.dumb_create = i915_gem_dumb_create,
	.dumb_map_offset = i915_gem_dumb_mmap_offset,

	.ioctls = i915_ioctls,
	.num_ioctls = ARRAY_SIZE(i915_ioctls),
	.fops = &i915_driver_fops,
	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};