/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include <linux/random.h>

#include "gem/selftests/igt_gem_utils.h"
#include "gem/selftests/mock_context.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"

#include "i915_selftest.h"

#include "igt_flush_test.h"
#include "mock_drm.h"

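/*
 * Submit an empty request on each engine of the context; failure to
 * construct a request indicates the context is no longer usable.
 */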
static int switch_to_context(struct i915_gem_context *ctx)
{
	struct i915_gem_engines_iter it;
	struct intel_context *ce;
	int err = 0;

	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
		struct i915_request *rq;

		rq = intel_context_create_request(ce);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			break;
		}

		i915_request_add(rq);
	}
	i915_gem_context_unlock_engines(ctx);

	return err;
}

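/*
 * Scribble a pseudo-random pattern over the whole of stolen memory,
 * one page at a time, by mapping each page into the GGTT error-capture
 * slot. This mimics the loss of stolen contents across hibernation.
 */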
static void trash_stolen(struct drm_i915_private *i915)
{
	struct i915_ggtt *ggtt = &i915->ggtt;
	const u64 slot = ggtt->error_capture.start;
	const resource_size_t size = resource_size(&i915->dsm);
	unsigned long page;
	u32 prng = 0x12345678;

	/* XXX: trashing stolen needs CPU access via the GGTT aperture */
	if (!i915_ggtt_has_aperture(ggtt))
		return;

	for (page = 0; page < size; page += PAGE_SIZE) {
		const dma_addr_t dma = i915->dsm.start + page;
		u32 __iomem *s;
		int x;

		ggtt->vm.insert_page(&ggtt->vm, dma, slot, I915_CACHE_NONE, 0);

		s = io_mapping_map_atomic_wc(&ggtt->iomap, slot);
		for (x = 0; x < PAGE_SIZE / sizeof(u32); x++) {
			prng = next_pseudo_random32(prng);
			iowrite32(prng, &s[x]);
		}
		io_mapping_unmap_atomic(s);
	}

	ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE);
}

static void simulate_hibernate(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref;

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	/*
	 * As a final sting in the tail, invalidate stolen. Under a real S4,
	 * stolen is lost and needs to be refilled on resume. However, under
	 * CI we merely do S4-device testing (as full S4 is too unreliable
	 * for automated testing across a cluster), so to simulate the effect
	 * of stolen being trashed across S4, we trash it ourselves.
	 */
	trash_stolen(i915);

	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}

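/* First phase of suspend: flush outstanding work and idle the GPU */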
static int pm_prepare(struct drm_i915_private *i915)
{
	i915_gem_suspend(i915);

	return 0;
}

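/* Mimic entering S3: save the GGTT contents and complete late suspend */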
static void pm_suspend(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref;

	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		i915_ggtt_suspend(&i915->ggtt);
		i915_gem_suspend_late(i915);
	}
}

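/* Mimic entering S4: save the GGTT, then run the freeze/hibernate path */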
static void pm_hibernate(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref;

	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		i915_ggtt_suspend(&i915->ggtt);

		i915_gem_freeze(i915);
		i915_gem_freeze_late(i915);
	}
}

static void pm_resume(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref;

	/*
	 * Both suspend and hibernate follow the same wakeup path and assume
	 * that runtime-pm just works.
	 */
	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		i915_ggtt_resume(&i915->ggtt);
		i915_gem_resume(i915);
	}
}

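/*
 * Check that a live context survives a simulated suspend/resume cycle,
 * with stolen memory deliberately trashed in between.
 */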
static int igt_gem_suspend(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_gem_context *ctx;
	struct file *file;
	int err;

	file = mock_file(i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	ctx = live_context(i915, file);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
		goto out;
	}

	err = switch_to_context(ctx);
	if (err)
		goto out;

	err = pm_prepare(i915);
	if (err)
		goto out;

	pm_suspend(i915);

	/* Here be dragons! Note that with S3RST any S3 may become S4! */
	simulate_hibernate(i915);

	pm_resume(i915);

	err = switch_to_context(ctx);
out:
	fput(file);
	return err;
}

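/*
 * As igt_gem_suspend(), but take the hibernation (freeze) path rather
 * than the ordinary suspend path before resuming.
 */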
static int igt_gem_hibernate(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_gem_context *ctx;
	struct file *file;
	int err;

	file = mock_file(i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	ctx = live_context(i915, file);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
		goto out;
	}

	err = switch_to_context(ctx);
	if (err)
		goto out;

	err = pm_prepare(i915);
	if (err)
		goto out;

	pm_hibernate(i915);

	/* Here be dragons! */
	simulate_hibernate(i915);

	pm_resume(i915);

	err = switch_to_context(ctx);
out:
	fput(file);
	return err;
}

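/*
 * Exercise the ww mutex locking of GEM objects: taking the same lock
 * twice exercises the -EALREADY handling, and any -EDEADLK must be
 * resolved by backing off and retrying.
 */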
static int igt_gem_ww_ctx(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj, *obj2;
	struct i915_gem_ww_ctx ww;
	int err = 0;

	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	obj2 = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj2)) {
		err = PTR_ERR(obj2);
		goto put1;
	}

	i915_gem_ww_ctx_init(&ww, true);
retry:
	/* Lock the objects, twice for good measure (-EALREADY handling) */
	err = i915_gem_object_lock(obj, &ww);
	if (!err)
		err = i915_gem_object_lock_interruptible(obj, &ww);
	if (!err)
		err = i915_gem_object_lock_interruptible(obj2, &ww);
	if (!err)
		err = i915_gem_object_lock(obj2, &ww);

	if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);
	i915_gem_object_put(obj2);
put1:
	i915_gem_object_put(obj);
	return err;
}

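/* Entry point for the live GEM selftests; skipped if the GPU is wedged */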
int i915_gem_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_gem_suspend),
		SUBTEST(igt_gem_hibernate),
		SUBTEST(igt_gem_ww_ctx),
	};

	if (intel_gt_is_wedged(&i915->gt))
		return 0;

	return i915_live_subtests(tests, i915);
}