1/*
2 * Copyright (C) 2014 Red Hat
3 * Copyright (C) 2014 Intel Corp.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Rob Clark <robdclark@gmail.com>
25 * Daniel Vetter <daniel.vetter@ffwll.ch>
26 */
27
28#include <linux/dma-fence.h>
29#include <linux/ktime.h>
30
31#include <drm/drm_atomic.h>
32#include <drm/drm_atomic_helper.h>
33#include <drm/drm_atomic_uapi.h>
34#include <drm/drm_bridge.h>
35#include <drm/drm_damage_helper.h>
36#include <drm/drm_device.h>
37#include <drm/drm_drv.h>
38#include <drm/drm_plane_helper.h>
39#include <drm/drm_print.h>
40#include <drm/drm_self_refresh_helper.h>
41#include <drm/drm_vblank.h>
42#include <drm/drm_writeback.h>
43
44#include "drm_crtc_helper_internal.h"
45#include "drm_crtc_internal.h"
46
47/**
48 * DOC: overview
49 *
50 * This helper library provides implementations of check and commit functions on
51 * top of the CRTC modeset helper callbacks and the plane helper callbacks. It
52 * also provides convenience implementations for the atomic state handling
53 * callbacks for drivers which don't need to subclass the drm core structures to
54 * add their own additional internal state.
55 *
56 * This library also provides default implementations for the check callback in
57 * drm_atomic_helper_check() and for the commit callback with
58 * drm_atomic_helper_commit(). But the individual stages and callbacks are
59 * exposed to allow drivers to mix and match and e.g. use the plane helpers only
60 * together with a driver private modeset implementation.
61 *
62 * This library also provides implementations for all the legacy driver
63 * interfaces on top of the atomic interface. See drm_atomic_helper_set_config(),
 * drm_atomic_helper_disable_plane() and the
65 * various functions to implement set_property callbacks. New drivers must not
66 * implement these functions themselves but must use the provided helpers.
67 *
68 * The atomic helper uses the same function table structures as all other
69 * modesetting helpers. See the documentation for &struct drm_crtc_helper_funcs,
 * &struct drm_encoder_helper_funcs and &struct drm_connector_helper_funcs. It
71 * also shares the &struct drm_plane_helper_funcs function table with the plane
72 * helpers.
73 */
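
/*
 * As a quick orientation: a driver that can rely entirely on these helpers
 * typically just plugs them into &struct drm_mode_config_funcs. This is only
 * a minimal sketch, the foo_* names and foo_fb_create() are hypothetical:
 *
 *	static const struct drm_mode_config_funcs foo_mode_config_funcs = {
 *		.fb_create = foo_fb_create,
 *		.atomic_check = drm_atomic_helper_check,
 *		.atomic_commit = drm_atomic_helper_commit,
 *	};
 *
 * Drivers with more involved requirements can instead call the individual
 * helpers documented below from their own hooks.
 */
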
74static void
75drm_atomic_helper_plane_changed(struct drm_atomic_state *state,
76				struct drm_plane_state *old_plane_state,
77				struct drm_plane_state *plane_state,
78				struct drm_plane *plane)
79{
80	struct drm_crtc_state *crtc_state;
81
82	if (old_plane_state->crtc) {
83		crtc_state = drm_atomic_get_new_crtc_state(state,
84							   old_plane_state->crtc);
85
86		if (WARN_ON(!crtc_state))
87			return;
88
89		crtc_state->planes_changed = true;
90	}
91
92	if (plane_state->crtc) {
93		crtc_state = drm_atomic_get_new_crtc_state(state, plane_state->crtc);
94
95		if (WARN_ON(!crtc_state))
96			return;
97
98		crtc_state->planes_changed = true;
99	}
100}
101
102static int handle_conflicting_encoders(struct drm_atomic_state *state,
103				       bool disable_conflicting_encoders)
104{
105	struct drm_connector_state *new_conn_state;
106	struct drm_connector *connector;
107	struct drm_connector_list_iter conn_iter;
108	struct drm_encoder *encoder;
109	unsigned encoder_mask = 0;
110	int i, ret = 0;
111
112	/*
113	 * First loop, find all newly assigned encoders from the connectors
114	 * part of the state. If the same encoder is assigned to multiple
115	 * connectors bail out.
116	 */
117	for_each_new_connector_in_state(state, connector, new_conn_state, i) {
118		const struct drm_connector_helper_funcs *funcs = connector->helper_private;
119		struct drm_encoder *new_encoder;
120
121		if (!new_conn_state->crtc)
122			continue;
123
124		if (funcs->atomic_best_encoder)
125			new_encoder = funcs->atomic_best_encoder(connector, new_conn_state);
126		else if (funcs->best_encoder)
127			new_encoder = funcs->best_encoder(connector);
128		else
129			new_encoder = drm_connector_get_single_encoder(connector);
130
131		if (new_encoder) {
132			if (encoder_mask & drm_encoder_mask(new_encoder)) {
133				DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] on [CONNECTOR:%d:%s] already assigned\n",
134					new_encoder->base.id, new_encoder->name,
135					connector->base.id, connector->name);
136
137				return -EINVAL;
138			}
139
140			encoder_mask |= drm_encoder_mask(new_encoder);
141		}
142	}
143
144	if (!encoder_mask)
145		return 0;
146
147	/*
148	 * Second loop, iterate over all connectors not part of the state.
149	 *
150	 * If a conflicting encoder is found and disable_conflicting_encoders
151	 * is not set, an error is returned. Userspace can provide a solution
152	 * through the atomic ioctl.
153	 *
154	 * If the flag is set conflicting connectors are removed from the CRTC
155	 * and the CRTC is disabled if no encoder is left. This preserves
156	 * compatibility with the legacy set_config behavior.
157	 */
158	drm_connector_list_iter_begin(state->dev, &conn_iter);
159	drm_for_each_connector_iter(connector, &conn_iter) {
160		struct drm_crtc_state *crtc_state;
161
162		if (drm_atomic_get_new_connector_state(state, connector))
163			continue;
164
165		encoder = connector->state->best_encoder;
166		if (!encoder || !(encoder_mask & drm_encoder_mask(encoder)))
167			continue;
168
169		if (!disable_conflicting_encoders) {
170			DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] in use on [CRTC:%d:%s] by [CONNECTOR:%d:%s]\n",
171					 encoder->base.id, encoder->name,
172					 connector->state->crtc->base.id,
173					 connector->state->crtc->name,
174					 connector->base.id, connector->name);
175			ret = -EINVAL;
176			goto out;
177		}
178
179		new_conn_state = drm_atomic_get_connector_state(state, connector);
180		if (IS_ERR(new_conn_state)) {
181			ret = PTR_ERR(new_conn_state);
182			goto out;
183		}
184
185		DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] in use on [CRTC:%d:%s], disabling [CONNECTOR:%d:%s]\n",
186				 encoder->base.id, encoder->name,
187				 new_conn_state->crtc->base.id, new_conn_state->crtc->name,
188				 connector->base.id, connector->name);
189
190		crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);
191
192		ret = drm_atomic_set_crtc_for_connector(new_conn_state, NULL);
193		if (ret)
194			goto out;
195
196		if (!crtc_state->connector_mask) {
197			ret = drm_atomic_set_mode_prop_for_crtc(crtc_state,
198								NULL);
199			if (ret < 0)
200				goto out;
201
202			crtc_state->active = false;
203		}
204	}
205out:
206	drm_connector_list_iter_end(&conn_iter);
207
208	return ret;
209}
210
211static void
212set_best_encoder(struct drm_atomic_state *state,
213		 struct drm_connector_state *conn_state,
214		 struct drm_encoder *encoder)
215{
216	struct drm_crtc_state *crtc_state;
217	struct drm_crtc *crtc;
218
219	if (conn_state->best_encoder) {
220		/* Unset the encoder_mask in the old crtc state. */
221		crtc = conn_state->connector->state->crtc;
222
223		/* A NULL crtc is an error here because we should have
224		 * duplicated a NULL best_encoder when crtc was NULL.
225		 * As an exception restoring duplicated atomic state
226		 * during resume is allowed, so don't warn when
227		 * best_encoder is equal to encoder we intend to set.
228		 */
229		WARN_ON(!crtc && encoder != conn_state->best_encoder);
230		if (crtc) {
231			crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
232
233			crtc_state->encoder_mask &=
234				~drm_encoder_mask(conn_state->best_encoder);
235		}
236	}
237
238	if (encoder) {
239		crtc = conn_state->crtc;
240		WARN_ON(!crtc);
241		if (crtc) {
242			crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
243
244			crtc_state->encoder_mask |=
245				drm_encoder_mask(encoder);
246		}
247	}
248
249	conn_state->best_encoder = encoder;
250}
251
252static void
253steal_encoder(struct drm_atomic_state *state,
254	      struct drm_encoder *encoder)
255{
256	struct drm_crtc_state *crtc_state;
257	struct drm_connector *connector;
258	struct drm_connector_state *old_connector_state, *new_connector_state;
259	int i;
260
261	for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i) {
262		struct drm_crtc *encoder_crtc;
263
264		if (new_connector_state->best_encoder != encoder)
265			continue;
266
267		encoder_crtc = old_connector_state->crtc;
268
269		DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] in use on [CRTC:%d:%s], stealing it\n",
270				 encoder->base.id, encoder->name,
271				 encoder_crtc->base.id, encoder_crtc->name);
272
273		set_best_encoder(state, new_connector_state, NULL);
274
275		crtc_state = drm_atomic_get_new_crtc_state(state, encoder_crtc);
276		crtc_state->connectors_changed = true;
277
278		return;
279	}
280}
281
282static int
283update_connector_routing(struct drm_atomic_state *state,
284			 struct drm_connector *connector,
285			 struct drm_connector_state *old_connector_state,
286			 struct drm_connector_state *new_connector_state)
287{
288	const struct drm_connector_helper_funcs *funcs;
289	struct drm_encoder *new_encoder;
290	struct drm_crtc_state *crtc_state;
291
292	DRM_DEBUG_ATOMIC("Updating routing for [CONNECTOR:%d:%s]\n",
293			 connector->base.id,
294			 connector->name);
295
296	if (old_connector_state->crtc != new_connector_state->crtc) {
297		if (old_connector_state->crtc) {
298			crtc_state = drm_atomic_get_new_crtc_state(state, old_connector_state->crtc);
299			crtc_state->connectors_changed = true;
300		}
301
302		if (new_connector_state->crtc) {
303			crtc_state = drm_atomic_get_new_crtc_state(state, new_connector_state->crtc);
304			crtc_state->connectors_changed = true;
305		}
306	}
307
308	if (!new_connector_state->crtc) {
309		DRM_DEBUG_ATOMIC("Disabling [CONNECTOR:%d:%s]\n",
310				connector->base.id,
311				connector->name);
312
313		set_best_encoder(state, new_connector_state, NULL);
314
315		return 0;
316	}
317
318	crtc_state = drm_atomic_get_new_crtc_state(state,
319						   new_connector_state->crtc);
320	/*
321	 * For compatibility with legacy users, we want to make sure that
322	 * we allow DPMS On->Off modesets on unregistered connectors. Modesets
323	 * which would result in anything else must be considered invalid, to
324	 * avoid turning on new displays on dead connectors.
325	 *
326	 * Since the connector can be unregistered at any point during an
327	 * atomic check or commit, this is racy. But that's OK: all we care
328	 * about is ensuring that userspace can't do anything but shut off the
329	 * display on a connector that was destroyed after it's been notified,
330	 * not before.
331	 *
332	 * Additionally, we also want to ignore connector registration when
333	 * we're trying to restore an atomic state during system resume since
334	 * there's a chance the connector may have been destroyed during the
	 * process, but it's better to ignore that than to cause
336	 * drm_atomic_helper_resume() to fail.
337	 */
338	if (!state->duplicated && drm_connector_is_unregistered(connector) &&
339	    crtc_state->active) {
340		DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] is not registered\n",
341				 connector->base.id, connector->name);
342		return -EINVAL;
343	}
344
345	funcs = connector->helper_private;
346
347	if (funcs->atomic_best_encoder)
348		new_encoder = funcs->atomic_best_encoder(connector,
349							 new_connector_state);
350	else if (funcs->best_encoder)
351		new_encoder = funcs->best_encoder(connector);
352	else
353		new_encoder = drm_connector_get_single_encoder(connector);
354
355	if (!new_encoder) {
356		DRM_DEBUG_ATOMIC("No suitable encoder found for [CONNECTOR:%d:%s]\n",
357				 connector->base.id,
358				 connector->name);
359		return -EINVAL;
360	}
361
362	if (!drm_encoder_crtc_ok(new_encoder, new_connector_state->crtc)) {
363		DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] incompatible with [CRTC:%d:%s]\n",
364				 new_encoder->base.id,
365				 new_encoder->name,
366				 new_connector_state->crtc->base.id,
367				 new_connector_state->crtc->name);
368		return -EINVAL;
369	}
370
371	if (new_encoder == new_connector_state->best_encoder) {
372		set_best_encoder(state, new_connector_state, new_encoder);
373
374		DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] keeps [ENCODER:%d:%s], now on [CRTC:%d:%s]\n",
375				 connector->base.id,
376				 connector->name,
377				 new_encoder->base.id,
378				 new_encoder->name,
379				 new_connector_state->crtc->base.id,
380				 new_connector_state->crtc->name);
381
382		return 0;
383	}
384
385	steal_encoder(state, new_encoder);
386
387	set_best_encoder(state, new_connector_state, new_encoder);
388
389	crtc_state->connectors_changed = true;
390
391	DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] using [ENCODER:%d:%s] on [CRTC:%d:%s]\n",
392			 connector->base.id,
393			 connector->name,
394			 new_encoder->base.id,
395			 new_encoder->name,
396			 new_connector_state->crtc->base.id,
397			 new_connector_state->crtc->name);
398
399	return 0;
400}
401
402static int
403mode_fixup(struct drm_atomic_state *state)
404{
405	struct drm_crtc *crtc;
406	struct drm_crtc_state *new_crtc_state;
407	struct drm_connector *connector;
408	struct drm_connector_state *new_conn_state;
409	int i;
410	int ret;
411
412	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
413		if (!new_crtc_state->mode_changed &&
414		    !new_crtc_state->connectors_changed)
415			continue;
416
417		drm_mode_copy(&new_crtc_state->adjusted_mode, &new_crtc_state->mode);
418	}
419
420	for_each_new_connector_in_state(state, connector, new_conn_state, i) {
421		const struct drm_encoder_helper_funcs *funcs;
422		struct drm_encoder *encoder;
423		struct drm_bridge *bridge;
424
425		WARN_ON(!!new_conn_state->best_encoder != !!new_conn_state->crtc);
426
427		if (!new_conn_state->crtc || !new_conn_state->best_encoder)
428			continue;
429
430		new_crtc_state =
431			drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);
432
433		/*
434		 * Each encoder has at most one connector (since we always steal
435		 * it away), so we won't call ->mode_fixup twice.
436		 */
437		encoder = new_conn_state->best_encoder;
438		funcs = encoder->helper_private;
439
440		bridge = drm_bridge_chain_get_first_bridge(encoder);
441		ret = drm_atomic_bridge_chain_check(bridge,
442						    new_crtc_state,
443						    new_conn_state);
444		if (ret) {
445			DRM_DEBUG_ATOMIC("Bridge atomic check failed\n");
446			return ret;
447		}
448
449		if (funcs && funcs->atomic_check) {
450			ret = funcs->atomic_check(encoder, new_crtc_state,
451						  new_conn_state);
452			if (ret) {
453				DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] check failed\n",
454						 encoder->base.id, encoder->name);
455				return ret;
456			}
457		} else if (funcs && funcs->mode_fixup) {
458			ret = funcs->mode_fixup(encoder, &new_crtc_state->mode,
459						&new_crtc_state->adjusted_mode);
460			if (!ret) {
461				DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] fixup failed\n",
462						 encoder->base.id, encoder->name);
463				return -EINVAL;
464			}
465		}
466	}
467
468	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
469		const struct drm_crtc_helper_funcs *funcs;
470
471		if (!new_crtc_state->enable)
472			continue;
473
474		if (!new_crtc_state->mode_changed &&
475		    !new_crtc_state->connectors_changed)
476			continue;
477
478		funcs = crtc->helper_private;
479		if (!funcs || !funcs->mode_fixup)
480			continue;
481
482		ret = funcs->mode_fixup(crtc, &new_crtc_state->mode,
483					&new_crtc_state->adjusted_mode);
484		if (!ret) {
485			DRM_DEBUG_ATOMIC("[CRTC:%d:%s] fixup failed\n",
486					 crtc->base.id, crtc->name);
487			return -EINVAL;
488		}
489	}
490
491	return 0;
492}
493
494static enum drm_mode_status mode_valid_path(struct drm_connector *connector,
495					    struct drm_encoder *encoder,
496					    struct drm_crtc *crtc,
497					    const struct drm_display_mode *mode)
498{
499	struct drm_bridge *bridge;
500	enum drm_mode_status ret;
501
502	ret = drm_encoder_mode_valid(encoder, mode);
503	if (ret != MODE_OK) {
504		DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] mode_valid() failed\n",
505				encoder->base.id, encoder->name);
506		return ret;
507	}
508
509	bridge = drm_bridge_chain_get_first_bridge(encoder);
510	ret = drm_bridge_chain_mode_valid(bridge, &connector->display_info,
511					  mode);
512	if (ret != MODE_OK) {
513		DRM_DEBUG_ATOMIC("[BRIDGE] mode_valid() failed\n");
514		return ret;
515	}
516
517	ret = drm_crtc_mode_valid(crtc, mode);
518	if (ret != MODE_OK) {
519		DRM_DEBUG_ATOMIC("[CRTC:%d:%s] mode_valid() failed\n",
520				crtc->base.id, crtc->name);
521		return ret;
522	}
523
524	return ret;
525}
526
527static int
528mode_valid(struct drm_atomic_state *state)
529{
530	struct drm_connector_state *conn_state;
531	struct drm_connector *connector;
532	int i;
533
534	for_each_new_connector_in_state(state, connector, conn_state, i) {
535		struct drm_encoder *encoder = conn_state->best_encoder;
536		struct drm_crtc *crtc = conn_state->crtc;
537		struct drm_crtc_state *crtc_state;
538		enum drm_mode_status mode_status;
539		const struct drm_display_mode *mode;
540
541		if (!crtc || !encoder)
542			continue;
543
544		crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
545		if (!crtc_state)
546			continue;
547		if (!crtc_state->mode_changed && !crtc_state->connectors_changed)
548			continue;
549
550		mode = &crtc_state->mode;
551
552		mode_status = mode_valid_path(connector, encoder, crtc, mode);
553		if (mode_status != MODE_OK)
554			return -EINVAL;
555	}
556
557	return 0;
558}
559
560/**
561 * drm_atomic_helper_check_modeset - validate state object for modeset changes
562 * @dev: DRM device
563 * @state: the driver state object
564 *
565 * Check the state object to see if the requested state is physically possible.
566 * This does all the CRTC and connector related computations for an atomic
567 * update and adds any additional connectors needed for full modesets. It calls
 * the various per-object callbacks in the following order:
569 *
570 * 1. &drm_connector_helper_funcs.atomic_best_encoder for determining the new encoder.
571 * 2. &drm_connector_helper_funcs.atomic_check to validate the connector state.
572 * 3. If it's determined a modeset is needed then all connectors on the affected
573 *    CRTC are added and &drm_connector_helper_funcs.atomic_check is run on them.
574 * 4. &drm_encoder_helper_funcs.mode_valid, &drm_bridge_funcs.mode_valid and
575 *    &drm_crtc_helper_funcs.mode_valid are called on the affected components.
576 * 5. &drm_bridge_funcs.mode_fixup is called on all encoder bridges.
577 * 6. &drm_encoder_helper_funcs.atomic_check is called to validate any encoder state.
 *    This function is only called when the encoder will be part of a configured
 *    CRTC; it must not be used for implementing connector property validation.
 *    If this function is NULL, &drm_encoder_helper_funcs.mode_fixup is called
581 *    instead.
582 * 7. &drm_crtc_helper_funcs.mode_fixup is called last, to fix up the mode with CRTC constraints.
583 *
584 * &drm_crtc_state.mode_changed is set when the input mode is changed.
585 * &drm_crtc_state.connectors_changed is set when a connector is added or
586 * removed from the CRTC.  &drm_crtc_state.active_changed is set when
587 * &drm_crtc_state.active changes, which is used for DPMS.
 * &drm_crtc_state.no_vblank is set if the device has no vblank support (see
 * drm_dev_has_vblank()).
589 * See also: drm_atomic_crtc_needs_modeset()
590 *
591 * IMPORTANT:
592 *
593 * Drivers which set &drm_crtc_state.mode_changed (e.g. in their
594 * &drm_plane_helper_funcs.atomic_check hooks if a plane update can't be done
 * without a full modeset) _must_ call this function again after that
596 * change. It is permitted to call this function multiple times for the same
597 * update, e.g. when the &drm_crtc_helper_funcs.atomic_check functions depend
598 * upon the adjusted dotclock for fifo space allocation and watermark
599 * computation.
600 *
601 * RETURNS:
602 * Zero for success or -errno
603 */
604int
605drm_atomic_helper_check_modeset(struct drm_device *dev,
606				struct drm_atomic_state *state)
607{
608	struct drm_crtc *crtc;
609	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
610	struct drm_connector *connector;
611	struct drm_connector_state *old_connector_state, *new_connector_state;
612	int i, ret;
613	unsigned connectors_mask = 0;
614
615	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
616		bool has_connectors =
617			!!new_crtc_state->connector_mask;
618
619		WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
620
621		if (!drm_mode_equal(&old_crtc_state->mode, &new_crtc_state->mode)) {
622			DRM_DEBUG_ATOMIC("[CRTC:%d:%s] mode changed\n",
623					 crtc->base.id, crtc->name);
624			new_crtc_state->mode_changed = true;
625		}
626
627		if (old_crtc_state->enable != new_crtc_state->enable) {
628			DRM_DEBUG_ATOMIC("[CRTC:%d:%s] enable changed\n",
629					 crtc->base.id, crtc->name);
630
631			/*
632			 * For clarity this assignment is done here, but
633			 * enable == 0 is only true when there are no
634			 * connectors and a NULL mode.
635			 *
636			 * The other way around is true as well. enable != 0
637			 * iff connectors are attached and a mode is set.
638			 */
639			new_crtc_state->mode_changed = true;
640			new_crtc_state->connectors_changed = true;
641		}
642
643		if (old_crtc_state->active != new_crtc_state->active) {
644			DRM_DEBUG_ATOMIC("[CRTC:%d:%s] active changed\n",
645					 crtc->base.id, crtc->name);
646			new_crtc_state->active_changed = true;
647		}
648
649		if (new_crtc_state->enable != has_connectors) {
650			DRM_DEBUG_ATOMIC("[CRTC:%d:%s] enabled/connectors mismatch\n",
651					 crtc->base.id, crtc->name);
652
653			return -EINVAL;
654		}
655
656		if (drm_dev_has_vblank(dev))
657			new_crtc_state->no_vblank = false;
658		else
659			new_crtc_state->no_vblank = true;
660	}
661
662	ret = handle_conflicting_encoders(state, false);
663	if (ret)
664		return ret;
665
666	for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i) {
667		const struct drm_connector_helper_funcs *funcs = connector->helper_private;
668
669		WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
670
671		/*
672		 * This only sets crtc->connectors_changed for routing changes,
673		 * drivers must set crtc->connectors_changed themselves when
674		 * connector properties need to be updated.
675		 */
676		ret = update_connector_routing(state, connector,
677					       old_connector_state,
678					       new_connector_state);
679		if (ret)
680			return ret;
681		if (old_connector_state->crtc) {
682			new_crtc_state = drm_atomic_get_new_crtc_state(state,
683								       old_connector_state->crtc);
684			if (old_connector_state->link_status !=
685			    new_connector_state->link_status)
686				new_crtc_state->connectors_changed = true;
687
688			if (old_connector_state->max_requested_bpc !=
689			    new_connector_state->max_requested_bpc)
690				new_crtc_state->connectors_changed = true;
691		}
692
693		if (funcs->atomic_check)
694			ret = funcs->atomic_check(connector, state);
695		if (ret)
696			return ret;
697
698		connectors_mask |= BIT(i);
699	}
700
701	/*
702	 * After all the routing has been prepared we need to add in any
703	 * connector which is itself unchanged, but whose CRTC changes its
704	 * configuration. This must be done before calling mode_fixup in case a
705	 * crtc only changed its mode but has the same set of connectors.
706	 */
707	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
708		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
709			continue;
710
711		DRM_DEBUG_ATOMIC("[CRTC:%d:%s] needs all connectors, enable: %c, active: %c\n",
712				 crtc->base.id, crtc->name,
713				 new_crtc_state->enable ? 'y' : 'n',
714				 new_crtc_state->active ? 'y' : 'n');
715
716		ret = drm_atomic_add_affected_connectors(state, crtc);
717		if (ret != 0)
718			return ret;
719
720		ret = drm_atomic_add_affected_planes(state, crtc);
721		if (ret != 0)
722			return ret;
723	}
724
725	/*
726	 * Iterate over all connectors again, to make sure atomic_check()
727	 * has been called on them when a modeset is forced.
728	 */
729	for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i) {
730		const struct drm_connector_helper_funcs *funcs = connector->helper_private;
731
732		if (connectors_mask & BIT(i))
733			continue;
734
735		if (funcs->atomic_check)
736			ret = funcs->atomic_check(connector, state);
737		if (ret)
738			return ret;
739	}
740
741	/*
742	 * Iterate over all connectors again, and add all affected bridges to
743	 * the state.
744	 */
745	for_each_oldnew_connector_in_state(state, connector,
746					   old_connector_state,
747					   new_connector_state, i) {
748		struct drm_encoder *encoder;
749
750		encoder = old_connector_state->best_encoder;
751		ret = drm_atomic_add_encoder_bridges(state, encoder);
752		if (ret)
753			return ret;
754
755		encoder = new_connector_state->best_encoder;
756		ret = drm_atomic_add_encoder_bridges(state, encoder);
757		if (ret)
758			return ret;
759	}
760
761	ret = mode_valid(state);
762	if (ret)
763		return ret;
764
765	return mode_fixup(state);
766}
767EXPORT_SYMBOL(drm_atomic_helper_check_modeset);
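
/*
 * To illustrate the IMPORTANT note above: a driver whose plane checks can
 * force a full modeset re-runs the modeset checks after the plane checks.
 * This is a hedged sketch only, the foo_* name is hypothetical:
 *
 *	static int foo_atomic_check(struct drm_device *dev,
 *				    struct drm_atomic_state *state)
 *	{
 *		int ret;
 *
 *		ret = drm_atomic_helper_check_modeset(dev, state);
 *		if (ret)
 *			return ret;
 *
 *		ret = drm_atomic_helper_check_planes(dev, state);
 *		if (ret)
 *			return ret;
 *
 *		... if a plane's atomic_check raised
 *		... &drm_crtc_state.mode_changed, run the modeset
 *		... checks once more for the now-enlarged state:
 *
 *		return drm_atomic_helper_check_modeset(dev, state);
 *	}
 */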
768
769/**
770 * drm_atomic_helper_check_plane_state() - Check plane state for validity
771 * @plane_state: plane state to check
772 * @crtc_state: CRTC state to check
773 * @min_scale: minimum @src:@dest scaling factor in 16.16 fixed point
774 * @max_scale: maximum @src:@dest scaling factor in 16.16 fixed point
775 * @can_position: is it legal to position the plane such that it
776 *                doesn't cover the entire CRTC?  This will generally
777 *                only be false for primary planes.
778 * @can_update_disabled: can the plane be updated while the CRTC
779 *                       is disabled?
780 *
781 * Checks that a desired plane update is valid, and updates various
782 * bits of derived state (clipped coordinates etc.). Drivers that provide
783 * their own plane handling rather than helper-provided implementations may
784 * still wish to call this function to avoid duplication of error checking
785 * code.
786 *
787 * RETURNS:
788 * Zero if update appears valid, error code on failure
789 */
790int drm_atomic_helper_check_plane_state(struct drm_plane_state *plane_state,
791					const struct drm_crtc_state *crtc_state,
792					int min_scale,
793					int max_scale,
794					bool can_position,
795					bool can_update_disabled)
796{
797	struct drm_framebuffer *fb = plane_state->fb;
798	struct drm_rect *src = &plane_state->src;
799	struct drm_rect *dst = &plane_state->dst;
800	unsigned int rotation = plane_state->rotation;
801	struct drm_rect clip = {};
802	int hscale, vscale;
803
804	WARN_ON(plane_state->crtc && plane_state->crtc != crtc_state->crtc);
805
806	*src = drm_plane_state_src(plane_state);
807	*dst = drm_plane_state_dest(plane_state);
808
809	if (!fb) {
810		plane_state->visible = false;
811		return 0;
812	}
813
814	/* crtc should only be NULL when disabling (i.e., !fb) */
815	if (WARN_ON(!plane_state->crtc)) {
816		plane_state->visible = false;
817		return 0;
818	}
819
820	if (!crtc_state->enable && !can_update_disabled) {
821		DRM_DEBUG_KMS("Cannot update plane of a disabled CRTC.\n");
822		return -EINVAL;
823	}
824
825	drm_rect_rotate(src, fb->width << 16, fb->height << 16, rotation);
826
827	/* Check scaling */
828	hscale = drm_rect_calc_hscale(src, dst, min_scale, max_scale);
829	vscale = drm_rect_calc_vscale(src, dst, min_scale, max_scale);
830	if (hscale < 0 || vscale < 0) {
831		DRM_DEBUG_KMS("Invalid scaling of plane\n");
832		drm_rect_debug_print("src: ", &plane_state->src, true);
833		drm_rect_debug_print("dst: ", &plane_state->dst, false);
834		return -ERANGE;
835	}
836
837	if (crtc_state->enable)
838		drm_mode_get_hv_timing(&crtc_state->mode, &clip.x2, &clip.y2);
839
840	plane_state->visible = drm_rect_clip_scaled(src, dst, &clip);
841
842	drm_rect_rotate_inv(src, fb->width << 16, fb->height << 16, rotation);
843
844	if (!plane_state->visible)
845		/*
846		 * Plane isn't visible; some drivers can handle this
847		 * so we just return success here.  Drivers that can't
848		 * (including those that use the primary plane helper's
849		 * update function) will return an error from their
850		 * update_plane handler.
851		 */
852		return 0;
853
854	if (!can_position && !drm_rect_equals(dst, &clip)) {
855		DRM_DEBUG_KMS("Plane must cover entire CRTC\n");
856		drm_rect_debug_print("dst: ", dst, false);
857		drm_rect_debug_print("clip: ", &clip, false);
858		return -EINVAL;
859	}
860
861	return 0;
862}
863EXPORT_SYMBOL(drm_atomic_helper_check_plane_state);
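
/*
 * A typical caller is a driver's &drm_plane_helper_funcs.atomic_check hook.
 * The sketch below is illustrative only (foo_* is hypothetical) and assumes
 * DRM_PLANE_HELPER_NO_SCALING from <drm/drm_plane_helper.h> for a plane that
 * can neither scale nor be updated while the CRTC is disabled:
 *
 *	static int foo_plane_atomic_check(struct drm_plane *plane,
 *					  struct drm_plane_state *new_state)
 *	{
 *		struct drm_crtc_state *crtc_state;
 *
 *		if (!new_state->crtc)
 *			return 0;
 *
 *		crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
 *							   new_state->crtc);
 *		if (WARN_ON(!crtc_state))
 *			return -EINVAL;
 *
 *		return drm_atomic_helper_check_plane_state(new_state, crtc_state,
 *							   DRM_PLANE_HELPER_NO_SCALING,
 *							   DRM_PLANE_HELPER_NO_SCALING,
 *							   false, false);
 *	}
 */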
864
865/**
866 * drm_atomic_helper_check_planes - validate state object for planes changes
867 * @dev: DRM device
868 * @state: the driver state object
869 *
870 * Check the state object to see if the requested state is physically possible.
 * This does all the plane update related checks by calling into the
872 * &drm_crtc_helper_funcs.atomic_check and &drm_plane_helper_funcs.atomic_check
873 * hooks provided by the driver.
874 *
875 * It also sets &drm_crtc_state.planes_changed to indicate that a CRTC has
876 * updated planes.
877 *
878 * RETURNS:
879 * Zero for success or -errno
880 */
881int
882drm_atomic_helper_check_planes(struct drm_device *dev,
883			       struct drm_atomic_state *state)
884{
885	struct drm_crtc *crtc;
886	struct drm_crtc_state *new_crtc_state;
887	struct drm_plane *plane;
888	struct drm_plane_state *new_plane_state, *old_plane_state;
889	int i, ret = 0;
890
891	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
892		const struct drm_plane_helper_funcs *funcs;
893
894		WARN_ON(!drm_modeset_is_locked(&plane->mutex));
895
896		funcs = plane->helper_private;
897
898		drm_atomic_helper_plane_changed(state, old_plane_state, new_plane_state, plane);
899
900		drm_atomic_helper_check_plane_damage(state, new_plane_state);
901
902		if (!funcs || !funcs->atomic_check)
903			continue;
904
905		ret = funcs->atomic_check(plane, new_plane_state);
906		if (ret) {
907			DRM_DEBUG_ATOMIC("[PLANE:%d:%s] atomic driver check failed\n",
908					 plane->base.id, plane->name);
909			return ret;
910		}
911	}
912
913	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
914		const struct drm_crtc_helper_funcs *funcs;
915
916		funcs = crtc->helper_private;
917
918		if (!funcs || !funcs->atomic_check)
919			continue;
920
921		ret = funcs->atomic_check(crtc, new_crtc_state);
922		if (ret) {
923			DRM_DEBUG_ATOMIC("[CRTC:%d:%s] atomic driver check failed\n",
924					 crtc->base.id, crtc->name);
925			return ret;
926		}
927	}
928
929	return ret;
930}
931EXPORT_SYMBOL(drm_atomic_helper_check_planes);
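
/*
 * The per-CRTC hook called above is a good place for checks that depend on
 * the full set of planes assigned to a CRTC. A minimal sketch (foo_* is
 * hypothetical) which refuses to enable a CRTC without any plane:
 *
 *	static int foo_crtc_atomic_check(struct drm_crtc *crtc,
 *					 struct drm_crtc_state *crtc_state)
 *	{
 *		if (crtc_state->active && !crtc_state->plane_mask)
 *			return -EINVAL;
 *
 *		return 0;
 *	}
 */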
932
933/**
934 * drm_atomic_helper_check - validate state object
935 * @dev: DRM device
936 * @state: the driver state object
937 *
938 * Check the state object to see if the requested state is physically possible.
939 * Only CRTCs and planes have check callbacks, so for any additional (global)
940 * checking that a driver needs it can simply wrap that around this function.
941 * Drivers without such needs can directly use this as their
942 * &drm_mode_config_funcs.atomic_check callback.
943 *
944 * This just wraps the two parts of the state checking for planes and modeset
945 * state in the default order: First it calls drm_atomic_helper_check_modeset()
946 * and then drm_atomic_helper_check_planes(). The assumption is that the
 * &drm_plane_helper_funcs.atomic_check and &drm_crtc_helper_funcs.atomic_check
948 * functions depend upon an updated adjusted_mode.clock to e.g. properly compute
949 * watermarks.
950 *
 * Note that zpos normalization will add all enabled planes to the state, which
 * might not be desired for some drivers. For example, enabling or disabling a
 * cursor plane with a fixed zpos value would force all other enabled planes
 * into the state change.
955 *
956 * RETURNS:
957 * Zero for success or -errno
958 */
959int drm_atomic_helper_check(struct drm_device *dev,
960			    struct drm_atomic_state *state)
961{
962	int ret;
963
964	ret = drm_atomic_helper_check_modeset(dev, state);
965	if (ret)
966		return ret;
967
968	if (dev->mode_config.normalize_zpos) {
969		ret = drm_atomic_normalize_zpos(dev, state);
970		if (ret)
971			return ret;
972	}
973
974	ret = drm_atomic_helper_check_planes(dev, state);
975	if (ret)
976		return ret;
977
978	if (state->legacy_cursor_update)
979		state->async_update = !drm_atomic_helper_async_check(dev, state);
980
981	drm_self_refresh_helper_alter_state(state);
982
983	return ret;
984}
985EXPORT_SYMBOL(drm_atomic_helper_check);
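
/*
 * Wrapping this for additional driver-global checks, as mentioned above, can
 * be as simple as the following sketch. The foo_* names and the bandwidth
 * check are purely illustrative:
 *
 *	static int foo_atomic_check(struct drm_device *dev,
 *				    struct drm_atomic_state *state)
 *	{
 *		int ret;
 *
 *		ret = drm_atomic_helper_check(dev, state);
 *		if (ret)
 *			return ret;
 *
 *		return foo_check_global_bandwidth(dev, state);
 *	}
 */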
986
987static bool
988crtc_needs_disable(struct drm_crtc_state *old_state,
989		   struct drm_crtc_state *new_state)
990{
991	/*
	 * No new_state means the CRTC is off, so the only criterion is whether
993	 * it's currently active or in self refresh mode.
994	 */
995	if (!new_state)
996		return drm_atomic_crtc_effectively_active(old_state);
997
998	/*
999	 * We need to disable bridge(s) and CRTC if we're transitioning out of
1000	 * self-refresh and changing CRTCs at the same time, because the
1001	 * bridge tracks self-refresh status via CRTC state.
1002	 */
1003	if (old_state->self_refresh_active &&
1004	    old_state->crtc != new_state->crtc)
1005		return true;
1006
1007	/*
1008	 * We also need to run through the crtc_funcs->disable() function if
1009	 * the CRTC is currently on, if it's transitioning to self refresh
1010	 * mode, or if it's in self refresh mode and needs to be fully
1011	 * disabled.
1012	 */
1013	return old_state->active ||
1014	       (old_state->self_refresh_active && !new_state->enable) ||
1015	       new_state->self_refresh_active;
1016}
1017
1018static void
1019disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
1020{
1021	struct drm_connector *connector;
1022	struct drm_connector_state *old_conn_state, *new_conn_state;
1023	struct drm_crtc *crtc;
1024	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
1025	int i;
1026
1027	for_each_oldnew_connector_in_state(old_state, connector, old_conn_state, new_conn_state, i) {
1028		const struct drm_encoder_helper_funcs *funcs;
1029		struct drm_encoder *encoder;
1030		struct drm_bridge *bridge;
1031
1032		/* Shut down everything that's in the changeset and currently
		 * still on, so we need to check the old, saved state. */
1034		if (!old_conn_state->crtc)
1035			continue;
1036
1037		old_crtc_state = drm_atomic_get_old_crtc_state(old_state, old_conn_state->crtc);
1038
1039		if (new_conn_state->crtc)
1040			new_crtc_state = drm_atomic_get_new_crtc_state(
1041						old_state,
1042						new_conn_state->crtc);
1043		else
1044			new_crtc_state = NULL;
1045
1046		if (!crtc_needs_disable(old_crtc_state, new_crtc_state) ||
1047		    !drm_atomic_crtc_needs_modeset(old_conn_state->crtc->state))
1048			continue;
1049
1050		encoder = old_conn_state->best_encoder;
1051
1052		/* We shouldn't get this far if we didn't previously have
1053		 * an encoder.. but WARN_ON() rather than explode.
1054		 */
1055		if (WARN_ON(!encoder))
1056			continue;
1057
1058		funcs = encoder->helper_private;
1059
1060		DRM_DEBUG_ATOMIC("disabling [ENCODER:%d:%s]\n",
1061				 encoder->base.id, encoder->name);
1062
1063		/*
1064		 * Each encoder has at most one connector (since we always steal
1065		 * it away), so we won't call disable hooks twice.
1066		 */
1067		bridge = drm_bridge_chain_get_first_bridge(encoder);
1068		drm_atomic_bridge_chain_disable(bridge, old_state);
1069
1070		/* Right function depends upon target state. */
1071		if (funcs) {
1072			if (funcs->atomic_disable)
1073				funcs->atomic_disable(encoder, old_state);
1074			else if (new_conn_state->crtc && funcs->prepare)
1075				funcs->prepare(encoder);
1076			else if (funcs->disable)
1077				funcs->disable(encoder);
1078			else if (funcs->dpms)
1079				funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
1080		}
1081
1082		drm_atomic_bridge_chain_post_disable(bridge, old_state);
1083	}
1084
1085	for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
1086		const struct drm_crtc_helper_funcs *funcs;
1087		int ret;
1088
1089		/* Shut down everything that needs a full modeset. */
1090		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
1091			continue;
1092
1093		if (!crtc_needs_disable(old_crtc_state, new_crtc_state))
1094			continue;
1095
1096		funcs = crtc->helper_private;
1097
1098		DRM_DEBUG_ATOMIC("disabling [CRTC:%d:%s]\n",
1099				 crtc->base.id, crtc->name);
1100
1102		/* Right function depends upon target state. */
1103		if (new_crtc_state->enable && funcs->prepare)
1104			funcs->prepare(crtc);
1105		else if (funcs->atomic_disable)
1106			funcs->atomic_disable(crtc, old_crtc_state);
1107		else if (funcs->disable)
1108			funcs->disable(crtc);
1109		else if (funcs->dpms)
1110			funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
1111
1112		if (!drm_dev_has_vblank(dev))
1113			continue;
1114
1115		ret = drm_crtc_vblank_get(crtc);
1116		/*
1117		 * Self-refresh is not a true "disable"; ensure vblank remains
1118		 * enabled.
1119		 */
1120		if (new_crtc_state->self_refresh_active)
1121			WARN_ONCE(ret != 0,
1122				  "driver disabled vblank in self-refresh\n");
1123		else
1124			WARN_ONCE(ret != -EINVAL,
1125				  "driver forgot to call drm_crtc_vblank_off()\n");
1126		if (ret == 0)
1127			drm_crtc_vblank_put(crtc);
1128	}
1129}
1130
1131/**
1132 * drm_atomic_helper_update_legacy_modeset_state - update legacy modeset state
1133 * @dev: DRM device
1134 * @old_state: atomic state object with old state structures
1135 *
1136 * This function updates all the various legacy modeset state pointers in
1137 * connectors, encoders and CRTCs.
1138 *
1139 * Drivers can use this for building their own atomic commit if they don't have
1140 * a pure helper-based modeset implementation.
1141 *
 * Since these updates are not synchronized with any locking, only code paths
 * called from &drm_mode_config_helper_funcs.atomic_commit_tail can look at the
 * legacy state filled out by this helper. De facto this means this helper and
1145 * the legacy state pointers are only really useful for transitioning an
1146 * existing driver to the atomic world.
1147 */
1148void
1149drm_atomic_helper_update_legacy_modeset_state(struct drm_device *dev,
1150					      struct drm_atomic_state *old_state)
1151{
1152	struct drm_connector *connector;
1153	struct drm_connector_state *old_conn_state, *new_conn_state;
1154	struct drm_crtc *crtc;
1155	struct drm_crtc_state *new_crtc_state;
1156	int i;
1157
1158	/* clear out existing links and update dpms */
1159	for_each_oldnew_connector_in_state(old_state, connector, old_conn_state, new_conn_state, i) {
1160		if (connector->encoder) {
1161			WARN_ON(!connector->encoder->crtc);
1162
1163			connector->encoder->crtc = NULL;
1164			connector->encoder = NULL;
1165		}
1166
1167		crtc = new_conn_state->crtc;
1168		if ((!crtc && old_conn_state->crtc) ||
1169		    (crtc && drm_atomic_crtc_needs_modeset(crtc->state))) {
1170			int mode = DRM_MODE_DPMS_OFF;
1171
1172			if (crtc && crtc->state->active)
1173				mode = DRM_MODE_DPMS_ON;
1174
1175			connector->dpms = mode;
1176		}
1177	}
1178
1179	/* set new links */
1180	for_each_new_connector_in_state(old_state, connector, new_conn_state, i) {
1181		if (!new_conn_state->crtc)
1182			continue;
1183
1184		if (WARN_ON(!new_conn_state->best_encoder))
1185			continue;
1186
1187		connector->encoder = new_conn_state->best_encoder;
1188		connector->encoder->crtc = new_conn_state->crtc;
1189	}
1190
1191	/* set legacy state in the crtc structure */
1192	for_each_new_crtc_in_state(old_state, crtc, new_crtc_state, i) {
1193		struct drm_plane *primary = crtc->primary;
1194		struct drm_plane_state *new_plane_state;
1195
1196		crtc->mode = new_crtc_state->mode;
1197		crtc->enabled = new_crtc_state->enable;
1198
1199		new_plane_state =
1200			drm_atomic_get_new_plane_state(old_state, primary);
1201
1202		if (new_plane_state && new_plane_state->crtc == crtc) {
1203			crtc->x = new_plane_state->src_x >> 16;
1204			crtc->y = new_plane_state->src_y >> 16;
1205		}
1206	}
1207}
1208EXPORT_SYMBOL(drm_atomic_helper_update_legacy_modeset_state);
1209
1210/**
1211 * drm_atomic_helper_calc_timestamping_constants - update vblank timestamping constants
1212 * @state: atomic state object
1213 *
1214 * Updates the timestamping constants used for precise vblank timestamps
1215 * by calling drm_calc_timestamping_constants() for all enabled crtcs in @state.
1216 */
1217void drm_atomic_helper_calc_timestamping_constants(struct drm_atomic_state *state)
1218{
1219	struct drm_crtc_state *new_crtc_state;
1220	struct drm_crtc *crtc;
1221	int i;
1222
1223	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
1224		if (new_crtc_state->enable)
1225			drm_calc_timestamping_constants(crtc,
1226							&new_crtc_state->adjusted_mode);
1227	}
1228}
1229EXPORT_SYMBOL(drm_atomic_helper_calc_timestamping_constants);
1230
1231static void
1232crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state)
1233{
1234	struct drm_crtc *crtc;
1235	struct drm_crtc_state *new_crtc_state;
1236	struct drm_connector *connector;
1237	struct drm_connector_state *new_conn_state;
1238	int i;
1239
1240	for_each_new_crtc_in_state(old_state, crtc, new_crtc_state, i) {
1241		const struct drm_crtc_helper_funcs *funcs;
1242
1243		if (!new_crtc_state->mode_changed)
1244			continue;
1245
1246		funcs = crtc->helper_private;
1247
1248		if (new_crtc_state->enable && funcs->mode_set_nofb) {
1249			DRM_DEBUG_ATOMIC("modeset on [CRTC:%d:%s]\n",
1250					 crtc->base.id, crtc->name);
1251
1252			funcs->mode_set_nofb(crtc);
1253		}
1254	}
1255
1256	for_each_new_connector_in_state(old_state, connector, new_conn_state, i) {
1257		const struct drm_encoder_helper_funcs *funcs;
1258		struct drm_encoder *encoder;
1259		struct drm_display_mode *mode, *adjusted_mode;
1260		struct drm_bridge *bridge;
1261
1262		if (!new_conn_state->best_encoder)
1263			continue;
1264
1265		encoder = new_conn_state->best_encoder;
1266		funcs = encoder->helper_private;
1267		new_crtc_state = new_conn_state->crtc->state;
1268		mode = &new_crtc_state->mode;
1269		adjusted_mode = &new_crtc_state->adjusted_mode;
1270
1271		if (!new_crtc_state->mode_changed)
1272			continue;
1273
1274		DRM_DEBUG_ATOMIC("modeset on [ENCODER:%d:%s]\n",
1275				 encoder->base.id, encoder->name);
1276
1277		/*
1278		 * Each encoder has at most one connector (since we always steal
1279		 * it away), so we won't call mode_set hooks twice.
1280		 */
1281		if (funcs && funcs->atomic_mode_set) {
1282			funcs->atomic_mode_set(encoder, new_crtc_state,
1283					       new_conn_state);
1284		} else if (funcs && funcs->mode_set) {
1285			funcs->mode_set(encoder, mode, adjusted_mode);
1286		}
1287
1288		bridge = drm_bridge_chain_get_first_bridge(encoder);
1289		drm_bridge_chain_mode_set(bridge, mode, adjusted_mode);
1290	}
1291}
1292
1293/**
1294 * drm_atomic_helper_commit_modeset_disables - modeset commit to disable outputs
1295 * @dev: DRM device
1296 * @old_state: atomic state object with old state structures
1297 *
1298 * This function shuts down all the outputs that need to be shut down and
1299 * prepares them (if required) with the new mode.
1300 *
1301 * For compatibility with legacy CRTC helpers this should be called before
1302 * drm_atomic_helper_commit_planes(), which is what the default commit function
1303 * does. But drivers with different needs can group the modeset commits together
1304 * and do the plane commits at the end. This is useful for drivers doing runtime
 * PM since plane updates then only happen when the CRTC is actually enabled.
1306 */
1307void drm_atomic_helper_commit_modeset_disables(struct drm_device *dev,
1308					       struct drm_atomic_state *old_state)
1309{
1310	disable_outputs(dev, old_state);
1311
1312	drm_atomic_helper_update_legacy_modeset_state(dev, old_state);
1313	drm_atomic_helper_calc_timestamping_constants(old_state);
1314
1315	crtc_set_mode(dev, old_state);
1316}
1317EXPORT_SYMBOL(drm_atomic_helper_commit_modeset_disables);
1318
1319static void drm_atomic_helper_commit_writebacks(struct drm_device *dev,
1320						struct drm_atomic_state *old_state)
1321{
1322	struct drm_connector *connector;
1323	struct drm_connector_state *new_conn_state;
1324	int i;
1325
1326	for_each_new_connector_in_state(old_state, connector, new_conn_state, i) {
1327		const struct drm_connector_helper_funcs *funcs;
1328
1329		funcs = connector->helper_private;
1330		if (!funcs->atomic_commit)
1331			continue;
1332
1333		if (new_conn_state->writeback_job && new_conn_state->writeback_job->fb) {
1334			WARN_ON(connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK);
1335			funcs->atomic_commit(connector, new_conn_state);
1336		}
1337	}
1338}
1339
1340/**
1341 * drm_atomic_helper_commit_modeset_enables - modeset commit to enable outputs
1342 * @dev: DRM device
1343 * @old_state: atomic state object with old state structures
1344 *
1345 * This function enables all the outputs with the new configuration which had to
1346 * be turned off for the update.
1347 *
1348 * For compatibility with legacy CRTC helpers this should be called after
1349 * drm_atomic_helper_commit_planes(), which is what the default commit function
1350 * does. But drivers with different needs can group the modeset commits together
1351 * and do the plane commits at the end. This is useful for drivers doing runtime
 * PM since plane updates then only happen when the CRTC is actually enabled.
1353 */
1354void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
1355					      struct drm_atomic_state *old_state)
1356{
1357	struct drm_crtc *crtc;
1358	struct drm_crtc_state *old_crtc_state;
1359	struct drm_crtc_state *new_crtc_state;
1360	struct drm_connector *connector;
1361	struct drm_connector_state *new_conn_state;
1362	int i;
1363
1364	for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
1365		const struct drm_crtc_helper_funcs *funcs;
1366
1367		/* Need to filter out CRTCs where only planes change. */
1368		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
1369			continue;
1370
1371		if (!new_crtc_state->active)
1372			continue;
1373
1374		funcs = crtc->helper_private;
1375
1376		if (new_crtc_state->enable) {
1377			DRM_DEBUG_ATOMIC("enabling [CRTC:%d:%s]\n",
1378					 crtc->base.id, crtc->name);
1379			if (funcs->atomic_enable)
1380				funcs->atomic_enable(crtc, old_crtc_state);
1381			else if (funcs->commit)
1382				funcs->commit(crtc);
1383		}
1384	}
1385
1386	for_each_new_connector_in_state(old_state, connector, new_conn_state, i) {
1387		const struct drm_encoder_helper_funcs *funcs;
1388		struct drm_encoder *encoder;
1389		struct drm_bridge *bridge;
1390
1391		if (!new_conn_state->best_encoder)
1392			continue;
1393
1394		if (!new_conn_state->crtc->state->active ||
1395		    !drm_atomic_crtc_needs_modeset(new_conn_state->crtc->state))
1396			continue;
1397
1398		encoder = new_conn_state->best_encoder;
1399		funcs = encoder->helper_private;
1400
1401		DRM_DEBUG_ATOMIC("enabling [ENCODER:%d:%s]\n",
1402				 encoder->base.id, encoder->name);
1403
1404		/*
1405		 * Each encoder has at most one connector (since we always steal
1406		 * it away), so we won't call enable hooks twice.
1407		 */
1408		bridge = drm_bridge_chain_get_first_bridge(encoder);
1409		drm_atomic_bridge_chain_pre_enable(bridge, old_state);
1410
1411		if (funcs) {
1412			if (funcs->atomic_enable)
1413				funcs->atomic_enable(encoder, old_state);
1414			else if (funcs->enable)
1415				funcs->enable(encoder);
1416			else if (funcs->commit)
1417				funcs->commit(encoder);
1418		}
1419
1420		drm_atomic_bridge_chain_enable(bridge, old_state);
1421	}
1422
1423	drm_atomic_helper_commit_writebacks(dev, old_state);
1424}
1425EXPORT_SYMBOL(drm_atomic_helper_commit_modeset_enables);
1426
1427/**
1428 * drm_atomic_helper_wait_for_fences - wait for fences stashed in plane state
1429 * @dev: DRM device
1430 * @state: atomic state object with old state structures
1431 * @pre_swap: If true, do an interruptible wait, and @state is the new state.
1432 * 	Otherwise @state is the old state.
1433 *
 * For implicit sync, drivers should fish the exclusive fence out from the
 * incoming fb's and stash it in the drm_plane_state. This is called after
 * drm_atomic_helper_swap_state() so it uses the current plane state (and
 * just uses the atomic state to find the changed planes).
1438 *
1439 * Note that @pre_swap is needed since the point where we block for fences moves
1440 * around depending upon whether an atomic commit is blocking or
1441 * non-blocking. For non-blocking commit all waiting needs to happen after
1442 * drm_atomic_helper_swap_state() is called, but for blocking commits we want
1443 * to wait **before** we do anything that can't be easily rolled back. That is
1444 * before we call drm_atomic_helper_swap_state().
1445 *
 * Returns zero on success or < 0 if dma_fence_wait() fails.
1447 */
1448int drm_atomic_helper_wait_for_fences(struct drm_device *dev,
1449				      struct drm_atomic_state *state,
1450				      bool pre_swap)
1451{
1452	struct drm_plane *plane;
1453	struct drm_plane_state *new_plane_state;
1454	int i, ret;
1455
1456	for_each_new_plane_in_state(state, plane, new_plane_state, i) {
1457		if (!new_plane_state->fence)
1458			continue;
1459
1460		WARN_ON(!new_plane_state->fb);
1461
1462		/*
1463		 * If waiting for fences pre-swap (ie: nonblock), userspace can
1464		 * still interrupt the operation. Instead of blocking until the
1465		 * timer expires, make the wait interruptible.
1466		 */
1467		ret = dma_fence_wait(new_plane_state->fence, pre_swap);
1468		if (ret)
1469			return ret;
1470
1471		dma_fence_put(new_plane_state->fence);
1472		new_plane_state->fence = NULL;
1473	}
1474
1475	return 0;
1476}
1477EXPORT_SYMBOL(drm_atomic_helper_wait_for_fences);
1478
1479/**
1480 * drm_atomic_helper_wait_for_vblanks - wait for vblank on CRTCs
1481 * @dev: DRM device
1482 * @old_state: atomic state object with old state structures
1483 *
1484 * Helper to, after atomic commit, wait for vblanks on all affected
1485 * CRTCs (ie. before cleaning up old framebuffers using
1486 * drm_atomic_helper_cleanup_planes()). It will only wait on CRTCs where the
1487 * framebuffers have actually changed to optimize for the legacy cursor and
1488 * plane update use-case.
1489 *
1490 * Drivers using the nonblocking commit tracking support initialized by calling
1491 * drm_atomic_helper_setup_commit() should look at
1492 * drm_atomic_helper_wait_for_flip_done() as an alternative.
1493 */
1494void
1495drm_atomic_helper_wait_for_vblanks(struct drm_device *dev,
1496		struct drm_atomic_state *old_state)
1497{
1498	struct drm_crtc *crtc;
1499	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
1500	int i, ret;
1501	unsigned crtc_mask = 0;
1502
1503	 /*
1504	  * Legacy cursor ioctls are completely unsynced, and userspace
1505	  * relies on that (by doing tons of cursor updates).
1506	  */
1507	if (old_state->legacy_cursor_update)
1508		return;
1509
1510	for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
1511		if (!new_crtc_state->active)
1512			continue;
1513
1514		ret = drm_crtc_vblank_get(crtc);
1515		if (ret != 0)
1516			continue;
1517
1518		crtc_mask |= drm_crtc_mask(crtc);
1519		old_state->crtcs[i].last_vblank_count = drm_crtc_vblank_count(crtc);
1520	}
1521
1522	for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) {
1523		if (!(crtc_mask & drm_crtc_mask(crtc)))
1524			continue;
1525
1526		ret = wait_event_timeout(dev->vblank[i].queue,
1527				old_state->crtcs[i].last_vblank_count !=
1528					drm_crtc_vblank_count(crtc),
1529				msecs_to_jiffies(100));
1530
1531		WARN(!ret, "[CRTC:%d:%s] vblank wait timed out\n",
1532		     crtc->base.id, crtc->name);
1533
1534		drm_crtc_vblank_put(crtc);
1535	}
1536}
1537EXPORT_SYMBOL(drm_atomic_helper_wait_for_vblanks);
1538
1539/**
1540 * drm_atomic_helper_wait_for_flip_done - wait for all page flips to be done
1541 * @dev: DRM device
1542 * @old_state: atomic state object with old state structures
1543 *
1544 * Helper to, after atomic commit, wait for page flips on all affected
1545 * crtcs (ie. before cleaning up old framebuffers using
1546 * drm_atomic_helper_cleanup_planes()). Compared to
1547 * drm_atomic_helper_wait_for_vblanks() this waits for the completion on all
 * CRTCs, assuming that cursor-only updates signal their completion
 * immediately (or using a different path).
1550 *
1551 * This requires that drivers use the nonblocking commit tracking support
1552 * initialized using drm_atomic_helper_setup_commit().
1553 */
1554void drm_atomic_helper_wait_for_flip_done(struct drm_device *dev,
1555					  struct drm_atomic_state *old_state)
1556{
1557	struct drm_crtc *crtc;
1558	int i;
1559
1560	for (i = 0; i < dev->mode_config.num_crtc; i++) {
1561		struct drm_crtc_commit *commit = old_state->crtcs[i].commit;
1562		int ret;
1563
1564		crtc = old_state->crtcs[i].ptr;
1565
1566		if (!crtc || !commit)
1567			continue;
1568
1569		ret = wait_for_completion_timeout(&commit->flip_done, 10 * HZ);
1570		if (ret == 0)
1571			DRM_ERROR("[CRTC:%d:%s] flip_done timed out\n",
1572				  crtc->base.id, crtc->name);
1573	}
1574
1575	if (old_state->fake_commit)
1576		complete_all(&old_state->fake_commit->flip_done);
1577}
1578EXPORT_SYMBOL(drm_atomic_helper_wait_for_flip_done);
1579
1580/**
1581 * drm_atomic_helper_commit_tail - commit atomic update to hardware
1582 * @old_state: atomic state object with old state structures
1583 *
1584 * This is the default implementation for the
1585 * &drm_mode_config_helper_funcs.atomic_commit_tail hook, for drivers
1586 * that do not support runtime_pm or do not need the CRTC to be
1587 * enabled to perform a commit. Otherwise, see
1588 * drm_atomic_helper_commit_tail_rpm().
1589 *
 * Note that the default ordering of how the various stages are called is chosen
 * to match the legacy modeset helper library as closely as possible.
1592 */
1593void drm_atomic_helper_commit_tail(struct drm_atomic_state *old_state)
1594{
1595	struct drm_device *dev = old_state->dev;
1596
1597	drm_atomic_helper_commit_modeset_disables(dev, old_state);
1598
1599	drm_atomic_helper_commit_planes(dev, old_state, 0);
1600
1601	drm_atomic_helper_commit_modeset_enables(dev, old_state);
1602
1603	drm_atomic_helper_fake_vblank(old_state);
1604
1605	drm_atomic_helper_commit_hw_done(old_state);
1606
1607	drm_atomic_helper_wait_for_vblanks(dev, old_state);
1608
1609	drm_atomic_helper_cleanup_planes(dev, old_state);
1610}
1611EXPORT_SYMBOL(drm_atomic_helper_commit_tail);
1612
1613/**
1614 * drm_atomic_helper_commit_tail_rpm - commit atomic update to hardware
 * @old_state: atomic state object with old state structures
1616 *
1617 * This is an alternative implementation for the
1618 * &drm_mode_config_helper_funcs.atomic_commit_tail hook, for drivers
1619 * that support runtime_pm or need the CRTC to be enabled to perform a
1620 * commit. Otherwise, one should use the default implementation
1621 * drm_atomic_helper_commit_tail().
1622 */
1623void drm_atomic_helper_commit_tail_rpm(struct drm_atomic_state *old_state)
1624{
1625	struct drm_device *dev = old_state->dev;
1626
1627	drm_atomic_helper_commit_modeset_disables(dev, old_state);
1628
1629	drm_atomic_helper_commit_modeset_enables(dev, old_state);
1630
1631	drm_atomic_helper_commit_planes(dev, old_state,
1632					DRM_PLANE_COMMIT_ACTIVE_ONLY);
1633
1634	drm_atomic_helper_fake_vblank(old_state);
1635
1636	drm_atomic_helper_commit_hw_done(old_state);
1637
1638	drm_atomic_helper_wait_for_vblanks(dev, old_state);
1639
1640	drm_atomic_helper_cleanup_planes(dev, old_state);
1641}
1642EXPORT_SYMBOL(drm_atomic_helper_commit_tail_rpm);
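
/*
 * Either commit_tail implementation (or a driver-specific one) is selected
 * through &struct drm_mode_config_helper_funcs and assigned to
 * &drm_mode_config.helper_private during driver initialization. A sketch
 * only, the foo_* name is hypothetical:
 *
 *	static const struct drm_mode_config_helper_funcs foo_mode_config_helpers = {
 *		.atomic_commit_tail = drm_atomic_helper_commit_tail_rpm,
 *	};
 */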
1643
1644static void commit_tail(struct drm_atomic_state *old_state)
1645{
1646	struct drm_device *dev = old_state->dev;
1647	const struct drm_mode_config_helper_funcs *funcs;
1648	struct drm_crtc_state *new_crtc_state;
1649	struct drm_crtc *crtc;
1650	ktime_t start;
1651	s64 commit_time_ms;
1652	unsigned int i, new_self_refresh_mask = 0;
1653
1654	funcs = dev->mode_config.helper_private;
1655
1656	/*
1657	 * We're measuring the _entire_ commit, so the time will vary depending
1658	 * on how many fences and objects are involved. For the purposes of self
1659	 * refresh, this is desirable since it'll give us an idea of how
1660	 * congested things are. This will inform our decision on how often we
1661	 * should enter self refresh after idle.
1662	 *
1663	 * These times will be averaged out in the self refresh helpers to avoid
	 * overreacting to one outlier frame.
1665	 */
1666	start = ktime_get();
1667
1668	drm_atomic_helper_wait_for_fences(dev, old_state, false);
1669
1670	drm_atomic_helper_wait_for_dependencies(old_state);
1671
1672	/*
1673	 * We cannot safely access new_crtc_state after
	 * drm_atomic_helper_commit_hw_done(), so figure out which CRTCs have
1675	 * self-refresh active beforehand:
1676	 */
1677	for_each_new_crtc_in_state(old_state, crtc, new_crtc_state, i)
1678		if (new_crtc_state->self_refresh_active)
1679			new_self_refresh_mask |= BIT(i);
1680
1681	if (funcs && funcs->atomic_commit_tail)
1682		funcs->atomic_commit_tail(old_state);
1683	else
1684		drm_atomic_helper_commit_tail(old_state);
1685
1686	commit_time_ms = ktime_ms_delta(ktime_get(), start);
1687	if (commit_time_ms > 0)
1688		drm_self_refresh_helper_update_avg_times(old_state,
1689						 (unsigned long)commit_time_ms,
1690						 new_self_refresh_mask);
1691
1692	drm_atomic_helper_commit_cleanup_done(old_state);
1693
1694	drm_atomic_state_put(old_state);
1695}
1696
1697static void commit_work(struct work_struct *work)
1698{
1699	struct drm_atomic_state *state = container_of(work,
1700						      struct drm_atomic_state,
1701						      commit_work);
1702	commit_tail(state);
1703}
1704
1705/**
 * drm_atomic_helper_async_check - check if state can be committed asynchronously
1707 * @dev: DRM device
1708 * @state: the driver state object
1709 *
1710 * This helper will check if it is possible to commit the state asynchronously.
1711 * Async commits are not supposed to swap the states like normal sync commits
1712 * but just do in-place changes on the current state.
1713 *
 * It will return 0 if the commit can happen in an asynchronous fashion or an
 * error if not. Note that an error just means the state can't be committed
 * asynchronously; in that case the commit should be treated like a normal
 * synchronous commit.
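 *
 * For reference, the check helpers use this function to route legacy cursor
 * updates through the async path when possible, roughly along these lines
 * (sketch, not the verbatim code)::
 *
 *	if (state->legacy_cursor_update)
 *		state->async_update = !drm_atomic_helper_async_check(dev, state);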
1717 */
1718int drm_atomic_helper_async_check(struct drm_device *dev,
1719				   struct drm_atomic_state *state)
1720{
1721	struct drm_crtc *crtc;
1722	struct drm_crtc_state *crtc_state;
1723	struct drm_plane *plane = NULL;
1724	struct drm_plane_state *old_plane_state = NULL;
1725	struct drm_plane_state *new_plane_state = NULL;
1726	const struct drm_plane_helper_funcs *funcs;
1727	int i, n_planes = 0;
1728
1729	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
1730		if (drm_atomic_crtc_needs_modeset(crtc_state))
1731			return -EINVAL;
1732	}
1733
1734	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i)
1735		n_planes++;
1736
1737	/* FIXME: we support only single plane updates for now */
1738	if (n_planes != 1)
1739		return -EINVAL;
1740
1741	if (!new_plane_state->crtc ||
1742	    old_plane_state->crtc != new_plane_state->crtc)
1743		return -EINVAL;
1744
1745	funcs = plane->helper_private;
1746	if (!funcs->atomic_async_update)
1747		return -EINVAL;
1748
1749	if (new_plane_state->fence)
1750		return -EINVAL;
1751
1752	/*
1753	 * Don't do an async update if there is an outstanding commit modifying
1754	 * the plane.  This prevents our async update's changes from getting
1755	 * overridden by a previous synchronous update's state.
1756	 */
1757	if (old_plane_state->commit &&
1758	    !try_wait_for_completion(&old_plane_state->commit->hw_done))
1759		return -EBUSY;
1760
1761	return funcs->atomic_async_check(plane, new_plane_state);
1762}
1763EXPORT_SYMBOL(drm_atomic_helper_async_check);
1764
1765/**
1766 * drm_atomic_helper_async_commit - commit state asynchronously
1767 * @dev: DRM device
1768 * @state: the driver state object
1769 *
1770 * This function commits a state asynchronously, i.e., not vblank
1771 * synchronized. It should be used on a state only when
 * drm_atomic_helper_async_check() succeeds. Async commits are not supposed to swap
1773 * the states like normal sync commits, but just do in-place changes on the
1774 * current state.
1775 *
1776 * TODO: Implement full swap instead of doing in-place changes.
1777 */
1778void drm_atomic_helper_async_commit(struct drm_device *dev,
1779				    struct drm_atomic_state *state)
1780{
1781	struct drm_plane *plane;
1782	struct drm_plane_state *plane_state;
1783	const struct drm_plane_helper_funcs *funcs;
1784	int i;
1785
1786	for_each_new_plane_in_state(state, plane, plane_state, i) {
1787		struct drm_framebuffer *new_fb = plane_state->fb;
1788		struct drm_framebuffer *old_fb = plane->state->fb;
1789
1790		funcs = plane->helper_private;
1791		funcs->atomic_async_update(plane, plane_state);
1792
1793		/*
1794		 * ->atomic_async_update() is supposed to update the
1795		 * plane->state in-place, make sure at least common
1796		 * properties have been properly updated.
1797		 */
1798		WARN_ON_ONCE(plane->state->fb != new_fb);
1799		WARN_ON_ONCE(plane->state->crtc_x != plane_state->crtc_x);
1800		WARN_ON_ONCE(plane->state->crtc_y != plane_state->crtc_y);
1801		WARN_ON_ONCE(plane->state->src_x != plane_state->src_x);
1802		WARN_ON_ONCE(plane->state->src_y != plane_state->src_y);
1803
1804		/*
1805		 * Make sure the FBs have been swapped so that cleanups in the
1806		 * new_state performs a cleanup in the old FB.
1807		 */
1808		WARN_ON_ONCE(plane_state->fb != old_fb);
1809	}
1810}
1811EXPORT_SYMBOL(drm_atomic_helper_async_commit);
1812
1813/**
1814 * drm_atomic_helper_commit - commit validated state object
1815 * @dev: DRM device
1816 * @state: the driver state object
1817 * @nonblock: whether nonblocking behavior is requested.
1818 *
 * This function commits a state object that has been pre-validated with
 * drm_atomic_helper_check(). This can still fail when e.g. the framebuffer
 * reservation fails. This
1821 * function implements nonblocking commits, using
1822 * drm_atomic_helper_setup_commit() and related functions.
1823 *
1824 * Committing the actual hardware state is done through the
1825 * &drm_mode_config_helper_funcs.atomic_commit_tail callback, or its default
1826 * implementation drm_atomic_helper_commit_tail().
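 *
 * Drivers that don't need anything special in their commit path typically
 * plug this function straight into their &drm_mode_config_funcs, for example
 * (hypothetical foo driver; using drm_gem_fb_create() for .fb_create is just
 * an illustration)::
 *
 *	static const struct drm_mode_config_funcs foo_mode_config_funcs = {
 *		.fb_create = drm_gem_fb_create,
 *		.atomic_check = drm_atomic_helper_check,
 *		.atomic_commit = drm_atomic_helper_commit,
 *	};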
1827 *
1828 * RETURNS:
1829 * Zero for success or -errno.
1830 */
1831int drm_atomic_helper_commit(struct drm_device *dev,
1832			     struct drm_atomic_state *state,
1833			     bool nonblock)
1834{
1835	int ret;
1836
1837	if (state->async_update) {
1838		ret = drm_atomic_helper_prepare_planes(dev, state);
1839		if (ret)
1840			return ret;
1841
1842		drm_atomic_helper_async_commit(dev, state);
1843		drm_atomic_helper_cleanup_planes(dev, state);
1844
1845		return 0;
1846	}
1847
1848	ret = drm_atomic_helper_setup_commit(state, nonblock);
1849	if (ret)
1850		return ret;
1851
1852	INIT_WORK(&state->commit_work, commit_work);
1853
1854	ret = drm_atomic_helper_prepare_planes(dev, state);
1855	if (ret)
1856		return ret;
1857
1858	if (!nonblock) {
1859		ret = drm_atomic_helper_wait_for_fences(dev, state, true);
1860		if (ret)
1861			goto err;
1862	}
1863
1864	/*
1865	 * This is the point of no return - everything below never fails except
1866	 * when the hw goes bonghits. Which means we can commit the new state on
1867	 * the software side now.
1868	 */
1869
1870	ret = drm_atomic_helper_swap_state(state, true);
1871	if (ret)
1872		goto err;
1873
1874	/*
1875	 * Everything below can be run asynchronously without the need to grab
1876	 * any modeset locks at all under one condition: It must be guaranteed
1877	 * that the asynchronous work has either been cancelled (if the driver
1878	 * supports it, which at least requires that the framebuffers get
1879	 * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
1880	 * before the new state gets committed on the software side with
1881	 * drm_atomic_helper_swap_state().
1882	 *
1883	 * This scheme allows new atomic state updates to be prepared and
1884	 * checked in parallel to the asynchronous completion of the previous
1885	 * update. Which is important since compositors need to figure out the
1886	 * composition of the next frame right after having submitted the
1887	 * current layout.
1888	 *
1889	 * NOTE: Commit work has multiple phases, first hardware commit, then
1890	 * cleanup. We want them to overlap, hence need system_unbound_wq to
 * make sure work items don't artificially stall on each other.
1892	 */
1893
1894	drm_atomic_state_get(state);
1895	if (nonblock)
1896		queue_work(system_unbound_wq, &state->commit_work);
1897	else
1898		commit_tail(state);
1899
1900	return 0;
1901
1902err:
1903	drm_atomic_helper_cleanup_planes(dev, state);
1904	return ret;
1905}
1906EXPORT_SYMBOL(drm_atomic_helper_commit);
1907
1908/**
1909 * DOC: implementing nonblocking commit
1910 *
1911 * Nonblocking atomic commits should use struct &drm_crtc_commit to sequence
 * different operations against each other. Locks, especially struct
1913 * &drm_modeset_lock, should not be held in worker threads or any other
1914 * asynchronous context used to commit the hardware state.
1915 *
1916 * drm_atomic_helper_commit() implements the recommended sequence for
1917 * nonblocking commits, using drm_atomic_helper_setup_commit() internally:
1918 *
1919 * 1. Run drm_atomic_helper_prepare_planes(). Since this can fail and we
1920 * need to propagate out of memory/VRAM errors to userspace, it must be called
1921 * synchronously.
1922 *
1923 * 2. Synchronize with any outstanding nonblocking commit worker threads which
1924 * might be affected by the new state update. This is handled by
1925 * drm_atomic_helper_setup_commit().
1926 *
1927 * Asynchronous workers need to have sufficient parallelism to be able to run
1928 * different atomic commits on different CRTCs in parallel. The simplest way to
1929 * achieve this is by running them on the &system_unbound_wq work queue. Note
1930 * that drivers are not required to split up atomic commits and run an
1931 * individual commit in parallel - userspace is supposed to do that if it cares.
1932 * But it might be beneficial to do that for modesets, since those necessarily
1933 * must be done as one global operation, and enabling or disabling a CRTC can
1934 * take a long time. But even that is not required.
1935 *
1936 * IMPORTANT: A &drm_atomic_state update for multiple CRTCs is sequenced
1937 * against all CRTCs therein. Therefore for atomic state updates which only flip
1938 * planes the driver must not get the struct &drm_crtc_state of unrelated CRTCs
1939 * in its atomic check code: This would prevent committing of atomic updates to
1940 * multiple CRTCs in parallel. In general, adding additional state structures
1941 * should be avoided as much as possible, because this reduces parallelism in
1942 * (nonblocking) commits, both due to locking and due to commit sequencing
1943 * requirements.
1944 *
1945 * 3. The software state is updated synchronously with
1946 * drm_atomic_helper_swap_state(). Doing this under the protection of all modeset
1947 * locks means concurrent callers never see inconsistent state. Note that commit
1948 * workers do not hold any locks; their access is only coordinated through
1949 * ordering. If workers would access state only through the pointers in the
1950 * free-standing state objects (currently not the case for any driver) then even
1951 * multiple pending commits could be in-flight at the same time.
1952 *
1953 * 4. Schedule a work item to do all subsequent steps, using the split-out
1954 * commit helpers: a) pre-plane commit b) plane commit c) post-plane commit and
1955 * then cleaning up the framebuffers after the old framebuffer is no longer
1956 * being displayed. The scheduled work should synchronize against other workers
1957 * using the &drm_crtc_commit infrastructure as needed. See
1958 * drm_atomic_helper_setup_commit() for more details.
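 *
 * Put together, the worker scheduled in step 4 could look roughly like the
 * sketch below. The struct foo_commit carrying the work item and the state
 * reference is hypothetical; drivers using drm_atomic_helper_commit() get an
 * equivalent worker for free::
 *
 *	static void foo_commit_work(struct work_struct *work)
 *	{
 *		struct foo_commit *c = container_of(work, struct foo_commit, work);
 *		struct drm_atomic_state *old_state = c->state;
 *		struct drm_device *dev = old_state->dev;
 *
 *		drm_atomic_helper_wait_for_fences(dev, old_state, false);
 *		drm_atomic_helper_wait_for_dependencies(old_state);
 *
 *		drm_atomic_helper_commit_modeset_disables(dev, old_state);
 *		drm_atomic_helper_commit_planes(dev, old_state, 0);
 *		drm_atomic_helper_commit_modeset_enables(dev, old_state);
 *
 *		drm_atomic_helper_commit_hw_done(old_state);
 *		drm_atomic_helper_wait_for_vblanks(dev, old_state);
 *
 *		drm_atomic_helper_cleanup_planes(dev, old_state);
 *		drm_atomic_helper_commit_cleanup_done(old_state);
 *
 *		drm_atomic_state_put(old_state);
 *	}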
1959 */
1960
1961static int stall_checks(struct drm_crtc *crtc, bool nonblock)
1962{
1963	struct drm_crtc_commit *commit, *stall_commit = NULL;
1964	bool completed = true;
1965	int i;
1966	long ret = 0;
1967
1968	spin_lock(&crtc->commit_lock);
1969	i = 0;
1970	list_for_each_entry(commit, &crtc->commit_list, commit_entry) {
1971		if (i == 0) {
1972			completed = try_wait_for_completion(&commit->flip_done);
1973			/* Userspace is not allowed to get ahead of the previous
1974			 * commit with nonblocking ones. */
1975			if (!completed && nonblock) {
1976				spin_unlock(&crtc->commit_lock);
1977				return -EBUSY;
1978			}
1979		} else if (i == 1) {
1980			stall_commit = drm_crtc_commit_get(commit);
1981			break;
1982		}
1983
1984		i++;
1985	}
1986	spin_unlock(&crtc->commit_lock);
1987
1988	if (!stall_commit)
1989		return 0;
1990
1991	/* We don't want to let commits get ahead of cleanup work too much,
1992	 * stalling on 2nd previous commit means triple-buffer won't ever stall.
1993	 */
1994	ret = wait_for_completion_interruptible_timeout(&stall_commit->cleanup_done,
1995							10*HZ);
1996	if (ret == 0)
1997		DRM_ERROR("[CRTC:%d:%s] cleanup_done timed out\n",
1998			  crtc->base.id, crtc->name);
1999
2000	drm_crtc_commit_put(stall_commit);
2001
2002	return ret < 0 ? ret : 0;
2003}
2004
2005static void release_crtc_commit(struct completion *completion)
2006{
2007	struct drm_crtc_commit *commit = container_of(completion,
2008						      typeof(*commit),
2009						      flip_done);
2010
2011	drm_crtc_commit_put(commit);
2012}
2013
2014static void init_commit(struct drm_crtc_commit *commit, struct drm_crtc *crtc)
2015{
2016	init_completion(&commit->flip_done);
2017	init_completion(&commit->hw_done);
2018	init_completion(&commit->cleanup_done);
2019	INIT_LIST_HEAD(&commit->commit_entry);
2020	kref_init(&commit->ref);
2021	commit->crtc = crtc;
2022}
2023
2024static struct drm_crtc_commit *
2025crtc_or_fake_commit(struct drm_atomic_state *state, struct drm_crtc *crtc)
2026{
2027	if (crtc) {
2028		struct drm_crtc_state *new_crtc_state;
2029
2030		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
2031
2032		return new_crtc_state->commit;
2033	}
2034
2035	if (!state->fake_commit) {
2036		state->fake_commit = kzalloc(sizeof(*state->fake_commit), GFP_KERNEL);
2037		if (!state->fake_commit)
2038			return NULL;
2039
2040		init_commit(state->fake_commit, NULL);
2041	}
2042
2043	return state->fake_commit;
2044}
2045
2046/**
2047 * drm_atomic_helper_setup_commit - setup possibly nonblocking commit
2048 * @state: new modeset state to be committed
2049 * @nonblock: whether nonblocking behavior is requested.
2050 *
2051 * This function prepares @state to be used by the atomic helper's support for
2052 * nonblocking commits. Drivers using the nonblocking commit infrastructure
2053 * should always call this function from their
2054 * &drm_mode_config_funcs.atomic_commit hook.
2055 *
2056 * To be able to use this support drivers need to use a few more helper
2057 * functions. drm_atomic_helper_wait_for_dependencies() must be called before
2058 * actually committing the hardware state, and for nonblocking commits this call
2059 * must be placed in the async worker. See also drm_atomic_helper_swap_state()
2060 * and its stall parameter, for when a driver's commit hooks look at the
2061 * &drm_crtc.state, &drm_plane.state or &drm_connector.state pointer directly.
2062 *
2063 * Completion of the hardware commit step must be signalled using
2064 * drm_atomic_helper_commit_hw_done(). After this step the driver is not allowed
2065 * to read or change any permanent software or hardware modeset state. The only
2066 * exception is state protected by other means than &drm_modeset_lock locks.
2067 * Only the free standing @state with pointers to the old state structures can
2068 * be inspected, e.g. to clean up old buffers using
2069 * drm_atomic_helper_cleanup_planes().
2070 *
2071 * At the very end, before cleaning up @state drivers must call
2072 * drm_atomic_helper_commit_cleanup_done().
2073 *
 * This is all implemented in drm_atomic_helper_commit(), giving drivers a
2075 * complete and easy-to-use default implementation of the atomic_commit() hook.
2076 *
2077 * The tracking of asynchronously executed and still pending commits is done
2078 * using the core structure &drm_crtc_commit.
2079 *
2080 * By default there's no need to clean up resources allocated by this function
2081 * explicitly: drm_atomic_state_default_clear() will take care of that
2082 * automatically.
2083 *
2084 * Returns:
2085 *
2086 * 0 on success. -EBUSY when userspace schedules nonblocking commits too fast,
2087 * -ENOMEM on allocation failures and -EINTR when a signal is pending.
2088 */
2089int drm_atomic_helper_setup_commit(struct drm_atomic_state *state,
2090				   bool nonblock)
2091{
2092	struct drm_crtc *crtc;
2093	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
2094	struct drm_connector *conn;
2095	struct drm_connector_state *old_conn_state, *new_conn_state;
2096	struct drm_plane *plane;
2097	struct drm_plane_state *old_plane_state, *new_plane_state;
2098	struct drm_crtc_commit *commit;
2099	int i, ret;
2100
2101	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
2102		commit = kzalloc(sizeof(*commit), GFP_KERNEL);
2103		if (!commit)
2104			return -ENOMEM;
2105
2106		init_commit(commit, crtc);
2107
2108		new_crtc_state->commit = commit;
2109
2110		ret = stall_checks(crtc, nonblock);
2111		if (ret)
2112			return ret;
2113
2114		/* Drivers only send out events when at least either current or
2115		 * new CRTC state is active. Complete right away if everything
2116		 * stays off. */
2117		if (!old_crtc_state->active && !new_crtc_state->active) {
2118			complete_all(&commit->flip_done);
2119			continue;
2120		}
2121
2122		/* Legacy cursor updates are fully unsynced. */
2123		if (state->legacy_cursor_update) {
2124			complete_all(&commit->flip_done);
2125			continue;
2126		}
2127
2128		if (!new_crtc_state->event) {
2129			commit->event = kzalloc(sizeof(*commit->event),
2130						GFP_KERNEL);
2131			if (!commit->event)
2132				return -ENOMEM;
2133
2134			new_crtc_state->event = commit->event;
2135		}
2136
2137		new_crtc_state->event->base.completion = &commit->flip_done;
2138		new_crtc_state->event->base.completion_release = release_crtc_commit;
2139		drm_crtc_commit_get(commit);
2140
2141		commit->abort_completion = true;
2142
2143		state->crtcs[i].commit = commit;
2144		drm_crtc_commit_get(commit);
2145	}
2146
2147	for_each_oldnew_connector_in_state(state, conn, old_conn_state, new_conn_state, i) {
2148		/* Userspace is not allowed to get ahead of the previous
2149		 * commit with nonblocking ones. */
2150		if (nonblock && old_conn_state->commit &&
2151		    !try_wait_for_completion(&old_conn_state->commit->flip_done))
2152			return -EBUSY;
2153
2154		/* Always track connectors explicitly for e.g. link retraining. */
2155		commit = crtc_or_fake_commit(state, new_conn_state->crtc ?: old_conn_state->crtc);
2156		if (!commit)
2157			return -ENOMEM;
2158
2159		new_conn_state->commit = drm_crtc_commit_get(commit);
2160	}
2161
2162	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
2163		/* Userspace is not allowed to get ahead of the previous
2164		 * commit with nonblocking ones. */
2165		if (nonblock && old_plane_state->commit &&
2166		    !try_wait_for_completion(&old_plane_state->commit->flip_done))
2167			return -EBUSY;
2168
2169		/* Always track planes explicitly for async pageflip support. */
2170		commit = crtc_or_fake_commit(state, new_plane_state->crtc ?: old_plane_state->crtc);
2171		if (!commit)
2172			return -ENOMEM;
2173
2174		new_plane_state->commit = drm_crtc_commit_get(commit);
2175	}
2176
2177	return 0;
2178}
2179EXPORT_SYMBOL(drm_atomic_helper_setup_commit);
2180
2181/**
 * drm_atomic_helper_wait_for_dependencies - wait for required preceding commits
2183 * @old_state: atomic state object with old state structures
2184 *
 * This function waits for all preceding commits that touch the same CRTC as
2186 * @old_state to both be committed to the hardware (as signalled by
2187 * drm_atomic_helper_commit_hw_done()) and executed by the hardware (as signalled
2188 * by calling drm_crtc_send_vblank_event() on the &drm_crtc_state.event).
2189 *
2190 * This is part of the atomic helper support for nonblocking commits, see
2191 * drm_atomic_helper_setup_commit() for an overview.
2192 */
2193void drm_atomic_helper_wait_for_dependencies(struct drm_atomic_state *old_state)
2194{
2195	struct drm_crtc *crtc;
2196	struct drm_crtc_state *old_crtc_state;
2197	struct drm_plane *plane;
2198	struct drm_plane_state *old_plane_state;
2199	struct drm_connector *conn;
2200	struct drm_connector_state *old_conn_state;
2201	struct drm_crtc_commit *commit;
2202	int i;
2203	long ret;
2204
2205	for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) {
2206		commit = old_crtc_state->commit;
2207
2208		if (!commit)
2209			continue;
2210
2211		ret = wait_for_completion_timeout(&commit->hw_done,
2212						  10*HZ);
2213		if (ret == 0)
2214			DRM_ERROR("[CRTC:%d:%s] hw_done timed out\n",
2215				  crtc->base.id, crtc->name);
2216
2217		/* Currently no support for overwriting flips, hence
2218		 * stall for previous one to execute completely. */
2219		ret = wait_for_completion_timeout(&commit->flip_done,
2220						  10*HZ);
2221		if (ret == 0)
2222			DRM_ERROR("[CRTC:%d:%s] flip_done timed out\n",
2223				  crtc->base.id, crtc->name);
2224	}
2225
2226	for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
2227		commit = old_conn_state->commit;
2228
2229		if (!commit)
2230			continue;
2231
2232		ret = wait_for_completion_timeout(&commit->hw_done,
2233						  10*HZ);
2234		if (ret == 0)
2235			DRM_ERROR("[CONNECTOR:%d:%s] hw_done timed out\n",
2236				  conn->base.id, conn->name);
2237
2238		/* Currently no support for overwriting flips, hence
2239		 * stall for previous one to execute completely. */
2240		ret = wait_for_completion_timeout(&commit->flip_done,
2241						  10*HZ);
2242		if (ret == 0)
2243			DRM_ERROR("[CONNECTOR:%d:%s] flip_done timed out\n",
2244				  conn->base.id, conn->name);
2245	}
2246
2247	for_each_old_plane_in_state(old_state, plane, old_plane_state, i) {
2248		commit = old_plane_state->commit;
2249
2250		if (!commit)
2251			continue;
2252
2253		ret = wait_for_completion_timeout(&commit->hw_done,
2254						  10*HZ);
2255		if (ret == 0)
2256			DRM_ERROR("[PLANE:%d:%s] hw_done timed out\n",
2257				  plane->base.id, plane->name);
2258
2259		/* Currently no support for overwriting flips, hence
2260		 * stall for previous one to execute completely. */
2261		ret = wait_for_completion_timeout(&commit->flip_done,
2262						  10*HZ);
2263		if (ret == 0)
2264			DRM_ERROR("[PLANE:%d:%s] flip_done timed out\n",
2265				  plane->base.id, plane->name);
2266	}
2267}
2268EXPORT_SYMBOL(drm_atomic_helper_wait_for_dependencies);
2269
2270/**
2271 * drm_atomic_helper_fake_vblank - fake VBLANK events if needed
2272 * @old_state: atomic state object with old state structures
2273 *
2274 * This function walks all CRTCs and fakes VBLANK events on those with
2275 * &drm_crtc_state.no_vblank set to true and &drm_crtc_state.event != NULL.
 * The primary use of this function is for writeback connectors working in
 * oneshot mode, which only fake a VBLANK event when a job is queued. Without
 * this helper, any change to the pipeline that does not touch such a
 * connector leads to timeouts when calling
 * drm_atomic_helper_wait_for_vblanks() or
 * drm_atomic_helper_wait_for_flip_done(). In addition to writeback
 * connectors, this function can also fake VBLANK events for CRTCs without a
 * VBLANK interrupt.
2284 *
2285 * This is part of the atomic helper support for nonblocking commits, see
2286 * drm_atomic_helper_setup_commit() for an overview.
2287 */
2288void drm_atomic_helper_fake_vblank(struct drm_atomic_state *old_state)
2289{
2290	struct drm_crtc_state *new_crtc_state;
2291	struct drm_crtc *crtc;
2292	int i;
2293
2294	for_each_new_crtc_in_state(old_state, crtc, new_crtc_state, i) {
2295		unsigned long flags;
2296
2297		if (!new_crtc_state->no_vblank)
2298			continue;
2299
2300		spin_lock_irqsave(&old_state->dev->event_lock, flags);
2301		if (new_crtc_state->event) {
2302			drm_crtc_send_vblank_event(crtc,
2303						   new_crtc_state->event);
2304			new_crtc_state->event = NULL;
2305		}
2306		spin_unlock_irqrestore(&old_state->dev->event_lock, flags);
2307	}
2308}
2309EXPORT_SYMBOL(drm_atomic_helper_fake_vblank);
2310
2311/**
 * drm_atomic_helper_commit_hw_done - signal completion of the hardware commit step
2313 * @old_state: atomic state object with old state structures
2314 *
2315 * This function is used to signal completion of the hardware commit step. After
2316 * this step the driver is not allowed to read or change any permanent software
2317 * or hardware modeset state. The only exception is state protected by other
2318 * means than &drm_modeset_lock locks.
2319 *
 * Drivers should try to postpone any expensive or delayed cleanup work until
 * after this function is called.
2322 *
2323 * This is part of the atomic helper support for nonblocking commits, see
2324 * drm_atomic_helper_setup_commit() for an overview.
2325 */
2326void drm_atomic_helper_commit_hw_done(struct drm_atomic_state *old_state)
2327{
2328	struct drm_crtc *crtc;
2329	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
2330	struct drm_crtc_commit *commit;
2331	int i;
2332
2333	for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
2334		commit = new_crtc_state->commit;
2335		if (!commit)
2336			continue;
2337
2338		/*
2339		 * copy new_crtc_state->commit to old_crtc_state->commit,
2340		 * it's unsafe to touch new_crtc_state after hw_done,
2341		 * but we still need to do so in cleanup_done().
2342		 */
2343		if (old_crtc_state->commit)
2344			drm_crtc_commit_put(old_crtc_state->commit);
2345
2346		old_crtc_state->commit = drm_crtc_commit_get(commit);
2347
2348		/* backend must have consumed any event by now */
2349		WARN_ON(new_crtc_state->event);
2350		complete_all(&commit->hw_done);
2351	}
2352
2353	if (old_state->fake_commit) {
2354		complete_all(&old_state->fake_commit->hw_done);
2355		complete_all(&old_state->fake_commit->flip_done);
2356	}
2357}
2358EXPORT_SYMBOL(drm_atomic_helper_commit_hw_done);
2359
2360/**
2361 * drm_atomic_helper_commit_cleanup_done - signal completion of commit
2362 * @old_state: atomic state object with old state structures
2363 *
2364 * This signals completion of the atomic update @old_state, including any
2365 * cleanup work. If used, it must be called right before calling
2366 * drm_atomic_state_put().
2367 *
2368 * This is part of the atomic helper support for nonblocking commits, see
2369 * drm_atomic_helper_setup_commit() for an overview.
2370 */
2371void drm_atomic_helper_commit_cleanup_done(struct drm_atomic_state *old_state)
2372{
2373	struct drm_crtc *crtc;
2374	struct drm_crtc_state *old_crtc_state;
2375	struct drm_crtc_commit *commit;
2376	int i;
2377
2378	for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) {
2379		commit = old_crtc_state->commit;
2380		if (WARN_ON(!commit))
2381			continue;
2382
2383		complete_all(&commit->cleanup_done);
2384		WARN_ON(!try_wait_for_completion(&commit->hw_done));
2385
2386		spin_lock(&crtc->commit_lock);
2387		list_del(&commit->commit_entry);
2388		spin_unlock(&crtc->commit_lock);
2389	}
2390
2391	if (old_state->fake_commit) {
2392		complete_all(&old_state->fake_commit->cleanup_done);
2393		WARN_ON(!try_wait_for_completion(&old_state->fake_commit->hw_done));
2394	}
2395}
2396EXPORT_SYMBOL(drm_atomic_helper_commit_cleanup_done);
2397
2398/**
2399 * drm_atomic_helper_prepare_planes - prepare plane resources before commit
2400 * @dev: DRM device
2401 * @state: atomic state object with new state structures
2402 *
2403 * This function prepares plane state, specifically framebuffers, for the new
2404 * configuration, by calling &drm_plane_helper_funcs.prepare_fb. If any failure
2405 * is encountered this function will call &drm_plane_helper_funcs.cleanup_fb on
2406 * any already successfully prepared framebuffer.
2407 *
2408 * Returns:
2409 * 0 on success, negative error code on failure.
2410 */
2411int drm_atomic_helper_prepare_planes(struct drm_device *dev,
2412				     struct drm_atomic_state *state)
2413{
2414	struct drm_connector *connector;
2415	struct drm_connector_state *new_conn_state;
2416	struct drm_plane *plane;
2417	struct drm_plane_state *new_plane_state;
2418	int ret, i, j;
2419
2420	for_each_new_connector_in_state(state, connector, new_conn_state, i) {
2421		if (!new_conn_state->writeback_job)
2422			continue;
2423
2424		ret = drm_writeback_prepare_job(new_conn_state->writeback_job);
2425		if (ret < 0)
2426			return ret;
2427	}
2428
2429	for_each_new_plane_in_state(state, plane, new_plane_state, i) {
2430		const struct drm_plane_helper_funcs *funcs;
2431
2432		funcs = plane->helper_private;
2433
2434		if (funcs->prepare_fb) {
2435			ret = funcs->prepare_fb(plane, new_plane_state);
2436			if (ret)
2437				goto fail;
2438		}
2439	}
2440
2441	return 0;
2442
2443fail:
2444	for_each_new_plane_in_state(state, plane, new_plane_state, j) {
2445		const struct drm_plane_helper_funcs *funcs;
2446
2447		if (j >= i)
2448			continue;
2449
2450		funcs = plane->helper_private;
2451
2452		if (funcs->cleanup_fb)
2453			funcs->cleanup_fb(plane, new_plane_state);
2454	}
2455
2456	return ret;
2457}
2458EXPORT_SYMBOL(drm_atomic_helper_prepare_planes);
2459
2460static bool plane_crtc_active(const struct drm_plane_state *state)
2461{
2462	return state->crtc && state->crtc->state->active;
2463}
2464
2465/**
2466 * drm_atomic_helper_commit_planes - commit plane state
2467 * @dev: DRM device
2468 * @old_state: atomic state object with old state structures
2469 * @flags: flags for committing plane state
2470 *
2471 * This function commits the new plane state using the plane and atomic helper
2472 * functions for planes and CRTCs. It assumes that the atomic state has already
2473 * been pushed into the relevant object state pointers, since this step can no
2474 * longer fail.
2475 *
 * It still requires the global state object @old_state to know which planes
 * and CRTCs need to be updated, though.
2478 *
2479 * Note that this function does all plane updates across all CRTCs in one step.
2480 * If the hardware can't support this approach look at
2481 * drm_atomic_helper_commit_planes_on_crtc() instead.
2482 *
2483 * Plane parameters can be updated by applications while the associated CRTC is
2484 * disabled. The DRM/KMS core will store the parameters in the plane state,
2485 * which will be available to the driver when the CRTC is turned on. As a result
2486 * most drivers don't need to be immediately notified of plane updates for a
2487 * disabled CRTC.
2488 *
2489 * Unless otherwise needed, drivers are advised to set the ACTIVE_ONLY flag in
2490 * @flags in order not to receive plane update notifications related to a
2491 * disabled CRTC. This avoids the need to manually ignore plane updates in
2492 * driver code when the driver and/or hardware can't or just don't need to deal
2493 * with updates on disabled CRTCs, for example when supporting runtime PM.
2494 *
 * Drivers may set the NO_DISABLE_AFTER_MODESET flag in @flags if the relevant
 * display controllers require the CRTC's planes to be disabled together with
 * the CRTC itself. With this flag set the function skips the
 * &drm_plane_helper_funcs.atomic_disable call for a plane if the CRTC of the
 * old plane state needs a modeset operation. Of course, the drivers then need
 * to disable the planes in their CRTC disable callbacks, since no one else
 * would do that.
2501 *
2502 * The drm_atomic_helper_commit() default implementation doesn't set the
2503 * ACTIVE_ONLY flag to most closely match the behaviour of the legacy helpers.
2504 * This should not be copied blindly by drivers.
2505 */
2506void drm_atomic_helper_commit_planes(struct drm_device *dev,
2507				     struct drm_atomic_state *old_state,
2508				     uint32_t flags)
2509{
2510	struct drm_crtc *crtc;
2511	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
2512	struct drm_plane *plane;
2513	struct drm_plane_state *old_plane_state, *new_plane_state;
2514	int i;
2515	bool active_only = flags & DRM_PLANE_COMMIT_ACTIVE_ONLY;
2516	bool no_disable = flags & DRM_PLANE_COMMIT_NO_DISABLE_AFTER_MODESET;
2517
2518	for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
2519		const struct drm_crtc_helper_funcs *funcs;
2520
2521		funcs = crtc->helper_private;
2522
2523		if (!funcs || !funcs->atomic_begin)
2524			continue;
2525
2526		if (active_only && !new_crtc_state->active)
2527			continue;
2528
2529		funcs->atomic_begin(crtc, old_crtc_state);
2530	}
2531
2532	for_each_oldnew_plane_in_state(old_state, plane, old_plane_state, new_plane_state, i) {
2533		const struct drm_plane_helper_funcs *funcs;
2534		bool disabling;
2535
2536		funcs = plane->helper_private;
2537
2538		if (!funcs)
2539			continue;
2540
2541		disabling = drm_atomic_plane_disabling(old_plane_state,
2542						       new_plane_state);
2543
2544		if (active_only) {
2545			/*
2546			 * Skip planes related to inactive CRTCs. If the plane
2547			 * is enabled use the state of the current CRTC. If the
2548			 * plane is being disabled use the state of the old
2549			 * CRTC to avoid skipping planes being disabled on an
2550			 * active CRTC.
2551			 */
2552			if (!disabling && !plane_crtc_active(new_plane_state))
2553				continue;
2554			if (disabling && !plane_crtc_active(old_plane_state))
2555				continue;
2556		}
2557
2558		/*
2559		 * Special-case disabling the plane if drivers support it.
2560		 */
2561		if (disabling && funcs->atomic_disable) {
2562			struct drm_crtc_state *crtc_state;
2563
2564			crtc_state = old_plane_state->crtc->state;
2565
2566			if (drm_atomic_crtc_needs_modeset(crtc_state) &&
2567			    no_disable)
2568				continue;
2569
2570			funcs->atomic_disable(plane, old_plane_state);
2571		} else if (new_plane_state->crtc || disabling) {
2572			funcs->atomic_update(plane, old_plane_state);
2573		}
2574	}
2575
2576	for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
2577		const struct drm_crtc_helper_funcs *funcs;
2578
2579		funcs = crtc->helper_private;
2580
2581		if (!funcs || !funcs->atomic_flush)
2582			continue;
2583
2584		if (active_only && !new_crtc_state->active)
2585			continue;
2586
2587		funcs->atomic_flush(crtc, old_crtc_state);
2588	}
2589}
2590EXPORT_SYMBOL(drm_atomic_helper_commit_planes);
2591
2592/**
2593 * drm_atomic_helper_commit_planes_on_crtc - commit plane state for a CRTC
2594 * @old_crtc_state: atomic state object with the old CRTC state
2595 *
2596 * This function commits the new plane state using the plane and atomic helper
2597 * functions for planes on the specific CRTC. It assumes that the atomic state
2598 * has already been pushed into the relevant object state pointers, since this
2599 * step can no longer fail.
2600 *
2601 * This function is useful when plane updates should be done CRTC-by-CRTC
2602 * instead of one global step like drm_atomic_helper_commit_planes() does.
2603 *
 * This function can only be safely used when planes are not allowed to move
 * between different CRTCs, because it doesn't handle inter-CRTC dependencies.
 * Callers need to ensure that either no such dependencies exist, or resolve
 * them through ordering of commit calls or through some other means.
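 *
 * For example, a driver-specific commit tail could update the planes of each
 * CRTC in the commit separately (sketch; iteration variables as used with the
 * state iterators elsewhere in this file)::
 *
 *	for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i)
 *		drm_atomic_helper_commit_planes_on_crtc(old_crtc_state);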
2608 */
2609void
2610drm_atomic_helper_commit_planes_on_crtc(struct drm_crtc_state *old_crtc_state)
2611{
2612	const struct drm_crtc_helper_funcs *crtc_funcs;
2613	struct drm_crtc *crtc = old_crtc_state->crtc;
2614	struct drm_atomic_state *old_state = old_crtc_state->state;
2615	struct drm_crtc_state *new_crtc_state =
2616		drm_atomic_get_new_crtc_state(old_state, crtc);
2617	struct drm_plane *plane;
2618	unsigned plane_mask;
2619
2620	plane_mask = old_crtc_state->plane_mask;
2621	plane_mask |= new_crtc_state->plane_mask;
2622
2623	crtc_funcs = crtc->helper_private;
2624	if (crtc_funcs && crtc_funcs->atomic_begin)
2625		crtc_funcs->atomic_begin(crtc, old_crtc_state);
2626
2627	drm_for_each_plane_mask(plane, crtc->dev, plane_mask) {
2628		struct drm_plane_state *old_plane_state =
2629			drm_atomic_get_old_plane_state(old_state, plane);
2630		struct drm_plane_state *new_plane_state =
2631			drm_atomic_get_new_plane_state(old_state, plane);
2632		const struct drm_plane_helper_funcs *plane_funcs;
2633
2634		plane_funcs = plane->helper_private;
2635
2636		if (!old_plane_state || !plane_funcs)
2637			continue;
2638
2639		WARN_ON(new_plane_state->crtc &&
2640			new_plane_state->crtc != crtc);
2641
2642		if (drm_atomic_plane_disabling(old_plane_state, new_plane_state) &&
2643		    plane_funcs->atomic_disable)
2644			plane_funcs->atomic_disable(plane, old_plane_state);
2645		else if (new_plane_state->crtc ||
2646			 drm_atomic_plane_disabling(old_plane_state, new_plane_state))
2647			plane_funcs->atomic_update(plane, old_plane_state);
2648	}
2649
2650	if (crtc_funcs && crtc_funcs->atomic_flush)
2651		crtc_funcs->atomic_flush(crtc, old_crtc_state);
2652}
2653EXPORT_SYMBOL(drm_atomic_helper_commit_planes_on_crtc);
2654
2655/**
2656 * drm_atomic_helper_disable_planes_on_crtc - helper to disable CRTC's planes
2657 * @old_crtc_state: atomic state object with the old CRTC state
2658 * @atomic: if set, synchronize with CRTC's atomic_begin/flush hooks
2659 *
2660 * Disables all planes associated with the given CRTC. This can be
2661 * used for instance in the CRTC helper atomic_disable callback to disable
2662 * all planes.
2663 *
 * If the @atomic parameter is set, the function calls the CRTC's
2665 * atomic_begin hook before and atomic_flush hook after disabling the
2666 * planes.
2667 *
2668 * It is a bug to call this function without having implemented the
2669 * &drm_plane_helper_funcs.atomic_disable plane hook.
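 *
 * A typical caller is a CRTC's &drm_crtc_helper_funcs.atomic_disable hook,
 * sketched here for a hypothetical foo driver (foo_crtc_power_down() stands
 * in for whatever hardware teardown the driver does afterwards; passing false
 * for the atomic parameter assumes the begin/flush bracketing isn't needed on
 * this path)::
 *
 *	static void foo_crtc_atomic_disable(struct drm_crtc *crtc,
 *					    struct drm_crtc_state *old_crtc_state)
 *	{
 *		drm_atomic_helper_disable_planes_on_crtc(old_crtc_state, false);
 *
 *		foo_crtc_power_down(crtc);
 *	}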
2670 */
2671void
2672drm_atomic_helper_disable_planes_on_crtc(struct drm_crtc_state *old_crtc_state,
2673					 bool atomic)
2674{
2675	struct drm_crtc *crtc = old_crtc_state->crtc;
2676	const struct drm_crtc_helper_funcs *crtc_funcs =
2677		crtc->helper_private;
2678	struct drm_plane *plane;
2679
2680	if (atomic && crtc_funcs && crtc_funcs->atomic_begin)
2681		crtc_funcs->atomic_begin(crtc, NULL);
2682
2683	drm_atomic_crtc_state_for_each_plane(plane, old_crtc_state) {
2684		const struct drm_plane_helper_funcs *plane_funcs =
2685			plane->helper_private;
2686
2687		if (!plane_funcs)
2688			continue;
2689
2690		WARN_ON(!plane_funcs->atomic_disable);
2691		if (plane_funcs->atomic_disable)
2692			plane_funcs->atomic_disable(plane, NULL);
2693	}
2694
2695	if (atomic && crtc_funcs && crtc_funcs->atomic_flush)
2696		crtc_funcs->atomic_flush(crtc, NULL);
2697}
2698EXPORT_SYMBOL(drm_atomic_helper_disable_planes_on_crtc);
2699
2700/**
2701 * drm_atomic_helper_cleanup_planes - cleanup plane resources after commit
2702 * @dev: DRM device
2703 * @old_state: atomic state object with old state structures
2704 *
2705 * This function cleans up plane state, specifically framebuffers, from the old
 * configuration. Hence the old configuration must be preserved in @old_state to
2707 * be able to call this function.
2708 *
2709 * This function must also be called on the new state when the atomic update
2710 * fails at any point after calling drm_atomic_helper_prepare_planes().
2711 */
2712void drm_atomic_helper_cleanup_planes(struct drm_device *dev,
2713				      struct drm_atomic_state *old_state)
2714{
2715	struct drm_plane *plane;
2716	struct drm_plane_state *old_plane_state, *new_plane_state;
2717	int i;
2718
2719	for_each_oldnew_plane_in_state(old_state, plane, old_plane_state, new_plane_state, i) {
2720		const struct drm_plane_helper_funcs *funcs;
2721		struct drm_plane_state *plane_state;
2722
2723		/*
2724		 * This might be called before swapping when commit is aborted,
2725		 * in which case we have to cleanup the new state.
2726		 */
2727		if (old_plane_state == plane->state)
2728			plane_state = new_plane_state;
2729		else
2730			plane_state = old_plane_state;
2731
2732		funcs = plane->helper_private;
2733
2734		if (funcs->cleanup_fb)
2735			funcs->cleanup_fb(plane, plane_state);
2736	}
2737}
2738EXPORT_SYMBOL(drm_atomic_helper_cleanup_planes);
2739
2740/**
2741 * drm_atomic_helper_swap_state - store atomic state into current sw state
2742 * @state: atomic state
 * @stall: stall for preceding commits
2744 *
2745 * This function stores the atomic state into the current state pointers in all
 * driver objects. It should be called after all steps that can fail have
 * completed successfully, but before the actual hardware state is committed.
2748 *
2749 * For cleanup and error recovery the current state for all changed objects will
2750 * be swapped into @state.
2751 *
2752 * With that sequence it fits perfectly into the plane prepare/cleanup sequence:
2753 *
2754 * 1. Call drm_atomic_helper_prepare_planes() with the staged atomic state.
2755 *
2756 * 2. Do any other steps that might fail.
2757 *
2758 * 3. Put the staged state into the current state pointers with this function.
2759 *
2760 * 4. Actually commit the hardware state.
2761 *
2762 * 5. Call drm_atomic_helper_cleanup_planes() with @state, which since step 3
2763 * contains the old state. Also do any other cleanup required with that state.
2764 *
2765 * @stall must be set when nonblocking commits for this driver directly access
2766 * the &drm_plane.state, &drm_crtc.state or &drm_connector.state pointer. With
2767 * the current atomic helpers this is almost always the case, since the helpers
2768 * don't pass the right state structures to the callbacks.
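 *
 * In code, a blocking commit implementation following the sequence above
 * condenses to roughly the following sketch, where foo_commit_hw_state() is a
 * stand-in for the driver's hardware programming (step 4) and any other
 * fallible steps happen between steps 1 and 3::
 *
 *	ret = drm_atomic_helper_prepare_planes(dev, state);
 *	if (ret)
 *		return ret;
 *
 *	ret = drm_atomic_helper_swap_state(state, true);
 *	if (ret) {
 *		drm_atomic_helper_cleanup_planes(dev, state);
 *		return ret;
 *	}
 *
 *	foo_commit_hw_state(state);
 *
 *	drm_atomic_helper_cleanup_planes(dev, state);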
2769 *
2770 * Returns:
2771 *
2772 * Returns 0 on success. Can return -ERESTARTSYS when @stall is true and the
2773 * waiting for the previous commits has been interrupted.
2774 */
2775int drm_atomic_helper_swap_state(struct drm_atomic_state *state,
2776				  bool stall)
2777{
2778	int i, ret;
2779	struct drm_connector *connector;
2780	struct drm_connector_state *old_conn_state, *new_conn_state;
2781	struct drm_crtc *crtc;
2782	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
2783	struct drm_plane *plane;
2784	struct drm_plane_state *old_plane_state, *new_plane_state;
2785	struct drm_crtc_commit *commit;
2786	struct drm_private_obj *obj;
2787	struct drm_private_state *old_obj_state, *new_obj_state;
2788
2789	if (stall) {
2790		/*
2791		 * We have to stall for hw_done here before
2792		 * drm_atomic_helper_wait_for_dependencies() because flip
2793		 * depth > 1 is not yet supported by all drivers. As long as
2794		 * obj->state is directly dereferenced anywhere in the drivers
2795		 * atomic_commit_tail function, then it's unsafe to swap state
2796		 * before drm_atomic_helper_commit_hw_done() is called.
2797		 */
2798
2799		for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) {
2800			commit = old_crtc_state->commit;
2801
2802			if (!commit)
2803				continue;
2804
2805			ret = wait_for_completion_interruptible(&commit->hw_done);
2806			if (ret)
2807				return ret;
2808		}
2809
2810		for_each_old_connector_in_state(state, connector, old_conn_state, i) {
2811			commit = old_conn_state->commit;
2812
2813			if (!commit)
2814				continue;
2815
2816			ret = wait_for_completion_interruptible(&commit->hw_done);
2817			if (ret)
2818				return ret;
2819		}
2820
2821		for_each_old_plane_in_state(state, plane, old_plane_state, i) {
2822			commit = old_plane_state->commit;
2823
2824			if (!commit)
2825				continue;
2826
2827			ret = wait_for_completion_interruptible(&commit->hw_done);
2828			if (ret)
2829				return ret;
2830		}
2831	}
2832
2833	for_each_oldnew_connector_in_state(state, connector, old_conn_state, new_conn_state, i) {
2834		WARN_ON(connector->state != old_conn_state);
2835
2836		old_conn_state->state = state;
2837		new_conn_state->state = NULL;
2838
2839		state->connectors[i].state = old_conn_state;
2840		connector->state = new_conn_state;
2841	}
2842
2843	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
2844		WARN_ON(crtc->state != old_crtc_state);
2845
2846		old_crtc_state->state = state;
2847		new_crtc_state->state = NULL;
2848
2849		state->crtcs[i].state = old_crtc_state;
2850		crtc->state = new_crtc_state;
2851
2852		if (new_crtc_state->commit) {
2853			spin_lock(&crtc->commit_lock);
2854			list_add(&new_crtc_state->commit->commit_entry,
2855				 &crtc->commit_list);
2856			spin_unlock(&crtc->commit_lock);
2857
2858			new_crtc_state->commit->event = NULL;
2859		}
2860	}
2861
2862	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
2863		WARN_ON(plane->state != old_plane_state);
2864
2865		old_plane_state->state = state;
2866		new_plane_state->state = NULL;
2867
2868		state->planes[i].state = old_plane_state;
2869		plane->state = new_plane_state;
2870	}
2871
2872	for_each_oldnew_private_obj_in_state(state, obj, old_obj_state, new_obj_state, i) {
2873		WARN_ON(obj->state != old_obj_state);
2874
2875		old_obj_state->state = state;
2876		new_obj_state->state = NULL;
2877
2878		state->private_objs[i].state = old_obj_state;
2879		obj->state = new_obj_state;
2880	}
2881
2882	return 0;
2883}
2884EXPORT_SYMBOL(drm_atomic_helper_swap_state);
2885
2886/**
2887 * drm_atomic_helper_update_plane - Helper for primary plane update using atomic
2888 * @plane: plane object to update
 * @crtc: owning CRTC of the plane
2890 * @fb: framebuffer to flip onto plane
2891 * @crtc_x: x offset of primary plane on @crtc
2892 * @crtc_y: y offset of primary plane on @crtc
2893 * @crtc_w: width of primary plane rectangle on @crtc
2894 * @crtc_h: height of primary plane rectangle on @crtc
2895 * @src_x: x offset of @fb for panning
2896 * @src_y: y offset of @fb for panning
2897 * @src_w: width of source rectangle in @fb
2898 * @src_h: height of source rectangle in @fb
2899 * @ctx: lock acquire context
2900 *
2901 * Provides a default plane update handler using the atomic driver interface.
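 *
 * Drivers normally wire this, together with drm_atomic_helper_disable_plane(),
 * directly into their &drm_plane_funcs, for example (hypothetical foo driver,
 * other callbacks shown only for context)::
 *
 *	static const struct drm_plane_funcs foo_plane_funcs = {
 *		.update_plane = drm_atomic_helper_update_plane,
 *		.disable_plane = drm_atomic_helper_disable_plane,
 *		.destroy = drm_plane_cleanup,
 *		.reset = drm_atomic_helper_plane_reset,
 *		.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
 *		.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
 *	};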
2902 *
2903 * RETURNS:
2904 * Zero on success, error code on failure
2905 */
2906int drm_atomic_helper_update_plane(struct drm_plane *plane,
2907				   struct drm_crtc *crtc,
2908				   struct drm_framebuffer *fb,
2909				   int crtc_x, int crtc_y,
2910				   unsigned int crtc_w, unsigned int crtc_h,
2911				   uint32_t src_x, uint32_t src_y,
2912				   uint32_t src_w, uint32_t src_h,
2913				   struct drm_modeset_acquire_ctx *ctx)
2914{
2915	struct drm_atomic_state *state;
2916	struct drm_plane_state *plane_state;
2917	int ret = 0;
2918
2919	state = drm_atomic_state_alloc(plane->dev);
2920	if (!state)
2921		return -ENOMEM;
2922
2923	state->acquire_ctx = ctx;
2924	plane_state = drm_atomic_get_plane_state(state, plane);
2925	if (IS_ERR(plane_state)) {
2926		ret = PTR_ERR(plane_state);
2927		goto fail;
2928	}
2929
2930	ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
2931	if (ret != 0)
2932		goto fail;
2933	drm_atomic_set_fb_for_plane(plane_state, fb);
2934	plane_state->crtc_x = crtc_x;
2935	plane_state->crtc_y = crtc_y;
2936	plane_state->crtc_w = crtc_w;
2937	plane_state->crtc_h = crtc_h;
2938	plane_state->src_x = src_x;
2939	plane_state->src_y = src_y;
2940	plane_state->src_w = src_w;
2941	plane_state->src_h = src_h;
2942
2943	if (plane == crtc->cursor)
2944		state->legacy_cursor_update = true;
2945
2946	ret = drm_atomic_commit(state);
2947fail:
2948	drm_atomic_state_put(state);
2949	return ret;
2950}
2951EXPORT_SYMBOL(drm_atomic_helper_update_plane);
2952
2953/**
 * drm_atomic_helper_disable_plane - Helper for primary plane disable using atomic
2955 * @plane: plane to disable
2956 * @ctx: lock acquire context
2957 *
2958 * Provides a default plane disable handler using the atomic driver interface.
2959 *
2960 * RETURNS:
2961 * Zero on success, error code on failure
2962 */
2963int drm_atomic_helper_disable_plane(struct drm_plane *plane,
2964				    struct drm_modeset_acquire_ctx *ctx)
2965{
2966	struct drm_atomic_state *state;
2967	struct drm_plane_state *plane_state;
2968	int ret = 0;
2969
2970	state = drm_atomic_state_alloc(plane->dev);
2971	if (!state)
2972		return -ENOMEM;
2973
2974	state->acquire_ctx = ctx;
2975	plane_state = drm_atomic_get_plane_state(state, plane);
2976	if (IS_ERR(plane_state)) {
2977		ret = PTR_ERR(plane_state);
2978		goto fail;
2979	}
2980
2981	if (plane_state->crtc && plane_state->crtc->cursor == plane)
2982		plane_state->state->legacy_cursor_update = true;
2983
2984	ret = __drm_atomic_helper_disable_plane(plane, plane_state);
2985	if (ret != 0)
2986		goto fail;
2987
2988	ret = drm_atomic_commit(state);
2989fail:
2990	drm_atomic_state_put(state);
2991	return ret;
2992}
2993EXPORT_SYMBOL(drm_atomic_helper_disable_plane);
2994
2995/**
2996 * drm_atomic_helper_set_config - set a new config from userspace
2997 * @set: mode set configuration
2998 * @ctx: lock acquisition context
2999 *
3000 * Provides a default CRTC set_config handler using the atomic driver interface.
3001 *
3002 * NOTE: For backwards compatibility with old userspace this automatically
3003 * resets the "link-status" property to GOOD, to force any link
 * re-training. The SETCRTC ioctl does not define whether an update needs a
 * full modeset or just a plane update, hence we're allowed to do
3006 * that. See also drm_connector_set_link_status_property().
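 *
 * Drivers wire this into their &drm_crtc_funcs alongside the other atomic
 * helpers, for example (hypothetical foo driver, other callbacks shown only
 * for context)::
 *
 *	static const struct drm_crtc_funcs foo_crtc_funcs = {
 *		.set_config = drm_atomic_helper_set_config,
 *		.page_flip = drm_atomic_helper_page_flip,
 *		.destroy = drm_crtc_cleanup,
 *		.reset = drm_atomic_helper_crtc_reset,
 *		.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
 *		.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
 *	};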
3007 *
3008 * Returns:
3009 * Returns 0 on success, negative errno numbers on failure.
3010 */
3011int drm_atomic_helper_set_config(struct drm_mode_set *set,
3012				 struct drm_modeset_acquire_ctx *ctx)
3013{
3014	struct drm_atomic_state *state;
3015	struct drm_crtc *crtc = set->crtc;
3016	int ret = 0;
3017
3018	state = drm_atomic_state_alloc(crtc->dev);
3019	if (!state)
3020		return -ENOMEM;
3021
3022	state->acquire_ctx = ctx;
3023	ret = __drm_atomic_helper_set_config(set, state);
3024	if (ret != 0)
3025		goto fail;
3026
3027	ret = handle_conflicting_encoders(state, true);
3028	if (ret)
3029		goto fail;
3030
3031	ret = drm_atomic_commit(state);
3032
3033fail:
3034	drm_atomic_state_put(state);
3035	return ret;
3036}
3037EXPORT_SYMBOL(drm_atomic_helper_set_config);
3038
3039/**
3040 * drm_atomic_helper_disable_all - disable all currently active outputs
3041 * @dev: DRM device
3042 * @ctx: lock acquisition context
3043 *
3044 * Loops through all connectors, finding those that aren't turned off and then
3045 * turns them off by setting their DPMS mode to OFF and deactivating the CRTC
3046 * that they are connected to.
3047 *
3048 * This is used for example in suspend/resume to disable all currently active
3049 * functions when suspending. If you just want to shut down everything at e.g.
3050 * driver unload, look at drm_atomic_helper_shutdown().
3051 *
3052 * Note that if callers haven't already acquired all modeset locks this might
3053 * return -EDEADLK, which must be handled by calling drm_modeset_backoff().
3054 *
3055 * Returns:
3056 * 0 on success or a negative error code on failure.
3057 *
3058 * See also:
3059 * drm_atomic_helper_suspend(), drm_atomic_helper_resume() and
3060 * drm_atomic_helper_shutdown().
3061 */
3062int drm_atomic_helper_disable_all(struct drm_device *dev,
3063				  struct drm_modeset_acquire_ctx *ctx)
3064{
3065	struct drm_atomic_state *state;
3066	struct drm_connector_state *conn_state;
3067	struct drm_connector *conn;
3068	struct drm_plane_state *plane_state;
3069	struct drm_plane *plane;
3070	struct drm_crtc_state *crtc_state;
3071	struct drm_crtc *crtc;
3072	int ret, i;
3073
3074	state = drm_atomic_state_alloc(dev);
3075	if (!state)
3076		return -ENOMEM;
3077
3078	state->acquire_ctx = ctx;
3079
3080	drm_for_each_crtc(crtc, dev) {
3081		crtc_state = drm_atomic_get_crtc_state(state, crtc);
3082		if (IS_ERR(crtc_state)) {
3083			ret = PTR_ERR(crtc_state);
3084			goto free;
3085		}
3086
3087		crtc_state->active = false;
3088
3089		ret = drm_atomic_set_mode_prop_for_crtc(crtc_state, NULL);
3090		if (ret < 0)
3091			goto free;
3092
3093		ret = drm_atomic_add_affected_planes(state, crtc);
3094		if (ret < 0)
3095			goto free;
3096
3097		ret = drm_atomic_add_affected_connectors(state, crtc);
3098		if (ret < 0)
3099			goto free;
3100	}
3101
3102	for_each_new_connector_in_state(state, conn, conn_state, i) {
3103		ret = drm_atomic_set_crtc_for_connector(conn_state, NULL);
3104		if (ret < 0)
3105			goto free;
3106	}
3107
3108	for_each_new_plane_in_state(state, plane, plane_state, i) {
3109		ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
3110		if (ret < 0)
3111			goto free;
3112
3113		drm_atomic_set_fb_for_plane(plane_state, NULL);
3114	}
3115
3116	ret = drm_atomic_commit(state);
3117free:
3118	drm_atomic_state_put(state);
3119	return ret;
3120}
3121EXPORT_SYMBOL(drm_atomic_helper_disable_all);
3122
3123/**
 * drm_atomic_helper_shutdown - shut down all CRTCs
3125 * @dev: DRM device
3126 *
 * This shuts down all CRTCs, which is useful for driver unloading. Shutdown on
3128 * suspend should instead be handled with drm_atomic_helper_suspend(), since
3129 * that also takes a snapshot of the modeset state to be restored on resume.
3130 *
3131 * This is just a convenience wrapper around drm_atomic_helper_disable_all(),
3132 * and it is the atomic version of drm_crtc_force_disable_all().
3133 */
3134void drm_atomic_helper_shutdown(struct drm_device *dev)
3135{
3136	struct drm_modeset_acquire_ctx ctx;
3137	int ret;
3138
3139	DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, ret);
3140
3141	ret = drm_atomic_helper_disable_all(dev, &ctx);
3142	if (ret)
3143		DRM_ERROR("Disabling all crtc's during unload failed with %i\n", ret);
3144
3145	DRM_MODESET_LOCK_ALL_END(dev, ctx, ret);
3146}
3147EXPORT_SYMBOL(drm_atomic_helper_shutdown);
3148
3149/**
3150 * drm_atomic_helper_duplicate_state - duplicate an atomic state object
3151 * @dev: DRM device
3152 * @ctx: lock acquisition context
3153 *
3154 * Makes a copy of the current atomic state by looping over all objects and
3155 * duplicating their respective states. This is used for example by suspend/
3156 * resume support code to save the state prior to suspend such that it can
3157 * be restored upon resume.
3158 *
3159 * Note that this treats atomic state as persistent between save and restore.
3160 * Drivers must make sure that this is possible and won't result in confusion
3161 * or erroneous behaviour.
3162 *
3163 * Note that if callers haven't already acquired all modeset locks this might
3164 * return -EDEADLK, which must be handled by calling drm_modeset_backoff().
3165 *
3166 * Returns:
3167 * A pointer to the copy of the atomic state object on success or an
3168 * ERR_PTR()-encoded error code on failure.
3169 *
3170 * See also:
3171 * drm_atomic_helper_suspend(), drm_atomic_helper_resume()
 */
struct drm_atomic_state *
drm_atomic_helper_duplicate_state(struct drm_device *dev,
				  struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_atomic_state *state;
	struct drm_connector *conn;
	struct drm_connector_list_iter conn_iter;
	struct drm_plane *plane;
	struct drm_crtc *crtc;
	int err = 0;

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return ERR_PTR(-ENOMEM);

	state->acquire_ctx = ctx;
	state->duplicated = true;

	drm_for_each_crtc(crtc, dev) {
		struct drm_crtc_state *crtc_state;

		crtc_state = drm_atomic_get_crtc_state(state, crtc);
		if (IS_ERR(crtc_state)) {
			err = PTR_ERR(crtc_state);
			goto free;
		}
	}

	drm_for_each_plane(plane, dev) {
		struct drm_plane_state *plane_state;

		plane_state = drm_atomic_get_plane_state(state, plane);
		if (IS_ERR(plane_state)) {
			err = PTR_ERR(plane_state);
			goto free;
		}
	}

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(conn, &conn_iter) {
		struct drm_connector_state *conn_state;

		conn_state = drm_atomic_get_connector_state(state, conn);
		if (IS_ERR(conn_state)) {
			err = PTR_ERR(conn_state);
			drm_connector_list_iter_end(&conn_iter);
			goto free;
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	/* clear the acquire context so that it isn't accidentally reused */
	state->acquire_ctx = NULL;

free:
	if (err < 0) {
		drm_atomic_state_put(state);
		state = ERR_PTR(err);
	}

	return state;
}
EXPORT_SYMBOL(drm_atomic_helper_duplicate_state);

/**
 * drm_atomic_helper_suspend - subsystem-level suspend helper
 * @dev: DRM device
 *
 * Duplicates the current atomic state, disables all active outputs and then
 * returns a pointer to the original atomic state to the caller. Drivers can
 * pass this pointer to the drm_atomic_helper_resume() helper upon resume to
 * restore the output configuration that was active at the time the system
 * entered suspend.
 *
 * Note that it is potentially unsafe to use this. The atomic state object
 * returned by this function is assumed to be persistent. Drivers must ensure
 * that this holds true. Before calling this function, drivers must make sure
 * to suspend fbdev emulation so that nothing can be using the device.
 *
 * Returns:
 * A pointer to a copy of the state before suspend on success or an ERR_PTR()-
 * encoded error code on failure. Drivers should store the returned atomic
 * state object and pass it to the drm_atomic_helper_resume() helper upon
 * resume.
 *
 * See also:
 * drm_atomic_helper_duplicate_state(), drm_atomic_helper_disable_all(),
 * drm_atomic_helper_resume(), drm_atomic_helper_commit_duplicated_state()
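 *
 * A minimal sketch of a system suspend hook built on this helper (the
 * foo_device wrapper, its drm and suspend_state members and foo_pm_suspend()
 * are made up for illustration):
 *
 * .. code-block:: c
 *
 *	static int foo_pm_suspend(struct device *dev)
 *	{
 *		struct foo_device *foo = dev_get_drvdata(dev);
 *
 *		/* fbdev emulation must already be suspended at this point */
 *		foo->suspend_state = drm_atomic_helper_suspend(&foo->drm);
 *		if (IS_ERR(foo->suspend_state))
 *			return PTR_ERR(foo->suspend_state);
 *
 *		return 0;
 *	}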
 */
struct drm_atomic_state *drm_atomic_helper_suspend(struct drm_device *dev)
{
	struct drm_modeset_acquire_ctx ctx;
	struct drm_atomic_state *state;
	int err;

	/* This can never be returned, but it makes the compiler happy */
	state = ERR_PTR(-EINVAL);

	DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, err);

	state = drm_atomic_helper_duplicate_state(dev, &ctx);
	if (IS_ERR(state))
		goto unlock;

	err = drm_atomic_helper_disable_all(dev, &ctx);
	if (err < 0) {
		drm_atomic_state_put(state);
		state = ERR_PTR(err);
		goto unlock;
	}

unlock:
	DRM_MODESET_LOCK_ALL_END(dev, ctx, err);
	if (err)
		return ERR_PTR(err);

	return state;
}
EXPORT_SYMBOL(drm_atomic_helper_suspend);

/**
 * drm_atomic_helper_commit_duplicated_state - commit duplicated state
 * @state: duplicated atomic state to commit
 * @ctx: lock acquisition context to use for the commit
 *
 * The state returned by drm_atomic_helper_duplicate_state() and
 * drm_atomic_helper_suspend() is partially invalid, and needs to
 * be fixed up before commit.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 *
 * See also:
 * drm_atomic_helper_suspend()
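 *
 * A minimal sketch of restoring a previously duplicated state, essentially
 * what drm_atomic_helper_resume() does (dev and state are assumed to be
 * provided by the caller):
 *
 * .. code-block:: c
 *
 *	struct drm_modeset_acquire_ctx ctx;
 *	int ret;
 *
 *	DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, ret);
 *	ret = drm_atomic_helper_commit_duplicated_state(state, &ctx);
 *	DRM_MODESET_LOCK_ALL_END(dev, ctx, ret);
 *	drm_atomic_state_put(state);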
 */
int drm_atomic_helper_commit_duplicated_state(struct drm_atomic_state *state,
					      struct drm_modeset_acquire_ctx *ctx)
{
	int i, ret;
	struct drm_plane *plane;
	struct drm_plane_state *new_plane_state;
	struct drm_connector *connector;
	struct drm_connector_state *new_conn_state;
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_crtc_state;

	state->acquire_ctx = ctx;

	for_each_new_plane_in_state(state, plane, new_plane_state, i)
		state->planes[i].old_state = plane->state;

	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i)
		state->crtcs[i].old_state = crtc->state;

	for_each_new_connector_in_state(state, connector, new_conn_state, i)
		state->connectors[i].old_state = connector->state;

	ret = drm_atomic_commit(state);

	state->acquire_ctx = NULL;

	return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_commit_duplicated_state);

/**
 * drm_atomic_helper_resume - subsystem-level resume helper
 * @dev: DRM device
 * @state: atomic state to resume to
 *
 * Calls drm_mode_config_reset() to synchronize hardware and software states,
 * grabs all modeset locks and commits the atomic state object. This can be
 * used in conjunction with the drm_atomic_helper_suspend() helper to
 * implement suspend/resume for drivers that support atomic mode-setting.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 *
 * See also:
 * drm_atomic_helper_suspend()
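 *
 * A minimal sketch of the matching resume hook, mirroring the suspend example
 * above (foo_device and its members are made up for illustration):
 *
 * .. code-block:: c
 *
 *	static int foo_pm_resume(struct device *dev)
 *	{
 *		struct foo_device *foo = dev_get_drvdata(dev);
 *
 *		return drm_atomic_helper_resume(&foo->drm, foo->suspend_state);
 *	}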
 */
int drm_atomic_helper_resume(struct drm_device *dev,
			     struct drm_atomic_state *state)
{
	struct drm_modeset_acquire_ctx ctx;
	int err;

	drm_mode_config_reset(dev);

	DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, err);

	err = drm_atomic_helper_commit_duplicated_state(state, &ctx);

	DRM_MODESET_LOCK_ALL_END(dev, ctx, err);
	drm_atomic_state_put(state);

	return err;
}
EXPORT_SYMBOL(drm_atomic_helper_resume);

static int page_flip_common(struct drm_atomic_state *state,
			    struct drm_crtc *crtc,
			    struct drm_framebuffer *fb,
			    struct drm_pending_vblank_event *event,
			    uint32_t flags)
{
	struct drm_plane *plane = crtc->primary;
	struct drm_plane_state *plane_state;
	struct drm_crtc_state *crtc_state;
	int ret = 0;

	crtc_state = drm_atomic_get_crtc_state(state, crtc);
	if (IS_ERR(crtc_state))
		return PTR_ERR(crtc_state);

	crtc_state->event = event;
	crtc_state->async_flip = flags & DRM_MODE_PAGE_FLIP_ASYNC;

	plane_state = drm_atomic_get_plane_state(state, plane);
	if (IS_ERR(plane_state))
		return PTR_ERR(plane_state);

	ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
	if (ret != 0)
		return ret;
	drm_atomic_set_fb_for_plane(plane_state, fb);

	/* Make sure we don't accidentally do a full modeset. */
	state->allow_modeset = false;
	if (!crtc_state->active) {
		DRM_DEBUG_ATOMIC("[CRTC:%d:%s] disabled, rejecting legacy flip\n",
				 crtc->base.id, crtc->name);
		return -EINVAL;
	}

	return ret;
}

/**
 * drm_atomic_helper_page_flip - execute a legacy page flip
 * @crtc: DRM CRTC
 * @fb: DRM framebuffer
 * @event: optional DRM event to signal upon completion
 * @flags: flip flags for non-vblank sync'ed updates
 * @ctx: lock acquisition context
 *
 * Provides a default &drm_crtc_funcs.page_flip implementation
 * using the atomic driver interface.
 *
 * Returns:
 * Returns 0 on success, negative errno numbers on failure.
 *
 * See also:
 * drm_atomic_helper_page_flip_target()
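 *
 * A typical way to hook this up, assuming the driver otherwise uses the
 * atomic helpers for its CRTCs (foo_crtc_funcs is a made-up name and other
 * callbacks are omitted):
 *
 * .. code-block:: c
 *
 *	static const struct drm_crtc_funcs foo_crtc_funcs = {
 *		.set_config = drm_atomic_helper_set_config,
 *		.page_flip = drm_atomic_helper_page_flip,
 *		.reset = drm_atomic_helper_crtc_reset,
 *		.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
 *		.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
 *	};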
 */
int drm_atomic_helper_page_flip(struct drm_crtc *crtc,
				struct drm_framebuffer *fb,
				struct drm_pending_vblank_event *event,
				uint32_t flags,
				struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_plane *plane = crtc->primary;
	struct drm_atomic_state *state;
	int ret = 0;

	state = drm_atomic_state_alloc(plane->dev);
	if (!state)
		return -ENOMEM;

	state->acquire_ctx = ctx;

	ret = page_flip_common(state, crtc, fb, event, flags);
	if (ret != 0)
		goto fail;

	ret = drm_atomic_nonblocking_commit(state);
fail:
	drm_atomic_state_put(state);
	return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_page_flip);

/**
 * drm_atomic_helper_page_flip_target - do a page flip on a target vblank period
 * @crtc: DRM CRTC
 * @fb: DRM framebuffer
 * @event: optional DRM event to signal upon completion
 * @flags: flip flags for non-vblank sync'ed updates
 * @target: target vblank period when the flip should take effect
 * @ctx: lock acquisition context
 *
 * Provides a default &drm_crtc_funcs.page_flip_target implementation.
 * Similar to drm_atomic_helper_page_flip(), but with an extra parameter
 * specifying the target vblank period in which the flip should take effect.
 *
 * Returns:
 * Returns 0 on success, negative errno numbers on failure.
 */
int drm_atomic_helper_page_flip_target(struct drm_crtc *crtc,
				       struct drm_framebuffer *fb,
				       struct drm_pending_vblank_event *event,
				       uint32_t flags,
				       uint32_t target,
				       struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_plane *plane = crtc->primary;
	struct drm_atomic_state *state;
	struct drm_crtc_state *crtc_state;
	int ret = 0;

	state = drm_atomic_state_alloc(plane->dev);
	if (!state)
		return -ENOMEM;

	state->acquire_ctx = ctx;

	ret = page_flip_common(state, crtc, fb, event, flags);
	if (ret != 0)
		goto fail;

	crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
	if (WARN_ON(!crtc_state)) {
		ret = -EINVAL;
		goto fail;
	}
	crtc_state->target_vblank = target;

	ret = drm_atomic_nonblocking_commit(state);
fail:
	drm_atomic_state_put(state);
	return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_page_flip_target);

/**
 * drm_atomic_helper_legacy_gamma_set - set the legacy gamma correction table
 * @crtc: CRTC object
 * @red: red correction table
 * @green: green correction table
 * @blue: blue correction table
 * @size: size of the tables
 * @ctx: lock acquire context
 *
 * Implements support for legacy gamma correction table for drivers
 * that support color management through the DEGAMMA_LUT/GAMMA_LUT
 * properties. See drm_crtc_enable_color_mgmt() and the containing chapter for
 * how the atomic color management and gamma tables work.
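 *
 * A minimal sketch of wiring this up at CRTC initialization time, assuming a
 * 256-entry legacy gamma ramp (foo_crtc_funcs is a made-up name and error
 * handling is omitted):
 *
 * .. code-block:: c
 *
 *	static const struct drm_crtc_funcs foo_crtc_funcs = {
 *		.gamma_set = drm_atomic_helper_legacy_gamma_set,
 *		...
 *	};
 *
 *	drm_mode_crtc_set_gamma_size(crtc, 256);
 *	drm_crtc_enable_color_mgmt(crtc, 256, true, 256);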
 */
int drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
				       u16 *red, u16 *green, u16 *blue,
				       uint32_t size,
				       struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_device *dev = crtc->dev;
	struct drm_atomic_state *state;
	struct drm_crtc_state *crtc_state;
	struct drm_property_blob *blob = NULL;
	struct drm_color_lut *blob_data;
	int i, ret = 0;
	bool replaced;

	state = drm_atomic_state_alloc(crtc->dev);
	if (!state)
		return -ENOMEM;

	blob = drm_property_create_blob(dev,
					sizeof(struct drm_color_lut) * size,
					NULL);
	if (IS_ERR(blob)) {
		ret = PTR_ERR(blob);
		blob = NULL;
		goto fail;
	}

	/* Prepare GAMMA_LUT with the legacy values. */
	blob_data = blob->data;
	for (i = 0; i < size; i++) {
		blob_data[i].red = red[i];
		blob_data[i].green = green[i];
		blob_data[i].blue = blue[i];
	}

	state->acquire_ctx = ctx;
	crtc_state = drm_atomic_get_crtc_state(state, crtc);
	if (IS_ERR(crtc_state)) {
		ret = PTR_ERR(crtc_state);
		goto fail;
	}

	/* Reset DEGAMMA_LUT and CTM properties. */
	replaced  = drm_property_replace_blob(&crtc_state->degamma_lut, NULL);
	replaced |= drm_property_replace_blob(&crtc_state->ctm, NULL);
	replaced |= drm_property_replace_blob(&crtc_state->gamma_lut, blob);
	crtc_state->color_mgmt_changed |= replaced;

	ret = drm_atomic_commit(state);

fail:
	drm_atomic_state_put(state);
	drm_property_blob_put(blob);
	return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_legacy_gamma_set);

/**
 * drm_atomic_helper_bridge_propagate_bus_fmt() - Propagate output format to
 *						  the input end of a bridge
 * @bridge: bridge control structure
 * @bridge_state: new bridge state
 * @crtc_state: new CRTC state
 * @conn_state: new connector state
 * @output_fmt: tested output bus format
 * @num_input_fmts: will contain the size of the returned array
 *
 * This helper is a pluggable implementation of the
 * &drm_bridge_funcs.atomic_get_input_bus_fmts operation for bridges that don't
 * modify the bus configuration between their input and their output. It
 * returns an array of input formats with a single element set to @output_fmt.
 *
 * RETURNS:
 * a valid format array of size @num_input_fmts, or NULL if the allocation
 * failed
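 *
 * A typical way to use this helper in a bridge driver that does not change
 * the bus format between its input and output (foo_bridge_funcs is a made-up
 * name and other callbacks are omitted):
 *
 * .. code-block:: c
 *
 *	static const struct drm_bridge_funcs foo_bridge_funcs = {
 *		.atomic_reset = drm_atomic_helper_bridge_reset,
 *		.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
 *		.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
 *		.atomic_get_input_bus_fmts = drm_atomic_helper_bridge_propagate_bus_fmt,
 *	};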
 */
u32 *
drm_atomic_helper_bridge_propagate_bus_fmt(struct drm_bridge *bridge,
					struct drm_bridge_state *bridge_state,
					struct drm_crtc_state *crtc_state,
					struct drm_connector_state *conn_state,
					u32 output_fmt,
					unsigned int *num_input_fmts)
{
	u32 *input_fmts;

	input_fmts = kzalloc(sizeof(*input_fmts), GFP_KERNEL);
	if (!input_fmts) {
		*num_input_fmts = 0;
		return NULL;
	}

	*num_input_fmts = 1;
	input_fmts[0] = output_fmt;
	return input_fmts;
}
EXPORT_SYMBOL(drm_atomic_helper_bridge_propagate_bus_fmt);
