1 /*
2  * Copyright (C) 2014 Red Hat
3  * Copyright (C) 2014 Intel Corp.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be included in
13  * all copies or substantial portions of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21  * OTHER DEALINGS IN THE SOFTWARE.
22  *
23  * Authors:
24  * Rob Clark <robdclark@gmail.com>
25  * Daniel Vetter <daniel.vetter@ffwll.ch>
26  */
27 
28 #include <linux/dma-fence.h>
29 #include <linux/ktime.h>
30 
31 #include <drm/drm_atomic.h>
32 #include <drm/drm_atomic_helper.h>
33 #include <drm/drm_atomic_uapi.h>
34 #include <drm/drm_bridge.h>
35 #include <drm/drm_damage_helper.h>
36 #include <drm/drm_device.h>
37 #include <drm/drm_drv.h>
38 #include <drm/drm_plane_helper.h>
39 #include <drm/drm_print.h>
40 #include <drm/drm_self_refresh_helper.h>
41 #include <drm/drm_vblank.h>
42 #include <drm/drm_writeback.h>
43 
44 #include "drm_crtc_helper_internal.h"
45 #include "drm_crtc_internal.h"
46 
47 /**
48  * DOC: overview
49  *
50  * This helper library provides implementations of check and commit functions on
51  * top of the CRTC modeset helper callbacks and the plane helper callbacks. It
52  * also provides convenience implementations for the atomic state handling
53  * callbacks for drivers which don't need to subclass the drm core structures to
54  * add their own additional internal state.
55  *
56  * This library also provides default implementations for the check callback in
57  * drm_atomic_helper_check() and for the commit callback with
58  * drm_atomic_helper_commit(). But the individual stages and callbacks are
59  * exposed to allow drivers to mix and match and e.g. use the plane helpers only
60  * together with a driver private modeset implementation.
61  *
62  * This library also provides implementations for all the legacy driver
63  * interfaces on top of the atomic interface. See drm_atomic_helper_set_config(),
64  * drm_atomic_helper_disable_plane() and the
65  * various functions to implement set_property callbacks. New drivers must not
66  * implement these functions themselves but must use the provided helpers.
67  *
68  * The atomic helper uses the same function table structures as all other
69  * modesetting helpers. See the documentation for &struct drm_crtc_helper_funcs,
70  * &struct drm_encoder_helper_funcs and &struct drm_connector_helper_funcs. It
71  * also shares the &struct drm_plane_helper_funcs function table with the plane
72  * helpers.
73  */
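
/*
 * A minimal sketch (not taken from this file) of how a driver typically wires
 * the default implementations mentioned above into its
 * &struct drm_mode_config_funcs. The foo_ prefix is hypothetical and the
 * driver-specific .fb_create hook is omitted.
 */
static const struct drm_mode_config_funcs foo_example_mode_config_funcs __maybe_unused = {
    .atomic_check = drm_atomic_helper_check,
    .atomic_commit = drm_atomic_helper_commit,
};
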
74 static void drm_atomic_helper_plane_changed(struct drm_atomic_state *state, struct drm_plane_state *old_plane_state,
75                                             struct drm_plane_state *plane_state, struct drm_plane *plane)
76 {
77     struct drm_crtc_state *crtc_state;
78 
79     if (old_plane_state->crtc) {
80         crtc_state = drm_atomic_get_new_crtc_state(state, old_plane_state->crtc);
81         if (WARN_ON(!crtc_state)) {
82             return;
83         }
84 
85         crtc_state->planes_changed = true;
86     }
87 
88     if (plane_state->crtc) {
89         crtc_state = drm_atomic_get_new_crtc_state(state, plane_state->crtc);
90         if (WARN_ON(!crtc_state)) {
91             return;
92         }
93 
94         crtc_state->planes_changed = true;
95     }
96 }
97 
98 static int handle_conflicting_encoders(struct drm_atomic_state *state, bool disable_conflicting_encoders)
99 {
100     struct drm_connector_state *new_conn_state;
101     struct drm_connector *connector;
102     struct drm_connector_list_iter conn_iter;
103     struct drm_encoder *encoder;
104     unsigned encoder_mask = 0;
105     int i, ret = 0;
106 
107     /*
108      * First loop, find all newly assigned encoders from the connectors
109      * part of the state. If the same encoder is assigned to multiple
110      * connectors bail out.
111      */
112     for_each_new_connector_in_state(state, connector, new_conn_state, i)
113     {
114         const struct drm_connector_helper_funcs *funcs = connector->helper_private;
115         struct drm_encoder *new_encoder;
116 
117         if (!new_conn_state->crtc) {
118             continue;
119         }
120 
121         if (funcs->atomic_best_encoder) {
122             new_encoder = funcs->atomic_best_encoder(connector, new_conn_state);
123         } else if (funcs->best_encoder) {
124             new_encoder = funcs->best_encoder(connector);
125         } else {
126             new_encoder = drm_connector_get_single_encoder(connector);
127         }
128 
129         if (new_encoder) {
130             if (encoder_mask & drm_encoder_mask(new_encoder)) {
131                 DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] on [CONNECTOR:%d:%s] already assigned\n", new_encoder->base.id,
132                                  new_encoder->name, connector->base.id, connector->name);
133 
134                 return -EINVAL;
135             }
136 
137             encoder_mask |= drm_encoder_mask(new_encoder);
138         }
139     }
140 
141     if (!encoder_mask) {
142         return 0;
143     }
144 
145     /*
146      * Second loop, iterate over all connectors not part of the state.
147      *
148      * If a conflicting encoder is found and disable_conflicting_encoders
149      * is not set, an error is returned. Userspace can provide a solution
150      * through the atomic ioctl.
151      *
152      * If the flag is set conflicting connectors are removed from the CRTC
153      * and the CRTC is disabled if no encoder is left. This preserves
154      * compatibility with the legacy set_config behavior.
155      */
156     drm_connector_list_iter_begin(state->dev, &conn_iter);
157     drm_for_each_connector_iter(connector, &conn_iter)
158     {
159         struct drm_crtc_state *crtc_state;
160 
161         if (drm_atomic_get_new_connector_state(state, connector)) {
162             continue;
163         }
164 
165         encoder = connector->state->best_encoder;
166         if (!encoder || !(encoder_mask & drm_encoder_mask(encoder))) {
167             continue;
168         }
169 
170         if (!disable_conflicting_encoders) {
171             DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] in use on [CRTC:%d:%s] by [CONNECTOR:%d:%s]\n", encoder->base.id,
172                              encoder->name, connector->state->crtc->base.id, connector->state->crtc->name,
173                              connector->base.id, connector->name);
174             ret = -EINVAL;
175             goto out;
176         }
177 
178         new_conn_state = drm_atomic_get_connector_state(state, connector);
179         if (IS_ERR(new_conn_state)) {
180             ret = PTR_ERR(new_conn_state);
181             goto out;
182         }
183 
184         DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] in use on [CRTC:%d:%s], disabling [CONNECTOR:%d:%s]\n", encoder->base.id,
185                          encoder->name, new_conn_state->crtc->base.id, new_conn_state->crtc->name, connector->base.id,
186                          connector->name);
187 
188         crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);
189 
190         ret = drm_atomic_set_crtc_for_connector(new_conn_state, NULL);
191         if (ret) {
192             goto out;
193         }
194 
195         if (!crtc_state->connector_mask) {
196             ret = drm_atomic_set_mode_prop_for_crtc(crtc_state, NULL);
197             if (ret < 0) {
198                 goto out;
199             }
200 
201             crtc_state->active = false;
202         }
203     }
204 out:
205     drm_connector_list_iter_end(&conn_iter);
206 
207     return ret;
208 }
209 
210 static void set_best_encoder(struct drm_atomic_state *state, struct drm_connector_state *conn_state,
211                              struct drm_encoder *encoder)
212 {
213     struct drm_crtc_state *crtc_state;
214     struct drm_crtc *crtc;
215 
216     if (conn_state->best_encoder) {
217         /* Unset the encoder_mask in the old crtc state. */
218         crtc = conn_state->connector->state->crtc;
219 
220         /* A NULL crtc is an error here because we should have
221          * duplicated a NULL best_encoder when crtc was NULL.
222          * As an exception restoring duplicated atomic state
223          * during resume is allowed, so don't warn when
224          * best_encoder is equal to encoder we intend to set.
225          */
226         WARN_ON(!crtc && encoder != conn_state->best_encoder);
227         if (crtc) {
228             crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
229 
230             crtc_state->encoder_mask &= ~drm_encoder_mask(conn_state->best_encoder);
231         }
232     }
233 
234     if (encoder) {
235         crtc = conn_state->crtc;
236         WARN_ON(!crtc);
237         if (crtc) {
238             crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
239 
240             crtc_state->encoder_mask |= drm_encoder_mask(encoder);
241         }
242     }
243 
244     conn_state->best_encoder = encoder;
245 }
246 
247 static void steal_encoder(struct drm_atomic_state *state, struct drm_encoder *encoder)
248 {
249     struct drm_crtc_state *crtc_state;
250     struct drm_connector *connector;
251     struct drm_connector_state *old_connector_state, *new_connector_state;
252     int i;
253 
254     for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i)
255     {
256         struct drm_crtc *encoder_crtc;
257 
258         if (new_connector_state->best_encoder != encoder) {
259             continue;
260         }
261 
262         encoder_crtc = old_connector_state->crtc;
263 
264         DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] in use on [CRTC:%d:%s], stealing it\n", encoder->base.id, encoder->name,
265                          encoder_crtc->base.id, encoder_crtc->name);
266 
267         set_best_encoder(state, new_connector_state, NULL);
268 
269         crtc_state = drm_atomic_get_new_crtc_state(state, encoder_crtc);
270         crtc_state->connectors_changed = true;
271 
272         return;
273     }
274 }
275 
276 static int update_connector_routing(struct drm_atomic_state *state, struct drm_connector *connector,
277                                     struct drm_connector_state *old_connector_state,
278                                     struct drm_connector_state *new_connector_state)
279 {
280     const struct drm_connector_helper_funcs *funcs;
281     struct drm_encoder *new_encoder;
282     struct drm_crtc_state *crtc_state;
283 
284     DRM_DEBUG_ATOMIC("Updating routing for [CONNECTOR:%d:%s]\n", connector->base.id, connector->name);
285 
286     if (old_connector_state->crtc != new_connector_state->crtc) {
287         if (old_connector_state->crtc) {
288             crtc_state = drm_atomic_get_new_crtc_state(state, old_connector_state->crtc);
289             if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) {
290                 crtc_state->connectors_changed = true;
291             }
292         }
293 
294         if (new_connector_state->crtc) {
295             crtc_state = drm_atomic_get_new_crtc_state(state, new_connector_state->crtc);
296             if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) {
297                 crtc_state->connectors_changed = true;
298             }
299         }
300     }
301 
302     if (!new_connector_state->crtc) {
303         DRM_DEBUG_ATOMIC("Disabling [CONNECTOR:%d:%s]\n", connector->base.id, connector->name);
304 
305         set_best_encoder(state, new_connector_state, NULL);
306 
307         return 0;
308     }
309 
310     crtc_state = drm_atomic_get_new_crtc_state(state, new_connector_state->crtc);
311     /*
312      * For compatibility with legacy users, we want to make sure that
313      * we allow DPMS On->Off modesets on unregistered connectors. Modesets
314      * which would result in anything else must be considered invalid, to
315      * avoid turning on new displays on dead connectors.
316      *
317      * Since the connector can be unregistered at any point during an
318      * atomic check or commit, this is racy. But that's OK: all we care
319      * about is ensuring that userspace can't do anything but shut off the
320      * display on a connector that was destroyed after it's been notified,
321      * not before.
322      *
323      * Additionally, we also want to ignore connector registration when
324      * we're trying to restore an atomic state during system resume since
325      * there's a chance the connector may have been destroyed during the
326      * process, but it's better to ignore that than cause
327      * drm_atomic_helper_resume() to fail.
328      */
329     if (!state->duplicated && drm_connector_is_unregistered(connector) && crtc_state->active) {
330         DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] is not registered\n", connector->base.id, connector->name);
331         return -EINVAL;
332     }
333 
334     funcs = connector->helper_private;
335 
336     if (funcs->atomic_best_encoder) {
337         new_encoder = funcs->atomic_best_encoder(connector, new_connector_state);
338     } else if (funcs->best_encoder) {
339         new_encoder = funcs->best_encoder(connector);
340     } else {
341         new_encoder = drm_connector_get_single_encoder(connector);
342     }
343 
344     if (!new_encoder) {
345         DRM_DEBUG_ATOMIC("No suitable encoder found for [CONNECTOR:%d:%s]\n", connector->base.id, connector->name);
346         return -EINVAL;
347     }
348 
349     if (!drm_encoder_crtc_ok(new_encoder, new_connector_state->crtc)) {
350         DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] incompatible with [CRTC:%d:%s]\n", new_encoder->base.id, new_encoder->name,
351                          new_connector_state->crtc->base.id, new_connector_state->crtc->name);
352         return -EINVAL;
353     }
354 
355     if (new_encoder == new_connector_state->best_encoder) {
356         set_best_encoder(state, new_connector_state, new_encoder);
357 
358         DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] keeps [ENCODER:%d:%s], now on [CRTC:%d:%s]\n", connector->base.id,
359                          connector->name, new_encoder->base.id, new_encoder->name, new_connector_state->crtc->base.id,
360                          new_connector_state->crtc->name);
361 
362         return 0;
363     }
364 
365     steal_encoder(state, new_encoder);
366 
367     set_best_encoder(state, new_connector_state, new_encoder);
368 
369     if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) {
370         crtc_state->connectors_changed = true;
371     }
372 
373     DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] using [ENCODER:%d:%s] on [CRTC:%d:%s]\n", connector->base.id, connector->name,
374                      new_encoder->base.id, new_encoder->name, new_connector_state->crtc->base.id,
375                      new_connector_state->crtc->name);
376 
377     return 0;
378 }
379 
380 static int mode_fixup(struct drm_atomic_state *state)
381 {
382     struct drm_crtc *crtc;
383     struct drm_crtc_state *new_crtc_state;
384     struct drm_connector *connector;
385     struct drm_connector_state *new_conn_state;
386     int i;
387     int ret;
388 
389     for_each_new_crtc_in_state(state, crtc, new_crtc_state, i)
390     {
391         if (!new_crtc_state->mode_changed && !new_crtc_state->connectors_changed) {
392             continue;
393         }
394 
395         drm_mode_copy(&new_crtc_state->adjusted_mode, &new_crtc_state->mode);
396     }
397 
398     for_each_new_connector_in_state(state, connector, new_conn_state, i)
399     {
400         const struct drm_encoder_helper_funcs *funcs;
401         struct drm_encoder *encoder;
402         struct drm_bridge *bridge;
403 
404         WARN_ON(!!new_conn_state->best_encoder != !!new_conn_state->crtc);
405 
406         if (!new_conn_state->crtc || !new_conn_state->best_encoder) {
407             continue;
408         }
409 
410         new_crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);
411 
412         /*
413          * Each encoder has at most one connector (since we always steal
414          * it away), so we won't call ->mode_fixup twice.
415          */
416         encoder = new_conn_state->best_encoder;
417         funcs = encoder->helper_private;
418 
419         bridge = drm_bridge_chain_get_first_bridge(encoder);
420         ret = drm_atomic_bridge_chain_check(bridge, new_crtc_state, new_conn_state);
421         if (ret) {
422             DRM_DEBUG_ATOMIC("Bridge atomic check failed\n");
423             return ret;
424         }
425 
426         if (funcs && funcs->atomic_check) {
427             ret = funcs->atomic_check(encoder, new_crtc_state, new_conn_state);
428             if (ret) {
429                 DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] check failed\n", encoder->base.id, encoder->name);
430                 return ret;
431             }
432         } else if (funcs && funcs->mode_fixup) {
433             ret = funcs->mode_fixup(encoder, &new_crtc_state->mode, &new_crtc_state->adjusted_mode);
434             if (!ret) {
435                 DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] fixup failed\n", encoder->base.id, encoder->name);
436                 return -EINVAL;
437             }
438         }
439     }
440 
441     for_each_new_crtc_in_state(state, crtc, new_crtc_state, i)
442     {
443         const struct drm_crtc_helper_funcs *funcs;
444 
445         if (!new_crtc_state->enable) {
446             continue;
447         }
448 
449         if (!new_crtc_state->mode_changed && !new_crtc_state->connectors_changed) {
450             continue;
451         }
452 
453         funcs = crtc->helper_private;
454         if (!funcs || !funcs->mode_fixup) {
455             continue;
456         }
457 
458         ret = funcs->mode_fixup(crtc, &new_crtc_state->mode, &new_crtc_state->adjusted_mode);
459         if (!ret) {
460             DRM_DEBUG_ATOMIC("[CRTC:%d:%s] fixup failed\n", crtc->base.id, crtc->name);
461             return -EINVAL;
462         }
463     }
464 
465     return 0;
466 }
467 
468 static enum drm_mode_status mode_valid_path(struct drm_connector *connector, struct drm_encoder *encoder,
469                                             struct drm_crtc *crtc, const struct drm_display_mode *mode)
470 {
471     struct drm_bridge *bridge;
472     enum drm_mode_status ret;
473 
474     ret = drm_encoder_mode_valid(encoder, mode);
475     if (ret != MODE_OK) {
476         DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] mode_valid() failed\n", encoder->base.id, encoder->name);
477         return ret;
478     }
479 
480     bridge = drm_bridge_chain_get_first_bridge(encoder);
481     ret = drm_bridge_chain_mode_valid(bridge, &connector->display_info, mode);
482     if (ret != MODE_OK) {
483         DRM_DEBUG_ATOMIC("[BRIDGE] mode_valid() failed\n");
484         return ret;
485     }
486 
487     ret = drm_crtc_mode_valid(crtc, mode);
488     if (ret != MODE_OK) {
489         DRM_DEBUG_ATOMIC("[CRTC:%d:%s] mode_valid() failed\n", crtc->base.id, crtc->name);
490         return ret;
491     }
492 
493     return ret;
494 }
495 
496 static int mode_valid(struct drm_atomic_state *state)
497 {
498     struct drm_connector_state *conn_state;
499     struct drm_connector *connector;
500     int i;
501 
502     for_each_new_connector_in_state(state, connector, conn_state, i)
503     {
504         struct drm_encoder *encoder = conn_state->best_encoder;
505         struct drm_crtc *crtc = conn_state->crtc;
506         struct drm_crtc_state *crtc_state;
507         enum drm_mode_status mode_status;
508         const struct drm_display_mode *mode;
509 
510         if (!crtc || !encoder) {
511             continue;
512         }
513 
514         crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
515         if (!crtc_state) {
516             continue;
517         }
518         if (!crtc_state->mode_changed && !crtc_state->connectors_changed) {
519             continue;
520         }
521 
522         mode = &crtc_state->mode;
523 
524         mode_status = mode_valid_path(connector, encoder, crtc, mode);
525         if (mode_status != MODE_OK) {
526             return -EINVAL;
527         }
528     }
529 
530     return 0;
531 }
532 
533 /**
534  * drm_atomic_helper_check_modeset - validate state object for modeset changes
535  * @dev: DRM device
536  * @state: the driver state object
537  *
538  * Check the state object to see if the requested state is physically possible.
539  * This does all the CRTC and connector related computations for an atomic
540  * update and adds any additional connectors needed for full modesets. It calls
541  * the various per-object callbacks in the following order:
542  *
543  * 1. &drm_connector_helper_funcs.atomic_best_encoder for determining the new encoder.
544  * 2. &drm_connector_helper_funcs.atomic_check to validate the connector state.
545  * 3. If it's determined a modeset is needed then all connectors on the affected
546  *    CRTC are added and &drm_connector_helper_funcs.atomic_check is run on them.
547  * 4. &drm_encoder_helper_funcs.mode_valid, &drm_bridge_funcs.mode_valid and
548  *    &drm_crtc_helper_funcs.mode_valid are called on the affected components.
549  * 5. &drm_bridge_funcs.mode_fixup is called on all encoder bridges.
550  * 6. &drm_encoder_helper_funcs.atomic_check is called to validate any encoder state.
551  *    This function is only called when the encoder will be part of a configured CRTC,
552  *    it must not be used for implementing connector property validation.
553  *    If this function is NULL, &drm_encoder_helper_funcs.mode_fixup is called
554  *    instead.
555  * 7. &drm_crtc_helper_funcs.mode_fixup is called last, to fix up the mode with CRTC constraints.
556  *
557  * &drm_crtc_state.mode_changed is set when the input mode is changed.
558  * &drm_crtc_state.connectors_changed is set when a connector is added or
559  * removed from the CRTC.  &drm_crtc_state.active_changed is set when
560  * &drm_crtc_state.active changes, which is used for DPMS.
561  * &drm_crtc_state.no_vblank is set from the result of drm_dev_has_vblank().
562  * See also: drm_atomic_crtc_needs_modeset()
563  *
564  * IMPORTANT:
565  *
566  * Drivers which set &drm_crtc_state.mode_changed (e.g. in their
567  * &drm_plane_helper_funcs.atomic_check hooks if a plane update can't be done
568  * without a full modeset) _must_ call this function again after that
569  * change. It is permitted to call this function multiple times for the same
570  * update, e.g. when the &drm_crtc_helper_funcs.atomic_check functions depend
571  * upon the adjusted dotclock for fifo space allocation and watermark
572  * computation.
573  *
574  * RETURNS:
575  * Zero for success or -errno
576  */
577 int drm_atomic_helper_check_modeset(struct drm_device *dev, struct drm_atomic_state *state)
578 {
579     struct drm_crtc *crtc;
580     struct drm_crtc_state *old_crtc_state, *new_crtc_state;
581     struct drm_connector *connector;
582     struct drm_connector_state *old_connector_state, *new_connector_state;
583     int i, ret;
584     unsigned connectors_mask = 0;
585 
586     for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i)
587     {
588         bool has_connectors = !!new_crtc_state->connector_mask;
589 
590         WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
591 
592         if (!drm_mode_equal(&old_crtc_state->mode, &new_crtc_state->mode)) {
593             DRM_DEBUG_ATOMIC("[CRTC:%d:%s] mode changed\n", crtc->base.id, crtc->name);
594             new_crtc_state->mode_changed = true;
595         }
596 
597         if (old_crtc_state->enable != new_crtc_state->enable) {
598             DRM_DEBUG_ATOMIC("[CRTC:%d:%s] enable changed\n", crtc->base.id, crtc->name);
599 
600             /*
601              * For clarity this assignment is done here, but
602              * enable == 0 is only true when there are no
603              * connectors and a NULL mode.
604              *
605              * The other way around is true as well. enable != 0
606              * iff connectors are attached and a mode is set.
607              */
608             new_crtc_state->mode_changed = true;
609             new_crtc_state->connectors_changed = true;
610         }
611 
612         if (old_crtc_state->active != new_crtc_state->active) {
613             DRM_DEBUG_ATOMIC("[CRTC:%d:%s] active changed\n", crtc->base.id, crtc->name);
614             new_crtc_state->active_changed = true;
615         }
616 
617         if (new_crtc_state->enable != has_connectors) {
618             DRM_DEBUG_ATOMIC("[CRTC:%d:%s] enabled/connectors mismatch\n", crtc->base.id, crtc->name);
619 
620             return -EINVAL;
621         }
622 
623         if (drm_dev_has_vblank(dev)) {
624             new_crtc_state->no_vblank = false;
625         } else {
626             new_crtc_state->no_vblank = true;
627         }
628     }
629 
630     ret = handle_conflicting_encoders(state, false);
631     if (ret) {
632         return ret;
633     }
634 
635     for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i)
636     {
637         const struct drm_connector_helper_funcs *funcs = connector->helper_private;
638 
639         WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
640 
641         /*
642          * This only sets crtc->connectors_changed for routing changes,
643          * drivers must set crtc->connectors_changed themselves when
644          * connector properties need to be updated.
645          */
646         ret = update_connector_routing(state, connector, old_connector_state, new_connector_state);
647         if (ret) {
648             return ret;
649         }
650         if (old_connector_state->crtc) {
651             new_crtc_state = drm_atomic_get_new_crtc_state(state, old_connector_state->crtc);
652             if (old_connector_state->link_status != new_connector_state->link_status) {
653                 new_crtc_state->connectors_changed = true;
654             }
655 
656             if (old_connector_state->max_requested_bpc != new_connector_state->max_requested_bpc) {
657                 new_crtc_state->connectors_changed = true;
658             }
659         }
660 
661         if (funcs->atomic_check) {
662             ret = funcs->atomic_check(connector, state);
663         }
664         if (ret) {
665             return ret;
666         }
667 
668         connectors_mask |= BIT(i);
669     }
670 
671     /*
672      * After all the routing has been prepared we need to add in any
673      * connector which is itself unchanged, but whose CRTC changes its
674      * configuration. This must be done before calling mode_fixup in case a
675      * crtc only changed its mode but has the same set of connectors.
676      */
677     for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i)
678     {
679         if (!drm_atomic_crtc_needs_modeset(new_crtc_state)) {
680             continue;
681         }
682 
683         DRM_DEBUG_ATOMIC("[CRTC:%d:%s] needs all connectors, enable: %c, active: %c\n", crtc->base.id, crtc->name,
684                          new_crtc_state->enable ? 'y' : 'n', new_crtc_state->active ? 'y' : 'n');
685 
686         ret = drm_atomic_add_affected_connectors(state, crtc);
687         if (ret != 0) {
688             return ret;
689         }
690 
691         ret = drm_atomic_add_affected_planes(state, crtc);
692         if (ret != 0) {
693             return ret;
694         }
695     }
696 
697     /*
698      * Iterate over all connectors again, to make sure atomic_check()
699      * has been called on them when a modeset is forced.
700      */
701     for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i)
702     {
703         const struct drm_connector_helper_funcs *funcs = connector->helper_private;
704 
705         if (connectors_mask & BIT(i)) {
706             continue;
707         }
708 
709         if (funcs->atomic_check) {
710             ret = funcs->atomic_check(connector, state);
711         }
712         if (ret) {
713             return ret;
714         }
715     }
716 
717     /*
718      * Iterate over all connectors again, and add all affected bridges to
719      * the state.
720      */
721     for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i)
722     {
723         struct drm_encoder *encoder;
724 
725         encoder = old_connector_state->best_encoder;
726         ret = drm_atomic_add_encoder_bridges(state, encoder);
727         if (ret) {
728             return ret;
729         }
730 
731         encoder = new_connector_state->best_encoder;
732         ret = drm_atomic_add_encoder_bridges(state, encoder);
733         if (ret) {
734             return ret;
735         }
736     }
737 
738     ret = mode_valid(state);
739     if (ret) {
740         return ret;
741     }
742 
743     return mode_fixup(state);
744 }
745 EXPORT_SYMBOL(drm_atomic_helper_check_modeset);
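
/*
 * A minimal sketch of the pattern described in the kernel-doc above: a
 * driver-private &drm_mode_config_funcs.atomic_check whose plane checks may
 * force a full modeset and which therefore re-runs
 * drm_atomic_helper_check_modeset() afterwards. The foo_ name is hypothetical
 * and the driver-private checks are elided.
 */
static int __maybe_unused foo_example_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
{
    int ret;

    ret = drm_atomic_helper_check_modeset(dev, state);
    if (ret) {
        return ret;
    }

    /* Driver-private checks go here and may set &drm_crtc_state.mode_changed. */

    ret = drm_atomic_helper_check_planes(dev, state);
    if (ret) {
        return ret;
    }

    /* Re-run the modeset checks in case the plane checks forced a modeset. */
    return drm_atomic_helper_check_modeset(dev, state);
}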
746 
747 /**
748  * drm_atomic_helper_check_plane_state() - Check plane state for validity
749  * @plane_state: plane state to check
750  * @crtc_state: CRTC state to check
751  * @min_scale: minimum @src:@dest scaling factor in 16.16 fixed point
752  * @max_scale: maximum @src:@dest scaling factor in 16.16 fixed point
753  * @can_position: is it legal to position the plane such that it
754  *                doesn't cover the entire CRTC?  This will generally
755  *                only be false for primary planes.
756  * @can_update_disabled: can the plane be updated while the CRTC
757  *                       is disabled?
758  *
759  * Checks that a desired plane update is valid, and updates various
760  * bits of derived state (clipped coordinates etc.). Drivers that provide
761  * their own plane handling rather than helper-provided implementations may
762  * still wish to call this function to avoid duplication of error checking
763  * code.
764  *
765  * RETURNS:
766  * Zero if update appears valid, error code on failure
767  */
768 int drm_atomic_helper_check_plane_state(struct drm_plane_state *plane_state, const struct drm_crtc_state *crtc_state,
769                                         int min_scale, int max_scale, bool can_position, bool can_update_disabled)
770 {
771     struct drm_framebuffer *fb = plane_state->fb;
772     struct drm_rect *src = &plane_state->src;
773     struct drm_rect *dst = &plane_state->dst;
774     unsigned int rotation = plane_state->rotation;
775     struct drm_rect clip = {};
776     int hscale, vscale;
777 
778     WARN_ON(plane_state->crtc && plane_state->crtc != crtc_state->crtc);
779 
780     *src = drm_plane_state_src(plane_state);
781     *dst = drm_plane_state_dest(plane_state);
782 
783     if (!fb) {
784         plane_state->visible = false;
785         return 0;
786     }
787 
788     /* crtc should only be NULL when disabling (i.e., !fb) */
789     if (WARN_ON(!plane_state->crtc)) {
790         plane_state->visible = false;
791         return 0;
792     }
793 
794     if (!crtc_state->enable && !can_update_disabled) {
795         DRM_DEBUG_KMS("Cannot update plane of a disabled CRTC.\n");
796         return -EINVAL;
797     }
798 
799     drm_rect_rotate(src, fb->width << 16, fb->height << 16, rotation);
800 
801     /* Check scaling */
802     hscale = drm_rect_calc_hscale(src, dst, min_scale, max_scale);
803     vscale = drm_rect_calc_vscale(src, dst, min_scale, max_scale);
804     if (hscale < 0 || vscale < 0) {
805         DRM_DEBUG_KMS("Invalid scaling of plane\n");
806         drm_rect_debug_print("src: ", &plane_state->src, true);
807         drm_rect_debug_print("dst: ", &plane_state->dst, false);
808         return -ERANGE;
809     }
810 
811     if (crtc_state->enable) {
812         drm_mode_get_hv_timing(&crtc_state->mode, &clip.x2, &clip.y2);
813     }
814 
815     plane_state->visible = drm_rect_clip_scaled(src, dst, &clip);
816 
817     drm_rect_rotate_inv(src, fb->width << 16, fb->height << 16, rotation);
818 
819     if (!plane_state->visible) {
820         /*
821          * Plane isn't visible; some drivers can handle this
822          * so we just return success here.  Drivers that can't
823          * (including those that use the primary plane helper's
824          * update function) will return an error from their
825          * update_plane handler.
826          */
827         return 0;
828     }
829 
830     if (!can_position && !drm_rect_equals(dst, &clip)) {
831         DRM_DEBUG_KMS("Plane must cover entire CRTC\n");
832         drm_rect_debug_print("dst: ", dst, false);
833         drm_rect_debug_print("clip: ", &clip, false);
834         return -EINVAL;
835     }
836 
837     return 0;
838 }
839 EXPORT_SYMBOL(drm_atomic_helper_check_plane_state);
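
/*
 * A minimal sketch of a &drm_plane_helper_funcs.atomic_check hook built on
 * drm_atomic_helper_check_plane_state(), here for a primary plane that cannot
 * scale, must cover the whole CRTC and cannot be updated while the CRTC is
 * disabled. The foo_ name is hypothetical.
 */
static int __maybe_unused foo_example_primary_atomic_check(struct drm_plane *plane, struct drm_plane_state *new_state)
{
    struct drm_crtc_state *crtc_state;

    if (!new_state->crtc) {
        return 0;
    }

    crtc_state = drm_atomic_get_new_crtc_state(new_state->state, new_state->crtc);
    if (WARN_ON(!crtc_state)) {
        return -EINVAL;
    }

    return drm_atomic_helper_check_plane_state(new_state, crtc_state,
                                               DRM_PLANE_HELPER_NO_SCALING,
                                               DRM_PLANE_HELPER_NO_SCALING,
                                               false, false);
}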
840 
841 /**
842  * drm_atomic_helper_check_planes - validate state object for planes changes
843  * @dev: DRM device
844  * @state: the driver state object
845  *
846  * Check the state object to see if the requested state is physically possible.
847  * This does all the plane update related checks by calling into the
848  * &drm_crtc_helper_funcs.atomic_check and &drm_plane_helper_funcs.atomic_check
849  * hooks provided by the driver.
850  *
851  * It also sets &drm_crtc_state.planes_changed to indicate that a CRTC has
852  * updated planes.
853  *
854  * RETURNS:
855  * Zero for success or -errno
856  */
857 int drm_atomic_helper_check_planes(struct drm_device *dev, struct drm_atomic_state *state)
858 {
859     struct drm_crtc *crtc;
860     struct drm_crtc_state *new_crtc_state;
861     struct drm_plane *plane;
862     struct drm_plane_state *new_plane_state, *old_plane_state;
863     int i, ret = 0;
864 
865     for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i)
866     {
867         const struct drm_plane_helper_funcs *funcs;
868 
869         WARN_ON(!drm_modeset_is_locked(&plane->mutex));
870 
871         funcs = plane->helper_private;
872 
873         drm_atomic_helper_plane_changed(state, old_plane_state, new_plane_state, plane);
874 
875         drm_atomic_helper_check_plane_damage(state, new_plane_state);
876 
877         if (!funcs || !funcs->atomic_check) {
878             continue;
879         }
880 
881         ret = funcs->atomic_check(plane, new_plane_state);
882         if (ret) {
883             DRM_DEBUG_ATOMIC("[PLANE:%d:%s] atomic driver check failed\n", plane->base.id, plane->name);
884             return ret;
885         }
886     }
887 
888     for_each_new_crtc_in_state(state, crtc, new_crtc_state, i)
889     {
890         const struct drm_crtc_helper_funcs *funcs;
891 
892         funcs = crtc->helper_private;
893 
894         if (!funcs || !funcs->atomic_check) {
895             continue;
896         }
897 
898         ret = funcs->atomic_check(crtc, new_crtc_state);
899         if (ret) {
900             DRM_DEBUG_ATOMIC("[CRTC:%d:%s] atomic driver check failed\n", crtc->base.id, crtc->name);
901             return ret;
902         }
903     }
904 
905     return ret;
906 }
907 EXPORT_SYMBOL(drm_atomic_helper_check_planes);
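
/*
 * A minimal sketch of a &drm_crtc_helper_funcs.atomic_check hook as invoked by
 * drm_atomic_helper_check_planes() above. The foo_ name and the pixel clock
 * limit are made up for illustration.
 */
static int __maybe_unused foo_example_crtc_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *crtc_state)
{
    if (!crtc_state->enable) {
        return 0;
    }

    if (crtc_state->adjusted_mode.clock > 594000 /* kHz, hypothetical limit */) {
        DRM_DEBUG_ATOMIC("[CRTC:%d:%s] pixel clock too high\n", crtc->base.id, crtc->name);
        return -EINVAL;
    }

    return 0;
}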
908 
909 static bool crtc_needs_disable(struct drm_crtc_state *old_state, struct drm_crtc_state *new_state)
910 {
911     /*
912      * No new_state means the CRTC is off, so the only criterion is whether
913      * it's currently active or in self refresh mode.
914      */
915     if (!new_state) {
916         return drm_atomic_crtc_effectively_active(old_state);
917     }
918     if (old_state->self_refresh_active && old_state->crtc != new_state->crtc) {
919         return true;
920     }
921 
922     /*
923      * We need to run through the crtc_funcs->disable() function if the CRTC
924      * is currently on, if it's transitioning to self refresh mode, or if
925      * it's in self refresh mode and needs to be fully disabled.
926      */
927     return old_state->active || (old_state->self_refresh_active && !new_state->enable) ||
928            new_state->self_refresh_active;
929 }
930 
931 static void disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
932 {
933     struct drm_connector *connector;
934     struct drm_connector_state *old_conn_state, *new_conn_state;
935     struct drm_crtc *crtc;
936     struct drm_crtc_state *old_crtc_state, *new_crtc_state;
937     int i;
938 
939     for_each_oldnew_connector_in_state(old_state, connector, old_conn_state, new_conn_state, i)
940     {
941         const struct drm_encoder_helper_funcs *funcs;
942         struct drm_encoder *encoder;
943         struct drm_bridge *bridge;
944 
945         /* Shut down everything that's in the changeset and currently
946          * still on. So need to check the old, saved state. */
947         if (!old_conn_state->crtc) {
948             continue;
949         }
950 
951         old_crtc_state = drm_atomic_get_old_crtc_state(old_state, old_conn_state->crtc);
952 
953         if (new_conn_state->crtc) {
954             new_crtc_state = drm_atomic_get_new_crtc_state(old_state, new_conn_state->crtc);
955         } else {
956             new_crtc_state = NULL;
957         }
958 
959         if (!crtc_needs_disable(old_crtc_state, new_crtc_state) ||
960             !drm_atomic_crtc_needs_modeset(old_conn_state->crtc->state)) {
961             continue;
962         }
963 
964         encoder = old_conn_state->best_encoder;
965 
966         /* We shouldn't get this far if we didn't previously have
967          * an encoder.. but WARN_ON() rather than explode.
968          */
969         if (WARN_ON(!encoder)) {
970             continue;
971         }
972 
973         funcs = encoder->helper_private;
974 
975         DRM_DEBUG_ATOMIC("disabling [ENCODER:%d:%s]\n", encoder->base.id, encoder->name);
976 
977         /*
978          * Each encoder has at most one connector (since we always steal
979          * it away), so we won't call disable hooks twice.
980          */
981         bridge = drm_bridge_chain_get_first_bridge(encoder);
982         drm_atomic_bridge_chain_disable(bridge, old_state);
983 
984         /* Right function depends upon target state. */
985         if (funcs) {
986             if (funcs->atomic_disable) {
987                 funcs->atomic_disable(encoder, old_state);
988             } else if (new_conn_state->crtc && funcs->prepare) {
989                 funcs->prepare(encoder);
990             } else if (funcs->disable) {
991                 funcs->disable(encoder);
992             } else if (funcs->dpms) {
993                 funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
994             }
995         }
996 
997         drm_atomic_bridge_chain_post_disable(bridge, old_state);
998     }
999 
1000     for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i)
1001     {
1002         const struct drm_crtc_helper_funcs *funcs;
1003         int ret;
1004 
1005         /* Shut down everything that needs a full modeset. */
1006         if (!drm_atomic_crtc_needs_modeset(new_crtc_state)) {
1007             continue;
1008         }
1009 
1010         if (!crtc_needs_disable(old_crtc_state, new_crtc_state)) {
1011             continue;
1012         }
1013 
1014         funcs = crtc->helper_private;
1015 
1016         DRM_DEBUG_ATOMIC("disabling [CRTC:%d:%s]\n", crtc->base.id, crtc->name);
1017 
1018         /* Right function depends upon target state. */
1019         if (new_crtc_state->enable && funcs->prepare) {
1020             funcs->prepare(crtc);
1021         } else if (funcs->atomic_disable) {
1022             funcs->atomic_disable(crtc, old_crtc_state);
1023         } else if (funcs->disable) {
1024             funcs->disable(crtc);
1025         } else if (funcs->dpms) {
1026             funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
1027         }
1028 
1029         if (!drm_dev_has_vblank(dev)) {
1030             continue;
1031         }
1032 
1033         ret = drm_crtc_vblank_get(crtc);
1034         WARN_ONCE(ret != -EINVAL, "driver forgot to call drm_crtc_vblank_off()\n");
1035         if (ret == 0) {
1036             drm_crtc_vblank_put(crtc);
1037         }
1038     }
1039 }
1040 
1041 /**
1042  * drm_atomic_helper_update_legacy_modeset_state - update legacy modeset state
1043  * @dev: DRM device
1044  * @old_state: atomic state object with old state structures
1045  *
1046  * This function updates all the various legacy modeset state pointers in
1047  * connectors, encoders and CRTCs.
1048  *
1049  * Drivers can use this for building their own atomic commit if they don't have
1050  * a pure helper-based modeset implementation.
1051  *
1052  * Since these updates are not synchronized with any locking, only code paths
1053  * called from &drm_mode_config_helper_funcs.atomic_commit_tail can look at the
1054  * legacy state filled out by this helper. De facto this means this helper and
1055  * the legacy state pointers are only really useful for transitioning an
1056  * existing driver to the atomic world.
1057  */
1058 void drm_atomic_helper_update_legacy_modeset_state(struct drm_device *dev, struct drm_atomic_state *old_state)
1059 {
1060     struct drm_connector *connector;
1061     struct drm_connector_state *old_conn_state, *new_conn_state;
1062     struct drm_crtc *crtc;
1063     struct drm_crtc_state *new_crtc_state;
1064     int i;
1065 
1066     /* clear out existing links and update dpms */
1067     for_each_oldnew_connector_in_state(old_state, connector, old_conn_state, new_conn_state, i)
1068     {
1069         if (connector->encoder) {
1070             WARN_ON(!connector->encoder->crtc);
1071 
1072             connector->encoder->crtc = NULL;
1073             connector->encoder = NULL;
1074         }
1075 
1076         crtc = new_conn_state->crtc;
1077         if ((!crtc && old_conn_state->crtc) || (crtc && drm_atomic_crtc_needs_modeset(crtc->state))) {
1078             int mode = DRM_MODE_DPMS_OFF;
1079 
1080             if (crtc && crtc->state->active) {
1081                 mode = DRM_MODE_DPMS_ON;
1082             }
1083 
1084             connector->dpms = mode;
1085         }
1086     }
1087 
1088     /* set new links */
1089     for_each_new_connector_in_state(old_state, connector, new_conn_state, i)
1090     {
1091         if (!new_conn_state->crtc) {
1092             continue;
1093         }
1094 
1095         if (WARN_ON(!new_conn_state->best_encoder)) {
1096             continue;
1097         }
1098 
1099         connector->encoder = new_conn_state->best_encoder;
1100         connector->encoder->crtc = new_conn_state->crtc;
1101     }
1102 
1103     /* set legacy state in the crtc structure */
1104     for_each_new_crtc_in_state(old_state, crtc, new_crtc_state, i)
1105     {
1106         struct drm_plane *primary = crtc->primary;
1107         struct drm_plane_state *new_plane_state;
1108 
1109         crtc->mode = new_crtc_state->mode;
1110         crtc->enabled = new_crtc_state->enable;
1111 
1112         new_plane_state = drm_atomic_get_new_plane_state(old_state, primary);
1113         if (new_plane_state && new_plane_state->crtc == crtc) {
1114             crtc->x = new_plane_state->src_x >> 16;
1115             crtc->y = new_plane_state->src_y >> 16;
1116         }
1117     }
1118 }
1119 EXPORT_SYMBOL(drm_atomic_helper_update_legacy_modeset_state);
1120 
1121 /**
1122  * drm_atomic_helper_calc_timestamping_constants - update vblank timestamping constants
1123  * @state: atomic state object
1124  *
1125  * Updates the timestamping constants used for precise vblank timestamps
1126  * by calling drm_calc_timestamping_constants() for all enabled crtcs in @state.
1127  */
1128 void drm_atomic_helper_calc_timestamping_constants(struct drm_atomic_state *state)
1129 {
1130     struct drm_crtc_state *new_crtc_state;
1131     struct drm_crtc *crtc;
1132     int i;
1133 
1134     for_each_new_crtc_in_state(state, crtc, new_crtc_state, i)
1135     {
1136         if (new_crtc_state->enable) {
1137             drm_calc_timestamping_constants(crtc, &new_crtc_state->adjusted_mode);
1138         }
1139     }
1140 }
1141 EXPORT_SYMBOL(drm_atomic_helper_calc_timestamping_constants);
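
/*
 * A minimal sketch of the transitional use case described above: a driver
 * building its own commit tail still keeps the legacy pointers and the vblank
 * timestamping constants up to date before programming the hardware. The foo_
 * name is hypothetical.
 */
static void __maybe_unused foo_example_commit_tail_legacy(struct drm_atomic_state *old_state)
{
    struct drm_device *dev = old_state->dev;

    drm_atomic_helper_update_legacy_modeset_state(dev, old_state);
    drm_atomic_helper_calc_timestamping_constants(old_state);

    /* Driver-specific modeset and plane programming would follow here. */
}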
1142 
1143 static void crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state)
1144 {
1145     struct drm_crtc *crtc;
1146     struct drm_crtc_state *new_crtc_state;
1147     struct drm_connector *connector;
1148     struct drm_connector_state *new_conn_state;
1149     int i;
1150 
1151     for_each_new_crtc_in_state(old_state, crtc, new_crtc_state, i)
1152     {
1153         const struct drm_crtc_helper_funcs *funcs;
1154 
1155         if (!new_crtc_state->mode_changed) {
1156             continue;
1157         }
1158 
1159         funcs = crtc->helper_private;
1160 
1161         if (new_crtc_state->enable && funcs->mode_set_nofb) {
1162             DRM_DEBUG_ATOMIC("modeset on [CRTC:%d:%s]\n", crtc->base.id, crtc->name);
1163 
1164             funcs->mode_set_nofb(crtc);
1165         }
1166     }
1167 
1168     for_each_new_connector_in_state(old_state, connector, new_conn_state, i)
1169     {
1170         const struct drm_encoder_helper_funcs *funcs;
1171         struct drm_encoder *encoder;
1172         struct drm_display_mode *mode, *adjusted_mode;
1173         struct drm_bridge *bridge;
1174 
1175         if (!new_conn_state->best_encoder) {
1176             continue;
1177         }
1178 
1179         encoder = new_conn_state->best_encoder;
1180         funcs = encoder->helper_private;
1181         new_crtc_state = new_conn_state->crtc->state;
1182         mode = &new_crtc_state->mode;
1183         adjusted_mode = &new_crtc_state->adjusted_mode;
1184 
1185         if (!new_crtc_state->mode_changed) {
1186             continue;
1187         }
1188 
1189         DRM_DEBUG_ATOMIC("modeset on [ENCODER:%d:%s]\n", encoder->base.id, encoder->name);
1190 
1191         /*
1192          * Each encoder has at most one connector (since we always steal
1193          * it away), so we won't call mode_set hooks twice.
1194          */
1195         if (funcs && funcs->atomic_mode_set) {
1196             funcs->atomic_mode_set(encoder, new_crtc_state, new_conn_state);
1197         } else if (funcs && funcs->mode_set) {
1198             funcs->mode_set(encoder, mode, adjusted_mode);
1199         }
1200 
1201         bridge = drm_bridge_chain_get_first_bridge(encoder);
1202         drm_bridge_chain_mode_set(bridge, mode, adjusted_mode);
1203     }
1204 }
1205 
1206 /**
1207  * drm_atomic_helper_commit_modeset_disables - modeset commit to disable outputs
1208  * @dev: DRM device
1209  * @old_state: atomic state object with old state structures
1210  *
1211  * This function shuts down all the outputs that need to be shut down and
1212  * prepares them (if required) with the new mode.
1213  *
1214  * For compatibility with legacy CRTC helpers this should be called before
1215  * drm_atomic_helper_commit_planes(), which is what the default commit function
1216  * does. But drivers with different needs can group the modeset commits together
1217  * and do the plane commits at the end. This is useful for drivers doing runtime
1218  * PM since plane updates then only happen when the CRTC is actually enabled.
1219  */
1220 void drm_atomic_helper_commit_modeset_disables(struct drm_device *dev, struct drm_atomic_state *old_state)
1221 {
1222     disable_outputs(dev, old_state);
1223 
1224     drm_atomic_helper_update_legacy_modeset_state(dev, old_state);
1225     drm_atomic_helper_calc_timestamping_constants(old_state);
1226 
1227     crtc_set_mode(dev, old_state);
1228 }
1229 EXPORT_SYMBOL(drm_atomic_helper_commit_modeset_disables);
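
/*
 * A minimal sketch of the default ordering described above, roughly what
 * drm_atomic_helper_commit_tail() does: disable the outgoing outputs first,
 * then commit the planes, then enable the new outputs. The foo_ name is
 * hypothetical.
 */
static void __maybe_unused foo_example_commit_tail(struct drm_atomic_state *old_state)
{
    struct drm_device *dev = old_state->dev;

    drm_atomic_helper_commit_modeset_disables(dev, old_state);

    drm_atomic_helper_commit_planes(dev, old_state, 0);

    drm_atomic_helper_commit_modeset_enables(dev, old_state);

    drm_atomic_helper_fake_vblank(old_state);

    drm_atomic_helper_commit_hw_done(old_state);

    drm_atomic_helper_wait_for_vblanks(dev, old_state);

    drm_atomic_helper_cleanup_planes(dev, old_state);
}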
1230 
1231 static void drm_atomic_helper_commit_writebacks(struct drm_device *dev, struct drm_atomic_state *old_state)
1232 {
1233     struct drm_connector *connector;
1234     struct drm_connector_state *new_conn_state;
1235     int i;
1236 
1237     for_each_new_connector_in_state(old_state, connector, new_conn_state, i)
1238     {
1239         const struct drm_connector_helper_funcs *funcs;
1240 
1241         funcs = connector->helper_private;
1242         if (!funcs->atomic_commit) {
1243             continue;
1244         }
1245 
1246         if (new_conn_state->writeback_job && new_conn_state->writeback_job->fb) {
1247             WARN_ON(connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK);
1248             funcs->atomic_commit(connector, new_conn_state);
1249         }
1250     }
1251 }
1252 
1253 /**
1254  * drm_atomic_helper_commit_modeset_enables - modeset commit to enable outputs
1255  * @dev: DRM device
1256  * @old_state: atomic state object with old state structures
1257  *
1258  * This function enables all the outputs with the new configuration which had to
1259  * be turned off for the update.
1260  *
1261  * For compatibility with legacy CRTC helpers this should be called after
1262  * drm_atomic_helper_commit_planes(), which is what the default commit function
1263  * does. But drivers with different needs can group the modeset commits together
1264  * and do the plane commits at the end. This is useful for drivers doing runtime
1265  * PM since plane updates then only happen when the CRTC is actually enabled.
1266  */
1267 void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev, struct drm_atomic_state *old_state)
1268 {
1269     struct drm_crtc *crtc;
1270     struct drm_crtc_state *old_crtc_state;
1271     struct drm_crtc_state *new_crtc_state;
1272     struct drm_connector *connector;
1273     struct drm_connector_state *new_conn_state;
1274     int i;
1275 
1276     for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i)
1277     {
1278         const struct drm_crtc_helper_funcs *funcs;
1279 
1280         /* Need to filter out CRTCs where only planes change. */
1281         if (!drm_atomic_crtc_needs_modeset(new_crtc_state)) {
1282             continue;
1283         }
1284 
1285         if (!new_crtc_state->active) {
1286             continue;
1287         }
1288 
1289         funcs = crtc->helper_private;
1290 
1291         if (new_crtc_state->enable) {
1292             DRM_DEBUG_ATOMIC("enabling [CRTC:%d:%s]\n", crtc->base.id, crtc->name);
1293             if (funcs->atomic_enable) {
1294                 funcs->atomic_enable(crtc, old_crtc_state);
1295             } else if (funcs->commit) {
1296                 funcs->commit(crtc);
1297             }
1298         }
1299     }
1300 
1301     for_each_new_connector_in_state(old_state, connector, new_conn_state, i)
1302     {
1303         const struct drm_encoder_helper_funcs *funcs;
1304         struct drm_encoder *encoder;
1305         struct drm_bridge *bridge;
1306 
1307         if (!new_conn_state->best_encoder) {
1308             continue;
1309         }
1310 
1311         if (!new_conn_state->crtc->state->active || !drm_atomic_crtc_needs_modeset(new_conn_state->crtc->state)) {
1312             continue;
1313         }
1314 
1315         encoder = new_conn_state->best_encoder;
1316         funcs = encoder->helper_private;
1317 
1318         DRM_DEBUG_ATOMIC("enabling [ENCODER:%d:%s]\n", encoder->base.id, encoder->name);
1319 
1320         /*
1321          * Each encoder has at most one connector (since we always steal
1322          * it away), so we won't call enable hooks twice.
1323          */
1324         bridge = drm_bridge_chain_get_first_bridge(encoder);
1325         drm_atomic_bridge_chain_pre_enable(bridge, old_state);
1326 
1327         if (funcs) {
1328             if (funcs->atomic_enable) {
1329                 funcs->atomic_enable(encoder, old_state);
1330             } else if (funcs->enable) {
1331                 funcs->enable(encoder);
1332             } else if (funcs->commit) {
1333                 funcs->commit(encoder);
1334             }
1335         }
1336 
1337         drm_atomic_bridge_chain_enable(bridge, old_state);
1338     }
1339 
1340     drm_atomic_helper_commit_writebacks(dev, old_state);
1341 }
1342 EXPORT_SYMBOL(drm_atomic_helper_commit_modeset_enables);
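
/*
 * A minimal sketch of the alternative ordering described above for runtime-PM
 * drivers, roughly what drm_atomic_helper_commit_tail_rpm() does: planes are
 * committed only after the CRTCs have been enabled, and only on active CRTCs.
 * The foo_ name is hypothetical.
 */
static void __maybe_unused foo_example_commit_tail_rpm(struct drm_atomic_state *old_state)
{
    struct drm_device *dev = old_state->dev;

    drm_atomic_helper_commit_modeset_disables(dev, old_state);

    drm_atomic_helper_commit_modeset_enables(dev, old_state);

    drm_atomic_helper_commit_planes(dev, old_state, DRM_PLANE_COMMIT_ACTIVE_ONLY);

    drm_atomic_helper_fake_vblank(old_state);

    drm_atomic_helper_commit_hw_done(old_state);

    drm_atomic_helper_wait_for_vblanks(dev, old_state);

    drm_atomic_helper_cleanup_planes(dev, old_state);
}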
1343 
1344 /**
1345  * drm_atomic_helper_wait_for_fences - wait for fences stashed in plane state
1346  * @dev: DRM device
1347  * @state: atomic state object with old state structures
1348  * @pre_swap: If true, do an interruptible wait, and @state is the new state.
1349  *     Otherwise @state is the old state.
1350  *
1351  * For implicit sync, drivers should fish the exclusive fence out from the
1352  * incoming fb's and stash it in the drm_plane_state.  This is called after
1353  * drm_atomic_helper_swap_state() so it uses the current plane state (and
1354  * just uses the atomic state to find the changed planes).
1355  *
1356  * Note that @pre_swap is needed since the point where we block for fences moves
1357  * around depending upon whether an atomic commit is blocking or
1358  * non-blocking. For non-blocking commit all waiting needs to happen after
1359  * drm_atomic_helper_swap_state() is called, but for blocking commits we want
1360  * to wait **before** we do anything that can't be easily rolled back. That is
1361  * before we call drm_atomic_helper_swap_state().
1362  *
1363  * Returns zero if success or < 0 if dma_fence_wait() fails.
1364  */
1365 int drm_atomic_helper_wait_for_fences(struct drm_device *dev, struct drm_atomic_state *state, bool pre_swap)
1366 {
1367     struct drm_plane *plane;
1368     struct drm_plane_state *new_plane_state;
1369     int i, ret;
1370 
1371     for_each_new_plane_in_state(state, plane, new_plane_state, i)
1372     {
1373         if (!new_plane_state->fence) {
1374             continue;
1375         }
1376 
1377         WARN_ON(!new_plane_state->fb);
1378 
1379         /*
1380          * If waiting for fences pre-swap (i.e. in a blocking commit), userspace can
1381          * still interrupt the operation. Instead of blocking until the
1382          * timer expires, make the wait interruptible.
1383          */
1384         ret = dma_fence_wait(new_plane_state->fence, pre_swap);
1385         if (ret) {
1386             return ret;
1387         }
1388 
1389         dma_fence_put(new_plane_state->fence);
1390         new_plane_state->fence = NULL;
1391     }
1392 
1393     return 0;
1394 }
1395 EXPORT_SYMBOL(drm_atomic_helper_wait_for_fences);
1396 
1397 /**
1398  * drm_atomic_helper_wait_for_vblanks - wait for vblank on CRTCs
1399  * @dev: DRM device
1400  * @old_state: atomic state object with old state structures
1401  *
1402  * Helper to, after atomic commit, wait for vblanks on all affected
1403  * CRTCs (ie. before cleaning up old framebuffers using
1404  * drm_atomic_helper_cleanup_planes()). It will only wait on CRTCs where the
1405  * framebuffers have actually changed to optimize for the legacy cursor and
1406  * plane update use-case.
1407  *
1408  * Drivers using the nonblocking commit tracking support initialized by calling
1409  * drm_atomic_helper_setup_commit() should look at
1410  * drm_atomic_helper_wait_for_flip_done() as an alternative.
1411  */
1412 void drm_atomic_helper_wait_for_vblanks(struct drm_device *dev, struct drm_atomic_state *old_state)
1413 {
1414     struct drm_crtc *crtc;
1415     struct drm_crtc_state *old_crtc_state, *new_crtc_state;
1416     int i, ret;
1417     unsigned crtc_mask = 0;
1418 
1419     /*
1420      * Legacy cursor ioctls are completely unsynced, and userspace
1421      * relies on that (by doing tons of cursor updates).
1422      */
1423     if (old_state->legacy_cursor_update) {
1424         return;
1425     }
1426 
1427     for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i)
1428     {
1429         if (!new_crtc_state->active) {
1430             continue;
1431         }
1432 
1433         ret = drm_crtc_vblank_get(crtc);
1434         if (ret != 0) {
1435             continue;
1436         }
1437 
1438         crtc_mask |= drm_crtc_mask(crtc);
1439         old_state->crtcs[i].last_vblank_count = drm_crtc_vblank_count(crtc);
1440     }
1441 
1442     for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i)
1443     {
1444         if (!(crtc_mask & drm_crtc_mask(crtc))) {
1445             continue;
1446         }
1447 
1448         ret = wait_event_timeout(dev->vblank[i].queue,
1449                                  old_state->crtcs[i].last_vblank_count != drm_crtc_vblank_count(crtc),
1450                                  msecs_to_jiffies(100));
1451 
1452         WARN(!ret, "[CRTC:%d:%s] vblank wait timed out\n", crtc->base.id, crtc->name);
1453 
1454         drm_crtc_vblank_put(crtc);
1455     }
1456 }
1457 EXPORT_SYMBOL(drm_atomic_helper_wait_for_vblanks);
1458 
1459 /**
1460  * drm_atomic_helper_wait_for_flip_done - wait for all page flips to be done
1461  * @dev: DRM device
1462  * @old_state: atomic state object with old state structures
1463  *
1464  * Helper to, after atomic commit, wait for page flips on all affected
1465  * crtcs (ie. before cleaning up old framebuffers using
1466  * drm_atomic_helper_cleanup_planes()). Compared to
1467  * drm_atomic_helper_wait_for_vblanks() this waits for the completion on all
1468  * CRTCs, assuming that cursors-only updates are signalling their completion
1469  * immediately (or using a different path).
1470  *
1471  * This requires that drivers use the nonblocking commit tracking support
1472  * initialized using drm_atomic_helper_setup_commit().
1473  */
1474 void drm_atomic_helper_wait_for_flip_done(struct drm_device *dev, struct drm_atomic_state *old_state)
1475 {
1476     struct drm_crtc *crtc;
1477     int i;
1478 
1479     for (i = 0; i < dev->mode_config.num_crtc; i++) {
1480         struct drm_crtc_commit *commit = old_state->crtcs[i].commit;
1481         int ret;
1482 
1483         crtc = old_state->crtcs[i].ptr;
1484 
1485         if (!crtc || !commit) {
1486             continue;
1487         }
1488 
1489         ret = wait_for_completion_timeout(&commit->flip_done, 10 * HZ);
1490         if (ret == 0) {
1491             DRM_ERROR("[CRTC:%d:%s] flip_done timed out\n", crtc->base.id, crtc->name);
1492         }
1493     }
1494 
1495     if (old_state->fake_commit) {
1496         complete_all(&old_state->fake_commit->flip_done);
1497     }
1498 }
1499 EXPORT_SYMBOL(drm_atomic_helper_wait_for_flip_done);
1500 
1501 /**
1502  * drm_atomic_helper_commit_tail - commit atomic update to hardware
1503  * @old_state: atomic state object with old state structures
1504  *
1505  * This is the default implementation for the
1506  * &drm_mode_config_helper_funcs.atomic_commit_tail hook, for drivers
1507  * that do not support runtime_pm or do not need the CRTC to be
1508  * enabled to perform a commit. Otherwise, see
1509  * drm_atomic_helper_commit_tail_rpm().
1510  *
1511  * Note that the default ordering of how the various stages are called is
1512  * chosen to match the legacy modeset helper library as closely as possible.
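 *
 * Drivers that need a different ordering, or that want to use
 * drm_atomic_helper_wait_for_flip_done() instead of waiting for vblanks, can
 * provide their own &drm_mode_config_helper_funcs.atomic_commit_tail hook. A
 * minimal sketch, with foo_ as a placeholder driver prefix::
 *
 *     static void foo_atomic_commit_tail(struct drm_atomic_state *old_state)
 *     {
 *             struct drm_device *dev = old_state->dev;
 *
 *             drm_atomic_helper_commit_modeset_disables(dev, old_state);
 *             drm_atomic_helper_commit_planes(dev, old_state, 0);
 *             drm_atomic_helper_commit_modeset_enables(dev, old_state);
 *             drm_atomic_helper_fake_vblank(old_state);
 *             drm_atomic_helper_commit_hw_done(old_state);
 *             drm_atomic_helper_wait_for_flip_done(dev, old_state);
 *             drm_atomic_helper_cleanup_planes(dev, old_state);
 *     }
 *
 *     static const struct drm_mode_config_helper_funcs foo_mode_config_helpers = {
 *             .atomic_commit_tail = foo_atomic_commit_tail,
 *     };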
1513  */
1514 void drm_atomic_helper_commit_tail(struct drm_atomic_state *old_state)
1515 {
1516     struct drm_device *dev = old_state->dev;
1517 
1518     drm_atomic_helper_commit_modeset_disables(dev, old_state);
1519 
1520     drm_atomic_helper_commit_planes(dev, old_state, 0);
1521 
1522     drm_atomic_helper_commit_modeset_enables(dev, old_state);
1523 
1524     drm_atomic_helper_fake_vblank(old_state);
1525 
1526     drm_atomic_helper_commit_hw_done(old_state);
1527 
1528     drm_atomic_helper_wait_for_vblanks(dev, old_state);
1529 
1530     drm_atomic_helper_cleanup_planes(dev, old_state);
1531 }
1532 EXPORT_SYMBOL(drm_atomic_helper_commit_tail);
1533 
1534 /**
1535  * drm_atomic_helper_commit_tail_rpm - commit atomic update to hardware
1536  * @old_state: new modeset state to be committed
1537  *
1538  * This is an alternative implementation for the
1539  * &drm_mode_config_helper_funcs.atomic_commit_tail hook, for drivers
1540  * that support runtime_pm or need the CRTC to be enabled to perform a
1541  * commit. Otherwise, one should use the default implementation
1542  * drm_atomic_helper_commit_tail().
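 *
 * Drivers opt in by pointing the &drm_mode_config_helper_funcs.atomic_commit_tail
 * hook at this function and storing the struct in &drm_mode_config.helper_private.
 * A minimal sketch, with foo_ as a placeholder prefix::
 *
 *     static const struct drm_mode_config_helper_funcs foo_mode_config_helpers = {
 *             .atomic_commit_tail = drm_atomic_helper_commit_tail_rpm,
 *     };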
1543  */
1544 void drm_atomic_helper_commit_tail_rpm(struct drm_atomic_state *old_state)
1545 {
1546     struct drm_device *dev = old_state->dev;
1547 
1548     drm_atomic_helper_commit_modeset_disables(dev, old_state);
1549 
1550     drm_atomic_helper_commit_modeset_enables(dev, old_state);
1551 
1552     drm_atomic_helper_commit_planes(dev, old_state, DRM_PLANE_COMMIT_ACTIVE_ONLY);
1553 
1554     drm_atomic_helper_fake_vblank(old_state);
1555 
1556     drm_atomic_helper_commit_hw_done(old_state);
1557 
1558     drm_atomic_helper_wait_for_vblanks(dev, old_state);
1559 
1560     drm_atomic_helper_cleanup_planes(dev, old_state);
1561 }
1562 EXPORT_SYMBOL(drm_atomic_helper_commit_tail_rpm);
1563 
1564 static void commit_tail(struct drm_atomic_state *old_state)
1565 {
1566     struct drm_device *dev = old_state->dev;
1567     const struct drm_mode_config_helper_funcs *funcs;
1568     struct drm_crtc_state *new_crtc_state;
1569     struct drm_crtc *crtc;
1570     ktime_t start;
1571     s64 commit_time_ms;
1572     unsigned int i, new_self_refresh_mask = 0;
1573 
1574     funcs = dev->mode_config.helper_private;
1575 
1576     /*
1577      * We're measuring the _entire_ commit, so the time will vary depending
1578      * on how many fences and objects are involved. For the purposes of self
1579      * refresh, this is desirable since it'll give us an idea of how
1580      * congested things are. This will inform our decision on how often we
1581      * should enter self refresh after idle.
1582      *
1583      * These times will be averaged out in the self refresh helpers to avoid
1584      * overreacting to one outlier frame.
1585      */
1586     start = ktime_get();
1587 
1588     drm_atomic_helper_wait_for_fences(dev, old_state, false);
1589 
1590     drm_atomic_helper_wait_for_dependencies(old_state);
1591 
1592     /*
1593      * We cannot safely access new_crtc_state after
1594      * drm_atomic_helper_commit_hw_done() so figure out which crtc's have
1595      * self-refresh active beforehand:
1596      */
1597     for_each_new_crtc_in_state(old_state, crtc, new_crtc_state, i) if (new_crtc_state->self_refresh_active)
1598         new_self_refresh_mask |= BIT(i);
1599 
1600     if (funcs && funcs->atomic_commit_tail) {
1601         funcs->atomic_commit_tail(old_state);
1602     } else {
1603         drm_atomic_helper_commit_tail(old_state);
1604     }
1605 
1606     commit_time_ms = ktime_ms_delta(ktime_get(), start);
1607     if (commit_time_ms > 0) {
1608         drm_self_refresh_helper_update_avg_times(old_state, (unsigned long)commit_time_ms, new_self_refresh_mask);
1609     }
1610 
1611     drm_atomic_helper_commit_cleanup_done(old_state);
1612 
1613     drm_atomic_state_put(old_state);
1614 }
1615 
1616 static void commit_work(struct work_struct *work)
1617 {
1618     struct drm_atomic_state *state = container_of(work, struct drm_atomic_state, commit_work);
1619     commit_tail(state);
1620 }
1621 
1622 /**
1623  * drm_atomic_helper_async_check - check if state can be committed asynchronously
1624  * @dev: DRM device
1625  * @state: the driver state object
1626  *
1627  * This helper will check if it is possible to commit the state asynchronously.
1628  * Async commits are not supposed to swap the states like normal sync commits
1629  * but just do in-place changes on the current state.
1630  *
1631  * It will return 0 if the commit can happen in an asynchronous fashion or an
1632  * error if not. Note that an error just means it can't be committed
1633  * asynchronously; if it fails, the commit should be treated like a normal synchronous commit.
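 *
 * For this to succeed the plane must implement the
 * &drm_plane_helper_funcs.atomic_async_check and
 * &drm_plane_helper_funcs.atomic_async_update hooks. A minimal sketch of the
 * update side, with foo_ names as placeholders for driver-specific code; note
 * how it updates plane->state in place and swaps the framebuffer pointers, as
 * drm_atomic_helper_async_commit() expects::
 *
 *     static void foo_plane_atomic_async_update(struct drm_plane *plane,
 *                                               struct drm_plane_state *new_state)
 *     {
 *             struct drm_plane_state *state = plane->state;
 *
 *             swap(state->fb, new_state->fb);
 *             state->crtc_x = new_state->crtc_x;
 *             state->crtc_y = new_state->crtc_y;
 *             state->src_x = new_state->src_x;
 *             state->src_y = new_state->src_y;
 *
 *             foo_plane_program_scanout(plane);
 *     }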
1634  */
1635 int drm_atomic_helper_async_check(struct drm_device *dev, struct drm_atomic_state *state)
1636 {
1637     struct drm_crtc *crtc;
1638     struct drm_crtc_state *crtc_state;
1639     struct drm_plane *plane = NULL;
1640     struct drm_plane_state *old_plane_state = NULL;
1641     struct drm_plane_state *new_plane_state = NULL;
1642     const struct drm_plane_helper_funcs *funcs;
1643     int i, n_planes = 0;
1644 
1645     for_each_new_crtc_in_state(state, crtc, crtc_state, i)
1646     {
1647         if (drm_atomic_crtc_needs_modeset(crtc_state)) {
1648             return -EINVAL;
1649         }
1650     }
1651 
1652     for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) n_planes++;
1653 
1654     /* we support only single plane updates for now */
1655     if (n_planes != 1) {
1656         return -EINVAL;
1657     }
1658 
1659     if (!new_plane_state->crtc || old_plane_state->crtc != new_plane_state->crtc) {
1660         return -EINVAL;
1661     }
1662 
1663     funcs = plane->helper_private;
1664     if (!funcs->atomic_async_update) {
1665         return -EINVAL;
1666     }
1667 
1668     if (new_plane_state->fence) {
1669         return -EINVAL;
1670     }
1671 
1672     /*
1673      * Don't do an async update if there is an outstanding commit modifying
1674      * the plane.  This prevents our async update's changes from getting
1675      * overridden by a previous synchronous update's state.
1676      */
1677     if (old_plane_state->commit && !try_wait_for_completion(&old_plane_state->commit->hw_done)) {
1678         return -EBUSY;
1679     }
1680 
1681     return funcs->atomic_async_check(plane, new_plane_state);
1682 }
1683 EXPORT_SYMBOL(drm_atomic_helper_async_check);
1684 
1685 /**
1686  * drm_atomic_helper_check - validate state object
1687  * @dev: DRM device
1688  * @state: the driver state object
1689  *
1690  * Check the state object to see if the requested state is physically possible.
1691  * Only CRTCs and planes have check callbacks, so for any additional (global)
1692  * checking that a driver needs it can simply wrap that around this function.
1693  * Drivers without such needs can directly use this as their
1694  * &drm_mode_config_funcs.atomic_check callback.
1695  *
1696  * This just wraps the two parts of the state checking for planes and modeset
1697  * state in the default order: First it calls drm_atomic_helper_check_modeset()
1698  * and then drm_atomic_helper_check_planes(). The assumption is that the
1699  * &drm_plane_helper_funcs.atomic_check and &drm_crtc_helper_funcs.atomic_check
1700  * functions depend upon an updated adjusted_mode.clock to e.g. properly compute
1701  * watermarks.
1702  *
1703  * Note that zpos normalization will add all enabled planes to the state, which
1704  * might not be desired for some drivers.
1705  * For example, enabling or disabling a cursor plane which has a fixed zpos
1706  * value would force all other enabled planes to be added to the state change.
1707  *
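 * Drivers with additional, device-wide constraints can wrap this function from
 * their own &drm_mode_config_funcs.atomic_check implementation. A minimal
 * sketch, where foo_check_bandwidth() stands in for a hypothetical
 * driver-specific global check::
 *
 *     static int foo_atomic_check(struct drm_device *dev,
 *                                 struct drm_atomic_state *state)
 *     {
 *             int ret;
 *
 *             ret = drm_atomic_helper_check(dev, state);
 *             if (ret)
 *                     return ret;
 *
 *             return foo_check_bandwidth(dev, state);
 *     }
 *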
1708  * RETURNS:
1709  * Zero for success or -errno
1710  */
1711 int drm_atomic_helper_check(struct drm_device *dev, struct drm_atomic_state *state)
1712 {
1713     int ret;
1714 
1715     ret = drm_atomic_helper_check_modeset(dev, state);
1716     if (ret) {
1717         return ret;
1718     }
1719 
1720     if (dev->mode_config.normalize_zpos) {
1721         ret = drm_atomic_normalize_zpos(dev, state);
1722         if (ret) {
1723             return ret;
1724         }
1725     }
1726 
1727     ret = drm_atomic_helper_check_planes(dev, state);
1728     if (ret) {
1729         return ret;
1730     }
1731 
1732     if (state->legacy_cursor_update) {
1733         state->async_update = !drm_atomic_helper_async_check(dev, state);
1734     }
1735 
1736     drm_self_refresh_helper_alter_state(state);
1737 
1738     return ret;
1739 }
1740 EXPORT_SYMBOL(drm_atomic_helper_check);
1741 
1742 /**
1743  * drm_atomic_helper_async_commit - commit state asynchronously
1744  * @dev: DRM device
1745  * @state: the driver state object
1746  *
1747  * This function commits a state asynchronously, i.e., not vblank
1748  * synchronized. It should be used on a state only when
1749  * drm_atomic_helper_async_check() succeeds. Async commits are not supposed to swap
1750  * the states like normal sync commits, but just do in-place changes on the
1751  * current state.
1752  *
1753  * TODO: Implement full swap instead of doing in-place changes.
1754  */
1755 void drm_atomic_helper_async_commit(struct drm_device *dev, struct drm_atomic_state *state)
1756 {
1757     struct drm_plane *plane;
1758     struct drm_plane_state *plane_state;
1759     const struct drm_plane_helper_funcs *funcs;
1760     int i;
1761 
1762     for_each_new_plane_in_state(state, plane, plane_state, i)
1763     {
1764         struct drm_framebuffer *new_fb = plane_state->fb;
1765         struct drm_framebuffer *old_fb = plane->state->fb;
1766 
1767         funcs = plane->helper_private;
1768         funcs->atomic_async_update(plane, plane_state);
1769 
1770         /*
1771          * ->atomic_async_update() is supposed to update the
1772          * plane->state in-place, make sure at least common
1773          * properties have been properly updated.
1774          */
1775         WARN_ON_ONCE(plane->state->fb != new_fb);
1776         WARN_ON_ONCE(plane->state->crtc_x != plane_state->crtc_x);
1777         WARN_ON_ONCE(plane->state->crtc_y != plane_state->crtc_y);
1778         WARN_ON_ONCE(plane->state->src_x != plane_state->src_x);
1779         WARN_ON_ONCE(plane->state->src_y != plane_state->src_y);
1780 
1781         /*
1782          * Make sure the FBs have been swapped so that cleanups in the
1783          * new_state performs a cleanup in the old FB.
1784          */
1785         WARN_ON_ONCE(plane_state->fb != old_fb);
1786     }
1787 }
1788 EXPORT_SYMBOL(drm_atomic_helper_async_commit);
1789 
1790 /**
1791  * DOC: implementing nonblocking commit
1792  *
1793  * Nonblocking atomic commits should use struct &drm_crtc_commit to sequence
1794  * different operations against each other. Locks, especially struct
1795  * &drm_modeset_lock, should not be held in worker threads or any other
1796  * asynchronous context used to commit the hardware state.
1797  *
1798  * drm_atomic_helper_commit() implements the recommended sequence for
1799  * nonblocking commits, using drm_atomic_helper_setup_commit() internally:
1800  *
1801  * 1. Run drm_atomic_helper_prepare_planes(). Since this can fail and we
1802  * need to propagate out of memory/VRAM errors to userspace, it must be called
1803  * synchronously.
1804  *
1805  * 2. Synchronize with any outstanding nonblocking commit worker threads which
1806  * might be affected by the new state update. This is handled by
1807  * drm_atomic_helper_setup_commit().
1808  *
1809  * Asynchronous workers need to have sufficient parallelism to be able to run
1810  * different atomic commits on different CRTCs in parallel. The simplest way to
1811  * achieve this is by running them on the &system_unbound_wq work queue. Note
1812  * that drivers are not required to split up atomic commits and run an
1813  * individual commit in parallel - userspace is supposed to do that if it cares.
1814  * But it might be beneficial to do that for modesets, since those necessarily
1815  * must be done as one global operation, and enabling or disabling a CRTC can
1816  * take a long time. But even that is not required.
1817  *
1818  * IMPORTANT: A &drm_atomic_state update for multiple CRTCs is sequenced
1819  * against all CRTCs therein. Therefore for atomic state updates which only flip
1820  * planes the driver must not get the struct &drm_crtc_state of unrelated CRTCs
1821  * in its atomic check code: This would prevent committing of atomic updates to
1822  * multiple CRTCs in parallel. In general, adding additional state structures
1823  * should be avoided as much as possible, because this reduces parallelism in
1824  * (nonblocking) commits, both due to locking and due to commit sequencing
1825  * requirements.
1826  *
1827  * 3. The software state is updated synchronously with
1828  * drm_atomic_helper_swap_state(). Doing this under the protection of all modeset
1829  * locks means concurrent callers never see inconsistent state. Note that commit
1830  * workers do not hold any locks; their access is only coordinated through
1831  * ordering. If workers would access state only through the pointers in the
1832  * free-standing state objects (currently not the case for any driver) then even
1833  * multiple pending commits could be in-flight at the same time.
1834  *
1835  * 4. Schedule a work item to do all subsequent steps, using the split-out
1836  * commit helpers: a) pre-plane commit b) plane commit c) post-plane commit and
1837  * then cleaning up the framebuffers after the old framebuffer is no longer
1838  * being displayed. The scheduled work should synchronize against other workers
1839  * using the &drm_crtc_commit infrastructure as needed. See
1840  * drm_atomic_helper_setup_commit() for more details.
1841  */
1842 
1843 static int stall_checks(struct drm_crtc *crtc, bool nonblock)
1844 {
1845     struct drm_crtc_commit *commit, *stall_commit = NULL;
1846     bool completed = true;
1847     int i;
1848     long ret = 0;
1849 
1850     spin_lock(&crtc->commit_lock);
1851     i = 0;
1852     list_for_each_entry(commit, &crtc->commit_list, commit_entry)
1853     {
1854         if (i == 0) {
1855             completed = try_wait_for_completion(&commit->flip_done);
1856             /* Userspace is not allowed to get ahead of the previous
1857              * commit with nonblocking ones. */
1858             if (!completed && nonblock) {
1859                 spin_unlock(&crtc->commit_lock);
1860                 return -EBUSY;
1861             }
1862         } else if (i == 1) {
1863             stall_commit = drm_crtc_commit_get(commit);
1864             break;
1865         }
1866 
1867         i++;
1868     }
1869     spin_unlock(&crtc->commit_lock);
1870 
1871     if (!stall_commit) {
1872         return 0;
1873     }
1874 
1875     /* We don't want to let commits get ahead of cleanup work too much,
1876      * stalling on 2nd previous commit means triple-buffer won't ever stall.
1877      */
1878     ret = wait_for_completion_interruptible_timeout(&stall_commit->cleanup_done, 10 * HZ);
1879     if (ret == 0) {
1880         DRM_ERROR("[CRTC:%d:%s] cleanup_done timed out\n", crtc->base.id, crtc->name);
1881     }
1882 
1883     drm_crtc_commit_put(stall_commit);
1884 
1885     return ret < 0 ? ret : 0;
1886 }
1887 
1888 static void release_crtc_commit(struct completion *completion)
1889 {
1890     struct drm_crtc_commit *commit = container_of(completion, typeof(*commit), flip_done);
1891 
1892     drm_crtc_commit_put(commit);
1893 }
1894 
1895 static void init_commit(struct drm_crtc_commit *commit, struct drm_crtc *crtc)
1896 {
1897     init_completion(&commit->flip_done);
1898     init_completion(&commit->hw_done);
1899     init_completion(&commit->cleanup_done);
1900     INIT_LIST_HEAD(&commit->commit_entry);
1901     kref_init(&commit->ref);
1902     commit->crtc = crtc;
1903 }
1904 
1905 static struct drm_crtc_commit *crtc_or_fake_commit(struct drm_atomic_state *state, struct drm_crtc *crtc)
1906 {
1907     if (crtc) {
1908         struct drm_crtc_state *new_crtc_state;
1909 
1910         new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
1911 
1912         return new_crtc_state->commit;
1913     }
1914 
1915     if (!state->fake_commit) {
1916         state->fake_commit = kzalloc(sizeof(*state->fake_commit), GFP_KERNEL);
1917         if (!state->fake_commit) {
1918             return NULL;
1919         }
1920 
1921         init_commit(state->fake_commit, NULL);
1922     }
1923 
1924     return state->fake_commit;
1925 }
1926 
1927 /**
1928  * drm_atomic_helper_setup_commit - setup possibly nonblocking commit
1929  * @state: new modeset state to be committed
1930  * @nonblock: whether nonblocking behavior is requested.
1931  *
1932  * This function prepares @state to be used by the atomic helper's support for
1933  * nonblocking commits. Drivers using the nonblocking commit infrastructure
1934  * should always call this function from their
1935  * &drm_mode_config_funcs.atomic_commit hook.
1936  *
1937  * To be able to use this support drivers need to use a few more helper
1938  * functions. drm_atomic_helper_wait_for_dependencies() must be called before
1939  * actually committing the hardware state, and for nonblocking commits this call
1940  * must be placed in the async worker. See also drm_atomic_helper_swap_state()
1941  * and its stall parameter, for when a driver's commit hooks look at the
1942  * &drm_crtc.state, &drm_plane.state or &drm_connector.state pointer directly.
1943  *
1944  * Completion of the hardware commit step must be signalled using
1945  * drm_atomic_helper_commit_hw_done(). After this step the driver is not allowed
1946  * to read or change any permanent software or hardware modeset state. The only
1947  * exception is state protected by other means than &drm_modeset_lock locks.
1948  * Only the free standing @state with pointers to the old state structures can
1949  * be inspected, e.g. to clean up old buffers using
1950  * drm_atomic_helper_cleanup_planes().
1951  *
1952  * At the very end, before cleaning up @state drivers must call
1953  * drm_atomic_helper_commit_cleanup_done().
1954  *
1955  * This is all implemented in drm_atomic_helper_commit(), giving drivers a
1956  * complete and easy-to-use default implementation of the atomic_commit() hook.
1957  *
1958  * The tracking of asynchronously executed and still pending commits is done
1959  * using the core structure &drm_crtc_commit.
1960  *
1961  * By default there's no need to clean up resources allocated by this function
1962  * explicitly: drm_atomic_state_default_clear() will take care of that
1963  * automatically.
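 *
 * For drivers not using drm_atomic_helper_commit(), the commit work paired with
 * this function ends up looking roughly like the following sketch, where
 * foo_commit_hw() is a placeholder for the driver's actual hardware
 * programming::
 *
 *     static void foo_commit_tail(struct drm_atomic_state *old_state)
 *     {
 *             struct drm_device *dev = old_state->dev;
 *
 *             drm_atomic_helper_wait_for_fences(dev, old_state, false);
 *             drm_atomic_helper_wait_for_dependencies(old_state);
 *
 *             foo_commit_hw(dev, old_state);
 *             drm_atomic_helper_commit_hw_done(old_state);
 *
 *             drm_atomic_helper_wait_for_flip_done(dev, old_state);
 *             drm_atomic_helper_cleanup_planes(dev, old_state);
 *             drm_atomic_helper_commit_cleanup_done(old_state);
 *             drm_atomic_state_put(old_state);
 *     }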
1964  *
1965  * Returns:
1966  *
1967  * 0 on success. -EBUSY when userspace schedules nonblocking commits too fast,
1968  * -ENOMEM on allocation failures and -EINTR when a signal is pending.
1969  */
1970 int drm_atomic_helper_setup_commit(struct drm_atomic_state *state, bool nonblock)
1971 {
1972     struct drm_crtc *crtc;
1973     struct drm_crtc_state *old_crtc_state, *new_crtc_state;
1974     struct drm_connector *conn;
1975     struct drm_connector_state *old_conn_state, *new_conn_state;
1976     struct drm_plane *plane;
1977     struct drm_plane_state *old_plane_state, *new_plane_state;
1978     struct drm_crtc_commit *commit;
1979     int i, ret;
1980 
1981     for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i)
1982     {
1983         commit = kzalloc(sizeof(*commit), GFP_KERNEL);
1984         if (!commit) {
1985             return -ENOMEM;
1986         }
1987 
1988         init_commit(commit, crtc);
1989 
1990         new_crtc_state->commit = commit;
1991 
1992         ret = stall_checks(crtc, nonblock);
1993         if (ret) {
1994             return ret;
1995         }
1996 
1997         /* Drivers only send out events when at least either current or
1998          * new CRTC state is active. Complete right away if everything
1999          * stays off. */
2000         if (!old_crtc_state->active && !new_crtc_state->active) {
2001             complete_all(&commit->flip_done);
2002             continue;
2003         }
2004 
2005         /* Legacy cursor updates are fully unsynced. */
2006         if (state->legacy_cursor_update) {
2007             complete_all(&commit->flip_done);
2008             continue;
2009         }
2010 
2011         if (!new_crtc_state->event) {
2012             commit->event = kzalloc(sizeof(*commit->event), GFP_KERNEL);
2013             if (!commit->event) {
2014                 return -ENOMEM;
2015             }
2016 
2017             new_crtc_state->event = commit->event;
2018         }
2019 
2020         new_crtc_state->event->base.completion = &commit->flip_done;
2021         new_crtc_state->event->base.completion_release = release_crtc_commit;
2022         drm_crtc_commit_get(commit);
2023 
2024         commit->abort_completion = true;
2025 
2026         state->crtcs[i].commit = commit;
2027         drm_crtc_commit_get(commit);
2028     }
2029 
2030     for_each_oldnew_connector_in_state(state, conn, old_conn_state, new_conn_state, i)
2031     {
2032         /* Userspace is not allowed to get ahead of the previous
2033          * commit with nonblocking ones. */
2034         if (nonblock && old_conn_state->commit && !try_wait_for_completion(&old_conn_state->commit->flip_done)) {
2035             return -EBUSY;
2036         }
2037 
2038         /* Always track connectors explicitly for e.g. link retraining. */
2039         commit = crtc_or_fake_commit(state, new_conn_state->crtc ?: old_conn_state->crtc);
2040         if (!commit) {
2041             return -ENOMEM;
2042         }
2043 
2044         new_conn_state->commit = drm_crtc_commit_get(commit);
2045     }
2046 
2047     for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i)
2048     {
2049         /* Userspace is not allowed to get ahead of the previous
2050          * commit with nonblocking ones. */
2051         if (nonblock && old_plane_state->commit && !try_wait_for_completion(&old_plane_state->commit->flip_done)) {
2052             return -EBUSY;
2053         }
2054 
2055         /* Always track planes explicitly for async pageflip support. */
2056         commit = crtc_or_fake_commit(state, new_plane_state->crtc ?: old_plane_state->crtc);
2057         if (!commit) {
2058             return -ENOMEM;
2059         }
2060 
2061         new_plane_state->commit = drm_crtc_commit_get(commit);
2062     }
2063 
2064     return 0;
2065 }
2066 EXPORT_SYMBOL(drm_atomic_helper_setup_commit);
2067 
2068 /**
2069  * drm_atomic_helper_wait_for_dependencies - wait for required preceding commits
2070  * @old_state: atomic state object with old state structures
2071  *
2072  * This function waits for all preceding commits that touch the same CRTC as
2073  * @old_state to both be committed to the hardware (as signalled by
2074  * drm_atomic_helper_commit_hw_done()) and executed by the hardware (as signalled
2075  * by calling drm_crtc_send_vblank_event() on the &drm_crtc_state.event).
2076  *
2077  * This is part of the atomic helper support for nonblocking commits, see
2078  * drm_atomic_helper_setup_commit() for an overview.
2079  */
2080 void drm_atomic_helper_wait_for_dependencies(struct drm_atomic_state *old_state)
2081 {
2082     struct drm_crtc *crtc;
2083     struct drm_crtc_state *old_crtc_state;
2084     struct drm_plane *plane;
2085     struct drm_plane_state *old_plane_state;
2086     struct drm_connector *conn;
2087     struct drm_connector_state *old_conn_state;
2088     struct drm_crtc_commit *commit;
2089     int i;
2090     long ret;
2091 
2092     for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i)
2093     {
2094         commit = old_crtc_state->commit;
2095 
2096         if (!commit) {
2097             continue;
2098         }
2099 
2100         ret = wait_for_completion_timeout(&commit->hw_done, 10 * HZ);
2101         if (ret == 0) {
2102             DRM_ERROR("[CRTC:%d:%s] hw_done timed out\n", crtc->base.id, crtc->name);
2103         }
2104 
2105         /* Currently no support for overwriting flips, hence
2106          * stall for previous one to execute completely. */
2107         ret = wait_for_completion_timeout(&commit->flip_done, 10 * HZ);
2108         if (ret == 0) {
2109             DRM_ERROR("[CRTC:%d:%s] flip_done timed out\n", crtc->base.id, crtc->name);
2110         }
2111     }
2112 
2113     for_each_old_connector_in_state(old_state, conn, old_conn_state, i)
2114     {
2115         commit = old_conn_state->commit;
2116 
2117         if (!commit) {
2118             continue;
2119         }
2120 
2121         ret = wait_for_completion_timeout(&commit->hw_done, 10 * HZ);
2122         if (ret == 0) {
2123             DRM_ERROR("[CONNECTOR:%d:%s] hw_done timed out\n", conn->base.id, conn->name);
2124         }
2125 
2126         /* Currently no support for overwriting flips, hence
2127          * stall for previous one to execute completely. */
2128         ret = wait_for_completion_timeout(&commit->flip_done, 10 * HZ);
2129         if (ret == 0) {
2130             DRM_ERROR("[CONNECTOR:%d:%s] flip_done timed out\n", conn->base.id, conn->name);
2131         }
2132     }
2133 
2134     for_each_old_plane_in_state(old_state, plane, old_plane_state, i)
2135     {
2136         commit = old_plane_state->commit;
2137 
2138         if (!commit) {
2139             continue;
2140         }
2141 
2142         ret = wait_for_completion_timeout(&commit->hw_done, 10 * HZ);
2143         if (ret == 0) {
2144             DRM_ERROR("[PLANE:%d:%s] hw_done timed out\n", plane->base.id, plane->name);
2145         }
2146 
2147         /* Currently no support for overwriting flips, hence
2148          * stall for previous one to execute completely. */
2149         ret = wait_for_completion_timeout(&commit->flip_done, 10 * HZ);
2150         if (ret == 0) {
2151             DRM_ERROR("[PLANE:%d:%s] flip_done timed out\n", plane->base.id, plane->name);
2152         }
2153     }
2154 }
2155 EXPORT_SYMBOL(drm_atomic_helper_wait_for_dependencies);
2156 
2157 /**
2158  * drm_atomic_helper_fake_vblank - fake VBLANK events if needed
2159  * @old_state: atomic state object with old state structures
2160  *
2161  * This function walks all CRTCs and fakes VBLANK events on those with
2162  * &drm_crtc_state.no_vblank set to true and &drm_crtc_state.event != NULL.
2163  * The primary use of this function is writeback connectors working in oneshot
2164  * mode and faking VBLANK events. In this case they only fake the VBLANK event
2165  * when a job is queued, and any change to the pipeline that does not touch the
2166  * connector leads to timeouts when calling
2167  * drm_atomic_helper_wait_for_vblanks() or
2168  * drm_atomic_helper_wait_for_flip_done(). In addition to writeback
2169  * connectors, this function can also fake VBLANK events for CRTCs without
2170  * VBLANK interrupt.
2171  *
2172  * This is part of the atomic helper support for nonblocking commits, see
2173  * drm_atomic_helper_setup_commit() for an overview.
2174  */
2175 void drm_atomic_helper_fake_vblank(struct drm_atomic_state *old_state)
2176 {
2177     struct drm_crtc_state *new_crtc_state;
2178     struct drm_crtc *crtc;
2179     int i;
2180 
2181     for_each_new_crtc_in_state(old_state, crtc, new_crtc_state, i)
2182     {
2183         unsigned long flags;
2184 
2185         if (!new_crtc_state->no_vblank) {
2186             continue;
2187         }
2188 
2189         spin_lock_irqsave(&old_state->dev->event_lock, flags);
2190         if (new_crtc_state->event) {
2191             drm_crtc_send_vblank_event(crtc, new_crtc_state->event);
2192             new_crtc_state->event = NULL;
2193         }
2194         spin_unlock_irqrestore(&old_state->dev->event_lock, flags);
2195     }
2196 }
2197 EXPORT_SYMBOL(drm_atomic_helper_fake_vblank);
2198 
2199 /**
2200  * drm_atomic_helper_commit_hw_done - signal completion of the hardware commit step
2201  * @old_state: atomic state object with old state structures
2202  *
2203  * This function is used to signal completion of the hardware commit step. After
2204  * this step the driver is not allowed to read or change any permanent software
2205  * or hardware modeset state. The only exception is state protected by other
2206  * means than &drm_modeset_lock locks.
2207  *
2208  * Drivers should try to postpone any expensive or delayed cleanup work after
2209  * this function is called.
2210  *
2211  * This is part of the atomic helper support for nonblocking commits, see
2212  * drm_atomic_helper_setup_commit() for an overview.
2213  */
2214 void drm_atomic_helper_commit_hw_done(struct drm_atomic_state *old_state)
2215 {
2216     struct drm_crtc *crtc;
2217     struct drm_crtc_state *old_crtc_state, *new_crtc_state;
2218     struct drm_crtc_commit *commit;
2219     int i;
2220 
2221     for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i)
2222     {
2223         commit = new_crtc_state->commit;
2224         if (!commit) {
2225             continue;
2226         }
2227 
2228         /*
2229          * copy new_crtc_state->commit to old_crtc_state->commit,
2230          * it's unsafe to touch new_crtc_state after hw_done,
2231          * but we still need to do so in cleanup_done().
2232          */
2233         if (old_crtc_state->commit) {
2234             drm_crtc_commit_put(old_crtc_state->commit);
2235         }
2236 
2237         old_crtc_state->commit = drm_crtc_commit_get(commit);
2238 
2239         /* backend must have consumed any event by now */
2240         WARN_ON(new_crtc_state->event);
2241         complete_all(&commit->hw_done);
2242     }
2243 
2244     if (old_state->fake_commit) {
2245         complete_all(&old_state->fake_commit->hw_done);
2246         complete_all(&old_state->fake_commit->flip_done);
2247     }
2248 }
2249 EXPORT_SYMBOL(drm_atomic_helper_commit_hw_done);
2250 
2251 /**
2252  * drm_atomic_helper_commit_cleanup_done - signal completion of commit
2253  * @old_state: atomic state object with old state structures
2254  *
2255  * This signals completion of the atomic update @old_state, including any
2256  * cleanup work. If used, it must be called right before calling
2257  * drm_atomic_state_put().
2258  *
2259  * This is part of the atomic helper support for nonblocking commits, see
2260  * drm_atomic_helper_setup_commit() for an overview.
2261  */
2262 void drm_atomic_helper_commit_cleanup_done(struct drm_atomic_state *old_state)
2263 {
2264     struct drm_crtc *crtc;
2265     struct drm_crtc_state *old_crtc_state;
2266     struct drm_crtc_commit *commit;
2267     int i;
2268 
2269     for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i)
2270     {
2271         commit = old_crtc_state->commit;
2272         if (WARN_ON(!commit)) {
2273             continue;
2274         }
2275 
2276         complete_all(&commit->cleanup_done);
2277         WARN_ON(!try_wait_for_completion(&commit->hw_done));
2278 
2279         spin_lock(&crtc->commit_lock);
2280         list_del(&commit->commit_entry);
2281         spin_unlock(&crtc->commit_lock);
2282     }
2283 
2284     if (old_state->fake_commit) {
2285         complete_all(&old_state->fake_commit->cleanup_done);
2286         WARN_ON(!try_wait_for_completion(&old_state->fake_commit->hw_done));
2287     }
2288 }
2289 EXPORT_SYMBOL(drm_atomic_helper_commit_cleanup_done);
2290 
2291 /**
2292  * drm_atomic_helper_prepare_planes - prepare plane resources before commit
2293  * @dev: DRM device
2294  * @state: atomic state object with new state structures
2295  *
2296  * This function prepares plane state, specifically framebuffers, for the new
2297  * configuration, by calling &drm_plane_helper_funcs.prepare_fb. If any failure
2298  * is encountered this function will call &drm_plane_helper_funcs.cleanup_fb on
2299  * any already successfully prepared framebuffer.
2300  *
2301  * Returns:
2302  * 0 on success, negative error code on failure.
2303  */
2304 int drm_atomic_helper_prepare_planes(struct drm_device *dev, struct drm_atomic_state *state)
2305 {
2306     struct drm_connector *connector;
2307     struct drm_connector_state *new_conn_state;
2308     struct drm_plane *plane;
2309     struct drm_plane_state *new_plane_state;
2310     int ret, i, j;
2311 
2312     for_each_new_connector_in_state(state, connector, new_conn_state, i)
2313     {
2314         if (!new_conn_state->writeback_job) {
2315             continue;
2316         }
2317 
2318         ret = drm_writeback_prepare_job(new_conn_state->writeback_job);
2319         if (ret < 0) {
2320             return ret;
2321         }
2322     }
2323 
2324     for_each_new_plane_in_state(state, plane, new_plane_state, i)
2325     {
2326         const struct drm_plane_helper_funcs *funcs;
2327 
2328         funcs = plane->helper_private;
2329 
2330         if (funcs->prepare_fb) {
2331             ret = funcs->prepare_fb(plane, new_plane_state);
2332             if (ret) {
2333                 goto fail;
2334             }
2335         }
2336     }
2337 
2338     return 0;
2339 
2340 fail:
2341     for_each_new_plane_in_state(state, plane, new_plane_state, j)
2342     {
2343         const struct drm_plane_helper_funcs *funcs;
2344 
2345         if (j >= i) {
2346             continue;
2347         }
2348 
2349         funcs = plane->helper_private;
2350 
2351         if (funcs->cleanup_fb) {
2352             funcs->cleanup_fb(plane, new_plane_state);
2353         }
2354     }
2355 
2356     return ret;
2357 }
2358 EXPORT_SYMBOL(drm_atomic_helper_prepare_planes);
2359 
2360 static bool plane_crtc_active(const struct drm_plane_state *state)
2361 {
2362     return state->crtc && state->crtc->state->active;
2363 }
2364 
2365 /**
2366  * drm_atomic_helper_commit_planes - commit plane state
2367  * @dev: DRM device
2368  * @old_state: atomic state object with old state structures
2369  * @flags: flags for committing plane state
2370  *
2371  * This function commits the new plane state using the plane and atomic helper
2372  * functions for planes and CRTCs. It assumes that the atomic state has already
2373  * been pushed into the relevant object state pointers, since this step can no
2374  * longer fail.
2375  *
2376  * It still requires the global state object @old_state to know which planes and
2377  * crtcs need to be updated though.
2378  *
2379  * Note that this function does all plane updates across all CRTCs in one step.
2380  * If the hardware can't support this approach look at
2381  * drm_atomic_helper_commit_planes_on_crtc() instead.
2382  *
2383  * Plane parameters can be updated by applications while the associated CRTC is
2384  * disabled. The DRM/KMS core will store the parameters in the plane state,
2385  * which will be available to the driver when the CRTC is turned on. As a result
2386  * most drivers don't need to be immediately notified of plane updates for a
2387  * disabled CRTC.
2388  *
2389  * Unless otherwise needed, drivers are advised to set the ACTIVE_ONLY flag in
2390  * @flags in order not to receive plane update notifications related to a
2391  * disabled CRTC. This avoids the need to manually ignore plane updates in
2392  * driver code when the driver and/or hardware can't or just don't need to deal
2393  * with updates on disabled CRTCs, for example when supporting runtime PM.
2394  *
2395  * Drivers may set the NO_DISABLE_AFTER_MODESET flag in @flags if the relevant
2396  * display controllers require disabling a CRTC's planes when the CRTC is
2397  * disabled. This function would skip the &drm_plane_helper_funcs.atomic_disable
2398  * call for a plane if the CRTC of the old plane state needs a modesetting
2399  * operation. Of course, the drivers need to disable the planes in their CRTC
2400  * disable callbacks since no one else would do that.
2401  *
2402  * The drm_atomic_helper_commit() default implementation doesn't set the
2403  * ACTIVE_ONLY flag to most closely match the behaviour of the legacy helpers.
2404  * This should not be copied blindly by drivers.
2405  */
2406 void drm_atomic_helper_commit_planes(struct drm_device *dev, struct drm_atomic_state *old_state, uint32_t flags)
2407 {
2408     struct drm_crtc *crtc;
2409     struct drm_crtc_state *old_crtc_state, *new_crtc_state;
2410     struct drm_plane *plane;
2411     struct drm_plane_state *old_plane_state, *new_plane_state;
2412     int i;
2413     bool active_only = flags & DRM_PLANE_COMMIT_ACTIVE_ONLY;
2414     bool no_disable = flags & DRM_PLANE_COMMIT_NO_DISABLE_AFTER_MODESET;
2415 
2416     for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i)
2417     {
2418         const struct drm_crtc_helper_funcs *funcs;
2419 
2420         funcs = crtc->helper_private;
2421 
2422         if (!funcs || !funcs->atomic_begin) {
2423             continue;
2424         }
2425 
2426         if (active_only && !new_crtc_state->active) {
2427             continue;
2428         }
2429 
2430         funcs->atomic_begin(crtc, old_crtc_state);
2431     }
2432 
2433     for_each_oldnew_plane_in_state(old_state, plane, old_plane_state, new_plane_state, i)
2434     {
2435         const struct drm_plane_helper_funcs *funcs;
2436         bool disabling;
2437 
2438         funcs = plane->helper_private;
2439 
2440         if (!funcs) {
2441             continue;
2442         }
2443 
2444         disabling = drm_atomic_plane_disabling(old_plane_state, new_plane_state);
2445 
2446         if (active_only) {
2447             /*
2448              * Skip planes related to inactive CRTCs. If the plane
2449              * is enabled use the state of the current CRTC. If the
2450              * plane is being disabled use the state of the old
2451              * CRTC to avoid skipping planes being disabled on an
2452              * active CRTC.
2453              */
2454             if (!disabling && !plane_crtc_active(new_plane_state)) {
2455                 continue;
2456             }
2457             if (disabling && !plane_crtc_active(old_plane_state)) {
2458                 continue;
2459             }
2460         }
2461 
2462         /*
2463          * Special-case disabling the plane if drivers support it.
2464          */
2465         if (disabling && funcs->atomic_disable) {
2466             struct drm_crtc_state *crtc_state;
2467 
2468             crtc_state = old_plane_state->crtc->state;
2469 
2470             if (drm_atomic_crtc_needs_modeset(crtc_state) && no_disable) {
2471                 continue;
2472             }
2473 
2474             funcs->atomic_disable(plane, old_plane_state);
2475         } else if (new_plane_state->crtc || disabling) {
2476             funcs->atomic_update(plane, old_plane_state);
2477         }
2478     }
2479 
2480     for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i)
2481     {
2482         const struct drm_crtc_helper_funcs *funcs;
2483 
2484         funcs = crtc->helper_private;
2485 
2486         if (!funcs || !funcs->atomic_flush) {
2487             continue;
2488         }
2489 
2490         if (active_only && !new_crtc_state->active) {
2491             continue;
2492         }
2493 
2494         funcs->atomic_flush(crtc, old_crtc_state);
2495     }
2496 }
2497 EXPORT_SYMBOL(drm_atomic_helper_commit_planes);
2498 
2499 /**
2500  * drm_atomic_helper_commit_planes_on_crtc - commit plane state for a CRTC
2501  * @old_crtc_state: atomic state object with the old CRTC state
2502  *
2503  * This function commits the new plane state using the plane and atomic helper
2504  * functions for planes on the specific CRTC. It assumes that the atomic state
2505  * has already been pushed into the relevant object state pointers, since this
2506  * step can no longer fail.
2507  *
2508  * This function is useful when plane updates should be done CRTC-by-CRTC
2509  * instead of one global step like drm_atomic_helper_commit_planes() does.
2510  *
2511  * This function can only be used safely when planes are not allowed to move
2512  * between different CRTCs, because this function doesn't handle inter-CRTC
2513  * dependencies. Callers need to ensure that either no such dependencies exist,
2514  * or resolve them through ordering of commit calls or through some other means.
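 *
 * A driver-specific commit implementation could use it per CRTC roughly like
 * this minimal sketch::
 *
 *     struct drm_crtc *crtc;
 *     struct drm_crtc_state *old_crtc_state;
 *     int i;
 *
 *     for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i)
 *             drm_atomic_helper_commit_planes_on_crtc(old_crtc_state);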
2515  */
2516 void drm_atomic_helper_commit_planes_on_crtc(struct drm_crtc_state *old_crtc_state)
2517 {
2518     const struct drm_crtc_helper_funcs *crtc_funcs;
2519     struct drm_crtc *crtc = old_crtc_state->crtc;
2520     struct drm_atomic_state *old_state = old_crtc_state->state;
2521     struct drm_crtc_state *new_crtc_state = drm_atomic_get_new_crtc_state(old_state, crtc);
2522     struct drm_plane *plane;
2523     unsigned plane_mask;
2524 
2525     plane_mask = old_crtc_state->plane_mask;
2526     plane_mask |= new_crtc_state->plane_mask;
2527 
2528     crtc_funcs = crtc->helper_private;
2529     if (crtc_funcs && crtc_funcs->atomic_begin) {
2530         crtc_funcs->atomic_begin(crtc, old_crtc_state);
2531     }
2532 
2533     drm_for_each_plane_mask(plane, crtc->dev, plane_mask)
2534     {
2535         struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(old_state, plane);
2536         struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(old_state, plane);
2537         const struct drm_plane_helper_funcs *plane_funcs;
2538 
2539         plane_funcs = plane->helper_private;
2540 
2541         if (!old_plane_state || !plane_funcs) {
2542             continue;
2543         }
2544 
2545         WARN_ON(new_plane_state->crtc && new_plane_state->crtc != crtc);
2546 
2547         if (drm_atomic_plane_disabling(old_plane_state, new_plane_state) && plane_funcs->atomic_disable) {
2548             plane_funcs->atomic_disable(plane, old_plane_state);
2549         } else if (new_plane_state->crtc || drm_atomic_plane_disabling(old_plane_state, new_plane_state)) {
2550             plane_funcs->atomic_update(plane, old_plane_state);
2551         }
2552     }
2553 
2554     if (crtc_funcs && crtc_funcs->atomic_flush) {
2555         crtc_funcs->atomic_flush(crtc, old_crtc_state);
2556     }
2557 }
2558 EXPORT_SYMBOL(drm_atomic_helper_commit_planes_on_crtc);
2559 
2560 /**
2561  * drm_atomic_helper_disable_planes_on_crtc - helper to disable CRTC's planes
2562  * @old_crtc_state: atomic state object with the old CRTC state
2563  * @atomic: if set, synchronize with CRTC's atomic_begin/flush hooks
2564  *
2565  * Disables all planes associated with the given CRTC. This can be
2566  * used for instance in the CRTC helper atomic_disable callback to disable
2567  * all planes.
2568  *
2569  * If the atomic-parameter is set the function calls the CRTC's
2570  * atomic_begin hook before and atomic_flush hook after disabling the
2571  * planes.
2572  *
2573  * It is a bug to call this function without having implemented the
2574  * &drm_plane_helper_funcs.atomic_disable plane hook.
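 *
 * A typical use is from a CRTC helper's atomic_disable hook; a minimal sketch,
 * with foo_crtc_hw_off() standing in for the driver's own code to stop the
 * display pipe::
 *
 *     static void foo_crtc_atomic_disable(struct drm_crtc *crtc,
 *                                         struct drm_crtc_state *old_crtc_state)
 *     {
 *             drm_atomic_helper_disable_planes_on_crtc(old_crtc_state, false);
 *             foo_crtc_hw_off(crtc);
 *     }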
2575  */
2576 void drm_atomic_helper_disable_planes_on_crtc(struct drm_crtc_state *old_crtc_state, bool atomic)
2577 {
2578     struct drm_crtc *crtc = old_crtc_state->crtc;
2579     const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
2580     struct drm_plane *plane;
2581 
2582     if (atomic && crtc_funcs && crtc_funcs->atomic_begin) {
2583         crtc_funcs->atomic_begin(crtc, NULL);
2584     }
2585 
2586     drm_atomic_crtc_state_for_each_plane(plane, old_crtc_state)
2587     {
2588         const struct drm_plane_helper_funcs *plane_funcs = plane->helper_private;
2589 
2590         if (!plane_funcs) {
2591             continue;
2592         }
2593 
2594         WARN_ON(!plane_funcs->atomic_disable);
2595         if (plane_funcs->atomic_disable) {
2596             plane_funcs->atomic_disable(plane, NULL);
2597         }
2598     }
2599 
2600     if (atomic && crtc_funcs && crtc_funcs->atomic_flush) {
2601         crtc_funcs->atomic_flush(crtc, NULL);
2602     }
2603 }
2604 EXPORT_SYMBOL(drm_atomic_helper_disable_planes_on_crtc);
2605 
2606 /**
2607  * drm_atomic_helper_cleanup_planes - cleanup plane resources after commit
2608  * @dev: DRM device
2609  * @old_state: atomic state object with old state structures
2610  *
2611  * This function cleans up plane state, specifically framebuffers, from the old
2612  * configuration. Hence the old configuration must be preserved in @old_state to
2613  * be able to call this function.
2614  *
2615  * This function must also be called on the new state when the atomic update
2616  * fails at any point after calling drm_atomic_helper_prepare_planes().
2617  */
2618 void drm_atomic_helper_cleanup_planes(struct drm_device *dev, struct drm_atomic_state *old_state)
2619 {
2620     struct drm_plane *plane;
2621     struct drm_plane_state *old_plane_state, *new_plane_state;
2622     int i;
2623 
2624     for_each_oldnew_plane_in_state(old_state, plane, old_plane_state, new_plane_state, i)
2625     {
2626         const struct drm_plane_helper_funcs *funcs;
2627         struct drm_plane_state *plane_state;
2628 
2629         /*
2630          * This might be called before swapping when commit is aborted,
2631          * in which case we have to cleanup the new state.
2632          */
2633         if (old_plane_state == plane->state) {
2634             plane_state = new_plane_state;
2635         } else {
2636             plane_state = old_plane_state;
2637         }
2638 
2639         funcs = plane->helper_private;
2640 
2641         if (funcs->cleanup_fb) {
2642             funcs->cleanup_fb(plane, plane_state);
2643         }
2644     }
2645 }
2646 EXPORT_SYMBOL(drm_atomic_helper_cleanup_planes);
2647 
2648 /**
2649  * drm_atomic_helper_commit - commit validated state object
2650  * @dev: DRM device
2651  * @state: the driver state object
2652  * @nonblock: whether nonblocking behavior is requested.
2653  *
2654  * This function commits a state object that has been pre-validated with
2655  * drm_atomic_helper_check(). This can still fail when e.g. the framebuffer reservation fails. This
2656  * function implements nonblocking commits, using
2657  * drm_atomic_helper_setup_commit() and related functions.
2658  *
2659  * Committing the actual hardware state is done through the
2660  * &drm_mode_config_helper_funcs.atomic_commit_tail callback, or its default
2661  * implementation drm_atomic_helper_commit_tail().
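 *
 * Drivers typically wire this up directly in their &drm_mode_config_funcs; a
 * minimal sketch, assuming GEM-backed framebuffers for .fb_create::
 *
 *     static const struct drm_mode_config_funcs foo_mode_config_funcs = {
 *             .fb_create = drm_gem_fb_create,
 *             .atomic_check = drm_atomic_helper_check,
 *             .atomic_commit = drm_atomic_helper_commit,
 *     };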
2662  *
2663  * RETURNS:
2664  * Zero for success or -errno.
2665  */
2666 int drm_atomic_helper_commit(struct drm_device *dev, struct drm_atomic_state *state, bool nonblock)
2667 {
2668     int ret;
2669 
2670     if (state->async_update) {
2671         ret = drm_atomic_helper_prepare_planes(dev, state);
2672         if (ret) {
2673             return ret;
2674         }
2675 
2676         drm_atomic_helper_async_commit(dev, state);
2677         drm_atomic_helper_cleanup_planes(dev, state);
2678 
2679         return 0;
2680     }
2681 
2682     ret = drm_atomic_helper_setup_commit(state, nonblock);
2683     if (ret) {
2684         return ret;
2685     }
2686 
2687     INIT_WORK(&state->commit_work, commit_work);
2688 
2689     ret = drm_atomic_helper_prepare_planes(dev, state);
2690     if (ret) {
2691         return ret;
2692     }
2693 
2694     if (!nonblock) {
2695         ret = drm_atomic_helper_wait_for_fences(dev, state, true);
2696         if (ret) {
2697             goto err;
2698         }
2699     }
2700 
2701     /*
2702      * This is the point of no return - everything below never fails except
2703      * when the hw goes bonghits. Which means we can commit the new state on
2704      * the software side now.
2705      */
2706 
2707     ret = drm_atomic_helper_swap_state(state, true);
2708     if (ret) {
2709         goto err;
2710     }
2711 
2712     /*
2713      * Everything below can be run asynchronously without the need to grab
2714      * any modeset locks at all under one condition: It must be guaranteed
2715      * that the asynchronous work has either been cancelled (if the driver
2716      * supports it, which at least requires that the framebuffers get
2717      * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
2718      * before the new state gets committed on the software side with
2719      * drm_atomic_helper_swap_state().
2720      *
2721      * This scheme allows new atomic state updates to be prepared and
2722      * checked in parallel to the asynchronous completion of the previous
2723      * update. Which is important since compositors need to figure out the
2724      * composition of the next frame right after having submitted the
2725      * current layout.
2726      *
2727      * NOTE: Commit work has multiple phases, first hardware commit, then
2728      * cleanup. We want them to overlap, hence need system_unbound_wq to
2729      * make sure work items don't artificially stall on each another.
2730      */
2731 
2732     drm_atomic_state_get(state);
2733     if (nonblock) {
2734         queue_work(system_unbound_wq, &state->commit_work);
2735     } else {
2736         commit_tail(state);
2737     }
2738 
2739     return 0;
2740 
2741 err:
2742     drm_atomic_helper_cleanup_planes(dev, state);
2743     return ret;
2744 }
2745 EXPORT_SYMBOL(drm_atomic_helper_commit);
2746 
2747 /**
2748  * drm_atomic_helper_swap_state - store atomic state into current sw state
2749  * @state: atomic state
2750  * @stall: stall for preceding commits
2751  *
2752  * This function stores the atomic state into the current state pointers in all
2753  * driver objects. It should be called after all failing steps have been done
2754  * and succeeded, but before the actual hardware state is committed.
2755  *
2756  * For cleanup and error recovery the current state for all changed objects will
2757  * be swapped into @state.
2758  *
2759  * With that sequence it fits perfectly into the plane prepare/cleanup sequence:
2760  *
2761  * 1. Call drm_atomic_helper_prepare_planes() with the staged atomic state.
2762  *
2763  * 2. Do any other steps that might fail.
2764  *
2765  * 3. Put the staged state into the current state pointers with this function.
2766  *
2767  * 4. Actually commit the hardware state.
2768  *
2769  * 5. Call drm_atomic_helper_cleanup_planes() with @state, which since step 3
2770  * contains the old state. Also do any other cleanup required with that state.
2771  *
2772  * @stall must be set when nonblocking commits for this driver directly access
2773  * the &drm_plane.state, &drm_crtc.state or &drm_connector.state pointer. With
2774  * the current atomic helpers this is almost always the case, since the helpers
2775  * don't pass the right state structures to the callbacks.
2776  *
2777  * Returns:
2778  *
2779  * 0 on success. Can return -ERESTARTSYS when @stall is true and the wait for
2780  * the previous commits has been interrupted.
2781  */
2782 int drm_atomic_helper_swap_state(struct drm_atomic_state *state, bool stall)
2783 {
2784     int i, ret;
2785     struct drm_connector *connector;
2786     struct drm_connector_state *old_conn_state, *new_conn_state;
2787     struct drm_crtc *crtc;
2788     struct drm_crtc_state *old_crtc_state, *new_crtc_state;
2789     struct drm_plane *plane;
2790     struct drm_plane_state *old_plane_state, *new_plane_state;
2791     struct drm_crtc_commit *commit;
2792     struct drm_private_obj *obj;
2793     struct drm_private_state *old_obj_state, *new_obj_state;
2794 
2795     if (stall) {
2796         /*
2797          * We have to stall for hw_done here before
2798          * drm_atomic_helper_wait_for_dependencies() because flip
2799          * depth > 1 is not yet supported by all drivers. As long as
2800          * obj->state is directly dereferenced anywhere in the drivers
2801          * atomic_commit_tail function, then it's unsafe to swap state
2802          * before drm_atomic_helper_commit_hw_done() is called.
2803          */
2804 
2805         for_each_old_crtc_in_state(state, crtc, old_crtc_state, i)
2806         {
2807             commit = old_crtc_state->commit;
2808 
2809             if (!commit) {
2810                 continue;
2811             }
2812 
2813             ret = wait_for_completion_interruptible(&commit->hw_done);
2814             if (ret) {
2815                 return ret;
2816             }
2817         }
2818 
2819         for_each_old_connector_in_state(state, connector, old_conn_state, i)
2820         {
2821             commit = old_conn_state->commit;
2822 
2823             if (!commit) {
2824                 continue;
2825             }
2826 
2827             ret = wait_for_completion_interruptible(&commit->hw_done);
2828             if (ret) {
2829                 return ret;
2830             }
2831         }
2832 
2833         for_each_old_plane_in_state(state, plane, old_plane_state, i)
2834         {
2835             commit = old_plane_state->commit;
2836 
2837             if (!commit) {
2838                 continue;
2839             }
2840 
2841             ret = wait_for_completion_interruptible(&commit->hw_done);
2842             if (ret) {
2843                 return ret;
2844             }
2845         }
2846     }
2847 
2848     for_each_oldnew_connector_in_state(state, connector, old_conn_state, new_conn_state, i)
2849     {
2850         WARN_ON(connector->state != old_conn_state);
2851 
2852         old_conn_state->state = state;
2853         new_conn_state->state = NULL;
2854 
2855         state->connectors[i].state = old_conn_state;
2856         connector->state = new_conn_state;
2857     }
2858 
2859     for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i)
2860     {
2861         WARN_ON(crtc->state != old_crtc_state);
2862 
2863         old_crtc_state->state = state;
2864         new_crtc_state->state = NULL;
2865 
2866         state->crtcs[i].state = old_crtc_state;
2867         crtc->state = new_crtc_state;
2868 
2869         if (new_crtc_state->commit) {
2870             spin_lock(&crtc->commit_lock);
2871             list_add(&new_crtc_state->commit->commit_entry, &crtc->commit_list);
2872             spin_unlock(&crtc->commit_lock);
2873 
2874             new_crtc_state->commit->event = NULL;
2875         }
2876     }
2877 
2878     for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i)
2879     {
2880         WARN_ON(plane->state != old_plane_state);
2881 
2882         old_plane_state->state = state;
2883         new_plane_state->state = NULL;
2884 
2885         state->planes[i].state = old_plane_state;
2886         plane->state = new_plane_state;
2887     }
2888 
2889     for_each_oldnew_private_obj_in_state(state, obj, old_obj_state, new_obj_state, i)
2890     {
2891         WARN_ON(obj->state != old_obj_state);
2892 
2893         old_obj_state->state = state;
2894         new_obj_state->state = NULL;
2895 
2896         state->private_objs[i].state = old_obj_state;
2897         obj->state = new_obj_state;
2898     }
2899 
2900     return 0;
2901 }
2902 EXPORT_SYMBOL(drm_atomic_helper_swap_state);
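
/*
 * Example (illustrative sketch, not part of the original file): a simplified
 * blocking commit in a driver that rolls its own commit path, following
 * steps 1-5 of the drm_atomic_helper_swap_state() kernel-doc above.
 * foo_commit_hw() is a hypothetical stand-in for the driver's hardware
 * programming; nonblocking commit tracking (drm_atomic_helper_setup_commit()
 * and friends) is intentionally left out for brevity.
 *
 *     static int foo_atomic_commit(struct drm_device *dev,
 *                                  struct drm_atomic_state *state,
 *                                  bool nonblock)
 *     {
 *         int ret;
 *
 *         ret = drm_atomic_helper_prepare_planes(dev, state);  // step 1
 *         if (ret)
 *             return ret;
 *
 *         ret = drm_atomic_helper_swap_state(state, true);     // step 3
 *         if (ret) {
 *             drm_atomic_helper_cleanup_planes(dev, state);
 *             return ret;
 *         }
 *
 *         foo_commit_hw(dev, state);                           // step 4
 *
 *         // @state now holds the old state, so this cleans up the
 *         // framebuffers that were just replaced (step 5).
 *         drm_atomic_helper_cleanup_planes(dev, state);
 *         return 0;
 *     }
 */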
2903 
2904 /**
2905  * drm_atomic_helper_update_plane - Helper for primary plane update using atomic
2906  * @plane: plane object to update
2907  * @crtc: owning CRTC of the plane
2908  * @fb: framebuffer to flip onto plane
2909  * @crtc_x: x offset of primary plane on @crtc
2910  * @crtc_y: y offset of primary plane on @crtc
2911  * @crtc_w: width of primary plane rectangle on @crtc
2912  * @crtc_h: height of primary plane rectangle on @crtc
2913  * @src_x: x offset of @fb for panning
2914  * @src_y: y offset of @fb for panning
2915  * @src_w: width of source rectangle in @fb
2916  * @src_h: height of source rectangle in @fb
2917  * @ctx: lock acquire context
2918  *
2919  * Provides a default plane update handler using the atomic driver interface.
2920  *
2921  * RETURNS:
2922  * Zero on success, error code on failure
2923  */
2924 int drm_atomic_helper_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, struct drm_framebuffer *fb,
2925                                    int crtc_x, int crtc_y, unsigned int crtc_w, unsigned int crtc_h, uint32_t src_x,
2926                                    uint32_t src_y, uint32_t src_w, uint32_t src_h, struct drm_modeset_acquire_ctx *ctx)
2927 {
2928     struct drm_atomic_state *state;
2929     struct drm_plane_state *plane_state;
2930     int ret = 0;
2931 
2932     state = drm_atomic_state_alloc(plane->dev);
2933     if (!state) {
2934         return -ENOMEM;
2935     }
2936 
2937     state->acquire_ctx = ctx;
2938     plane_state = drm_atomic_get_plane_state(state, plane);
2939     if (IS_ERR(plane_state)) {
2940         ret = PTR_ERR(plane_state);
2941         goto fail;
2942     }
2943 
2944     ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
2945     if (ret != 0) {
2946         goto fail;
2947     }
2948     drm_atomic_set_fb_for_plane(plane_state, fb);
2949     plane_state->crtc_x = crtc_x;
2950     plane_state->crtc_y = crtc_y;
2951     plane_state->crtc_w = crtc_w;
2952     plane_state->crtc_h = crtc_h;
2953     plane_state->src_x = src_x;
2954     plane_state->src_y = src_y;
2955     plane_state->src_w = src_w;
2956     plane_state->src_h = src_h;
2957 
2958     if (plane == crtc->cursor) {
2959         state->legacy_cursor_update = true;
2960     }
2961 
2962     ret = drm_atomic_commit(state);
2963 fail:
2964     drm_atomic_state_put(state);
2965     return ret;
2966 }
2967 EXPORT_SYMBOL(drm_atomic_helper_update_plane);
2968 
2969 /**
2970  * drm_atomic_helper_disable_plane - Helper for primary plane disable using atomic
2971  * @plane: plane to disable
2972  * @ctx: lock acquire context
2973  *
2974  * Provides a default plane disable handler using the atomic driver interface.
2975  *
2976  * RETURNS:
2977  * Zero on success, error code on failure
2978  */
2979 int drm_atomic_helper_disable_plane(struct drm_plane *plane, struct drm_modeset_acquire_ctx *ctx)
2980 {
2981     struct drm_atomic_state *state;
2982     struct drm_plane_state *plane_state;
2983     int ret = 0;
2984 
2985     state = drm_atomic_state_alloc(plane->dev);
2986     if (!state) {
2987         return -ENOMEM;
2988     }
2989 
2990     state->acquire_ctx = ctx;
2991     plane_state = drm_atomic_get_plane_state(state, plane);
2992     if (IS_ERR(plane_state)) {
2993         ret = PTR_ERR(plane_state);
2994         goto fail;
2995     }
2996 
2997     if (plane_state->crtc && plane_state->crtc->cursor == plane) {
2998         plane_state->state->legacy_cursor_update = true;
2999     }
3000 
3001     ret = __drm_atomic_helper_disable_plane(plane, plane_state);
3002     if (ret != 0) {
3003         goto fail;
3004     }
3005 
3006     ret = drm_atomic_commit(state);
3007 fail:
3008     drm_atomic_state_put(state);
3009     return ret;
3010 }
3011 EXPORT_SYMBOL(drm_atomic_helper_disable_plane);
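
/*
 * Example (illustrative sketch, not part of the original file): the two legacy
 * plane entry points above wired into a driver's &drm_plane_funcs, combined
 * with the default atomic state handling helpers. "foo_" is hypothetical.
 *
 *     static const struct drm_plane_funcs foo_plane_funcs = {
 *         .update_plane = drm_atomic_helper_update_plane,
 *         .disable_plane = drm_atomic_helper_disable_plane,
 *         .destroy = drm_plane_cleanup,
 *         .reset = drm_atomic_helper_plane_reset,
 *         .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
 *         .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
 *     };
 */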
3012 
3013 /**
3014  * drm_atomic_helper_set_config - set a new config from userspace
3015  * @set: mode set configuration
3016  * @ctx: lock acquisition context
3017  *
3018  * Provides a default CRTC set_config handler using the atomic driver interface.
3019  *
3020  * NOTE: For backwards compatibility with old userspace this automatically
3021  * resets the "link-status" property to GOOD, to force any link
3022  * re-training. The SETCRTC ioctl does not define whether an update needs a
3023  * full modeset or just a plane update, hence we're allowed to do
3024  * that. See also drm_connector_set_link_status_property().
3025  *
3026  * Returns:
3027  * Returns 0 on success, negative errno numbers on failure.
3028  */
3029 int drm_atomic_helper_set_config(struct drm_mode_set *set, struct drm_modeset_acquire_ctx *ctx)
3030 {
3031     struct drm_atomic_state *state;
3032     struct drm_crtc *crtc = set->crtc;
3033     int ret = 0;
3034 
3035     state = drm_atomic_state_alloc(crtc->dev);
3036     if (!state) {
3037         return -ENOMEM;
3038     }
3039 
3040     state->acquire_ctx = ctx;
3041     ret = __drm_atomic_helper_set_config(set, state);
3042     if (ret != 0) {
3043         goto fail;
3044     }
3045 
3046     ret = handle_conflicting_encoders(state, true);
3047     if (ret) {
3048         goto fail;
3049     }
3050 
3051     ret = drm_atomic_commit(state);
3052 
3053 fail:
3054     drm_atomic_state_put(state);
3055     return ret;
3056 }
3057 EXPORT_SYMBOL(drm_atomic_helper_set_config);
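
/*
 * Example (illustrative sketch, not part of the original file): a driver's
 * &drm_crtc_funcs using this helper for SETCRTC and the page flip helper
 * defined later in this file, plus the default atomic state helpers. "foo_"
 * is hypothetical.
 *
 *     static const struct drm_crtc_funcs foo_crtc_funcs = {
 *         .set_config = drm_atomic_helper_set_config,
 *         .page_flip = drm_atomic_helper_page_flip,
 *         .destroy = drm_crtc_cleanup,
 *         .reset = drm_atomic_helper_crtc_reset,
 *         .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
 *         .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
 *     };
 */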
3058 
3059 /**
3060  * drm_atomic_helper_disable_all - disable all currently active outputs
3061  * @dev: DRM device
3062  * @ctx: lock acquisition context
3063  *
3064  * Loops through all connectors, finding those that aren't turned off and then
3065  * turns them off by setting their DPMS mode to OFF and deactivating the CRTC
3066  * that they are connected to.
3067  *
3068  * This is used for example in suspend/resume to disable all currently active
3069  * functions when suspending. If you just want to shut down everything at e.g.
3070  * driver unload, look at drm_atomic_helper_shutdown().
3071  *
3072  * Note that if callers haven't already acquired all modeset locks this might
3073  * return -EDEADLK, which must be handled by calling drm_modeset_backoff().
3074  *
3075  * Returns:
3076  * 0 on success or a negative error code on failure.
3077  *
3078  * See also:
3079  * drm_atomic_helper_suspend(), drm_atomic_helper_resume() and
3080  * drm_atomic_helper_shutdown().
3081  */
3082 int drm_atomic_helper_disable_all(struct drm_device *dev, struct drm_modeset_acquire_ctx *ctx)
3083 {
3084     struct drm_atomic_state *state;
3085     struct drm_connector_state *conn_state;
3086     struct drm_connector *conn;
3087     struct drm_plane_state *plane_state;
3088     struct drm_plane *plane;
3089     struct drm_crtc_state *crtc_state;
3090     struct drm_crtc *crtc;
3091     int ret, i;
3092 
3093     state = drm_atomic_state_alloc(dev);
3094     if (!state) {
3095         return -ENOMEM;
3096     }
3097 
3098     state->acquire_ctx = ctx;
3099 
3100     drm_for_each_crtc(crtc, dev)
3101     {
3102         crtc_state = drm_atomic_get_crtc_state(state, crtc);
3103         if (IS_ERR(crtc_state)) {
3104             ret = PTR_ERR(crtc_state);
3105             goto free;
3106         }
3107 
3108         crtc_state->active = false;
3109 
3110         ret = drm_atomic_set_mode_prop_for_crtc(crtc_state, NULL);
3111         if (ret < 0) {
3112             goto free;
3113         }
3114 
3115         ret = drm_atomic_add_affected_planes(state, crtc);
3116         if (ret < 0) {
3117             goto free;
3118         }
3119 
3120         ret = drm_atomic_add_affected_connectors(state, crtc);
3121         if (ret < 0) {
3122             goto free;
3123         }
3124     }
3125 
3126     for_each_new_connector_in_state(state, conn, conn_state, i)
3127     {
3128         ret = drm_atomic_set_crtc_for_connector(conn_state, NULL);
3129         if (ret < 0) {
3130             goto free;
3131         }
3132     }
3133 
3134     for_each_new_plane_in_state(state, plane, plane_state, i)
3135     {
3136         ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
3137         if (ret < 0) {
3138             goto free;
3139         }
3140 
3141         drm_atomic_set_fb_for_plane(plane_state, NULL);
3142     }
3143 
3144     ret = drm_atomic_commit(state);
3145 free:
3146     drm_atomic_state_put(state);
3147     return ret;
3148 }
3149 EXPORT_SYMBOL(drm_atomic_helper_disable_all);
3150 
3151 /**
3152  * drm_atomic_helper_shutdown - shut down all CRTCs
3153  * @dev: DRM device
3154  *
3155  * This shuts down all CRTCs, which is useful for driver unloading. Shutdown on
3156  * suspend should instead be handled with drm_atomic_helper_suspend(), since
3157  * that also takes a snapshot of the modeset state to be restored on resume.
3158  *
3159  * This is just a convenience wrapper around drm_atomic_helper_disable_all(),
3160  * and it is the atomic version of drm_crtc_force_disable_all().
3161  */
3162 void drm_atomic_helper_shutdown(struct drm_device *dev)
3163 {
3164     struct drm_modeset_acquire_ctx ctx;
3165     int ret;
3166 
3167     DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, ret);
3168 
3169     ret = drm_atomic_helper_disable_all(dev, &ctx);
3170     if (ret) {
3171         DRM_ERROR("Disabling all crtc's during unload failed with %i\n", ret);
3172     }
3173 
3174     DRM_MODESET_LOCK_ALL_END(dev, ctx, ret);
3175 }
3176 EXPORT_SYMBOL(drm_atomic_helper_shutdown);
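
/*
 * Example (illustrative sketch, not part of the original file): a typical
 * driver unload path, with drm_atomic_helper_shutdown() called after the
 * device has been unregistered so userspace can no longer submit new state.
 * foo_unload() is hypothetical.
 *
 *     static void foo_unload(struct drm_device *drm)
 *     {
 *         drm_dev_unregister(drm);
 *         drm_atomic_helper_shutdown(drm);
 *         drm_dev_put(drm);
 *     }
 */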
3177 
3178 /**
3179  * drm_atomic_helper_duplicate_state - duplicate an atomic state object
3180  * @dev: DRM device
3181  * @ctx: lock acquisition context
3182  *
3183  * Makes a copy of the current atomic state by looping over all objects and
3184  * duplicating their respective states. This is used for example by suspend/
3185  * resume support code to save the state prior to suspend such that it can
3186  * be restored upon resume.
3187  *
3188  * Note that this treats atomic state as persistent between save and restore.
3189  * Drivers must make sure that this is possible and won't result in confusion
3190  * or erroneous behaviour.
3191  *
3192  * Note that if callers haven't already acquired all modeset locks this might
3193  * return -EDEADLK, which must be handled by calling drm_modeset_backoff().
3194  *
3195  * Returns:
3196  * A pointer to the copy of the atomic state object on success or an
3197  * ERR_PTR()-encoded error code on failure.
3198  *
3199  * See also:
3200  * drm_atomic_helper_suspend(), drm_atomic_helper_resume()
3201  */
3202 struct drm_atomic_state *drm_atomic_helper_duplicate_state(struct drm_device *dev, struct drm_modeset_acquire_ctx *ctx)
3203 {
3204     struct drm_atomic_state *state;
3205     struct drm_connector *conn;
3206     struct drm_connector_list_iter conn_iter;
3207     struct drm_plane *plane;
3208     struct drm_crtc *crtc;
3209     int err = 0;
3210 
3211     state = drm_atomic_state_alloc(dev);
3212     if (!state) {
3213         return ERR_PTR(-ENOMEM);
3214     }
3215 
3216     state->acquire_ctx = ctx;
3217     state->duplicated = true;
3218 
3219     drm_for_each_crtc(crtc, dev)
3220     {
3221         struct drm_crtc_state *crtc_state;
3222 
3223         crtc_state = drm_atomic_get_crtc_state(state, crtc);
3224         if (IS_ERR(crtc_state)) {
3225             err = PTR_ERR(crtc_state);
3226             goto free;
3227         }
3228     }
3229 
3230     drm_for_each_plane(plane, dev)
3231     {
3232         struct drm_plane_state *plane_state;
3233 
3234         plane_state = drm_atomic_get_plane_state(state, plane);
3235         if (IS_ERR(plane_state)) {
3236             err = PTR_ERR(plane_state);
3237             goto free;
3238         }
3239     }
3240 
3241     drm_connector_list_iter_begin(dev, &conn_iter);
3242     drm_for_each_connector_iter(conn, &conn_iter)
3243     {
3244         struct drm_connector_state *conn_state;
3245 
3246         conn_state = drm_atomic_get_connector_state(state, conn);
3247         if (IS_ERR(conn_state)) {
3248             err = PTR_ERR(conn_state);
3249             drm_connector_list_iter_end(&conn_iter);
3250             goto free;
3251         }
3252     }
3253     drm_connector_list_iter_end(&conn_iter);
3254 
3255     /* clear the acquire context so that it isn't accidentally reused */
3256     state->acquire_ctx = NULL;
3257 
3258 free:
3259     if (err < 0) {
3260         drm_atomic_state_put(state);
3261         state = ERR_PTR(err);
3262     }
3263 
3264     return state;
3265 }
3266 EXPORT_SYMBOL(drm_atomic_helper_duplicate_state);
3267 
3268 /**
3269  * drm_atomic_helper_suspend - subsystem-level suspend helper
3270  * @dev: DRM device
3271  *
3272  * Duplicates the current atomic state, disables all active outputs and then
3273  * returns a pointer to the original atomic state to the caller. Drivers can
3274  * pass this pointer to the drm_atomic_helper_resume() helper upon resume to
3275  * restore the output configuration that was active at the time the system
3276  * entered suspend.
3277  *
3278  * Note that it is potentially unsafe to use this. The atomic state object
3279  * returned by this function is assumed to be persistent. Drivers must ensure
3280  * that this holds true. Before calling this function, drivers must make sure
3281  * to suspend fbdev emulation so that nothing can be using the device.
3282  *
3283  * Returns:
3284  * A pointer to a copy of the state before suspend on success or an ERR_PTR()-
3285  * encoded error code on failure. Drivers should store the returned atomic
3286  * state object and pass it to the drm_atomic_helper_resume() helper upon
3287  * resume.
3288  *
3289  * See also:
3290  * drm_atomic_helper_duplicate_state(), drm_atomic_helper_disable_all(),
3291  * drm_atomic_helper_resume(), drm_atomic_helper_commit_duplicated_state()
3292  */
3293 struct drm_atomic_state *drm_atomic_helper_suspend(struct drm_device *dev)
3294 {
3295     struct drm_modeset_acquire_ctx ctx;
3296     struct drm_atomic_state *state;
3297     int err;
3298 
3299     /* This can never be returned, but it makes the compiler happy */
3300     state = ERR_PTR(-EINVAL);
3301 
3302     DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, err);
3303 
3304     state = drm_atomic_helper_duplicate_state(dev, &ctx);
3305     if (IS_ERR(state)) {
3306         goto unlock;
3307     }
3308 
3309     err = drm_atomic_helper_disable_all(dev, &ctx);
3310     if (err < 0) {
3311         drm_atomic_state_put(state);
3312         state = ERR_PTR(err);
3313         goto unlock;
3314     }
3315 
3316 unlock:
3317     DRM_MODESET_LOCK_ALL_END(dev, ctx, err);
3318     if (err) {
3319         return ERR_PTR(err);
3320     }
3321 
3322     return state;
3323 }
3324 EXPORT_SYMBOL(drm_atomic_helper_suspend);
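
/*
 * Example (illustrative sketch, not part of the original file): a system
 * suspend hook storing the duplicated state for the matching resume hook
 * further below. "struct foo_device", to_foo() and the suspend_state member
 * are hypothetical driver-private details.
 *
 *     static int foo_pm_suspend(struct device *dev)
 *     {
 *         struct drm_device *drm = dev_get_drvdata(dev);
 *         struct drm_atomic_state *state;
 *
 *         state = drm_atomic_helper_suspend(drm);
 *         if (IS_ERR(state))
 *             return PTR_ERR(state);
 *
 *         to_foo(drm)->suspend_state = state;
 *         return 0;
 *     }
 */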
3325 
3326 /**
3327  * drm_atomic_helper_commit_duplicated_state - commit duplicated state
3328  * @state: duplicated atomic state to commit
3329  * @ctx: pointer to acquire_ctx to use for commit.
3330  *
3331  * The state returned by drm_atomic_helper_duplicate_state() and
3332  * drm_atomic_helper_suspend() is partially invalid, and needs to
3333  * be fixed up before commit.
3334  *
3335  * Returns:
3336  * 0 on success or a negative error code on failure.
3337  *
3338  * See also:
3339  * drm_atomic_helper_suspend()
3340  */
3341 int drm_atomic_helper_commit_duplicated_state(struct drm_atomic_state *state, struct drm_modeset_acquire_ctx *ctx)
3342 {
3343     int i, ret;
3344     struct drm_plane *plane;
3345     struct drm_plane_state *new_plane_state;
3346     struct drm_connector *connector;
3347     struct drm_connector_state *new_conn_state;
3348     struct drm_crtc *crtc;
3349     struct drm_crtc_state *new_crtc_state;
3350 
3351     state->acquire_ctx = ctx;
3352 
3353     for_each_new_plane_in_state(state, plane, new_plane_state, i) state->planes[i].old_state = plane->state;
3354 
3355     for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) state->crtcs[i].old_state = crtc->state;
3356 
3357     for_each_new_connector_in_state(state, connector, new_conn_state, i)
3358         state->connectors[i].old_state = connector->state;
3359 
3360     ret = drm_atomic_commit(state);
3361 
3362     state->acquire_ctx = NULL;
3363 
3364     return ret;
3365 }
3366 EXPORT_SYMBOL(drm_atomic_helper_commit_duplicated_state);
3367 
3368 /**
3369  * drm_atomic_helper_resume - subsystem-level resume helper
3370  * @dev: DRM device
3371  * @state: atomic state to resume to
3372  *
3373  * Calls drm_mode_config_reset() to synchronize hardware and software states,
3374  * grabs all modeset locks and commits the atomic state object. This can be
3375  * used in conjunction with the drm_atomic_helper_suspend() helper to
3376  * implement suspend/resume for drivers that support atomic mode-setting.
3377  *
3378  * Returns:
3379  * 0 on success or a negative error code on failure.
3380  *
3381  * See also:
3382  * drm_atomic_helper_suspend()
3383  */
3384 int drm_atomic_helper_resume(struct drm_device *dev, struct drm_atomic_state *state)
3385 {
3386     struct drm_modeset_acquire_ctx ctx;
3387     int err;
3388 
3389     drm_mode_config_reset(dev);
3390 
3391     DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, err);
3392 
3393     err = drm_atomic_helper_commit_duplicated_state(state, &ctx);
3394 
3395     DRM_MODESET_LOCK_ALL_END(dev, ctx, err);
3396     drm_atomic_state_put(state);
3397 
3398     return err;
3399 }
3400 EXPORT_SYMBOL(drm_atomic_helper_resume);
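
/*
 * Example (illustrative sketch, not part of the original file): the resume
 * counterpart to the suspend sketch above. Note that
 * drm_atomic_helper_resume() drops the reference on the passed-in state, so
 * the driver must not put it again. to_foo() and suspend_state are
 * hypothetical.
 *
 *     static int foo_pm_resume(struct device *dev)
 *     {
 *         struct drm_device *drm = dev_get_drvdata(dev);
 *
 *         return drm_atomic_helper_resume(drm, to_foo(drm)->suspend_state);
 *     }
 */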
3401 
3402 static int page_flip_common(struct drm_atomic_state *state, struct drm_crtc *crtc, struct drm_framebuffer *fb,
3403                             struct drm_pending_vblank_event *event, uint32_t flags)
3404 {
3405     struct drm_plane *plane = crtc->primary;
3406     struct drm_plane_state *plane_state;
3407     struct drm_crtc_state *crtc_state;
3408     int ret = 0;
3409 
3410     crtc_state = drm_atomic_get_crtc_state(state, crtc);
3411     if (IS_ERR(crtc_state)) {
3412         return PTR_ERR(crtc_state);
3413     }
3414 
3415     crtc_state->event = event;
3416     crtc_state->async_flip = flags & DRM_MODE_PAGE_FLIP_ASYNC;
3417 
3418     plane_state = drm_atomic_get_plane_state(state, plane);
3419     if (IS_ERR(plane_state)) {
3420         return PTR_ERR(plane_state);
3421     }
3422 
3423     ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
3424     if (ret != 0) {
3425         return ret;
3426     }
3427     drm_atomic_set_fb_for_plane(plane_state, fb);
3428 
3429     /* Make sure we don't accidentally do a full modeset. */
3430     state->allow_modeset = false;
3431     if (!crtc_state->active) {
3432         DRM_DEBUG_ATOMIC("[CRTC:%d:%s] disabled, rejecting legacy flip\n", crtc->base.id, crtc->name);
3433         return -EINVAL;
3434     }
3435 
3436     return ret;
3437 }
3438 
3439 /**
3440  * drm_atomic_helper_page_flip - execute a legacy page flip
3441  * @crtc: DRM CRTC
3442  * @fb: DRM framebuffer
3443  * @event: optional DRM event to signal upon completion
3444  * @flags: flip flags for non-vblank sync'ed updates
3445  * @ctx: lock acquisition context
3446  *
3447  * Provides a default &drm_crtc_funcs.page_flip implementation
3448  * using the atomic driver interface.
3449  *
3450  * Returns:
3451  * Returns 0 on success, negative errno numbers on failure.
3452  *
3453  * See also:
3454  * drm_atomic_helper_page_flip_target()
3455  */
3456 int drm_atomic_helper_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
3457                                 struct drm_pending_vblank_event *event, uint32_t flags,
3458                                 struct drm_modeset_acquire_ctx *ctx)
3459 {
3460     struct drm_plane *plane = crtc->primary;
3461     struct drm_atomic_state *state;
3462     int ret = 0;
3463 
3464     state = drm_atomic_state_alloc(plane->dev);
3465     if (!state) {
3466         return -ENOMEM;
3467     }
3468 
3469     state->acquire_ctx = ctx;
3470 
3471     ret = page_flip_common(state, crtc, fb, event, flags);
3472     if (ret != 0) {
3473         goto fail;
3474     }
3475 
3476     ret = drm_atomic_nonblocking_commit(state);
3477 fail:
3478     drm_atomic_state_put(state);
3479     return ret;
3480 }
3481 EXPORT_SYMBOL(drm_atomic_helper_page_flip);
3482 
3483 /**
3484  * drm_atomic_helper_page_flip_target - do page flip on target vblank period.
3485  * @crtc: DRM CRTC
3486  * @fb: DRM framebuffer
3487  * @event: optional DRM event to signal upon completion
3488  * @flags: flip flags for non-vblank sync'ed updates
3489  * @target: target vblank period in which the flip should take effect
3490  * @ctx: lock acquisition context
3491  *
3492  * Provides a default &drm_crtc_funcs.page_flip_target implementation.
3493  * Similar to drm_atomic_helper_page_flip() with extra parameter to specify
3494  * target vblank period to flip.
3495  *
3496  * Returns:
3497  * Returns 0 on success, negative errno numbers on failure.
3498  */
3499 int drm_atomic_helper_page_flip_target(struct drm_crtc *crtc, struct drm_framebuffer *fb,
3500                                        struct drm_pending_vblank_event *event, uint32_t flags, uint32_t target,
3501                                        struct drm_modeset_acquire_ctx *ctx)
3502 {
3503     struct drm_plane *plane = crtc->primary;
3504     struct drm_atomic_state *state;
3505     struct drm_crtc_state *crtc_state;
3506     int ret = 0;
3507 
3508     state = drm_atomic_state_alloc(plane->dev);
3509     if (!state) {
3510         return -ENOMEM;
3511     }
3512 
3513     state->acquire_ctx = ctx;
3514 
3515     ret = page_flip_common(state, crtc, fb, event, flags);
3516     if (ret != 0) {
3517         goto fail;
3518     }
3519 
3520     crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
3521     if (WARN_ON(!crtc_state)) {
3522         ret = -EINVAL;
3523         goto fail;
3524     }
3525     crtc_state->target_vblank = target;
3526 
3527     ret = drm_atomic_nonblocking_commit(state);
3528 fail:
3529     drm_atomic_state_put(state);
3530     return ret;
3531 }
3532 EXPORT_SYMBOL(drm_atomic_helper_page_flip_target);
3533 
3534 /**
3535  * drm_atomic_helper_legacy_gamma_set - set the legacy gamma correction table
3536  * @crtc: CRTC object
3537  * @red: red correction table
3538  * @green: green correction table
3539  * @blue: blue correction table
3540  * @size: size of the tables
3541  * @ctx: lock acquire context
3542  *
3543  * Implements support for legacy gamma correction table for drivers
3544  * that support color management through the DEGAMMA_LUT/GAMMA_LUT
3545  * properties. See drm_crtc_enable_color_mgmt() and the containing chapter for
3546  * how the atomic color management and gamma tables work.
3547  */
3548 int drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, u16 *blue, uint32_t size,
3549                                        struct drm_modeset_acquire_ctx *ctx)
3550 {
3551     struct drm_device *dev = crtc->dev;
3552     struct drm_atomic_state *state;
3553     struct drm_crtc_state *crtc_state;
3554     struct drm_property_blob *blob = NULL;
3555     struct drm_color_lut *blob_data;
3556     int i, ret = 0;
3557     bool replaced;
3558 
3559     state = drm_atomic_state_alloc(crtc->dev);
3560     if (!state) {
3561         return -ENOMEM;
3562     }
3563 
3564     blob = drm_property_create_blob(dev, sizeof(struct drm_color_lut) * size, NULL);
3565     if (IS_ERR(blob)) {
3566         ret = PTR_ERR(blob);
3567         blob = NULL;
3568         goto fail;
3569     }
3570 
3571     /* Prepare GAMMA_LUT with the legacy values. */
3572     blob_data = blob->data;
3573     for (i = 0; i < size; i++) {
3574         blob_data[i].red = red[i];
3575         blob_data[i].green = green[i];
3576         blob_data[i].blue = blue[i];
3577     }
3578 
3579     state->acquire_ctx = ctx;
3580     crtc_state = drm_atomic_get_crtc_state(state, crtc);
3581     if (IS_ERR(crtc_state)) {
3582         ret = PTR_ERR(crtc_state);
3583         goto fail;
3584     }
3585 
3586     /* Reset DEGAMMA_LUT and CTM properties. */
3587     replaced = drm_property_replace_blob(&crtc_state->degamma_lut, NULL);
3588     replaced |= drm_property_replace_blob(&crtc_state->ctm, NULL);
3589     replaced |= drm_property_replace_blob(&crtc_state->gamma_lut, blob);
3590 #if defined(CONFIG_ROCKCHIP_DRM_CUBIC_LUT)
3591     replaced |= drm_property_replace_blob(&crtc_state->cubic_lut, NULL);
3592 #endif
3593     crtc_state->color_mgmt_changed |= replaced;
3594 
3595     ret = drm_atomic_commit(state);
3596 
3597 fail:
3598     drm_atomic_state_put(state);
3599     drm_property_blob_put(blob);
3600     return ret;
3601 }
3602 EXPORT_SYMBOL(drm_atomic_helper_legacy_gamma_set);
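
/*
 * Example (illustrative sketch, not part of the original file): for this
 * helper to be useful the CRTC must expose the GAMMA_LUT property and a
 * legacy gamma table of matching size. A driver would typically do something
 * like the following during CRTC init (256 entries is just an assumed size)
 * and point &drm_crtc_funcs.gamma_set at drm_atomic_helper_legacy_gamma_set.
 *
 *     static int foo_crtc_init_color_mgmt(struct drm_crtc *crtc)
 *     {
 *         // No DEGAMMA_LUT, no CTM, 256-entry GAMMA_LUT.
 *         drm_crtc_enable_color_mgmt(crtc, 0, false, 256);
 *         return drm_mode_crtc_set_gamma_size(crtc, 256);
 *     }
 */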
3603 
3604 /**
3605  * drm_atomic_helper_bridge_propagate_bus_fmt() - Propagate output format to
3606  *                          the input end of a bridge
3607  * @bridge: bridge control structure
3608  * @bridge_state: new bridge state
3609  * @crtc_state: new CRTC state
3610  * @conn_state: new connector state
3611  * @output_fmt: tested output bus format
3612  * @num_input_fmts: will contain the size of the returned array
3613  *
3614  * This helper is a pluggable implementation of the
3615  * &drm_bridge_funcs.atomic_get_input_bus_fmts operation for bridges that don't
3616  * modify the bus configuration between their input and their output. It
3617  * returns an array of input formats with a single element set to @output_fmt.
3618  *
3619  * RETURNS:
3620  * a valid format array of size @num_input_fmts, or NULL if the allocation
3621  * failed
3622  */
3623 u32 *drm_atomic_helper_bridge_propagate_bus_fmt(struct drm_bridge *bridge, struct drm_bridge_state *bridge_state,
3624                                                 struct drm_crtc_state *crtc_state,
3625                                                 struct drm_connector_state *conn_state, u32 output_fmt,
3626                                                 unsigned int *num_input_fmts)
3627 {
3628     u32 *input_fmts;
3629 
3630     input_fmts = kzalloc(sizeof(*input_fmts), GFP_KERNEL);
3631     if (!input_fmts) {
3632         *num_input_fmts = 0;
3633         return NULL;
3634     }
3635 
3636     *num_input_fmts = 1;
3637     input_fmts[0] = output_fmt;
3638     return input_fmts;
3639 }
3640 EXPORT_SYMBOL(drm_atomic_helper_bridge_propagate_bus_fmt);
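
/*
 * Example (illustrative sketch, not part of the original file): a
 * pass-through bridge using this helper for bus format negotiation, assuming
 * the tree also provides the bridge atomic state helpers from
 * drm_atomic_state_helper.c. "foo_" is hypothetical.
 *
 *     static const struct drm_bridge_funcs foo_bridge_funcs = {
 *         .atomic_reset = drm_atomic_helper_bridge_reset,
 *         .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
 *         .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
 *         .atomic_get_input_bus_fmts = drm_atomic_helper_bridge_propagate_bus_fmt,
 *     };
 */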
3641