/*
 * Copyright (c) 2014 Samsung Electronics Co., Ltd
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <linux/err.h>
#include <linux/media-bus-format.h>
#include <linux/module.h>
#include <linux/mutex.h>

#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_encoder.h>
#include <drm/drm_file.h>
#include <drm/drm_of.h>
#include <drm/drm_print.h>

#include "drm_crtc_internal.h"

/**
 * DOC: overview
 *
 * &struct drm_bridge represents a device that hangs on to an encoder. These are
 * handy when a regular &drm_encoder entity isn't enough to represent the entire
 * encoder chain.
 *
 * A bridge is always attached to a single &drm_encoder at a time, but can be
 * either connected to it directly, or through a chain of bridges::
 *
 *     [ CRTC ---> ] Encoder ---> Bridge A ---> Bridge B
 *
 * Here, the output of the encoder feeds to bridge A, and that in turn feeds to
 * bridge B. Bridge chains can be arbitrarily long, and shall be fully linear:
 * Chaining multiple bridges to the output of a bridge, or the same bridge to
 * the output of different bridges, is not supported.
 *
 * &drm_bridge, like &drm_panel, isn't a &drm_mode_object entity like planes,
 * CRTCs, encoders or connectors, and is hence not visible to userspace. Bridges
 * just provide additional hooks to get the desired output at the end of the
 * encoder chain.
 */

/**
 * DOC: display driver integration
 *
 * Display drivers are responsible for linking encoders with the first bridge
 * in the chain. This is done by acquiring the appropriate bridge with
 * devm_drm_of_get_bridge(). Once acquired, the bridge shall be attached to the
 * encoder with a call to drm_bridge_attach().
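 *
 * A minimal sketch of this pattern in a display driver's encoder setup path
 * (the priv->encoder pointer and the port/endpoint indices are illustrative)::
 *
 *     struct drm_bridge *bridge;
 *     int ret;
 *
 *     bridge = devm_drm_of_get_bridge(dev, dev->of_node, 0, 0);
 *     if (IS_ERR(bridge))
 *             return PTR_ERR(bridge);
 *
 *     ret = drm_bridge_attach(&priv->encoder, bridge, NULL, 0);
 *     if (ret)
 *             return ret;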
 *
 * Bridges are responsible for linking themselves with the next bridge in the
 * chain, if any. This is done the same way as for encoders, with the call to
 * drm_bridge_attach() occurring in the &drm_bridge_funcs.attach operation.
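 *
 * In a bridge driver, the same call is made from the attach hook, this time
 * with the bridge itself as the previous element. This is only a sketch; the
 * foo names are illustrative and next_bridge is assumed to have been looked up
 * at probe time::
 *
 *     static int foo_bridge_attach(struct drm_bridge *bridge,
 *                                  enum drm_bridge_attach_flags flags)
 *     {
 *             struct foo *foo = bridge_to_foo(bridge);
 *
 *             return drm_bridge_attach(bridge->encoder, foo->next_bridge,
 *                                      bridge, flags);
 *     }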
 *
 * Once these links are created, the bridges can participate along with encoder
 * functions to perform mode validation and fixup (through
 * drm_bridge_chain_mode_valid() and drm_atomic_bridge_chain_check()), mode
 * setting (through drm_bridge_chain_mode_set()), enable (through
 * drm_atomic_bridge_chain_pre_enable() and drm_atomic_bridge_chain_enable())
 * and disable (through drm_atomic_bridge_chain_disable() and
 * drm_atomic_bridge_chain_post_disable()). Those functions call the
 * corresponding operations provided in &drm_bridge_funcs in sequence for all
 * bridges in the chain.
 *
 * For display drivers that use the atomic helpers
 * drm_atomic_helper_check_modeset(),
 * drm_atomic_helper_commit_modeset_enables() and
 * drm_atomic_helper_commit_modeset_disables() (either directly in hand-rolled
 * commit check and commit tail handlers, or through the higher-level
 * drm_atomic_helper_check() and drm_atomic_helper_commit_tail() or
 * drm_atomic_helper_commit_tail_rpm() helpers), this is done transparently and
 * requires no intervention from the driver. For other drivers, the relevant
 * DRM bridge chain functions shall be called manually.
 *
 * Bridges also participate in implementing the &drm_connector at the end of
 * the bridge chain. Display drivers may use the drm_bridge_connector_init()
 * helper to create the &drm_connector, or implement it manually on top of the
 * connector-related operations exposed by the bridge (see the overview
 * documentation of bridge operations for more details).
 */

/**
 * DOC: special care dsi
 *
 * The interaction between the bridges and other frameworks involved in
 * the probing of the upstream driver and the bridge driver can be
 * challenging. Indeed, there are multiple cases that need to be
 * considered:
 *
 * - The upstream driver doesn't use the component framework and isn't a
 *   MIPI-DSI host. In this case, the bridge driver will probe at some
 *   point and the upstream driver should keep deferring its own probe by
 *   returning -EPROBE_DEFER as long as the bridge driver hasn't probed.
 *
 * - The upstream driver doesn't use the component framework, but is a
 *   MIPI-DSI host. The bridge device is controlled through MIPI-DCS
 *   commands. In this case, the bridge device is a child of the
 *   display device and, when it probes, it is guaranteed that the display
 *   device (and MIPI-DSI host) is present. The upstream driver is
 *   guaranteed that the bridge driver is bound between the
 *   &mipi_dsi_host_ops.attach and &mipi_dsi_host_ops.detach operations.
 *   Therefore, it must run mipi_dsi_host_register() in its probe
 *   function, and then run drm_bridge_attach() in its
 *   &mipi_dsi_host_ops.attach hook.
 *
 * - The upstream driver uses the component framework and is a MIPI-DSI
 *   host. The bridge device is controlled through MIPI-DCS commands.
 *   This is the same situation as above, except that the upstream driver
 *   can run mipi_dsi_host_register() in either its probe or bind hook.
 *
 * - The upstream driver uses the component framework and is a MIPI-DSI
 *   host. The bridge device is controlled over a separate bus (such as
 *   I2C). In this case, there's no correlation between the probe
 *   of the bridge and upstream drivers, so care must be taken to avoid
 *   an endless -EPROBE_DEFER loop, with each driver waiting for the
 *   other to probe.
 *
 * The ideal pattern to cover the last item (and all the others in the
 * MIPI-DSI host driver case) is to split the operations like this:
 *
 * - The MIPI-DSI host driver must run mipi_dsi_host_register() in its
 *   probe hook. This makes sure that the MIPI-DSI host sticks around,
 *   and that the driver's bind hook can be called.
 *
 * - In its probe hook, the bridge driver must try to find its MIPI-DSI
 *   host, register as a MIPI-DSI device and attach the MIPI-DSI device
 *   to its host. The bridge driver is now functional.
 *
 * - In its &mipi_dsi_host_ops.attach hook, the MIPI-DSI host can
 *   now add its component. Its bind hook will now be called and, since
 *   the bridge driver is attached and registered, we can now look it up
 *   and attach it.
 *
 * At this point, we're certain that both the upstream driver and
 * the bridge driver are functional and we can't have a deadlock-like
 * situation when probing.
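 *
 * The host side of this pattern could look roughly as follows. This is only
 * a sketch; the foo names and the use of the component framework here are
 * illustrative assumptions, not a requirement of the bridge API::
 *
 *     static int foo_dsi_probe(struct platform_device *pdev)
 *     {
 *             ...
 *             return mipi_dsi_host_register(&foo->dsi_host);
 *     }
 *
 *     static int foo_dsi_host_attach(struct mipi_dsi_host *host,
 *                                    struct mipi_dsi_device *device)
 *     {
 *             ...
 *             return component_add(host->dev, &foo_component_ops);
 *     }
 *
 * The bridge is then looked up (for instance with devm_drm_of_get_bridge())
 * and attached from the bind hook, once both drivers are known to be
 * functional.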
 */

/**
 * DOC: dsi bridge operations
 *
 * DSI host interfaces are expected to be implemented as bridges rather than
 * encoders; however, there are a few aspects of their operation that need to
 * be defined in order to provide a consistent interface.
 *
 * A DSI host should keep the PHY powered down until the pre_enable operation is
 * called. All lanes are in an undefined idle state up to this point, and it
 * must not be assumed that they are in LP-11.
 * pre_enable should initialise the PHY, set the data lanes to LP-11, and the
 * clock lane to either LP-11 or HS depending on the mode_flag
 * %MIPI_DSI_CLOCK_NON_CONTINUOUS.
 *
 * Ordinarily the pre_enable of the downstream bridge (the DSI peripheral) will
 * have been called before that of the DSI host. If the DSI peripheral requires
 * LP-11 and/or the clock lane to be in HS mode prior to its pre_enable, then it
 * can set the &pre_enable_prev_first flag to request that the pre_enable (and
 * post_disable) order be altered to enable the DSI host first.
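 *
 * A DSI peripheral bridge driver that needs this ordering simply sets the
 * flag on its &struct drm_bridge before registering it (a sketch; the foo
 * names are illustrative)::
 *
 *     foo->bridge.funcs = &foo_bridge_funcs;
 *     foo->bridge.of_node = dev->of_node;
 *     foo->bridge.pre_enable_prev_first = true;
 *     drm_bridge_add(&foo->bridge);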
 *
 * Either the CRTC being enabled, or the DSI host enable operation should switch
 * the host to actively transmitting video on the data lanes.
 *
 * The reverse also applies. The DSI host disable operation or stopping the CRTC
 * should stop transmitting video, and the data lanes should return to the LP-11
 * state. The DSI host &post_disable operation should disable the PHY.
 * If the &pre_enable_prev_first flag is set, then the DSI peripheral's
 * bridge &post_disable will be called before the DSI host's post_disable.
 *
 * Whilst it is valid to call &host_transfer prior to pre_enable or after
 * post_disable, the exact state of the lanes is undefined at this point. The
 * DSI host should initialise the interface, transmit the data, and then disable
 * the interface again.
 *
 * Ultra Low Power State (ULPS) is not explicitly supported by DRM. If
 * implemented, it therefore needs to be handled entirely within the DSI Host
 * driver.
 */

static DEFINE_MUTEX(bridge_lock);
static LIST_HEAD(bridge_list);

/**
 * drm_bridge_add - add the given bridge to the global bridge list
 *
 * @bridge: bridge control structure
 */
void drm_bridge_add(struct drm_bridge *bridge)
{
	mutex_init(&bridge->hpd_mutex);

	mutex_lock(&bridge_lock);
	list_add_tail(&bridge->list, &bridge_list);
	mutex_unlock(&bridge_lock);
}
EXPORT_SYMBOL(drm_bridge_add);

static void drm_bridge_remove_void(void *bridge)
{
	drm_bridge_remove(bridge);
}

/**
 * devm_drm_bridge_add - devm managed version of drm_bridge_add()
 *
 * @dev: device to tie the bridge lifetime to
 * @bridge: bridge control structure
 *
 * This is the managed version of drm_bridge_add() which automatically
 * calls drm_bridge_remove() when @dev is unbound.
 *
 * Return: 0 if no error or negative error code.
 */
int devm_drm_bridge_add(struct device *dev, struct drm_bridge *bridge)
{
	drm_bridge_add(bridge);
	return devm_add_action_or_reset(dev, drm_bridge_remove_void, bridge);
}
EXPORT_SYMBOL(devm_drm_bridge_add);

/**
 * drm_bridge_remove - remove the given bridge from the global bridge list
 *
 * @bridge: bridge control structure
 */
void drm_bridge_remove(struct drm_bridge *bridge)
{
	mutex_lock(&bridge_lock);
	list_del_init(&bridge->list);
	mutex_unlock(&bridge_lock);

	mutex_destroy(&bridge->hpd_mutex);
}
EXPORT_SYMBOL(drm_bridge_remove);

static struct drm_private_state *
drm_bridge_atomic_duplicate_priv_state(struct drm_private_obj *obj)
{
	struct drm_bridge *bridge = drm_priv_to_bridge(obj);
	struct drm_bridge_state *state;

	state = bridge->funcs->atomic_duplicate_state(bridge);
	return state ? &state->base : NULL;
}

static void
drm_bridge_atomic_destroy_priv_state(struct drm_private_obj *obj,
				     struct drm_private_state *s)
{
	struct drm_bridge_state *state = drm_priv_to_bridge_state(s);
	struct drm_bridge *bridge = drm_priv_to_bridge(obj);

	bridge->funcs->atomic_destroy_state(bridge, state);
}

static const struct drm_private_state_funcs drm_bridge_priv_state_funcs = {
	.atomic_duplicate_state = drm_bridge_atomic_duplicate_priv_state,
	.atomic_destroy_state = drm_bridge_atomic_destroy_priv_state,
};

/**
 * drm_bridge_attach - attach the bridge to an encoder's chain
 *
 * @encoder: DRM encoder
 * @bridge: bridge to attach
 * @previous: previous bridge in the chain (optional)
 * @flags: DRM_BRIDGE_ATTACH_* flags
 *
 * Called by a KMS driver to link the bridge to an encoder's chain. The previous
 * argument specifies the previous bridge in the chain. If NULL, the bridge is
 * linked directly at the encoder's output. Otherwise it is linked at the
 * previous bridge's output.
 *
 * If non-NULL, the previous bridge must already have been attached by a call
 * to this function.
 *
 * Note that bridges attached to encoders are auto-detached during encoder
 * cleanup in drm_encoder_cleanup(), so drm_bridge_attach() should generally
 * *not* be balanced with a drm_bridge_detach() in driver code.
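 *
 * A typical call from a display driver that creates the &drm_connector itself
 * looks like this (a sketch; the encoder and bridge pointers are illustrative)::
 *
 *     ret = drm_bridge_attach(encoder, bridge, NULL,
 *                             DRM_BRIDGE_ATTACH_NO_CONNECTOR);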
 *
 * RETURNS:
 * Zero on success, error code on failure
 */
int drm_bridge_attach(struct drm_encoder *encoder, struct drm_bridge *bridge,
		      struct drm_bridge *previous,
		      enum drm_bridge_attach_flags flags)
{
	int ret;

	if (!encoder || !bridge)
		return -EINVAL;

	if (previous && (!previous->dev || previous->encoder != encoder))
		return -EINVAL;

	if (bridge->dev)
		return -EBUSY;

	bridge->dev = encoder->dev;
	bridge->encoder = encoder;

	if (previous)
		list_add(&bridge->chain_node, &previous->chain_node);
	else
		list_add(&bridge->chain_node, &encoder->bridge_chain);

	if (bridge->funcs->attach) {
		ret = bridge->funcs->attach(bridge, flags);
		if (ret < 0)
			goto err_reset_bridge;
	}

	if (bridge->funcs->atomic_reset) {
		struct drm_bridge_state *state;

		state = bridge->funcs->atomic_reset(bridge);
		if (IS_ERR(state)) {
			ret = PTR_ERR(state);
			goto err_detach_bridge;
		}

		drm_atomic_private_obj_init(bridge->dev, &bridge->base,
					    &state->base,
					    &drm_bridge_priv_state_funcs);
	}

	return 0;

err_detach_bridge:
	if (bridge->funcs->detach)
		bridge->funcs->detach(bridge);

err_reset_bridge:
	bridge->dev = NULL;
	bridge->encoder = NULL;
	list_del(&bridge->chain_node);

#ifdef CONFIG_OF
	DRM_ERROR("failed to attach bridge %pOF to encoder %s: %d\n",
		  bridge->of_node, encoder->name, ret);
#else
	DRM_ERROR("failed to attach bridge to encoder %s: %d\n",
		  encoder->name, ret);
#endif

	return ret;
}
EXPORT_SYMBOL(drm_bridge_attach);

void drm_bridge_detach(struct drm_bridge *bridge)
{
	if (WARN_ON(!bridge))
		return;

	if (WARN_ON(!bridge->dev))
		return;

	if (bridge->funcs->atomic_reset)
		drm_atomic_private_obj_fini(&bridge->base);

	if (bridge->funcs->detach)
		bridge->funcs->detach(bridge);

	list_del(&bridge->chain_node);
	bridge->dev = NULL;
}

/**
 * DOC: bridge operations
 *
 * Bridge drivers expose operations through the &drm_bridge_funcs structure.
 * The DRM internals (atomic and CRTC helpers) use the helpers defined in
 * drm_bridge.c to call bridge operations. Those operations are divided into
 * three big categories to support different parts of the bridge usage.
 *
 * - The encoder-related operations support control of the bridges in the
 *   chain, and are roughly counterparts to the &drm_encoder_helper_funcs
 *   operations. They are used by the legacy CRTC and the atomic modeset
 *   helpers to perform mode validation, fixup and setting, and enable and
 *   disable the bridge automatically.
 *
 *   The enable and disable operations are split into
 *   &drm_bridge_funcs.pre_enable, &drm_bridge_funcs.enable,
 *   &drm_bridge_funcs.disable and &drm_bridge_funcs.post_disable to provide
 *   finer-grained control.
 *
 *   Bridge drivers may implement the legacy version of those operations, or
 *   the atomic version (prefixed with atomic\_), in which case they shall also
 *   implement the atomic state bookkeeping operations
 *   (&drm_bridge_funcs.atomic_duplicate_state,
 *   &drm_bridge_funcs.atomic_destroy_state and &drm_bridge_funcs.atomic_reset).
 *   Mixing atomic and non-atomic versions of the operations is not supported.
 *
 * - The bus format negotiation operations
 *   &drm_bridge_funcs.atomic_get_output_bus_fmts and
 *   &drm_bridge_funcs.atomic_get_input_bus_fmts allow bridge drivers to
 *   negotiate the formats transmitted between bridges in the chain when
 *   multiple formats are supported. Negotiation for formats is performed
 *   transparently for display drivers by the atomic modeset helpers. Only
 *   atomic versions of those operations exist, bridge drivers that need to
 *   implement them shall thus also implement the atomic version of the
 *   encoder-related operations. This feature is not supported by the legacy
 *   CRTC helpers.
 *
 * - The connector-related operations support implementing a &drm_connector
 *   based on a chain of bridges. Bridge drivers traditionally create a
 *   &drm_connector for bridges meant to be used at the end of the chain. This
 *   puts additional burden on bridge drivers, especially for bridges that may
 *   be used in the middle of a chain or at the end of it. Furthermore, it
 *   requires all operations of the &drm_connector to be handled by a single
 *   bridge, which doesn't always match the hardware architecture.
 *
 *   To simplify bridge drivers and make the connector implementation more
 *   flexible, a new model allows bridges to unconditionally skip creation of
 *   &drm_connector and instead expose &drm_bridge_funcs operations to support
 *   an externally-implemented &drm_connector. Those operations are
 *   &drm_bridge_funcs.detect, &drm_bridge_funcs.get_modes,
 *   &drm_bridge_funcs.get_edid, &drm_bridge_funcs.hpd_notify,
 *   &drm_bridge_funcs.hpd_enable and &drm_bridge_funcs.hpd_disable. When
 *   implemented, display drivers shall create a &drm_connector instance for
 *   each chain of bridges, and implement those connector instances based on
 *   the bridge connector operations.
 *
 *   Bridge drivers shall implement the connector-related operations for all
 *   the features that the bridge hardware supports. For instance, if a bridge
 *   supports reading EDID, the &drm_bridge_funcs.get_edid operation shall be
 *   implemented. This however doesn't mean that the DDC lines are wired to the
 *   bridge on a particular platform, as they could also be connected to an I2C
 *   controller of the SoC. Support for the connector-related operations on the
 *   running platform is reported through the &drm_bridge.ops flags. Bridge
 *   drivers shall detect which operations they can support on the platform
 *   (usually this information is provided by ACPI or DT), and set the
 *   &drm_bridge.ops flags for all supported operations. A flag shall only be
 *   set if the corresponding &drm_bridge_funcs operation is implemented, but
 *   an implemented operation doesn't necessarily imply that the corresponding
 *   flag will be set. Display drivers shall use the &drm_bridge.ops flags to
 *   decide which bridge to delegate a connector operation to. This mechanism
 *   allows providing a single static const &drm_bridge_funcs instance in
 *   bridge drivers, improving security by storing function pointers in
 *   read-only memory.
 *
 *   In order to ease transition, bridge drivers may support both the old and
 *   new models by making connector creation optional and implementing the
 *   connector-related bridge operations. Connector creation is then controlled
 *   by the flags argument to the drm_bridge_attach() function. Display drivers
 *   that support the new model and create connectors themselves shall set the
 *   %DRM_BRIDGE_ATTACH_NO_CONNECTOR flag, and bridge drivers shall then skip
 *   connector creation. For intermediate bridges in the chain, the flag shall
 *   be passed to the drm_bridge_attach() call for the downstream bridge.
 *   Bridge drivers that only implement the new model shall return an error
 *   from their &drm_bridge_funcs.attach handler when the
 *   %DRM_BRIDGE_ATTACH_NO_CONNECTOR flag is not set. New display drivers
 *   should use the new model, and convert the bridge drivers they use if
 *   needed, in order to gradually transition to the new model.
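 *
 *   A bridge driver that only supports the new model can enforce this from
 *   its attach handler with a check along these lines (a sketch; the foo name
 *   is illustrative)::
 *
 *     static int foo_bridge_attach(struct drm_bridge *bridge,
 *                                  enum drm_bridge_attach_flags flags)
 *     {
 *             if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)) {
 *                     DRM_ERROR("Fix bridge driver to make connector optional!\n");
 *                     return -EINVAL;
 *             }
 *
 *             return 0;
 *     }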
 */

/**
 * drm_bridge_chain_mode_fixup - fixup proposed mode for all bridges in the
 *				 encoder chain
 * @bridge: bridge control structure
 * @mode: desired mode to be set for the bridge
 * @adjusted_mode: updated mode that works for this bridge
 *
 * Calls &drm_bridge_funcs.mode_fixup for all the bridges in the
 * encoder chain, starting from the first bridge to the last.
 *
 * Note: the bridge passed should be the one closest to the encoder
 *
 * RETURNS:
 * true on success, false on failure
 */
bool drm_bridge_chain_mode_fixup(struct drm_bridge *bridge,
				 const struct drm_display_mode *mode,
				 struct drm_display_mode *adjusted_mode)
{
	struct drm_encoder *encoder;

	if (!bridge)
		return true;

	encoder = bridge->encoder;
	list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) {
		if (!bridge->funcs->mode_fixup)
			continue;

		if (!bridge->funcs->mode_fixup(bridge, mode, adjusted_mode))
			return false;
	}

	return true;
}
EXPORT_SYMBOL(drm_bridge_chain_mode_fixup);

/**
 * drm_bridge_chain_mode_valid - validate the mode against all bridges in the
 *				 encoder chain.
 * @bridge: bridge control structure
 * @info: display info against which the mode shall be validated
 * @mode: desired mode to be validated
 *
 * Calls &drm_bridge_funcs.mode_valid for all the bridges in the encoder
 * chain, starting from the first bridge to the last. If any bridge rejects
 * the mode, the function returns the status code reported by that bridge.
 *
 * Note: the bridge passed should be the one closest to the encoder.
 *
 * RETURNS:
 * MODE_OK on success, or a &enum drm_mode_status error code on failure
 */
enum drm_mode_status
drm_bridge_chain_mode_valid(struct drm_bridge *bridge,
			    const struct drm_display_info *info,
			    const struct drm_display_mode *mode)
{
	struct drm_encoder *encoder;

	if (!bridge)
		return MODE_OK;

	encoder = bridge->encoder;
	list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) {
		enum drm_mode_status ret;

		if (!bridge->funcs->mode_valid)
			continue;

		ret = bridge->funcs->mode_valid(bridge, info, mode);
		if (ret != MODE_OK)
			return ret;
	}

	return MODE_OK;
}
EXPORT_SYMBOL(drm_bridge_chain_mode_valid);

/**
 * drm_bridge_chain_mode_set - set proposed mode for all bridges in the
 *			       encoder chain
 * @bridge: bridge control structure
 * @mode: desired mode to be set for the encoder chain
 * @adjusted_mode: updated mode that works for this encoder chain
 *
 * Calls &drm_bridge_funcs.mode_set op for all the bridges in the
 * encoder chain, starting from the first bridge to the last.
 *
 * Note: the bridge passed should be the one closest to the encoder
 */
void drm_bridge_chain_mode_set(struct drm_bridge *bridge,
			       const struct drm_display_mode *mode,
			       const struct drm_display_mode *adjusted_mode)
{
	struct drm_encoder *encoder;

	if (!bridge)
		return;

	encoder = bridge->encoder;
	list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) {
		if (bridge->funcs->mode_set)
			bridge->funcs->mode_set(bridge, mode, adjusted_mode);
	}
}
EXPORT_SYMBOL(drm_bridge_chain_mode_set);

/**
 * drm_atomic_bridge_chain_disable - disables all bridges in the encoder chain
 * @bridge: bridge control structure
 * @old_state: old atomic state
 *
 * Calls &drm_bridge_funcs.atomic_disable (falls back on
 * &drm_bridge_funcs.disable) op for all the bridges in the encoder chain,
 * starting from the last bridge to the first. These are called before calling
 * &drm_encoder_helper_funcs.atomic_disable
 *
 * Note: the bridge passed should be the one closest to the encoder
 */
void drm_atomic_bridge_chain_disable(struct drm_bridge *bridge,
				     struct drm_atomic_state *old_state)
{
	struct drm_encoder *encoder;
	struct drm_bridge *iter;

	if (!bridge)
		return;

	encoder = bridge->encoder;
	list_for_each_entry_reverse(iter, &encoder->bridge_chain, chain_node) {
		if (iter->funcs->atomic_disable) {
			struct drm_bridge_state *old_bridge_state;

			old_bridge_state =
				drm_atomic_get_old_bridge_state(old_state,
								iter);
			if (WARN_ON(!old_bridge_state))
				return;

			iter->funcs->atomic_disable(iter, old_bridge_state);
		} else if (iter->funcs->disable) {
			iter->funcs->disable(iter);
		}

		if (iter == bridge)
			break;
	}
}
EXPORT_SYMBOL(drm_atomic_bridge_chain_disable);

static void drm_atomic_bridge_call_post_disable(struct drm_bridge *bridge,
						struct drm_atomic_state *old_state)
{
	if (old_state && bridge->funcs->atomic_post_disable) {
		struct drm_bridge_state *old_bridge_state;

		old_bridge_state =
			drm_atomic_get_old_bridge_state(old_state,
							bridge);
		if (WARN_ON(!old_bridge_state))
			return;

		bridge->funcs->atomic_post_disable(bridge,
						   old_bridge_state);
	} else if (bridge->funcs->post_disable) {
		bridge->funcs->post_disable(bridge);
	}
}

/**
 * drm_atomic_bridge_chain_post_disable - cleans up after disabling all bridges
 *					  in the encoder chain
 * @bridge: bridge control structure
 * @old_state: old atomic state
 *
 * Calls &drm_bridge_funcs.atomic_post_disable (falls back on
 * &drm_bridge_funcs.post_disable) op for all the bridges in the encoder chain,
 * starting from the first bridge to the last. These are called after completing
 * &drm_encoder_helper_funcs.atomic_disable
 *
 * If a bridge sets @pre_enable_prev_first, then the @post_disable for that
 * bridge will be called before the previous one to reverse the @pre_enable
 * calling direction.
 *
 * Note: the bridge passed should be the one closest to the encoder
 */
void drm_atomic_bridge_chain_post_disable(struct drm_bridge *bridge,
					  struct drm_atomic_state *old_state)
{
	struct drm_encoder *encoder;
	struct drm_bridge *next, *limit;

	if (!bridge)
		return;

	encoder = bridge->encoder;

	list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) {
		limit = NULL;

		if (!list_is_last(&bridge->chain_node, &encoder->bridge_chain)) {
			next = list_next_entry(bridge, chain_node);

			if (next->pre_enable_prev_first) {
				/* next bridge had requested that prev
				 * was enabled first, so disabled last
				 */
				limit = next;

				/* Find the next bridge that has NOT requested
				 * prev to be enabled first / disabled last
				 */
				list_for_each_entry_from(next, &encoder->bridge_chain,
							 chain_node) {
					if (!next->pre_enable_prev_first) {
						next = list_prev_entry(next, chain_node);
						limit = next;
						break;
					}
				}

				/* Call these bridges in reverse order */
				list_for_each_entry_from_reverse(next, &encoder->bridge_chain,
								 chain_node) {
					if (next == bridge)
						break;

					drm_atomic_bridge_call_post_disable(next,
									    old_state);
				}
			}
		}

		drm_atomic_bridge_call_post_disable(bridge, old_state);

		if (limit)
			/* Jump all bridges that we have already post_disabled */
			bridge = limit;
	}
}
EXPORT_SYMBOL(drm_atomic_bridge_chain_post_disable);

static void drm_atomic_bridge_call_pre_enable(struct drm_bridge *bridge,
					      struct drm_atomic_state *old_state)
{
	if (old_state && bridge->funcs->atomic_pre_enable) {
		struct drm_bridge_state *old_bridge_state;

		old_bridge_state =
			drm_atomic_get_old_bridge_state(old_state,
							bridge);
		if (WARN_ON(!old_bridge_state))
			return;

		bridge->funcs->atomic_pre_enable(bridge, old_bridge_state);
	} else if (bridge->funcs->pre_enable) {
		bridge->funcs->pre_enable(bridge);
	}
}

/**
 * drm_atomic_bridge_chain_pre_enable - prepares for enabling all bridges in
 *					the encoder chain
 * @bridge: bridge control structure
 * @old_state: old atomic state
 *
 * Calls &drm_bridge_funcs.atomic_pre_enable (falls back on
 * &drm_bridge_funcs.pre_enable) op for all the bridges in the encoder chain,
 * starting from the last bridge to the first. These are called before calling
 * &drm_encoder_helper_funcs.atomic_enable
 *
 * If a bridge sets @pre_enable_prev_first, then the pre_enable for the
 * prev bridge will be called before pre_enable of this bridge.
 *
 * Note: the bridge passed should be the one closest to the encoder
 */
void drm_atomic_bridge_chain_pre_enable(struct drm_bridge *bridge,
					struct drm_atomic_state *old_state)
{
	struct drm_encoder *encoder;
	struct drm_bridge *iter, *next, *limit;

	if (!bridge)
		return;

	encoder = bridge->encoder;

	list_for_each_entry_reverse(iter, &encoder->bridge_chain, chain_node) {
		if (iter->pre_enable_prev_first) {
			next = iter;
			limit = bridge;
			list_for_each_entry_from_reverse(next,
							 &encoder->bridge_chain,
							 chain_node) {
				if (next == bridge)
					break;

				if (!next->pre_enable_prev_first) {
					/* Found first bridge that does NOT
					 * request prev to be enabled first
					 */
					limit = list_prev_entry(next, chain_node);
					break;
				}
			}

			list_for_each_entry_from(next, &encoder->bridge_chain, chain_node) {
				/* Call requested prev bridge pre_enable
				 * in order.
				 */
				if (next == iter)
					/* At the first bridge to request prev
					 * bridges called first.
					 */
					break;

				drm_atomic_bridge_call_pre_enable(next, old_state);
			}
		}

		drm_atomic_bridge_call_pre_enable(iter, old_state);

		if (iter->pre_enable_prev_first)
			/* Jump all bridges that we have already pre_enabled */
			iter = limit;

		if (iter == bridge)
			break;
	}
}
EXPORT_SYMBOL(drm_atomic_bridge_chain_pre_enable);

/**
 * drm_atomic_bridge_chain_enable - enables all bridges in the encoder chain
 * @bridge: bridge control structure
 * @old_state: old atomic state
 *
 * Calls &drm_bridge_funcs.atomic_enable (falls back on
 * &drm_bridge_funcs.enable) op for all the bridges in the encoder chain,
 * starting from the first bridge to the last. These are called after completing
 * &drm_encoder_helper_funcs.atomic_enable
 *
 * Note: the bridge passed should be the one closest to the encoder
 */
void drm_atomic_bridge_chain_enable(struct drm_bridge *bridge,
				    struct drm_atomic_state *old_state)
{
	struct drm_encoder *encoder;

	if (!bridge)
		return;

	encoder = bridge->encoder;
	list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) {
		if (bridge->funcs->atomic_enable) {
			struct drm_bridge_state *old_bridge_state;

			old_bridge_state =
				drm_atomic_get_old_bridge_state(old_state,
								bridge);
			if (WARN_ON(!old_bridge_state))
				return;

			bridge->funcs->atomic_enable(bridge, old_bridge_state);
		} else if (bridge->funcs->enable) {
			bridge->funcs->enable(bridge);
		}
	}
}
EXPORT_SYMBOL(drm_atomic_bridge_chain_enable);

static int drm_atomic_bridge_check(struct drm_bridge *bridge,
				   struct drm_crtc_state *crtc_state,
				   struct drm_connector_state *conn_state)
{
	if (bridge->funcs->atomic_check) {
		struct drm_bridge_state *bridge_state;
		int ret;

		bridge_state = drm_atomic_get_new_bridge_state(crtc_state->state,
							       bridge);
		if (WARN_ON(!bridge_state))
			return -EINVAL;

		ret = bridge->funcs->atomic_check(bridge, bridge_state,
						  crtc_state, conn_state);
		if (ret)
			return ret;
	} else if (bridge->funcs->mode_fixup) {
		if (!bridge->funcs->mode_fixup(bridge, &crtc_state->mode,
					       &crtc_state->adjusted_mode))
			return -EINVAL;
	}

	return 0;
}

static int select_bus_fmt_recursive(struct drm_bridge *first_bridge,
				    struct drm_bridge *cur_bridge,
				    struct drm_crtc_state *crtc_state,
				    struct drm_connector_state *conn_state,
				    u32 out_bus_fmt)
{
	unsigned int i, num_in_bus_fmts = 0;
	struct drm_bridge_state *cur_state;
	struct drm_bridge *prev_bridge;
	u32 *in_bus_fmts;
	int ret;

	prev_bridge = drm_bridge_get_prev_bridge(cur_bridge);
	cur_state = drm_atomic_get_new_bridge_state(crtc_state->state,
						    cur_bridge);

	/*
	 * If bus format negotiation is not supported by this bridge, let's
	 * pass MEDIA_BUS_FMT_FIXED to the previous bridge in the chain and
	 * hope that it can handle this situation gracefully (by providing
	 * appropriate default values).
	 */
	if (!cur_bridge->funcs->atomic_get_input_bus_fmts) {
		if (cur_bridge != first_bridge) {
			ret = select_bus_fmt_recursive(first_bridge,
						       prev_bridge, crtc_state,
						       conn_state,
						       MEDIA_BUS_FMT_FIXED);
			if (ret)
				return ret;
		}

		/*
		 * Driver does not implement the atomic state hooks, but that's
		 * fine, as long as it does not access the bridge state.
		 */
		if (cur_state) {
			cur_state->input_bus_cfg.format = MEDIA_BUS_FMT_FIXED;
			cur_state->output_bus_cfg.format = out_bus_fmt;
		}

		return 0;
	}

	/*
	 * If the driver implements ->atomic_get_input_bus_fmts() it
	 * should also implement the atomic state hooks.
	 */
	if (WARN_ON(!cur_state))
		return -EINVAL;

	in_bus_fmts = cur_bridge->funcs->atomic_get_input_bus_fmts(cur_bridge,
							cur_state,
							crtc_state,
							conn_state,
							out_bus_fmt,
							&num_in_bus_fmts);
	if (!num_in_bus_fmts)
		return -ENOTSUPP;
	else if (!in_bus_fmts)
		return -ENOMEM;

	if (first_bridge == cur_bridge) {
		cur_state->input_bus_cfg.format = in_bus_fmts[0];
		cur_state->output_bus_cfg.format = out_bus_fmt;
		kfree(in_bus_fmts);
		return 0;
	}

	for (i = 0; i < num_in_bus_fmts; i++) {
		ret = select_bus_fmt_recursive(first_bridge, prev_bridge,
					       crtc_state, conn_state,
					       in_bus_fmts[i]);
		if (ret != -ENOTSUPP)
			break;
	}

	if (!ret) {
		cur_state->input_bus_cfg.format = in_bus_fmts[i];
		cur_state->output_bus_cfg.format = out_bus_fmt;
	}

	kfree(in_bus_fmts);
	return ret;
}

/*
 * This function is called by &drm_atomic_bridge_chain_check() just before
 * calling &drm_bridge_funcs.atomic_check() on all elements of the chain.
 * It performs bus format negotiation between bridge elements. The negotiation
 * happens in reverse order, starting from the last element in the chain up to
 * @bridge.
 *
 * Negotiation starts by retrieving supported output bus formats on the last
 * bridge element and testing them one by one. The test is recursive, meaning
 * that for each tested output format, the whole chain will be walked backward,
 * and each element will have to choose an input bus format that can be
 * transcoded to the requested output format. When a bridge element does not
 * support transcoding into a specific output format -ENOTSUPP is returned and
 * the next bridge element will have to try a different format. If none of the
 * combinations worked, -ENOTSUPP is returned and the atomic modeset will fail.
 *
 * This implementation relies on
 * &drm_bridge_funcs.atomic_get_output_bus_fmts() and
 * &drm_bridge_funcs.atomic_get_input_bus_fmts() to gather supported
 * input/output formats.
 *
 * When &drm_bridge_funcs.atomic_get_output_bus_fmts() is not implemented by
 * the last element of the chain, &drm_atomic_bridge_chain_select_bus_fmts()
 * tries a single format: &drm_connector.display_info.bus_formats[0] if
 * available, MEDIA_BUS_FMT_FIXED otherwise.
 *
 * When &drm_bridge_funcs.atomic_get_input_bus_fmts() is not implemented,
 * &drm_atomic_bridge_chain_select_bus_fmts() skips the negotiation on the
 * bridge element that lacks this hook and asks the previous element in the
 * chain to try MEDIA_BUS_FMT_FIXED. It's up to bridge drivers to decide what
 * to do in that case (fail if they want to enforce bus format negotiation, or
 * provide a reasonable default if they need to support pipelines where not
 * all elements support bus format negotiation).
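 *
 * As a sketch of what a negotiating bridge provides, a driver with a single
 * supported input format could implement the hook roughly like this (the
 * foo name and the MEDIA_BUS_FMT_RGB888_1X24 value are only examples):
 *
 *     static u32 *
 *     foo_get_input_bus_fmts(struct drm_bridge *bridge,
 *                            struct drm_bridge_state *bridge_state,
 *                            struct drm_crtc_state *crtc_state,
 *                            struct drm_connector_state *conn_state,
 *                            u32 output_fmt, unsigned int *num_input_fmts)
 *     {
 *             u32 *input_fmts;
 *
 *             input_fmts = kcalloc(1, sizeof(*input_fmts), GFP_KERNEL);
 *             if (!input_fmts) {
 *                     *num_input_fmts = 0;
 *                     return NULL;
 *             }
 *
 *             *num_input_fmts = 1;
 *             input_fmts[0] = MEDIA_BUS_FMT_RGB888_1X24;
 *             return input_fmts;
 *     }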
 */
static int
drm_atomic_bridge_chain_select_bus_fmts(struct drm_bridge *bridge,
					struct drm_crtc_state *crtc_state,
					struct drm_connector_state *conn_state)
{
	struct drm_connector *conn = conn_state->connector;
	struct drm_encoder *encoder = bridge->encoder;
	struct drm_bridge_state *last_bridge_state;
	unsigned int i, num_out_bus_fmts = 0;
	struct drm_bridge *last_bridge;
	u32 *out_bus_fmts;
	int ret = 0;

	last_bridge = list_last_entry(&encoder->bridge_chain,
				      struct drm_bridge, chain_node);
	last_bridge_state = drm_atomic_get_new_bridge_state(crtc_state->state,
							    last_bridge);

	if (last_bridge->funcs->atomic_get_output_bus_fmts) {
		const struct drm_bridge_funcs *funcs = last_bridge->funcs;

		/*
		 * If the driver implements ->atomic_get_output_bus_fmts() it
		 * should also implement the atomic state hooks.
		 */
		if (WARN_ON(!last_bridge_state))
			return -EINVAL;

		out_bus_fmts = funcs->atomic_get_output_bus_fmts(last_bridge,
							last_bridge_state,
							crtc_state,
							conn_state,
							&num_out_bus_fmts);
		if (!num_out_bus_fmts)
			return -ENOTSUPP;
		else if (!out_bus_fmts)
			return -ENOMEM;
	} else {
		num_out_bus_fmts = 1;
		out_bus_fmts = kmalloc(sizeof(*out_bus_fmts), GFP_KERNEL);
		if (!out_bus_fmts)
			return -ENOMEM;

		if (conn->display_info.num_bus_formats &&
		    conn->display_info.bus_formats)
			out_bus_fmts[0] = conn->display_info.bus_formats[0];
		else
			out_bus_fmts[0] = MEDIA_BUS_FMT_FIXED;
	}

	for (i = 0; i < num_out_bus_fmts; i++) {
		ret = select_bus_fmt_recursive(bridge, last_bridge, crtc_state,
					       conn_state, out_bus_fmts[i]);
		if (ret != -ENOTSUPP)
			break;
	}

	kfree(out_bus_fmts);

	return ret;
}

static void
drm_atomic_bridge_propagate_bus_flags(struct drm_bridge *bridge,
				      struct drm_connector *conn,
				      struct drm_atomic_state *state)
{
	struct drm_bridge_state *bridge_state, *next_bridge_state;
	struct drm_bridge *next_bridge;
	u32 output_flags = 0;

	bridge_state = drm_atomic_get_new_bridge_state(state, bridge);

	/* No bridge state attached to this bridge => nothing to propagate. */
	if (!bridge_state)
		return;

	next_bridge = drm_bridge_get_next_bridge(bridge);

	/*
	 * Let's try to apply the most common case here, that is, propagate
	 * display_info flags for the last bridge, and propagate the input
	 * flags of the next bridge element to the output end of the current
	 * bridge when the bridge is not the last one.
	 * There are exceptions to this rule, like when signal inversion is
	 * happening at the board level, but that's something drivers can deal
	 * with from their &drm_bridge_funcs.atomic_check() implementation by
	 * simply overriding the flags value we've set here.
	 */
	if (!next_bridge) {
		output_flags = conn->display_info.bus_flags;
	} else {
		next_bridge_state = drm_atomic_get_new_bridge_state(state,
								next_bridge);
		/*
		 * No bridge state attached to the next bridge, just leave the
		 * flags to 0.
		 */
		if (next_bridge_state)
			output_flags = next_bridge_state->input_bus_cfg.flags;
	}

	bridge_state->output_bus_cfg.flags = output_flags;

	/*
	 * Propagate the output flags to the input end of the bridge. Again, it's
	 * not necessarily what all bridges want, but that's what most of them
	 * do, and by doing that by default we avoid forcing drivers to
	 * duplicate the "dummy propagation" logic.
	 */
	bridge_state->input_bus_cfg.flags = output_flags;
}

/**
 * drm_atomic_bridge_chain_check() - Do an atomic check on the bridge chain
 * @bridge: bridge control structure
 * @crtc_state: new CRTC state
 * @conn_state: new connector state
 *
 * First trigger a bus format negotiation before calling
 * &drm_bridge_funcs.atomic_check() (falls back on
 * &drm_bridge_funcs.mode_fixup()) op for all the bridges in the encoder chain,
 * starting from the last bridge to the first. These are called before calling
 * &drm_encoder_helper_funcs.atomic_check()
 *
 * RETURNS:
 * 0 on success, a negative error code on failure
 */
int drm_atomic_bridge_chain_check(struct drm_bridge *bridge,
				  struct drm_crtc_state *crtc_state,
				  struct drm_connector_state *conn_state)
{
	struct drm_connector *conn = conn_state->connector;
	struct drm_encoder *encoder;
	struct drm_bridge *iter;
	int ret;

	if (!bridge)
		return 0;

	ret = drm_atomic_bridge_chain_select_bus_fmts(bridge, crtc_state,
						      conn_state);
	if (ret)
		return ret;

	encoder = bridge->encoder;
	list_for_each_entry_reverse(iter, &encoder->bridge_chain, chain_node) {
		int ret;

		/*
		 * Bus flags are propagated by default. If a bridge needs to
		 * tweak the input bus flags for any reason, it should happen
		 * in its &drm_bridge_funcs.atomic_check() implementation such
		 * that preceding bridges in the chain can propagate the new
		 * bus flags.
		 */
		drm_atomic_bridge_propagate_bus_flags(iter, conn,
						      crtc_state->state);

		ret = drm_atomic_bridge_check(iter, crtc_state, conn_state);
		if (ret)
			return ret;

		if (iter == bridge)
			break;
	}

	return 0;
}
EXPORT_SYMBOL(drm_atomic_bridge_chain_check);

/**
 * drm_bridge_detect - check if anything is attached to the bridge output
 * @bridge: bridge control structure
 *
 * If the bridge supports output detection, as reported by the
 * DRM_BRIDGE_OP_DETECT bridge ops flag, call &drm_bridge_funcs.detect for the
 * bridge and return the connection status. Otherwise return
 * connector_status_unknown.
 *
 * RETURNS:
 * The detection status on success, or connector_status_unknown if the bridge
 * doesn't support output detection.
 */
enum drm_connector_status drm_bridge_detect(struct drm_bridge *bridge)
{
	if (!(bridge->ops & DRM_BRIDGE_OP_DETECT))
		return connector_status_unknown;

	return bridge->funcs->detect(bridge);
}
EXPORT_SYMBOL_GPL(drm_bridge_detect);

/**
 * drm_bridge_get_modes - fill all modes currently valid for the sink into the
 * connector
 * @bridge: bridge control structure
 * @connector: the connector to fill with modes
 *
 * If the bridge supports output modes retrieval, as reported by the
 * DRM_BRIDGE_OP_MODES bridge ops flag, call &drm_bridge_funcs.get_modes to
 * fill the connector with all valid modes and return the number of modes
 * added. Otherwise return 0.
 *
 * RETURNS:
 * The number of modes added to the connector.
 */
int drm_bridge_get_modes(struct drm_bridge *bridge,
			 struct drm_connector *connector)
{
	if (!(bridge->ops & DRM_BRIDGE_OP_MODES))
		return 0;

	return bridge->funcs->get_modes(bridge, connector);
}
EXPORT_SYMBOL_GPL(drm_bridge_get_modes);

/**
 * drm_bridge_get_edid - get the EDID data of the connected display
 * @bridge: bridge control structure
 * @connector: the connector to read EDID for
 *
 * If the bridge supports output EDID retrieval, as reported by the
 * DRM_BRIDGE_OP_EDID bridge ops flag, call &drm_bridge_funcs.get_edid to
 * get the EDID and return it. Otherwise return NULL.
 *
 * RETURNS:
 * The retrieved EDID on success, or NULL otherwise.
 */
struct edid *drm_bridge_get_edid(struct drm_bridge *bridge,
				 struct drm_connector *connector)
{
	if (!(bridge->ops & DRM_BRIDGE_OP_EDID))
		return NULL;

	return bridge->funcs->get_edid(bridge, connector);
}
EXPORT_SYMBOL_GPL(drm_bridge_get_edid);

/**
 * drm_bridge_hpd_enable - enable hot plug detection for the bridge
 * @bridge: bridge control structure
 * @cb: hot-plug detection callback
 * @data: data to be passed to the hot-plug detection callback
 *
 * Call &drm_bridge_funcs.hpd_enable if implemented and register the given @cb
 * and @data as hot plug notification callback. From now on the @cb will be
 * called with @data when an output status change is detected by the bridge,
 * until hot plug notification gets disabled with drm_bridge_hpd_disable().
 *
 * Hot plug detection is supported only if the DRM_BRIDGE_OP_HPD flag is set in
 * bridge->ops. This function shall not be called when the flag is not set.
 *
 * Only one hot plug detection callback can be registered at a time; it is an
 * error to call this function when hot plug detection is already enabled for
 * the bridge.
 */
void drm_bridge_hpd_enable(struct drm_bridge *bridge,
			   void (*cb)(void *data,
				      enum drm_connector_status status),
			   void *data)
{
	if (!(bridge->ops & DRM_BRIDGE_OP_HPD))
		return;

	mutex_lock(&bridge->hpd_mutex);

	if (WARN(bridge->hpd_cb, "Hot plug detection already enabled\n"))
		goto unlock;

	bridge->hpd_cb = cb;
	bridge->hpd_data = data;

	if (bridge->funcs->hpd_enable)
		bridge->funcs->hpd_enable(bridge);

unlock:
	mutex_unlock(&bridge->hpd_mutex);
}
EXPORT_SYMBOL_GPL(drm_bridge_hpd_enable);

/**
 * drm_bridge_hpd_disable - disable hot plug detection for the bridge
 * @bridge: bridge control structure
 *
 * Call &drm_bridge_funcs.hpd_disable if implemented and unregister the hot
 * plug detection callback previously registered with drm_bridge_hpd_enable().
 * Once this function returns the callback will not be called by the bridge
 * when an output status change occurs.
 *
 * Hot plug detection is supported only if the DRM_BRIDGE_OP_HPD flag is set in
 * bridge->ops. This function shall not be called when the flag is not set.
 */
void drm_bridge_hpd_disable(struct drm_bridge *bridge)
{
	if (!(bridge->ops & DRM_BRIDGE_OP_HPD))
		return;

	mutex_lock(&bridge->hpd_mutex);
	if (bridge->funcs->hpd_disable)
		bridge->funcs->hpd_disable(bridge);

	bridge->hpd_cb = NULL;
	bridge->hpd_data = NULL;
	mutex_unlock(&bridge->hpd_mutex);
}
EXPORT_SYMBOL_GPL(drm_bridge_hpd_disable);

/**
 * drm_bridge_hpd_notify - notify hot plug detection events
 * @bridge: bridge control structure
 * @status: output connection status
 *
 * Bridge drivers shall call this function to report hot plug events when they
 * detect a change in the output status, when hot plug detection has been
 * enabled by drm_bridge_hpd_enable().
 *
 * This function shall be called in a context that can sleep.
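 *
 * A bridge driver with a dedicated HPD interrupt would typically call it from
 * a threaded interrupt handler, along these lines (a sketch; the foo names
 * are illustrative)::
 *
 *     static irqreturn_t foo_hpd_irq_handler(int irq, void *arg)
 *     {
 *             struct foo *foo = arg;
 *             enum drm_connector_status status;
 *
 *             status = foo_detect_output(foo);
 *             drm_bridge_hpd_notify(&foo->bridge, status);
 *
 *             return IRQ_HANDLED;
 *     }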
 */
void drm_bridge_hpd_notify(struct drm_bridge *bridge,
			   enum drm_connector_status status)
{
	mutex_lock(&bridge->hpd_mutex);
	if (bridge->hpd_cb)
		bridge->hpd_cb(bridge->hpd_data, status);
	mutex_unlock(&bridge->hpd_mutex);
}
EXPORT_SYMBOL_GPL(drm_bridge_hpd_notify);

#ifdef CONFIG_OF
/**
 * of_drm_find_bridge - find the bridge corresponding to the device node in
 *			the global bridge list
 *
 * @np: device node
 *
 * RETURNS:
 * drm_bridge control struct on success, NULL on failure
 */
struct drm_bridge *of_drm_find_bridge(struct device_node *np)
{
	struct drm_bridge *bridge;

	mutex_lock(&bridge_lock);

	list_for_each_entry(bridge, &bridge_list, list) {
		if (bridge->of_node == np) {
			mutex_unlock(&bridge_lock);
			return bridge;
		}
	}

	mutex_unlock(&bridge_lock);
	return NULL;
}
EXPORT_SYMBOL(of_drm_find_bridge);
#endif

#ifdef CONFIG_DEBUG_FS
static int drm_bridge_chains_info(struct seq_file *m, void *data)
{
	struct drm_debugfs_entry *entry = m->private;
	struct drm_device *dev = entry->dev;
	struct drm_printer p = drm_seq_file_printer(m);
	struct drm_mode_config *config = &dev->mode_config;
	struct drm_encoder *encoder;
	unsigned int bridge_idx = 0;

	list_for_each_entry(encoder, &config->encoder_list, head) {
		struct drm_bridge *bridge;

		drm_printf(&p, "encoder[%u]\n", encoder->base.id);

		drm_for_each_bridge_in_chain(encoder, bridge) {
			drm_printf(&p, "\tbridge[%u] type: %u, ops: %#x",
				   bridge_idx, bridge->type, bridge->ops);

#ifdef CONFIG_OF
			if (bridge->of_node)
				drm_printf(&p, ", OF: %pOFfc", bridge->of_node);
#endif

			drm_printf(&p, "\n");

			bridge_idx++;
		}
	}

	return 0;
}

static const struct drm_debugfs_info drm_bridge_debugfs_list[] = {
	{ "bridge_chains", drm_bridge_chains_info, 0 },
};

void drm_bridge_debugfs_init(struct drm_minor *minor)
{
	drm_debugfs_add_files(minor->dev, drm_bridge_debugfs_list,
			      ARRAY_SIZE(drm_bridge_debugfs_list));
}
#endif

MODULE_AUTHOR("Ajay Kumar <ajaykumar.rs@samsung.com>");
MODULE_DESCRIPTION("DRM bridge infrastructure");
MODULE_LICENSE("GPL and additional rights");