/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/drm_hdcp.h>
#endif
#include "amdgpu_pm.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif

#include "ivsrcid/ivsrcid_vislands30.h"

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/version.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>
#include <drm/drm_hdcp.h>

#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "soc15_common.h"
#endif

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"

#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
#endif
#define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);

#define FIRMWARE_RAVEN_DMCU		"amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);

/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100

/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */

/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);

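/*
 * Map the dongle type from a DP link's DPCD caps to the DRM subconnector
 * type reported to userspace through the DP subconnector property.
 */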
static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
{
	switch (link->dpcd_caps.dongle_type) {
	case DISPLAY_DONGLE_NONE:
		return DRM_MODE_SUBCONNECTOR_Native;
	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
		return DRM_MODE_SUBCONNECTOR_VGA;
	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
	case DISPLAY_DONGLE_DP_DVI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_DVID;
	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_HDMIA;
	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
	default:
		return DRM_MODE_SUBCONNECTOR_Unknown;
	}
}

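/*
 * Sync a DP connector's subconnector property with the dongle type DC
 * reports; the property is reset to "unknown" when no sink is attached.
 */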
static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
{
	struct dc_link *link = aconnector->dc_link;
	struct drm_connector *connector = &aconnector->base;
	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;

	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
		return;

	if (aconnector->dc_sink)
		subconnector = get_subconnector_type(link);

	drm_object_property_set_value(&connector->base,
			connector->dev->mode_config.dp_subconnector_property,
			subconnector);
}

/*
 * Initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder, and drm_mode_config.
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
				struct drm_plane *plane,
				unsigned long possible_crtcs,
				const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			       struct drm_plane *plane,
			       uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *amdgpu_dm_connector,
				    uint32_t link_index,
				    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static int amdgpu_dm_atomic_commit(struct drm_device *dev,
				   struct drm_atomic_state *state,
				   bool nonblock);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state);

static void amdgpu_dm_set_psr_caps(struct dc_link *link);
static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);

/**
 * dm_vblank_get_counter() - Get vblank counter for a CRTC
 * @adev: [in] desired amdgpu device
 * @crtc: [in] index of the CRTC to get the counter from
 *
 * Return: the vblank counter, or 0 if the CRTC index is out of range or
 * the CRTC has no stream.
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (acrtc->dm_irq_params.stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
	}
}

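/*
 * Read the current scanout position from DC and pack it into the
 * register-style format the base driver expects: vblank start/end in
 * *vbl, horizontal/vertical position in *position.
 */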
static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
{
	uint32_t v_blank_start, v_blank_end, h_position, v_position;

	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (acrtc->dm_irq_params.stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		/*
		 * TODO rework base driver to use values directly.
		 * for now parse it back into reg-format
		 */
		dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
					 &v_blank_start,
					 &v_blank_end,
					 &h_position,
					 &v_position);

		*position = v_position | (h_position << 16);
		*vbl = v_blank_start | (v_blank_end << 16);
	}

	return 0;
}

static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}

static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}

static bool dm_check_soft_reset(void *handle)
{
	return false;
}

static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}

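/*
 * Find the amdgpu_crtc driven by the given OTG (output timing generator)
 * instance; warns and falls back to the first CRTC for otg_inst == -1.
 */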
static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
		     int otg_inst)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	if (otg_inst == -1) {
		WARN_ON(1);
		return adev->mode_info.crtcs[0];
	}

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}

static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
{
	return acrtc->dm_irq_params.freesync_config.state ==
		       VRR_STATE_ACTIVE_VARIABLE ||
	       acrtc->dm_irq_params.freesync_config.state ==
		       VRR_STATE_ACTIVE_FIXED;
}

static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}

/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;
	struct drm_pending_vblank_event *e;
	uint32_t vpos, hpos, v_blank_start, v_blank_end;
	bool vrr_active;

	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/* TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
		return;
	}

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
				 amdgpu_crtc->pflip_status,
				 AMDGPU_FLIP_SUBMITTED,
				 amdgpu_crtc->crtc_id,
				 amdgpu_crtc);
		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
		return;
	}

	/* page flip completed. */
	e = amdgpu_crtc->event;
	amdgpu_crtc->event = NULL;

	WARN_ON(!e);

	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);

	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
	if (!vrr_active ||
	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
				      &v_blank_end, &hpos, &vpos) ||
	    (vpos < v_blank_start)) {
		/* Update to correct count and vblank timestamp if racing with
		 * vblank irq. This also updates to the correct vblank timestamp
		 * even in VRR mode, as scanout is past the front-porch atm.
		 */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		/* Wake up userspace by sending the pageflip event with proper
		 * count and timestamp of vblank of flip completion.
		 */
		if (e) {
			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

			/* Event sent, so done with vblank for this flip */
			drm_crtc_vblank_put(&amdgpu_crtc->base);
		}
	} else if (e) {
		/* VRR active and inside front-porch: vblank count and
		 * timestamp for pageflip event will only be up to date after
		 * drm_crtc_handle_vblank() has been executed from late vblank
		 * irq handler after start of back-porch (vline 0). We queue the
		 * pageflip event for send-out by drm_crtc_handle_vblank() with
		 * updated timestamp and count, once it runs after us.
		 *
		 * We need to open-code this instead of using the helper
		 * drm_crtc_arm_vblank_event(), as that helper would
		 * call drm_crtc_accurate_vblank_count(), which we must
		 * not call in VRR mode while we are in front-porch!
		 */

		/* sequence will be replaced by real count during send-out. */
		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
		e->pipe = amdgpu_crtc->crtc_id;

		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
		e = NULL;
	}

	/* Keep track of vblank of this flip for flip throttling. We use the
	 * cooked hw counter, as that one is incremented at the start of this
	 * vblank of pageflip completion, so last_flip_vblank is the forbidden
	 * count for queueing new pageflips if vsync + VRR is enabled.
	 */
	amdgpu_crtc->dm_irq_params.last_flip_vblank =
		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
			 amdgpu_crtc->crtc_id, amdgpu_crtc,
			 vrr_active, (int) !e);
}

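/**
 * dm_vupdate_high_irq() - Handles VUPDATE interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * In VRR mode, core vblank handling is deferred to this handler, which
 * runs after the end of front-porch. Also performs BTR processing on
 * pre-DCE12 (pre-AI family) ASICs.
 */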
static void dm_vupdate_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

	if (acrtc) {
		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

		DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
			      acrtc->crtc_id,
			      vrr_active);

		/* Core vblank handling is done here after the end of
		 * front-porch in VRR mode, as vblank timestamping will give
		 * valid results only after front-porch. This also delivers
		 * page-flip completion events that were queued to us if a
		 * pageflip happened inside front-porch.
		 */
		if (vrr_active) {
			drm_crtc_handle_vblank(&acrtc->base);

			/* BTR processing for pre-DCE12 ASICs */
			if (acrtc->dm_irq_params.stream &&
			    adev->family < AMDGPU_FAMILY_AI) {
				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
				mod_freesync_handle_v_update(
				    adev->dm.freesync_module,
				    acrtc->dm_irq_params.stream,
				    &acrtc->dm_irq_params.vrr_params);

				dc_stream_adjust_vmin_vmax(
				    adev->dm.dc,
				    acrtc->dm_irq_params.stream,
				    &acrtc->dm_irq_params.vrr_params.adjust);
				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
			}
		}
	}
}

/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
	if (!acrtc)
		return;

	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

	DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
		      vrr_active, acrtc->dm_irq_params.active_planes);

	/*
	 * Core vblank handling at start of front-porch is only possible
	 * in non-vrr mode, as only there vblank timestamping will give
	 * valid results while done in front-porch. Otherwise defer it
	 * to dm_vupdate_high_irq after end of front-porch.
	 */
	if (!vrr_active)
		drm_crtc_handle_vblank(&acrtc->base);

	/*
	 * Following stuff must happen at start of vblank, for crc
	 * computation and below-the-range btr support in vrr mode.
	 */
	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

	/* BTR updates need to happen before VUPDATE on Vega and above. */
	if (adev->family < AMDGPU_FAMILY_AI)
		return;

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (acrtc->dm_irq_params.stream &&
	    acrtc->dm_irq_params.vrr_params.supported &&
	    acrtc->dm_irq_params.freesync_config.state ==
		    VRR_STATE_ACTIVE_VARIABLE) {
		mod_freesync_handle_v_update(adev->dm.freesync_module,
					     acrtc->dm_irq_params.stream,
					     &acrtc->dm_irq_params.vrr_params);

		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
					   &acrtc->dm_irq_params.vrr_params.adjust);
	}

	/*
	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
	 * In that case, pageflip completion interrupts won't fire and pageflip
	 * completion events won't get delivered. Prevent this by sending
	 * pending pageflip events from here if a flip is still pending.
	 *
	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
	 * avoid race conditions between flip programming and completion,
	 * which could cause too early flip completion events.
	 */
	if (adev->family >= AMDGPU_FAMILY_RV &&
	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
	    acrtc->dm_irq_params.active_planes == 0) {
		if (acrtc->event) {
			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
			acrtc->event = NULL;
			drm_crtc_vblank_put(&acrtc->base);
		}
		acrtc->pflip_status = AMDGPU_FLIP_NONE;
	}

	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}

static int dm_set_clockgating_state(void *handle,
				    enum amd_clockgating_state state)
{
	return 0;
}

static int dm_set_powergating_state(void *handle,
				    enum amd_powergating_state state)
{
	return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);

/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_compressor_info *compressor = &adev->dm.compressor;
	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
	struct drm_display_mode *mode;
	unsigned long max_size = 0;

	if (adev->dm.dc->fbc_compressor == NULL)
		return;

	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	if (compressor->bo_ptr)
		return;

	list_for_each_entry(mode, &connector->modes, head) {
		if (max_size < mode->htotal * mode->vtotal)
			max_size = mode->htotal * mode->vtotal;
	}

	if (max_size) {
		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
			    &compressor->gpu_addr, &compressor->cpu_addr);

		if (r)
			DRM_ERROR("DM: Failed to initialize FBC\n");
		else {
			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
		}
	}
}

static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
					     int pipe, bool *enabled,
					     unsigned char *buf, int max_bytes)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct amdgpu_dm_connector *aconnector;
	int ret = 0;

	*enabled = false;

	mutex_lock(&adev->dm.audio_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->audio_inst != port)
			continue;

		*enabled = true;
		ret = drm_eld_size(connector->eld);
		memcpy(buf, connector->eld, min(max_bytes, ret));

		break;
	}
	drm_connector_list_iter_end(&conn_iter);

	mutex_unlock(&adev->dm.audio_lock);

	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

	return ret;
}

static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
	.get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
					  struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = &amdgpu_dm_audio_component_ops;
	acomp->dev = kdev;
	adev->dm.audio_component = acomp;

	return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
					     struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = NULL;
	acomp->dev = NULL;
	adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
	.bind	= amdgpu_dm_audio_component_bind,
	.unbind	= amdgpu_dm_audio_component_unbind,
};

static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
	int i, ret;

	if (!amdgpu_audio)
		return 0;

	adev->mode_info.audio.enabled = true;

	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		adev->mode_info.audio.pin[i].id =
			adev->dm.dc->res_pool->audios[i]->inst;
		adev->mode_info.audio.pin[i].offset = 0;
	}

	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
	if (ret < 0)
		return ret;

	adev->dm.audio_registered = true;

	return 0;
}

static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
	if (!amdgpu_audio)
		return;

	if (!adev->mode_info.audio.enabled)
		return;

	if (adev->dm.audio_registered) {
		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
		adev->dm.audio_registered = false;
	}

	/* TODO: Disable audio? */

	adev->mode_info.audio.enabled = false;
}

static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
	struct drm_audio_component *acomp = adev->dm.audio_component;

	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
						 pin, -1);
	}
}

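/*
 * Copy the DMUB firmware sections and VBIOS into their framebuffer
 * regions, program the hardware parameters, and start the DMUB service.
 * Returns 0 when DMUB is unsupported on the ASIC or was brought up
 * successfully.
 */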
static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
	const struct dmcub_firmware_header_v1_0 *hdr;
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
	const struct firmware *dmub_fw = adev->dm.dmub_fw;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	struct abm *abm = adev->dm.dc->res_pool->abm;
	struct dmub_srv_hw_params hw_params;
	enum dmub_status status;
	const unsigned char *fw_inst_const, *fw_bss_data;
	uint32_t i, fw_inst_const_size, fw_bss_data_size;
	bool has_hw_support;

	if (!dmub_srv)
		/* DMUB isn't supported on the ASIC. */
		return 0;

	if (!fb_info) {
		DRM_ERROR("No framebuffer info for DMUB service.\n");
		return -EINVAL;
	}

	if (!dmub_fw) {
		/* Firmware required for DMUB support. */
		DRM_ERROR("No firmware provided for DMUB.\n");
		return -EINVAL;
	}

	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
		return -EINVAL;
	}

	if (!has_hw_support) {
		DRM_INFO("DMUB unsupported on ASIC\n");
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

	fw_inst_const = dmub_fw->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
			PSP_HEADER_BYTES;

	fw_bss_data = dmub_fw->data +
		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		      le32_to_cpu(hdr->inst_const_bytes);

	/* Copy firmware and bios info into FB memory. */
	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
	 * amdgpu_ucode_init_single_fw will load dmub firmware
	 * fw_inst_const part to cw0; otherwise, the firmware back door load
	 * will be done by dm_dmub_hw_init
	 */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
				fw_inst_const_size);
	}

	if (fw_bss_data_size)
		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
		       fw_bss_data, fw_bss_data_size);

	/* Copy firmware bios info into FB memory. */
	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
	       adev->bios_size);

	/* Reset regions that need to be reset. */
	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

	/* Initialize hardware. */
	memset(&hw_params, 0, sizeof(hw_params));
	hw_params.fb_base = adev->gmc.fb_start;
	hw_params.fb_offset = adev->gmc.aper_base;

	/* backdoor load firmware and trigger dmub running */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		hw_params.load_inst_const = true;

	if (dmcu)
		hw_params.psp_version = dmcu->psp_version;

	for (i = 0; i < fb_info->num_fb; ++i)
		hw_params.fb[i] = &fb_info->fb[i];

	status = dmub_srv_hw_init(dmub_srv, &hw_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
		return -EINVAL;
	}

	/* Wait for firmware load to finish. */
	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

	/* Init DMCU and ABM if available. */
	if (dmcu && abm) {
		dmcu->funcs->dmcu_init(dmcu);
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
	}

	if (!adev->dm.dc->ctx->dmub_srv)
		adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
	if (!adev->dm.dc->ctx->dmub_srv) {
		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
		return -ENOMEM;
	}

	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
		 adev->dm.dmcub_fw_version);

	return 0;
}

static void amdgpu_check_debugfs_connector_property_change(struct amdgpu_device *adev,
							   struct drm_atomic_state *state)
{
	struct drm_connector *connector;
	struct drm_crtc *crtc;
	struct amdgpu_dm_connector *amdgpu_dm_connector;
	struct drm_connector_state *conn_state;
	struct dm_crtc_state *acrtc_state;
	struct drm_crtc_state *crtc_state;
	struct dc_stream_state *stream;
	struct drm_device *dev = adev_to_drm(adev);

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
		conn_state = connector->state;

		if (!(conn_state && conn_state->crtc))
			continue;

		crtc = conn_state->crtc;
		acrtc_state = to_dm_crtc_state(crtc->state);

		if (!(acrtc_state && acrtc_state->stream))
			continue;

		stream = acrtc_state->stream;

		if (amdgpu_dm_connector->dsc_settings.dsc_force_enable ||
		    amdgpu_dm_connector->dsc_settings.dsc_num_slices_v ||
		    amdgpu_dm_connector->dsc_settings.dsc_num_slices_h ||
		    amdgpu_dm_connector->dsc_settings.dsc_bits_per_pixel) {
			conn_state = drm_atomic_get_connector_state(state, connector);
			crtc_state = drm_atomic_get_crtc_state(state, crtc);
			crtc_state->mode_changed = true;
		}
	}
}

struct amdgpu_stutter_quirk {
	u16 chip_vendor;
	u16 chip_device;
	u16 subsys_vendor;
	u16 subsys_device;
	u8 revision;
};

static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = {
	/* https://bugzilla.kernel.org/show_bug.cgi?id=214417 */
	{ 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
	{ 0, 0, 0, 0, 0 },
};

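/*
 * Match the device's PCI IDs and revision against the quirk list above;
 * returns true if memory stutter mode should be disabled on this system.
 */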
static bool dm_should_disable_stutter(struct pci_dev *pdev)
{
	const struct amdgpu_stutter_quirk *p = amdgpu_stutter_quirk_list;

	while (p && p->chip_device != 0) {
		if (pdev->vendor == p->chip_vendor &&
		    pdev->device == p->chip_device &&
		    pdev->subsystem_vendor == p->subsys_vendor &&
		    pdev->subsystem_device == p->subsys_device &&
		    pdev->revision == p->revision) {
			return true;
		}
		++p;
	}
	return false;
}

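/*
 * Core DM bring-up: IRQ handling, the DC instance and its feature flags,
 * DMUB hardware, the freesync and color modules, HDCP (when enabled), and
 * the DRM-facing display structures.
 */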
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
	struct dc_init_data init_data;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct dc_callback_init init_params;
#endif
	int r;

	adev->dm.ddev = adev_to_drm(adev);
	adev->dm.adev = adev;

	/* Zero all the fields */
	memset(&init_data, 0, sizeof(init_data));
#ifdef CONFIG_DRM_AMD_DC_HDCP
	memset(&init_params, 0, sizeof(init_params));
#endif

	mutex_init(&adev->dm.dc_lock);
	mutex_init(&adev->dm.audio_lock);

	if (amdgpu_dm_irq_init(adev)) {
		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
		goto error;
	}

	init_data.asic_id.chip_family = adev->family;

	init_data.asic_id.pci_revision_id = adev->pdev->revision;
	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
	init_data.asic_id.chip_id = adev->pdev->device;

	init_data.asic_id.vram_width = adev->gmc.vram_width;
	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
	init_data.asic_id.atombios_base_address =
		adev->mode_info.atom_context->bios;

	init_data.driver = adev;

	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

	if (!adev->dm.cgs_device) {
		DRM_ERROR("amdgpu: failed to create cgs device.\n");
		goto error;
	}

	init_data.cgs_device = adev->dm.cgs_device;

	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

	switch (adev->asic_type) {
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_RAVEN:
	case CHIP_RENOIR:
		init_data.flags.gpu_vm_support = true;
		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
			init_data.flags.disable_dmcu = true;
		break;
	default:
		break;
	}

	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
		init_data.flags.fbc_support = true;

	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
		init_data.flags.multi_mon_pp_mclk_switch = true;

	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
		init_data.flags.disable_fractional_pwm = true;

	init_data.flags.power_down_display_on_boot = true;

	init_data.soc_bounding_box = adev->dm.soc_bounding_box;

	/* Display Core create. */
	adev->dm.dc = dc_create(&init_data);

	if (adev->dm.dc) {
		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
	} else {
		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
		goto error;
	}

	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
		adev->dm.dc->debug.force_single_disp_pipe_split = false;
		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
	}

	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
		adev->dm.dc->debug.disable_stutter =
			!(amdgpu_pp_feature_mask & PP_STUTTER_MODE);
	if (dm_should_disable_stutter(adev->pdev))
		adev->dm.dc->debug.disable_stutter = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
		adev->dm.dc->debug.disable_stutter = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
		adev->dm.dc->debug.disable_dsc = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
		adev->dm.dc->debug.disable_clock_gate = true;

	r = dm_dmub_hw_init(adev);
	if (r) {
		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
		goto error;
	}

	dc_hardware_init(adev->dm.dc);

	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module) {
		DRM_ERROR(
		"amdgpu: failed to initialize freesync_module.\n");
	} else
		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
				adev->dm.freesync_module);

	amdgpu_dm_init_color_mod();

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);

		if (!adev->dm.hdcp_workqueue)
			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
		else
			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);

		dc_init_callbacks(adev->dm.dc, &init_params);
	}
#endif
	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	/* create fake encoders for MST */
	dm_dp_create_fake_mst_encoders(adev);

	/* TODO: Add_display_info? */

	/* TODO use dynamic cursor width */
	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

	/* Disable vblank IRQs aggressively for power-saving */
	adev_to_drm(adev)->vblank_disable_immediate = true;

	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
		DRM_ERROR(
		"amdgpu: failed to initialize vblank for display support.\n");
		goto error;
	}

	DRM_DEBUG_DRIVER("KMS initialized.\n");

	return 0;
error:
	amdgpu_dm_fini(adev);

	return -EINVAL;
}

static void amdgpu_dm_fini(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->dm.display_indexes_num; i++) {
		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
	}

	amdgpu_dm_audio_fini(adev);

	amdgpu_dm_destroy_drm_device(&adev->dm);

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.hdcp_workqueue) {
		hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
		adev->dm.hdcp_workqueue = NULL;
	}

	if (adev->dm.dc)
		dc_deinit_callbacks(adev->dm.dc);
#endif
	/* Check adev->dm.dc first: on the init error path it may be NULL. */
	if (adev->dm.dc && adev->dm.dc->ctx->dmub_srv) {
		dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
		adev->dm.dc->ctx->dmub_srv = NULL;
	}

	if (adev->dm.dmub_bo)
		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
				      &adev->dm.dmub_bo_gpu_addr,
				      &adev->dm.dmub_bo_cpu_addr);

	/* DC Destroy TODO: Replace destroy DAL */
	if (adev->dm.dc)
		dc_destroy(&adev->dm.dc);
	/*
	 * TODO: pageflip, vblank interrupt
	 *
	 * amdgpu_dm_irq_fini(adev);
	 */

	if (adev->dm.cgs_device) {
		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
		adev->dm.cgs_device = NULL;
	}
	if (adev->dm.freesync_module) {
		mod_freesync_destroy(adev->dm.freesync_module);
		adev->dm.freesync_module = NULL;
	}

	mutex_destroy(&adev->dm.audio_lock);
	mutex_destroy(&adev->dm.dc_lock);
}

static int load_dmcu_fw(struct amdgpu_device *adev)
{
	const char *fw_name_dmcu = NULL;
	int r;
	const struct dmcu_firmware_header_v1_0 *hdr;

	switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	case CHIP_OLAND:
#endif
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_RENOIR:
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
#endif
		return 0;
	case CHIP_NAVI12:
		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
		break;
	case CHIP_RAVEN:
		if (ASICREV_IS_PICASSO(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else
			return 0;
		break;
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -EINVAL;
	}

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
		return 0;
	}

	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
	if (r == -ENOENT) {
		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
		adev->dm.fw_dmcu = NULL;
		return 0;
	}
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
			fw_name_dmcu);
		return r;
	}

	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
			fw_name_dmcu);
		release_firmware(adev->dm.fw_dmcu);
		adev->dm.fw_dmcu = NULL;
		return r;
	}

	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);

	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");

	return 0;
}

static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
{
	struct amdgpu_device *adev = ctx;

	return dm_read_reg(adev->dm.dc->ctx, address);
}

static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
				     uint32_t value)
{
	struct amdgpu_device *adev = ctx;

	dm_write_reg(adev->dm.dc->ctx, address, value);
}

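/*
 * Software-side DMUB setup: request and validate the firmware for the
 * ASIC, create the DMUB service, compute its region layout, allocate the
 * backing framebuffer, and fill in the per-region framebuffer info.
 */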
static int dm_dmub_sw_init(struct amdgpu_device *adev)
{
	struct dmub_srv_create_params create_params;
	struct dmub_srv_region_params region_params;
	struct dmub_srv_region_info region_info;
	struct dmub_srv_memory_params memory_params;
	struct dmub_srv_fb_info *fb_info;
	struct dmub_srv *dmub_srv;
	const struct dmcub_firmware_header_v1_0 *hdr;
	const char *fw_name_dmub;
	enum dmub_asic dmub_asic;
	enum dmub_status status;
	int r;

	switch (adev->asic_type) {
	case CHIP_RENOIR:
		dmub_asic = DMUB_ASIC_DCN21;
		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
			fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
		break;
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
	case CHIP_SIENNA_CICHLID:
		dmub_asic = DMUB_ASIC_DCN30;
		fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
		break;
	case CHIP_NAVY_FLOUNDER:
		dmub_asic = DMUB_ASIC_DCN30;
		fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
		break;
#endif
	default:
		/* ASIC doesn't support DMUB. */
		return 0;
	}

	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
	if (r) {
		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
		return 0;
	}

	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
	if (r) {
		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
			AMDGPU_UCODE_ID_DMCUB;
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
			adev->dm.dmub_fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);

		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
			 adev->dm.dmcub_fw_version);
	}

	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
	dmub_srv = adev->dm.dmub_srv;

	if (!dmub_srv) {
		DRM_ERROR("Failed to allocate DMUB service!\n");
		return -ENOMEM;
	}

	memset(&create_params, 0, sizeof(create_params));
	create_params.user_ctx = adev;
	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
	create_params.asic = dmub_asic;

	/* Create the DMUB service. */
	status = dmub_srv_create(dmub_srv, &create_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error creating DMUB service: %d\n", status);
		return -EINVAL;
	}

	/* Calculate the size of all the regions for the DMUB service. */
	memset(&region_params, 0, sizeof(region_params));

	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
	region_params.vbios_size = adev->bios_size;
	region_params.fw_bss_data = region_params.bss_data_size ?
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		le32_to_cpu(hdr->inst_const_bytes) : NULL;
	region_params.fw_inst_const =
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		PSP_HEADER_BYTES;
	region_params.is_mailbox_in_inbox = false;

	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
					   &region_info);

	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
		return -EINVAL;
	}

	/*
	 * Allocate a framebuffer based on the total size of all the regions.
	 * TODO: Move this into GART.
	 */
	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
				    &adev->dm.dmub_bo_gpu_addr,
				    &adev->dm.dmub_bo_cpu_addr);
	if (r)
		return r;

	/* Rebase the regions on the framebuffer address. */
	memset(&memory_params, 0, sizeof(memory_params));
	memory_params.cpu_fb_addr = adev->dm.dmub_bo_cpu_addr;
	memory_params.gpu_fb_addr = adev->dm.dmub_bo_gpu_addr;
	memory_params.region_info = &region_info;

	adev->dm.dmub_fb_info =
		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
	fb_info = adev->dm.dmub_fb_info;

	if (!fb_info) {
		DRM_ERROR(
			"Failed to allocate framebuffer info for DMUB service!\n");
		return -ENOMEM;
	}

	status = dmub_srv_calc_mem_info(dmub_srv, &memory_params, fb_info);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
		return -EINVAL;
	}

	return 0;
}

static int dm_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = dm_dmub_sw_init(adev);
	if (r)
		return r;

	return load_dmcu_fw(adev);
}

static int dm_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	kfree(adev->dm.dmub_fb_info);
	adev->dm.dmub_fb_info = NULL;

	if (adev->dm.dmub_srv) {
		dmub_srv_destroy(adev->dm.dmub_srv);
		adev->dm.dmub_srv = NULL;
	}

	release_firmware(adev->dm.dmub_fw);
	adev->dm.dmub_fw = NULL;

	release_firmware(adev->dm.fw_dmcu);
	adev->dm.fw_dmcu = NULL;

	return 0;
}

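/*
 * Start MST topology management on every link DC detected as an MST
 * branch; on failure the link is downgraded to a single-stream (SST)
 * connection.
 */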
static int detect_mst_link_for_all_connectors(struct drm_device *dev)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	int ret = 0;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type == dc_connection_mst_branch &&
		    aconnector->mst_mgr.aux) {
			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
					 aconnector,
					 aconnector->base.base.id);

			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
			if (ret < 0) {
				DRM_ERROR("DM_MST: Failed to start MST\n");
				aconnector->dc_link->type =
					dc_connection_single;
				break;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	return ret;
}

static int dm_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	struct dmcu_iram_parameters params;
	unsigned int linear_lut[16];
	int i;
	struct dmcu *dmcu = NULL;
	bool ret = true;

	dmcu = adev->dm.dc->res_pool->dmcu;

	for (i = 0; i < 16; i++)
		linear_lut[i] = 0xFFFF * i / 15;

	params.set = 0;
	params.backlight_ramping_start = 0xCCCC;
	params.backlight_ramping_reduction = 0xCCCCCCCC;
	params.backlight_lut_array_size = 16;
	params.backlight_lut_array = linear_lut;

	/* Min backlight level after ABM reduction; don't allow below 1%:
	 * 0xFFFF * 0.01 = 0x28F
	 */
	params.min_abm_backlight = 0x28F;

	/* In the case where abm is implemented on dmcub,
	 * dmcu object will be null.
	 * ABM 2.4 and up are implemented on dmcub.
	 */
	if (dmcu)
		ret = dmcu_load_iram(dmcu, params);
	else if (adev->dm.dc->ctx->dmub_srv)
		ret = dmub_init_abm_config(adev->dm.dc->res_pool, params);

	if (!ret)
		return -EINVAL;

	return detect_mst_link_for_all_connectors(adev_to_drm(adev));
}

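/*
 * Suspend or resume the MST topology managers across S3. If a manager
 * fails to resume, MST is torn down on that link and a hotplug event is
 * sent so userspace re-probes.
 */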
static void s3_handle_mst(struct drm_device *dev, bool suspend)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct drm_dp_mst_topology_mgr *mgr;
	int ret;
	bool need_hotplug = false;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type != dc_connection_mst_branch ||
		    aconnector->mst_port)
			continue;

		mgr = &aconnector->mst_mgr;

		if (suspend) {
			drm_dp_mst_topology_mgr_suspend(mgr);
		} else {
			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
			if (ret < 0) {
				drm_dp_mst_topology_mgr_set_mst(mgr, false);
				need_hotplug = true;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	if (need_hotplug)
		drm_kms_helper_hotplug_event(dev);
}

static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
{
	struct smu_context *smu = &adev->smu;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return 0;

	/* This interface is for dGPU Navi1x. The Linux dc-pplib interface
	 * depends on the Windows driver dc implementation.
	 * For Navi1x, clock settings of dcn watermarks are fixed. The
	 * settings should be passed to smu during boot up and resume from s3.
	 * boot up: dc calculates dcn watermark clock settings within
	 * dc_create, dcn20_resource_construct, then calls pplib functions
	 * below to pass the settings to smu:
	 * smu_set_watermarks_for_clock_ranges
	 * smu_set_watermarks_table
	 * navi10_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Renoir, clock settings of dcn watermark are also fixed values.
	 * dc has implemented a different flow for the Windows driver:
	 * dc_hardware_init / dc_set_power_state
	 * dcn10_init_hw
	 * notify_wm_ranges
	 * set_wm_ranges
	 * -- Linux
	 * smu_set_watermarks_for_clock_ranges
	 * renoir_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Linux,
	 * dc_hardware_init -> amdgpu_dm_init
	 * dc_set_power_state --> dm_resume
	 *
	 * Therefore, this function applies to Navi10/12/14 but not Renoir.
	 */
	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
		break;
	default:
		return 0;
	}

	ret = smu_write_watermarks_table(smu);
	if (ret) {
		DRM_ERROR("Failed to update WMTABLE!\n");
		return ret;
	}

	return 0;
}

1631 
1632 /**
1633  * dm_hw_init() - Initialize DC device
1634  * @handle: The base driver device containing the amdgpu_dm device.
1635  *
1636  * Initialize the &struct amdgpu_display_manager device. This involves calling
1637  * the initializers of each DM component, then populating the struct with them.
1638  *
1639  * Although the function implies hardware initialization, both hardware and
1640  * software are initialized here. Splitting them out to their relevant init
1641  * hooks is a future TODO item.
1642  *
1643  * Some notable things that are initialized here:
1644  *
1645  * - Display Core, both software and hardware
1646  * - DC modules that we need (freesync and color management)
1647  * - DRM software states
1648  * - Interrupt sources and handlers
1649  * - Vblank support
1650  * - Debug FS entries, if enabled
1651  */
dm_hw_init(void *handle)1652 static int dm_hw_init(void *handle)
1653 {
1654 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1655 	/* Create DAL display manager */
1656 	amdgpu_dm_init(adev);
1657 	amdgpu_dm_hpd_init(adev);
1658 
1659 	return 0;
1660 }
1661 
1662 /**
1663  * dm_hw_fini() - Teardown DC device
1664  * @handle: The base driver device containing the amdgpu_dm device.
1665  *
1666  * Teardown components within &struct amdgpu_display_manager that require
1667  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1668  * were loaded. Also flush IRQ workqueues and disable them.
1669  */
dm_hw_fini(void *handle)1670 static int dm_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dm_hpd_fini(adev);

	amdgpu_dm_irq_fini(adev);
	amdgpu_dm_fini(adev);

	return 0;
}

static int dm_enable_vblank(struct drm_crtc *crtc);
static void dm_disable_vblank(struct drm_crtc *crtc);

static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
					  struct dc_state *state, bool enable)
{
	enum dc_irq_source irq_source;
	struct amdgpu_crtc *acrtc;
	int rc = -EBUSY;
	int i = 0;

	for (i = 0; i < state->stream_count; i++) {
		acrtc = get_crtc_by_otg_inst(
				adev, state->stream_status[i].primary_otg_inst);

		if (acrtc && state->stream_status[i].plane_count != 0) {
			irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
			rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
			DRM_DEBUG("crtc %d - pflip irq %sabling: r=%d\n",
				  acrtc->crtc_id, enable ? "en" : "dis", rc);
			if (rc)
				DRM_WARN("Failed to %s pflip interrupts\n",
					 enable ? "enable" : "disable");

			if (enable) {
				rc = dm_enable_vblank(&acrtc->base);
				if (rc)
					DRM_WARN("Failed to enable vblank interrupts\n");
			} else {
				dm_disable_vblank(&acrtc->base);
			}
		}
	}
}

static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
{
	struct dc_state *context = NULL;
	enum dc_status res = DC_ERROR_UNEXPECTED;
	int i;
	struct dc_stream_state *del_streams[MAX_PIPES];
	int del_streams_count = 0;

	memset(del_streams, 0, sizeof(del_streams));

	context = dc_create_state(dc);
	if (context == NULL)
		goto context_alloc_fail;

	dc_resource_state_copy_construct_current(dc, context);

	/* First remove from context all streams */
	for (i = 0; i < context->stream_count; i++) {
		struct dc_stream_state *stream = context->streams[i];

		del_streams[del_streams_count++] = stream;
	}

	/* Remove all planes for removed streams and then remove the streams */
	for (i = 0; i < del_streams_count; i++) {
		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
			res = DC_FAIL_DETACH_SURFACES;
			goto fail;
		}

		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
		if (res != DC_OK)
			goto fail;
	}

	res = dc_validate_global_state(dc, context, false);
	if (res != DC_OK) {
		DRM_ERROR("%s: resource validation failed, dc_status: %d\n", __func__, res);
		goto fail;
	}

	res = dc_commit_state(dc, context);

fail:
	dc_release_state(context);

context_alloc_fail:
	return res;
}
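
/*
 * dm_suspend() uses this in the GPU-reset path to detach all planes and
 * streams before the hardware goes down, so that the cached DC state can be
 * re-committed cleanly from dm_resume().
 */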

static int dm_suspend(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct amdgpu_display_manager *dm = &adev->dm;
	int ret = 0;

	if (amdgpu_in_reset(adev)) {
		mutex_lock(&dm->dc_lock);
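		/*
		 * Note: dc_lock is intentionally left held here across the GPU
		 * reset; the matching mutex_unlock() is in the reset path of
		 * dm_resume().
		 */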
		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);

		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);

		amdgpu_dm_commit_zero_streams(dm->dc);

		amdgpu_dm_irq_suspend(adev);

		return ret;
	}

	WARN_ON(adev->dm.cached_state);
	adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));

	s3_handle_mst(adev_to_drm(adev), true);

	amdgpu_dm_irq_suspend(adev);

	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);

	return 0;
}

static struct amdgpu_dm_connector *
amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
					     struct drm_crtc *crtc)
{
	uint32_t i;
	struct drm_connector_state *new_con_state;
	struct drm_connector *connector;
	struct drm_crtc *crtc_from_state;

	for_each_new_connector_in_state(state, connector, new_con_state, i) {
		crtc_from_state = new_con_state->crtc;

		if (crtc_from_state == crtc)
			return to_amdgpu_dm_connector(connector);
	}

	return NULL;
}

static void emulated_link_detect(struct dc_link *link)
{
	struct dc_sink_init_data sink_init_data = { 0 };
	struct display_sink_capability sink_caps = { 0 };
	enum dc_edid_status edid_status;
	struct dc_context *dc_ctx = link->ctx;
	struct dc_sink *sink = NULL;
	struct dc_sink *prev_sink = NULL;

	link->type = dc_connection_none;
	prev_sink = link->local_sink;

	if (prev_sink)
		dc_sink_release(prev_sink);

	switch (link->connector_signal) {
	case SIGNAL_TYPE_HDMI_TYPE_A: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
		break;
	}

	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
		break;
	}

	case SIGNAL_TYPE_DVI_DUAL_LINK: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
		break;
	}

	case SIGNAL_TYPE_LVDS: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_LVDS;
		break;
	}

	case SIGNAL_TYPE_EDP: {
		sink_caps.transaction_type =
			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
		sink_caps.signal = SIGNAL_TYPE_EDP;
		break;
	}

	case SIGNAL_TYPE_DISPLAY_PORT: {
		sink_caps.transaction_type =
			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
		break;
	}

	default:
		DC_ERROR("Invalid connector type! signal:%d\n",
			link->connector_signal);
		return;
	}

	sink_init_data.link = link;
	sink_init_data.sink_signal = sink_caps.signal;

	sink = dc_sink_create(&sink_init_data);
	if (!sink) {
		DC_ERROR("Failed to create sink!\n");
		return;
	}

	/* dc_sink_create returns a new reference */
	link->local_sink = sink;

	edid_status = dm_helpers_read_local_edid(
			link->ctx,
			link,
			sink);

	if (edid_status != EDID_OK)
		DC_ERROR("Failed to read EDID\n");
}

static void dm_gpureset_commit_state(struct dc_state *dc_state,
				     struct amdgpu_display_manager *dm)
{
	struct {
		struct dc_surface_update surface_updates[MAX_SURFACES];
		struct dc_plane_info plane_infos[MAX_SURFACES];
		struct dc_scaling_info scaling_infos[MAX_SURFACES];
		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
		struct dc_stream_update stream_update;
	} *bundle;
	int k, m;

	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
	if (!bundle) {
		dm_error("Failed to allocate update bundle\n");
		goto cleanup;
	}

	for (k = 0; k < dc_state->stream_count; k++) {
		bundle->stream_update.stream = dc_state->streams[k];

		for (m = 0; m < dc_state->stream_status[k].plane_count; m++) {
			bundle->surface_updates[m].surface =
				dc_state->stream_status[k].plane_states[m];
			bundle->surface_updates[m].surface->force_full_update =
				true;
		}
		dc_commit_updates_for_stream(
			dm->dc, bundle->surface_updates,
			dc_state->stream_status[k].plane_count,
			dc_state->streams[k], &bundle->stream_update, dc_state);
	}

cleanup:
	kfree(bundle);
}

static void dm_set_dpms_off(struct dc_link *link)
{
	struct dc_stream_state *stream_state;
	struct amdgpu_dm_connector *aconnector = link->priv;
	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
	struct dc_stream_update stream_update;
	bool dpms_off = true;

	memset(&stream_update, 0, sizeof(stream_update));
	stream_update.dpms_off = &dpms_off;

	mutex_lock(&adev->dm.dc_lock);
	stream_state = dc_stream_find_from_link(link);

	if (stream_state == NULL) {
		DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
		mutex_unlock(&adev->dm.dc_lock);
		return;
	}

	stream_update.stream = stream_state;
	dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
				     stream_state, &stream_update,
				     stream_state->ctx->dc->current_state);
	mutex_unlock(&adev->dm.dc_lock);
}

static int dm_resume(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct drm_device *ddev = adev_to_drm(adev);
	struct amdgpu_display_manager *dm = &adev->dm;
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_crtc_state;
	struct dm_crtc_state *dm_new_crtc_state;
	struct drm_plane *plane;
	struct drm_plane_state *new_plane_state;
	struct dm_plane_state *dm_new_plane_state;
	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
	enum dc_connection_type new_connection_type = dc_connection_none;
	struct dc_state *dc_state;
	int i, r, j;

	if (amdgpu_in_reset(adev)) {
		dc_state = dm->cached_dc_state;

		r = dm_dmub_hw_init(adev);
		if (r)
			DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);

		dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
		dc_resume(dm->dc);

		amdgpu_dm_irq_resume_early(adev);

		for (i = 0; i < dc_state->stream_count; i++) {
			dc_state->streams[i]->mode_changed = true;
			for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
				dc_state->stream_status[i].plane_states[j]->update_flags.raw
					= 0xffffffff;
			}
		}

		WARN_ON(!dc_commit_state(dm->dc, dc_state));

		dm_gpureset_commit_state(dm->cached_dc_state, dm);

		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);

		dc_release_state(dm->cached_dc_state);
		dm->cached_dc_state = NULL;

		amdgpu_dm_irq_resume_late(adev);

		mutex_unlock(&dm->dc_lock);

		return 0;
	}

	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
	dc_release_state(dm_state->context);
	dm_state->context = dc_create_state(dm->dc);
	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
	dc_resource_state_construct(dm->dc, dm_state->context);

	/* Before powering on DC we need to re-initialize DMUB. */
	r = dm_dmub_hw_init(adev);
	if (r)
		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);

	/* power on hardware */
	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);

	/* program HPD filter */
	dc_resume(dm->dc);

	/*
	 * early enable HPD Rx IRQ, should be done before set mode as short
	 * pulse interrupts are used for MST
	 */
	amdgpu_dm_irq_resume_early(adev);

	/* On resume we need to rewrite the MSTM control bits to enable MST */
	s3_handle_mst(ddev, false);

	/* Do detection */
	drm_connector_list_iter_begin(ddev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);

		if (!aconnector->dc_link)
			continue;

		/*
		 * This is the case when traversing through already created
		 * MST connectors; it should be skipped.
		 */
		if (aconnector->dc_link->type == dc_connection_mst_branch)
			continue;

		mutex_lock(&aconnector->hpd_lock);
		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
			DRM_ERROR("KMS: Failed to detect connector\n");

		if (aconnector->base.force && new_connection_type == dc_connection_none)
			emulated_link_detect(aconnector->dc_link);
		else
			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);

		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
			aconnector->fake_enable = false;

		if (aconnector->dc_sink)
			dc_sink_release(aconnector->dc_sink);
		aconnector->dc_sink = NULL;
		amdgpu_dm_update_connector_after_detect(aconnector);
		mutex_unlock(&aconnector->hpd_lock);
	}
	drm_connector_list_iter_end(&iter);

	/* Force mode set in atomic commit */
	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
		new_crtc_state->active_changed = true;

	/*
	 * atomic_check is expected to create the dc states. We need to release
	 * them here, since they were duplicated as part of the suspend
	 * procedure.
	 */
	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		if (dm_new_crtc_state->stream) {
			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
			dc_stream_release(dm_new_crtc_state->stream);
			dm_new_crtc_state->stream = NULL;
		}
	}

	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
		dm_new_plane_state = to_dm_plane_state(new_plane_state);
		if (dm_new_plane_state->dc_state) {
			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
			dc_plane_state_release(dm_new_plane_state->dc_state);
			dm_new_plane_state->dc_state = NULL;
		}
	}

	drm_atomic_helper_resume(ddev, dm->cached_state);

	dm->cached_state = NULL;

	amdgpu_dm_irq_resume_late(adev);

	amdgpu_dm_smu_write_watermarks_table(adev);

	return 0;
}

/**
 * DOC: DM Lifecycle
 *
 * DM (and consequently DC) is registered in the amdgpu base driver as an IP
 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
 * the base driver's device list to be initialized and torn down accordingly.
 *
 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
 */

static const struct amd_ip_funcs amdgpu_dm_funcs = {
	.name = "dm",
	.early_init = dm_early_init,
	.late_init = dm_late_init,
	.sw_init = dm_sw_init,
	.sw_fini = dm_sw_fini,
	.hw_init = dm_hw_init,
	.hw_fini = dm_hw_fini,
	.suspend = dm_suspend,
	.resume = dm_resume,
	.is_idle = dm_is_idle,
	.wait_for_idle = dm_wait_for_idle,
	.check_soft_reset = dm_check_soft_reset,
	.soft_reset = dm_soft_reset,
	.set_clockgating_state = dm_set_clockgating_state,
	.set_powergating_state = dm_set_powergating_state,
};

const struct amdgpu_ip_block_version dm_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &amdgpu_dm_funcs,
};
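
/*
 * For context, a minimal sketch of how an IP block like this gets hooked up
 * during ASIC initialization. The exact call site lives in the per-SoC setup
 * code; the function below is a hypothetical example, not part of this file:
 *
 *	static int example_soc_ip_blocks_add(struct amdgpu_device *adev)
 *	{
 *		// DM/DCE is registered alongside the other IP blocks; the
 *		// base driver then walks the list, calling hw_init(),
 *		// suspend(), resume(), etc. through amd_ip_funcs.
 *		return amdgpu_device_ip_block_add(adev, &dm_ip_block);
 *	}
 */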

/**
 * DOC: atomic
 *
 * *WIP*
 */

static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
	.fb_create = amdgpu_display_user_framebuffer_create,
	.output_poll_changed = drm_fb_helper_output_poll_changed,
	.atomic_check = amdgpu_dm_atomic_check,
	.atomic_commit = amdgpu_dm_atomic_commit,
};

static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
};

static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
{
	u32 max_avg, min_cll, max, min, q, r;
	struct amdgpu_dm_backlight_caps *caps;
	struct amdgpu_display_manager *dm;
	struct drm_connector *conn_base;
	struct amdgpu_device *adev;
	struct dc_link *link = NULL;
	static const u8 pre_computed_values[] = {
		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};

	if (!aconnector || !aconnector->dc_link)
		return;

	link = aconnector->dc_link;
	if (link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	conn_base = &aconnector->base;
	adev = drm_to_adev(conn_base->dev);
	dm = &adev->dm;
	caps = &dm->backlight_caps;
	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
	caps->aux_support = false;
	max_avg = conn_base->hdr_sink_metadata.hdmi_type1.max_fall;
	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;

	if (caps->ext_caps->bits.oled == 1 /*||
	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
	    caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
		caps->aux_support = true;

	if (amdgpu_backlight == 0)
		caps->aux_support = false;
	else if (amdgpu_backlight == 1)
		caps->aux_support = true;

	/* From the specification (CTA-861-G), for calculating the maximum
	 * luminance we need to use:
	 *	Luminance = 50*2**(CV/32)
	 * where CV is a one-byte value.
	 * Calculating this expression directly would require floating-point
	 * precision; to avoid that complexity we take advantage of the fact
	 * that CV is divided by a constant. From Euclid's division algorithm,
	 * we know that CV can be written as CV = 32*q + r. Substituting CV in
	 * the luminance expression gives 50*(2**q)*(2**(r/32)), so we only
	 * need to pre-compute the values of 2**(r/32). The table was generated
	 * with the following Ruby line:
	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
	 * and the results can be verified against pre_computed_values.
	 */
	q = max_avg >> 5;
	r = max_avg % 32;
	max = (1 << q) * pre_computed_values[r];

	/* min luminance: maxLum * (CV/255)^2 / 100 */
	q = DIV_ROUND_CLOSEST(min_cll, 255);
	min = max * DIV_ROUND_CLOSEST((q * q), 100);
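
	/*
	 * Worked example: for max_fall = 70, q = 70 >> 5 = 2 and
	 * r = 70 % 32 = 6, so max = (1 << 2) * pre_computed_values[6]
	 * = 4 * 57 = 228, matching 50 * 2**(70/32) ~= 227.8 nits.
	 */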

	caps->aux_max_input_signal = max;
	caps->aux_min_input_signal = min;
}

void amdgpu_dm_update_connector_after_detect(
		struct amdgpu_dm_connector *aconnector)
{
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	struct dc_sink *sink;

	/* MST handled by drm_mst framework */
	if (aconnector->mst_mgr.mst_state == true)
		return;

	sink = aconnector->dc_link->local_sink;
	if (sink)
		dc_sink_retain(sink);

	/*
	 * EDID-managed connectors get their first update only in the
	 * mode_valid hook; after that the connector sink is set to either the
	 * fake or the physical sink, depending on the link status.
	 * Skip if this was already done during boot.
	 */
	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
			&& aconnector->dc_em_sink) {

		/*
		 * For headless S3 resume, use the emulated sink (dc_em_sink)
		 * to fake the stream, because on resume connector->sink is
		 * set to NULL.
		 */
		mutex_lock(&dev->mode_config.mutex);

		if (sink) {
			if (aconnector->dc_sink) {
				amdgpu_dm_update_freesync_caps(connector, NULL);
				/*
				 * The retain and release below bump the sink
				 * refcount, because the link no longer points
				 * to it after a disconnect; otherwise the next
				 * crtc-to-connector reshuffle by userspace
				 * would trigger an unwanted dc_sink release.
				 */
				dc_sink_release(aconnector->dc_sink);
			}
			aconnector->dc_sink = sink;
			dc_sink_retain(aconnector->dc_sink);
			amdgpu_dm_update_freesync_caps(connector,
					aconnector->edid);
		} else {
			amdgpu_dm_update_freesync_caps(connector, NULL);
			if (!aconnector->dc_sink) {
				aconnector->dc_sink = aconnector->dc_em_sink;
				dc_sink_retain(aconnector->dc_sink);
			}
		}

		mutex_unlock(&dev->mode_config.mutex);

		if (sink)
			dc_sink_release(sink);
		return;
	}

	/*
	 * TODO: temporary guard to look for proper fix
	 * if this sink is MST sink, we should not do anything
	 */
	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
		dc_sink_release(sink);
		return;
	}

	if (aconnector->dc_sink == sink) {
		/*
		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
		 * Do nothing!!
		 */
		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
				aconnector->connector_id);
		if (sink)
			dc_sink_release(sink);
		return;
	}

	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
		aconnector->connector_id, aconnector->dc_sink, sink);

	mutex_lock(&dev->mode_config.mutex);

	/*
	 * 1. Update status of the drm connector
	 * 2. Send an event and let userspace tell us what to do
	 */
	if (sink) {
		/*
		 * TODO: check if we still need the S3 mode update workaround.
		 * If yes, put it here.
		 */
		if (aconnector->dc_sink) {
			amdgpu_dm_update_freesync_caps(connector, NULL);
			dc_sink_release(aconnector->dc_sink);
		}

		aconnector->dc_sink = sink;
		dc_sink_retain(aconnector->dc_sink);
		if (sink->dc_edid.length == 0) {
			aconnector->edid = NULL;
			if (aconnector->dc_link->aux_mode) {
				drm_dp_cec_unset_edid(
					&aconnector->dm_dp_aux.aux);
			}
		} else {
			aconnector->edid =
				(struct edid *)sink->dc_edid.raw_edid;

			if (aconnector->dc_link->aux_mode)
				drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
						    aconnector->edid);
		}

		drm_connector_update_edid_property(connector, aconnector->edid);
		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
		update_connector_ext_caps(aconnector);
	} else {
		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
		amdgpu_dm_update_freesync_caps(connector, NULL);
		drm_connector_update_edid_property(connector, NULL);
		aconnector->num_modes = 0;
		dc_sink_release(aconnector->dc_sink);
		aconnector->dc_sink = NULL;
		aconnector->edid = NULL;
#ifdef CONFIG_DRM_AMD_DC_HDCP
		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
#endif
	}

	mutex_unlock(&dev->mode_config.mutex);

	update_subconnector_property(aconnector);

	if (sink)
		dc_sink_release(sink);
}

static void handle_hpd_irq(void *param)
{
	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	enum dc_connection_type new_connection_type = dc_connection_none;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct amdgpu_device *adev = drm_to_adev(dev);
#endif

	/*
	 * In case of failure or MST there is no need to update the connector
	 * status or notify the OS, since (in the MST case) MST does this in
	 * its own context.
	 */
	mutex_lock(&aconnector->hpd_lock);

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.hdcp_workqueue)
		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
#endif
	if (aconnector->fake_enable)
		aconnector->fake_enable = false;

	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
		DRM_ERROR("KMS: Failed to detect connector\n");

	if (aconnector->base.force && new_connection_type == dc_connection_none) {
		emulated_link_detect(aconnector->dc_link);

		drm_modeset_lock_all(dev);
		dm_restore_drm_connector_state(dev, connector);
		drm_modeset_unlock_all(dev);

		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
			drm_kms_helper_hotplug_event(dev);

	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
		if (new_connection_type == dc_connection_none &&
		    aconnector->dc_link->type == dc_connection_none)
			dm_set_dpms_off(aconnector->dc_link);

		amdgpu_dm_update_connector_after_detect(aconnector);

		drm_modeset_lock_all(dev);
		dm_restore_drm_connector_state(dev, connector);
		drm_modeset_unlock_all(dev);

		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
			drm_kms_helper_hotplug_event(dev);
	}
	mutex_unlock(&aconnector->hpd_lock);
}

static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
{
	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
	uint8_t dret;
	bool new_irq_handled = false;
	int dpcd_addr;
	int dpcd_bytes_to_read;

	const int max_process_count = 30;
	int process_count = 0;

	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);

	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
		/* DPCD 0x200 - 0x201 for downstream IRQ */
		dpcd_addr = DP_SINK_COUNT;
	} else {
		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
		dpcd_addr = DP_SINK_COUNT_ESI;
	}
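
	/*
	 * With the standard DPCD register layout this works out to a 2-byte
	 * read (0x200-0x201) on pre-1.2 sinks and a 4-byte read
	 * (0x2002-0x2005) on DPCD 1.2+ sinks, where the ESI registers live.
	 */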

	dret = drm_dp_dpcd_read(
		&aconnector->dm_dp_aux.aux,
		dpcd_addr,
		esi,
		dpcd_bytes_to_read);

	while (dret == dpcd_bytes_to_read &&
		process_count < max_process_count) {
		uint8_t retry;

		dret = 0;

		process_count++;

		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
		/* handle HPD short pulse irq */
		if (aconnector->mst_mgr.mst_state)
			drm_dp_mst_hpd_irq(
				&aconnector->mst_mgr,
				esi,
				&new_irq_handled);

		if (new_irq_handled) {
			/* ACK at DPCD to notify the downstream device */
			const int ack_dpcd_bytes_to_write =
				dpcd_bytes_to_read - 1;

			for (retry = 0; retry < 3; retry++) {
				uint8_t wret;

				wret = drm_dp_dpcd_write(
					&aconnector->dm_dp_aux.aux,
					dpcd_addr + 1,
					&esi[1],
					ack_dpcd_bytes_to_write);
				if (wret == ack_dpcd_bytes_to_write)
					break;
			}

			/* check if there is new irq to be handled */
			dret = drm_dp_dpcd_read(
				&aconnector->dm_dp_aux.aux,
				dpcd_addr,
				esi,
				dpcd_bytes_to_read);

			new_irq_handled = false;
		} else {
			break;
		}
	}

	if (process_count == max_process_count)
		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
}

static void handle_hpd_rx_irq(void *param)
{
	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	struct dc_link *dc_link = aconnector->dc_link;
	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
	enum dc_connection_type new_connection_type = dc_connection_none;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	union hpd_irq_data hpd_irq_data;
	struct amdgpu_device *adev = drm_to_adev(dev);

	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
#endif

	/*
	 * TODO: We need this temporary mutex so the HPD interrupt does not
	 * race on the GPIO; once an i2c helper is implemented, this mutex
	 * should be retired.
	 */
	if (dc_link->type != dc_connection_mst_branch)
		mutex_lock(&aconnector->hpd_lock);

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL) &&
#else
	if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
#endif
			!is_mst_root_connector) {
		/* Downstream Port status changed. */
		if (!dc_link_detect_sink(dc_link, &new_connection_type))
			DRM_ERROR("KMS: Failed to detect connector\n");

		if (aconnector->base.force && new_connection_type == dc_connection_none) {
			emulated_link_detect(dc_link);

			if (aconnector->fake_enable)
				aconnector->fake_enable = false;

			amdgpu_dm_update_connector_after_detect(aconnector);

			drm_modeset_lock_all(dev);
			dm_restore_drm_connector_state(dev, connector);
			drm_modeset_unlock_all(dev);

			drm_kms_helper_hotplug_event(dev);
		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {

			if (aconnector->fake_enable)
				aconnector->fake_enable = false;

			amdgpu_dm_update_connector_after_detect(aconnector);

			drm_modeset_lock_all(dev);
			dm_restore_drm_connector_state(dev, connector);
			drm_modeset_unlock_all(dev);

			drm_kms_helper_hotplug_event(dev);
		}
	}
#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
		if (adev->dm.hdcp_workqueue)
			hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
	}
#endif
	if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
	    (dc_link->type == dc_connection_mst_branch))
		dm_handle_hpd_rx_irq(aconnector);

	if (dc_link->type != dc_connection_mst_branch) {
		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
		mutex_unlock(&aconnector->hpd_lock);
	}
}

static void register_hpd_handlers(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_connector *connector;
	struct amdgpu_dm_connector *aconnector;
	const struct dc_link *dc_link;
	struct dc_interrupt_params int_params = {0};

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	list_for_each_entry(connector,
			&dev->mode_config.connector_list, head) {

		aconnector = to_amdgpu_dm_connector(connector);
		dc_link = aconnector->dc_link;

		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
			int_params.irq_source = dc_link->irq_source_hpd;

			amdgpu_dm_irq_register_interrupt(adev, &int_params,
					handle_hpd_irq,
					(void *) aconnector);
		}

		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {

			/* Also register for DP short pulse (hpd_rx). */
			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
			int_params.irq_source = dc_link->irq_source_hpd_rx;

			amdgpu_dm_irq_register_interrupt(adev, &int_params,
					handle_hpd_rx_irq,
					(void *) aconnector);
		}
	}
}

#if defined(CONFIG_DRM_AMD_DC_SI)
/* Register IRQ sources and initialize IRQ callbacks */
static int dce60_register_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r;
	int i;
	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	/*
	 * Actions of amdgpu_irq_add_id():
	 * 1. Register a set() function with base driver.
	 *    Base driver will call set() function to enable/disable an
	 *    interrupt in DC hardware.
	 * 2. Register amdgpu_dm_irq_handler().
	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
	 *    coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling.
	 */

	/* Use VBLANK interrupt */
	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
		if (r) {
			DRM_ERROR("Failed to add crtc irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i + 1, 0);

		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_crtc_high_irq, c_irq_params);
	}

	/* Use GRPH_PFLIP interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
		if (r) {
			DRM_ERROR("Failed to add page flip irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);
	}

	/* HPD */
	r = amdgpu_irq_add_id(adev, client_id,
			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
	if (r) {
		DRM_ERROR("Failed to add hpd irq id!\n");
		return r;
	}

	register_hpd_handlers(adev);

	return 0;
}
#endif

/* Register IRQ sources and initialize IRQ callbacks */
static int dce110_register_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r;
	int i;
	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;

	if (adev->asic_type >= CHIP_VEGA10)
		client_id = SOC15_IH_CLIENTID_DCE;

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	/*
	 * Actions of amdgpu_irq_add_id():
	 * 1. Register a set() function with base driver.
	 *    Base driver will call set() function to enable/disable an
	 *    interrupt in DC hardware.
	 * 2. Register amdgpu_dm_irq_handler().
	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
	 *    coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling.
	 */

	/* Use VBLANK interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
		if (r) {
			DRM_ERROR("Failed to add crtc irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_crtc_high_irq, c_irq_params);
	}

	/* Use VUPDATE interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
		if (r) {
			DRM_ERROR("Failed to add vupdate irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_vupdate_high_irq, c_irq_params);
	}

	/* Use GRPH_PFLIP interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
		if (r) {
			DRM_ERROR("Failed to add page flip irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);
	}

	/* HPD */
	r = amdgpu_irq_add_id(adev, client_id,
			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
	if (r) {
		DRM_ERROR("Failed to add hpd irq id!\n");
		return r;
	}

	register_hpd_handlers(adev);

	return 0;
}

#if defined(CONFIG_DRM_AMD_DC_DCN)
/* Register IRQ sources and initialize IRQ callbacks */
static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r;
	int i;

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	/*
	 * Actions of amdgpu_irq_add_id():
	 * 1. Register a set() function with base driver.
	 *    Base driver will call set() function to enable/disable an
	 *    interrupt in DC hardware.
	 * 2. Register amdgpu_dm_irq_handler().
	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
	 *    coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling.
	 */

	/* Use VSTARTUP interrupt */
	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
			i++) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);

		if (r) {
			DRM_ERROR("Failed to add crtc irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(
			adev, &int_params, dm_crtc_high_irq, c_irq_params);
	}

	/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
	 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
	 * to trigger at end of each vblank, regardless of state of the lock,
	 * matching DCE behaviour.
	 */
	for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
	     i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
	     i++) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);

		if (r) {
			DRM_ERROR("Failed to add vupdate irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_vupdate_high_irq, c_irq_params);
	}
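
	/*
	 * For example, with adev->mode_info.num_crtc == 4 the loop above
	 * registers the VUPDATE_NO_LOCK source for OTG0 through OTG3, and
	 * dc_interrupt_to_irq_source() maps them onto
	 * DC_IRQ_SOURCE_VUPDATE1..VUPDATE4.
	 */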

	/* Use GRPH_PFLIP interrupt */
	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
			i++) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
		if (r) {
			DRM_ERROR("Failed to add page flip irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);
	}

	/* HPD */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
			&adev->hpd_irq);
	if (r) {
		DRM_ERROR("Failed to add hpd irq id!\n");
		return r;
	}

	register_hpd_handlers(adev);

	return 0;
}
#endif

/*
 * Acquires the lock for the atomic state object and returns
 * the new atomic state.
 *
 * This should only be called during atomic check.
 */
static int dm_atomic_get_state(struct drm_atomic_state *state,
			       struct dm_atomic_state **dm_state)
{
	struct drm_device *dev = state->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_display_manager *dm = &adev->dm;
	struct drm_private_state *priv_state;

	if (*dm_state)
		return 0;

	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
	if (IS_ERR(priv_state))
		return PTR_ERR(priv_state);

	*dm_state = to_dm_atomic_state(priv_state);

	return 0;
}
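
/*
 * A minimal usage sketch (hypothetical caller, shown for illustration only):
 * from within atomic check, fetch the DM private state once and keep reusing
 * the pointer for the rest of the check:
 *
 *	struct dm_atomic_state *dm_state = NULL;
 *	int ret;
 *
 *	ret = dm_atomic_get_state(state, &dm_state);
 *	if (ret)
 *		return ret;
 *	// dm_state->context can now be inspected/modified for this commit
 */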

static struct dm_atomic_state *
dm_atomic_get_new_state(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_display_manager *dm = &adev->dm;
	struct drm_private_obj *obj;
	struct drm_private_state *new_obj_state;
	int i;

	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
		if (obj->funcs == dm->atomic_obj.funcs)
			return to_dm_atomic_state(new_obj_state);
	}

	return NULL;
}

static struct drm_private_state *
dm_atomic_duplicate_state(struct drm_private_obj *obj)
{
	struct dm_atomic_state *old_state, *new_state;

	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
	if (!new_state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);

	old_state = to_dm_atomic_state(obj->state);

	if (old_state && old_state->context)
		new_state->context = dc_copy_state(old_state->context);

	if (!new_state->context) {
		kfree(new_state);
		return NULL;
	}

	return &new_state->base;
}

static void dm_atomic_destroy_state(struct drm_private_obj *obj,
				    struct drm_private_state *state)
{
	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);

	if (dm_state && dm_state->context)
		dc_release_state(dm_state->context);

	kfree(dm_state);
}

static struct drm_private_state_funcs dm_atomic_state_funcs = {
	.atomic_duplicate_state = dm_atomic_duplicate_state,
	.atomic_destroy_state = dm_atomic_destroy_state,
};

static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
{
	struct dm_atomic_state *state;
	int r;

	adev->mode_info.mode_config_initialized = true;

	adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
	adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;

	adev_to_drm(adev)->mode_config.max_width = 16384;
	adev_to_drm(adev)->mode_config.max_height = 16384;

	adev_to_drm(adev)->mode_config.preferred_depth = 24;
	adev_to_drm(adev)->mode_config.prefer_shadow = 1;
	/* indicates support for immediate flip */
	adev_to_drm(adev)->mode_config.async_page_flip = true;

	adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	state->context = dc_create_state(adev->dm.dc);
	if (!state->context) {
		kfree(state);
		return -ENOMEM;
	}

	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);

	drm_atomic_private_obj_init(adev_to_drm(adev),
				    &adev->dm.atomic_obj,
				    &state->base,
				    &dm_atomic_state_funcs);

	r = amdgpu_display_modeset_create_props(adev);
	if (r) {
		dc_release_state(state->context);
		kfree(state);
		return r;
	}

	r = amdgpu_dm_audio_init(adev);
	if (r) {
		dc_release_state(state->context);
		kfree(state);
		return r;
	}

	return 0;
}

#define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
#define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
#define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50

#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
{
#if defined(CONFIG_ACPI)
	struct amdgpu_dm_backlight_caps caps;

	memset(&caps, 0, sizeof(caps));

	if (dm->backlight_caps.caps_valid)
		return;

	amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
	if (caps.caps_valid) {
		dm->backlight_caps.caps_valid = true;
		if (caps.aux_support)
			return;
		dm->backlight_caps.min_input_signal = caps.min_input_signal;
		dm->backlight_caps.max_input_signal = caps.max_input_signal;
	} else {
		dm->backlight_caps.min_input_signal =
				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
		dm->backlight_caps.max_input_signal =
				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
	}
#else
	if (dm->backlight_caps.aux_support)
		return;

	dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
	dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
#endif
}

static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
				unsigned *min, unsigned *max)
{
	if (!caps)
		return 0;

	if (caps->aux_support) {
		/* Firmware limits are in nits, DC API wants millinits. */
		*max = 1000 * caps->aux_max_input_signal;
		*min = 1000 * caps->aux_min_input_signal;
	} else {
		/* Firmware limits are 8-bit, PWM control is 16-bit. */
		*max = 0x101 * caps->max_input_signal;
		*min = 0x101 * caps->min_input_signal;
	}
	return 1;
}

static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
					uint32_t brightness)
{
	unsigned min, max;

	if (!get_brightness_range(caps, &min, &max))
		return brightness;

	/* Rescale 0..255 to min..max */
	return min + DIV_ROUND_CLOSEST((max - min) * brightness,
				       AMDGPU_MAX_BL_LEVEL);
}

static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
				      uint32_t brightness)
{
	unsigned min, max;

	if (!get_brightness_range(caps, &min, &max))
		return brightness;

	if (brightness < min)
		return 0;
	/* Rescale min..max to 0..255 */
	return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
				 max - min);
}
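
/*
 * Worked example for the PWM path: with the defaults min_input_signal = 12
 * and max_input_signal = 255, the range becomes min = 0x101 * 12 = 3084 and
 * max = 0x101 * 255 = 65535. A user brightness of 128 then maps to
 * 3084 + DIV_ROUND_CLOSEST((65535 - 3084) * 128, 255) = 34433, and
 * convert_brightness_to_user() reverses the scaling.
 */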

static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
{
	struct amdgpu_display_manager *dm = bl_get_data(bd);
	struct amdgpu_dm_backlight_caps caps;
	struct dc_link *link = NULL;
	u32 brightness;
	bool rc;

	amdgpu_dm_update_backlight_caps(dm);
	caps = dm->backlight_caps;

	link = (struct dc_link *)dm->backlight_link;

	brightness = convert_brightness_from_user(&caps, bd->props.brightness);
	/* Change brightness based on AUX property */
	if (caps.aux_support)
		rc = dc_link_set_backlight_level_nits(link, true, brightness,
						      AUX_BL_DEFAULT_TRANSITION_TIME_MS);
	else
		rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);

	return rc ? 0 : 1;
}

static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
{
	struct amdgpu_display_manager *dm = bl_get_data(bd);
	struct amdgpu_dm_backlight_caps caps;

	amdgpu_dm_update_backlight_caps(dm);
	caps = dm->backlight_caps;

	if (caps.aux_support) {
		struct dc_link *link = (struct dc_link *)dm->backlight_link;
		u32 avg, peak;
		bool rc;

		rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
		if (!rc)
			return bd->props.brightness;
		return convert_brightness_to_user(&caps, avg);
	} else {
		int ret = dc_link_get_backlight_level(dm->backlight_link);

		if (ret == DC_ERROR_UNEXPECTED)
			return bd->props.brightness;
		return convert_brightness_to_user(&caps, ret);
	}
}

static const struct backlight_ops amdgpu_dm_backlight_ops = {
	.options = BL_CORE_SUSPENDRESUME,
	.get_brightness = amdgpu_dm_backlight_get_brightness,
	.update_status	= amdgpu_dm_backlight_update_status,
};

static void
amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
{
	char bl_name[16];
	struct backlight_properties props = { 0 };

	amdgpu_dm_update_backlight_caps(dm);

	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
	props.brightness = AMDGPU_MAX_BL_LEVEL;
	props.type = BACKLIGHT_RAW;

	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
		 adev_to_drm(dm->adev)->primary->index);

	dm->backlight_dev = backlight_device_register(bl_name,
						      adev_to_drm(dm->adev)->dev,
						      dm,
						      &amdgpu_dm_backlight_ops,
						      &props);

	if (IS_ERR(dm->backlight_dev))
		DRM_ERROR("DM: Backlight registration failed!\n");
	else
		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
}

#endif

static int initialize_plane(struct amdgpu_display_manager *dm,
			    struct amdgpu_mode_info *mode_info, int plane_id,
			    enum drm_plane_type plane_type,
			    const struct dc_plane_cap *plane_cap)
{
	struct drm_plane *plane;
	unsigned long possible_crtcs;
	int ret = 0;

	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
	if (!plane) {
		DRM_ERROR("KMS: Failed to allocate plane\n");
		return -ENOMEM;
	}
	plane->type = plane_type;

	/*
	 * HACK: IGT tests expect that the primary plane for a CRTC
	 * can only have one possible CRTC. Only expose support for
	 * any CRTC for planes that are not going to be used as a
	 * primary plane for a CRTC - like overlay or underlay planes.
	 */
	possible_crtcs = 1 << plane_id;
	if (plane_id >= dm->dc->caps.max_streams)
		possible_crtcs = 0xff;
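
	/*
	 * For example, plane_id 1 on a 4-stream ASIC yields
	 * possible_crtcs = 0b0010 (CRTC 1 only), while plane_id 4 falls
	 * past max_streams and gets possible_crtcs = 0xff (any CRTC).
	 */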

	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
	if (ret) {
		DRM_ERROR("KMS: Failed to initialize plane\n");
		kfree(plane);
		return ret;
	}

	if (mode_info)
		mode_info->planes[plane_id] = plane;

	return ret;
}

static void register_backlight_device(struct amdgpu_display_manager *dm,
				      struct dc_link *link)
{
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
	    link->type != dc_connection_none) {
		/*
		 * Even if registration fails, we should continue with
		 * DM initialization, because not having backlight control
		 * is better than a black screen.
		 */
		amdgpu_dm_register_backlight_device(dm);

		if (dm->backlight_dev)
			dm->backlight_link = link;
	}
#endif
}

/*
 * In this architecture, the association
 * connector -> encoder -> crtc
 * is not really required. The crtc and connector will hold the
 * display_index as an abstraction to use with the DAL component.
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
{
	struct amdgpu_display_manager *dm = &adev->dm;
	int32_t i;
	struct amdgpu_dm_connector *aconnector = NULL;
	struct amdgpu_encoder *aencoder = NULL;
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	uint32_t link_cnt;
	int32_t primary_planes;
	enum dc_connection_type new_connection_type = dc_connection_none;
	const struct dc_plane_cap *plane;

	dm->display_indexes_num = dm->dc->caps.max_streams;
	/* Update the actual used number of crtc */
	adev->mode_info.num_crtc = adev->dm.display_indexes_num;

	link_cnt = dm->dc->caps.max_links;
	if (amdgpu_dm_mode_config_init(dm->adev)) {
		DRM_ERROR("DM: Failed to initialize mode config\n");
		return -EINVAL;
	}

	/* There is one primary plane per CRTC */
	primary_planes = dm->dc->caps.max_streams;
	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);

	/*
	 * Initialize primary planes, implicit planes for legacy IOCTLS.
	 * Order is reversed to match iteration order in atomic check.
	 */
	for (i = (primary_planes - 1); i >= 0; i--) {
		plane = &dm->dc->caps.planes[i];

		if (initialize_plane(dm, mode_info, i,
				     DRM_PLANE_TYPE_PRIMARY, plane)) {
			DRM_ERROR("KMS: Failed to initialize primary plane\n");
			goto fail;
		}
	}

	/*
	 * Initialize overlay planes, index starting after primary planes.
	 * These planes have a higher DRM index than the primary planes since
	 * they should be considered as having a higher z-order.
	 * Order is reversed to match iteration order in atomic check.
	 *
	 * Only support DCN for now, and only expose one so we don't encourage
	 * userspace to use up all the pipes.
	 */
	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];

		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
			continue;

		if (!plane->blends_with_above || !plane->blends_with_below)
			continue;

		if (!plane->pixel_format_support.argb8888)
			continue;

		if (initialize_plane(dm, NULL, primary_planes + i,
				     DRM_PLANE_TYPE_OVERLAY, plane)) {
			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
			goto fail;
		}

		/* Only create one overlay plane. */
		break;
	}

	for (i = 0; i < dm->dc->caps.max_streams; i++)
		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
			DRM_ERROR("KMS: Failed to initialize crtc\n");
			goto fail;
		}

	/* loops over all connectors on the board */
	for (i = 0; i < link_cnt; i++) {
		struct dc_link *link = NULL;

		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
			DRM_ERROR(
				"KMS: Cannot support more than %d display indexes\n",
					AMDGPU_DM_MAX_DISPLAY_INDEX);
			continue;
		}

		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
		if (!aconnector)
			goto fail;

		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
		if (!aencoder)
			goto fail;

		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
			DRM_ERROR("KMS: Failed to initialize encoder\n");
			goto fail;
		}

		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
			DRM_ERROR("KMS: Failed to initialize connector\n");
			goto fail;
		}

		link = dc_get_link_at_index(dm->dc, i);

		if (!dc_link_detect_sink(link, &new_connection_type))
			DRM_ERROR("KMS: Failed to detect connector\n");

		if (aconnector->base.force && new_connection_type == dc_connection_none) {
			emulated_link_detect(link);
			amdgpu_dm_update_connector_after_detect(aconnector);

		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
			amdgpu_dm_update_connector_after_detect(aconnector);
			register_backlight_device(dm, link);
			if (amdgpu_dc_feature_mask & DC_PSR_MASK)
				amdgpu_dm_set_psr_caps(link);
		}
	}

	/* Software is initialized. Now we can register interrupt handlers. */
	switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	case CHIP_OLAND:
		if (dce60_register_irq_handlers(dm->adev)) {
			DRM_ERROR("DM: Failed to initialize IRQ\n");
			goto fail;
		}
		break;
#endif
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		if (dce110_register_irq_handlers(dm->adev)) {
			DRM_ERROR("DM: Failed to initialize IRQ\n");
			goto fail;
		}
		break;
#if defined(CONFIG_DRM_AMD_DC_DCN)
	case CHIP_RAVEN:
	case CHIP_NAVI12:
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_RENOIR:
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
#endif
		if (dcn10_register_irq_handlers(dm->adev)) {
			DRM_ERROR("DM: Failed to initialize IRQ\n");
			goto fail;
		}
		break;
#endif
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3495 		goto fail;
3496 	}
3497 
3498 	return 0;
3499 fail:
3500 	kfree(aencoder);
3501 	kfree(aconnector);
3502 
3503 	return -EINVAL;
3504 }
3505 
3506 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3507 {
3508 	drm_mode_config_cleanup(dm->ddev);
3509 	drm_atomic_private_obj_fini(&dm->atomic_obj);
3511 }
3512 
3513 /******************************************************************************
3514  * amdgpu_display_funcs functions
3515  *****************************************************************************/
3516 
3517 /*
3518  * dm_bandwidth_update - program display watermarks
3519  *
3520  * @adev: amdgpu_device pointer
3521  *
3522  * Calculate and program the display watermarks and line buffer allocation.
3523  */
3524 static void dm_bandwidth_update(struct amdgpu_device *adev)
3525 {
3526 	/* TODO: implement later */
3527 }
3528 
static const struct amdgpu_display_funcs dm_display_funcs = {
	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
	.vblank_get_counter = dm_vblank_get_counter, /* called unconditionally */
	.backlight_set_level = NULL, /* never called for DC */
	.backlight_get_level = NULL, /* never called for DC */
	.hpd_sense = NULL, /* called unconditionally */
	.hpd_set_polarity = NULL, /* called unconditionally */
	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
	.page_flip_get_scanoutpos =
		dm_crtc_get_scanoutpos, /* called unconditionally */
	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
};
3542 
3543 #if defined(CONFIG_DEBUG_KERNEL_DC)
3544 
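/*
 * s3_debug exercises the DM suspend/resume paths from sysfs without a
 * full system S3 cycle: writing a non-zero value resumes the DM (and
 * fires a hotplug event), writing zero suspends it.
 */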
3545 static ssize_t s3_debug_store(struct device *device,
3546 			      struct device_attribute *attr,
3547 			      const char *buf,
3548 			      size_t count)
3549 {
3550 	int ret;
3551 	int s3_state;
3552 	struct drm_device *drm_dev = dev_get_drvdata(device);
3553 	struct amdgpu_device *adev = drm_to_adev(drm_dev);
3554 
3555 	ret = kstrtoint(buf, 0, &s3_state);
3556 
3557 	if (ret == 0) {
3558 		if (s3_state) {
3559 			dm_resume(adev);
3560 			drm_kms_helper_hotplug_event(adev_to_drm(adev));
		} else {
			dm_suspend(adev);
		}
3563 	}
3564 
3565 	return ret == 0 ? count : 0;
3566 }
3567 
3568 DEVICE_ATTR_WO(s3_debug);
3569 
3570 #endif
3571 
3572 static int dm_early_init(void *handle)
3573 {
3574 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3575 
3576 	switch (adev->asic_type) {
3577 #if defined(CONFIG_DRM_AMD_DC_SI)
3578 	case CHIP_TAHITI:
3579 	case CHIP_PITCAIRN:
3580 	case CHIP_VERDE:
3581 		adev->mode_info.num_crtc = 6;
3582 		adev->mode_info.num_hpd = 6;
3583 		adev->mode_info.num_dig = 6;
3584 		break;
3585 	case CHIP_OLAND:
3586 		adev->mode_info.num_crtc = 2;
3587 		adev->mode_info.num_hpd = 2;
3588 		adev->mode_info.num_dig = 2;
3589 		break;
3590 #endif
3591 	case CHIP_BONAIRE:
3592 	case CHIP_HAWAII:
3593 		adev->mode_info.num_crtc = 6;
3594 		adev->mode_info.num_hpd = 6;
3595 		adev->mode_info.num_dig = 6;
3596 		break;
3597 	case CHIP_KAVERI:
3598 		adev->mode_info.num_crtc = 4;
3599 		adev->mode_info.num_hpd = 6;
3600 		adev->mode_info.num_dig = 7;
3601 		break;
3602 	case CHIP_KABINI:
3603 	case CHIP_MULLINS:
3604 		adev->mode_info.num_crtc = 2;
3605 		adev->mode_info.num_hpd = 6;
3606 		adev->mode_info.num_dig = 6;
3607 		break;
3608 	case CHIP_FIJI:
3609 	case CHIP_TONGA:
3610 		adev->mode_info.num_crtc = 6;
3611 		adev->mode_info.num_hpd = 6;
3612 		adev->mode_info.num_dig = 7;
3613 		break;
3614 	case CHIP_CARRIZO:
3615 		adev->mode_info.num_crtc = 3;
3616 		adev->mode_info.num_hpd = 6;
3617 		adev->mode_info.num_dig = 9;
3618 		break;
3619 	case CHIP_STONEY:
3620 		adev->mode_info.num_crtc = 2;
3621 		adev->mode_info.num_hpd = 6;
3622 		adev->mode_info.num_dig = 9;
3623 		break;
3624 	case CHIP_POLARIS11:
3625 	case CHIP_POLARIS12:
3626 		adev->mode_info.num_crtc = 5;
3627 		adev->mode_info.num_hpd = 5;
3628 		adev->mode_info.num_dig = 5;
3629 		break;
3630 	case CHIP_POLARIS10:
3631 	case CHIP_VEGAM:
3632 		adev->mode_info.num_crtc = 6;
3633 		adev->mode_info.num_hpd = 6;
3634 		adev->mode_info.num_dig = 6;
3635 		break;
3636 	case CHIP_VEGA10:
3637 	case CHIP_VEGA12:
3638 	case CHIP_VEGA20:
3639 		adev->mode_info.num_crtc = 6;
3640 		adev->mode_info.num_hpd = 6;
3641 		adev->mode_info.num_dig = 6;
3642 		break;
3643 #if defined(CONFIG_DRM_AMD_DC_DCN)
3644 	case CHIP_RAVEN:
3645 		adev->mode_info.num_crtc = 4;
3646 		adev->mode_info.num_hpd = 4;
3647 		adev->mode_info.num_dig = 4;
3648 		break;
3649 #endif
3650 	case CHIP_NAVI10:
3651 	case CHIP_NAVI12:
3652 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3653 	case CHIP_SIENNA_CICHLID:
3654 	case CHIP_NAVY_FLOUNDER:
3655 #endif
3656 		adev->mode_info.num_crtc = 6;
3657 		adev->mode_info.num_hpd = 6;
3658 		adev->mode_info.num_dig = 6;
3659 		break;
3660 	case CHIP_NAVI14:
3661 		adev->mode_info.num_crtc = 5;
3662 		adev->mode_info.num_hpd = 5;
3663 		adev->mode_info.num_dig = 5;
3664 		break;
3665 	case CHIP_RENOIR:
3666 		adev->mode_info.num_crtc = 4;
3667 		adev->mode_info.num_hpd = 4;
3668 		adev->mode_info.num_dig = 4;
3669 		break;
3670 	default:
3671 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3672 		return -EINVAL;
3673 	}
3674 
3675 	amdgpu_dm_set_irq_funcs(adev);
3676 
3677 	if (adev->mode_info.funcs == NULL)
3678 		adev->mode_info.funcs = &dm_display_funcs;
3679 
3680 	/*
3681 	 * Note: Do NOT change adev->audio_endpt_rreg and
3682 	 * adev->audio_endpt_wreg because they are initialised in
3683 	 * amdgpu_device_init()
3684 	 */
3685 #if defined(CONFIG_DEBUG_KERNEL_DC)
3686 	device_create_file(
3687 		adev_to_drm(adev)->dev,
3688 		&dev_attr_s3_debug);
3689 #endif
3690 
3691 	return 0;
3692 }
3693 
3694 static bool modeset_required(struct drm_crtc_state *crtc_state,
3695 			     struct dc_stream_state *new_stream,
3696 			     struct dc_stream_state *old_stream)
3697 {
3698 	return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3699 }
3700 
3701 static bool modereset_required(struct drm_crtc_state *crtc_state)
3702 {
3703 	return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3704 }
3705 
3706 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
3707 {
3708 	drm_encoder_cleanup(encoder);
3709 	kfree(encoder);
3710 }
3711 
3712 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
3713 	.destroy = amdgpu_dm_encoder_destroy,
3714 };
3715 
3716 
3717 static int fill_dc_scaling_info(const struct drm_plane_state *state,
3718 				struct dc_scaling_info *scaling_info)
3719 {
3720 	int scale_w, scale_h;
3721 
3722 	memset(scaling_info, 0, sizeof(*scaling_info));
3723 
	/* Source is in 16.16 fixed point; ignore the fractional part for now. */
3725 	scaling_info->src_rect.x = state->src_x >> 16;
3726 	scaling_info->src_rect.y = state->src_y >> 16;
3727 
3728 	/*
3729 	 * For reasons we don't (yet) fully understand a non-zero
3730 	 * src_y coordinate into an NV12 buffer can cause a
3731 	 * system hang. To avoid hangs (and maybe be overly cautious)
3732 	 * let's reject both non-zero src_x and src_y.
3733 	 *
3734 	 * We currently know of only one use-case to reproduce a
3735 	 * scenario with non-zero src_x and src_y for NV12, which
3736 	 * is to gesture the YouTube Android app into full screen
3737 	 * on ChromeOS.
3738 	 */
3739 	if (state->fb &&
3740 	    state->fb->format->format == DRM_FORMAT_NV12 &&
3741 	    (scaling_info->src_rect.x != 0 ||
3742 	     scaling_info->src_rect.y != 0))
		return -EINVAL;
3761 
3762 	scaling_info->src_rect.width = state->src_w >> 16;
3763 	if (scaling_info->src_rect.width == 0)
3764 		return -EINVAL;
3765 
3766 	scaling_info->src_rect.height = state->src_h >> 16;
3767 	if (scaling_info->src_rect.height == 0)
3768 		return -EINVAL;
3769 
3770 	scaling_info->dst_rect.x = state->crtc_x;
3771 	scaling_info->dst_rect.y = state->crtc_y;
3772 
3773 	if (state->crtc_w == 0)
3774 		return -EINVAL;
3775 
3776 	scaling_info->dst_rect.width = state->crtc_w;
3777 
3778 	if (state->crtc_h == 0)
3779 		return -EINVAL;
3780 
3781 	scaling_info->dst_rect.height = state->crtc_h;
3782 
3783 	/* DRM doesn't specify clipping on destination output. */
3784 	scaling_info->clip_rect = scaling_info->dst_rect;
3785 
3786 	/* TODO: Validate scaling per-format with DC plane caps */
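	/*
	 * scale_w/scale_h are ratios in units of 0.001, so the checks below
	 * bound scaling to the 0.25x..16x range DC is expected to handle
	 * (per-format hardware limits may be tighter).
	 */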
3787 	scale_w = scaling_info->dst_rect.width * 1000 /
3788 		  scaling_info->src_rect.width;
3789 
3790 	if (scale_w < 250 || scale_w > 16000)
3791 		return -EINVAL;
3792 
3793 	scale_h = scaling_info->dst_rect.height * 1000 /
3794 		  scaling_info->src_rect.height;
3795 
3796 	if (scale_h < 250 || scale_h > 16000)
3797 		return -EINVAL;
3798 
3799 	/*
3800 	 * The "scaling_quality" can be ignored for now, quality = 0 has DC
3801 	 * assume reasonable defaults based on the format.
3802 	 */
3803 
3804 	return 0;
3805 }
3806 
3807 static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
3808 		       uint64_t *tiling_flags, bool *tmz_surface)
3809 {
3810 	struct amdgpu_bo *rbo;
3811 	int r;
3812 
3813 	if (!amdgpu_fb) {
3814 		*tiling_flags = 0;
3815 		*tmz_surface = false;
3816 		return 0;
3817 	}
3818 
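	/*
	 * Tiling flags and the TMZ state live in the BO, so the BO must be
	 * reserved before they can be queried safely.
	 */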
3819 	rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
3820 	r = amdgpu_bo_reserve(rbo, false);
3821 
3822 	if (unlikely(r)) {
3823 		/* Don't show error message when returning -ERESTARTSYS */
3824 		if (r != -ERESTARTSYS)
3825 			DRM_ERROR("Unable to reserve buffer: %d\n", r);
3826 		return r;
3827 	}
3828 
3829 	if (tiling_flags)
3830 		amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
3831 
3832 	if (tmz_surface)
3833 		*tmz_surface = amdgpu_bo_encrypted(rbo);
3834 
3835 	amdgpu_bo_unreserve(rbo);
3836 
3837 	return r;
3838 }
3839 
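/*
 * DCC metadata sits at a 256-byte-granular offset from the surface
 * address; a zero offset in the tiling flags means no DCC metadata.
 */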
3840 static inline uint64_t get_dcc_address(uint64_t address, uint64_t tiling_flags)
3841 {
3842 	uint32_t offset = AMDGPU_TILING_GET(tiling_flags, DCC_OFFSET_256B);
3843 
3844 	return offset ? (address + offset * 256) : 0;
3845 }
3846 
3847 static int
3848 fill_plane_dcc_attributes(struct amdgpu_device *adev,
3849 			  const struct amdgpu_framebuffer *afb,
3850 			  const enum surface_pixel_format format,
3851 			  const enum dc_rotation_angle rotation,
3852 			  const struct plane_size *plane_size,
3853 			  const union dc_tiling_info *tiling_info,
3854 			  const uint64_t info,
3855 			  struct dc_plane_dcc_param *dcc,
3856 			  struct dc_plane_address *address,
3857 			  bool force_disable_dcc)
3858 {
3859 	struct dc *dc = adev->dm.dc;
3860 	struct dc_dcc_surface_param input;
3861 	struct dc_surface_dcc_cap output;
3862 	uint32_t offset = AMDGPU_TILING_GET(info, DCC_OFFSET_256B);
3863 	uint32_t i64b = AMDGPU_TILING_GET(info, DCC_INDEPENDENT_64B) != 0;
3864 	uint64_t dcc_address;
3865 
3866 	memset(&input, 0, sizeof(input));
3867 	memset(&output, 0, sizeof(output));
3868 
3869 	if (force_disable_dcc)
3870 		return 0;
3871 
3872 	if (!offset)
3873 		return 0;
3874 
3875 	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3876 		return 0;
3877 
3878 	if (!dc->cap_funcs.get_dcc_compression_cap)
3879 		return -EINVAL;
3880 
3881 	input.format = format;
3882 	input.surface_size.width = plane_size->surface_size.width;
3883 	input.surface_size.height = plane_size->surface_size.height;
3884 	input.swizzle_mode = tiling_info->gfx9.swizzle;
3885 
3886 	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
3887 		input.scan = SCAN_DIRECTION_HORIZONTAL;
3888 	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
3889 		input.scan = SCAN_DIRECTION_VERTICAL;
3890 
3891 	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
3892 		return -EINVAL;
3893 
3894 	if (!output.capable)
3895 		return -EINVAL;
3896 
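	/*
	 * If the hardware requires independent 64B blocks but the surface
	 * was not allocated with them, DCC cannot be enabled.
	 */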
3897 	if (i64b == 0 && output.grph.rgb.independent_64b_blks != 0)
3898 		return -EINVAL;
3899 
3900 	dcc->enable = 1;
3901 	dcc->meta_pitch =
3902 		AMDGPU_TILING_GET(info, DCC_PITCH_MAX) + 1;
3903 	dcc->independent_64b_blks = i64b;
3904 
3905 	dcc_address = get_dcc_address(afb->address, info);
3906 	address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
3907 	address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
3908 
3909 	return 0;
3910 }
3911 
3912 static int
3913 fill_plane_buffer_attributes(struct amdgpu_device *adev,
3914 			     const struct amdgpu_framebuffer *afb,
3915 			     const enum surface_pixel_format format,
3916 			     const enum dc_rotation_angle rotation,
3917 			     const uint64_t tiling_flags,
3918 			     union dc_tiling_info *tiling_info,
3919 			     struct plane_size *plane_size,
3920 			     struct dc_plane_dcc_param *dcc,
3921 			     struct dc_plane_address *address,
3922 			     bool tmz_surface,
3923 			     bool force_disable_dcc)
3924 {
3925 	const struct drm_framebuffer *fb = &afb->base;
3926 	int ret;
3927 
3928 	memset(tiling_info, 0, sizeof(*tiling_info));
3929 	memset(plane_size, 0, sizeof(*plane_size));
3930 	memset(dcc, 0, sizeof(*dcc));
3931 	memset(address, 0, sizeof(*address));
3932 
3933 	address->tmz_surface = tmz_surface;
3934 
3935 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
3936 		plane_size->surface_size.x = 0;
3937 		plane_size->surface_size.y = 0;
3938 		plane_size->surface_size.width = fb->width;
3939 		plane_size->surface_size.height = fb->height;
3940 		plane_size->surface_pitch =
3941 			fb->pitches[0] / fb->format->cpp[0];
3942 
3943 		address->type = PLN_ADDR_TYPE_GRAPHICS;
3944 		address->grph.addr.low_part = lower_32_bits(afb->address);
3945 		address->grph.addr.high_part = upper_32_bits(afb->address);
3946 	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
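		/*
		 * Semi-planar video formats (NV12/NV21/P010) keep chroma in
		 * a second plane within the same BO, at fb->offsets[1].
		 */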
3947 		uint64_t chroma_addr = afb->address + fb->offsets[1];
3948 
3949 		plane_size->surface_size.x = 0;
3950 		plane_size->surface_size.y = 0;
3951 		plane_size->surface_size.width = fb->width;
3952 		plane_size->surface_size.height = fb->height;
3953 		plane_size->surface_pitch =
3954 			fb->pitches[0] / fb->format->cpp[0];
3955 
3956 		plane_size->chroma_size.x = 0;
3957 		plane_size->chroma_size.y = 0;
3958 		/* TODO: set these based on surface format */
3959 		plane_size->chroma_size.width = fb->width / 2;
3960 		plane_size->chroma_size.height = fb->height / 2;
3961 
3962 		plane_size->chroma_pitch =
3963 			fb->pitches[1] / fb->format->cpp[1];
3964 
3965 		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
3966 		address->video_progressive.luma_addr.low_part =
3967 			lower_32_bits(afb->address);
3968 		address->video_progressive.luma_addr.high_part =
3969 			upper_32_bits(afb->address);
3970 		address->video_progressive.chroma_addr.low_part =
3971 			lower_32_bits(chroma_addr);
3972 		address->video_progressive.chroma_addr.high_part =
3973 			upper_32_bits(chroma_addr);
3974 	}
3975 
3976 	/* Fill GFX8 params */
3977 	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
3978 		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
3979 
3980 		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
3981 		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
3982 		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
3983 		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
3984 		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
3985 
3986 		/* XXX fix me for VI */
3987 		tiling_info->gfx8.num_banks = num_banks;
3988 		tiling_info->gfx8.array_mode =
3989 				DC_ARRAY_2D_TILED_THIN1;
3990 		tiling_info->gfx8.tile_split = tile_split;
3991 		tiling_info->gfx8.bank_width = bankw;
3992 		tiling_info->gfx8.bank_height = bankh;
3993 		tiling_info->gfx8.tile_aspect = mtaspect;
3994 		tiling_info->gfx8.tile_mode =
3995 				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
3996 	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
3997 			== DC_ARRAY_1D_TILED_THIN1) {
3998 		tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
3999 	}
4000 
4001 	tiling_info->gfx8.pipe_config =
4002 			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
4003 
4004 	if (adev->asic_type == CHIP_VEGA10 ||
4005 	    adev->asic_type == CHIP_VEGA12 ||
4006 	    adev->asic_type == CHIP_VEGA20 ||
4007 	    adev->asic_type == CHIP_NAVI10 ||
4008 	    adev->asic_type == CHIP_NAVI14 ||
4009 	    adev->asic_type == CHIP_NAVI12 ||
4010 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
4011 		adev->asic_type == CHIP_SIENNA_CICHLID ||
4012 		adev->asic_type == CHIP_NAVY_FLOUNDER ||
4013 #endif
4014 	    adev->asic_type == CHIP_RENOIR ||
4015 	    adev->asic_type == CHIP_RAVEN) {
4016 		/* Fill GFX9 params */
4017 		tiling_info->gfx9.num_pipes =
4018 			adev->gfx.config.gb_addr_config_fields.num_pipes;
4019 		tiling_info->gfx9.num_banks =
4020 			adev->gfx.config.gb_addr_config_fields.num_banks;
4021 		tiling_info->gfx9.pipe_interleave =
4022 			adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4023 		tiling_info->gfx9.num_shader_engines =
4024 			adev->gfx.config.gb_addr_config_fields.num_se;
4025 		tiling_info->gfx9.max_compressed_frags =
4026 			adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4027 		tiling_info->gfx9.num_rb_per_se =
4028 			adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4029 		tiling_info->gfx9.swizzle =
4030 			AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
4031 		tiling_info->gfx9.shaderEnable = 1;
4032 
4033 #ifdef CONFIG_DRM_AMD_DC_DCN3_0
4034 		if (adev->asic_type == CHIP_SIENNA_CICHLID ||
4035 		    adev->asic_type == CHIP_NAVY_FLOUNDER)
4036 			tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
4037 #endif
4038 		ret = fill_plane_dcc_attributes(adev, afb, format, rotation,
4039 						plane_size, tiling_info,
4040 						tiling_flags, dcc, address,
4041 						force_disable_dcc);
4042 		if (ret)
4043 			return ret;
4044 	}
4045 
4046 	return 0;
4047 }
4048 
4049 static void
4050 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
4051 			       bool *per_pixel_alpha, bool *global_alpha,
4052 			       int *global_alpha_value)
4053 {
4054 	*per_pixel_alpha = false;
4055 	*global_alpha = false;
4056 	*global_alpha_value = 0xff;
4057 
4058 	if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
4059 		return;
4060 
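	/*
	 * DRM_MODE_BLEND_PREMULTI means the framebuffer colors are already
	 * multiplied by alpha; per-pixel alpha only applies to formats
	 * that actually carry an alpha channel.
	 */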
4061 	if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
4062 		static const uint32_t alpha_formats[] = {
4063 			DRM_FORMAT_ARGB8888,
4064 			DRM_FORMAT_RGBA8888,
4065 			DRM_FORMAT_ABGR8888,
4066 		};
4067 		uint32_t format = plane_state->fb->format->format;
4068 		unsigned int i;
4069 
4070 		for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
4071 			if (format == alpha_formats[i]) {
4072 				*per_pixel_alpha = true;
4073 				break;
4074 			}
4075 		}
4076 	}
4077 
4078 	if (plane_state->alpha < 0xffff) {
4079 		*global_alpha = true;
4080 		*global_alpha_value = plane_state->alpha >> 8;
4081 	}
4082 }
4083 
4084 static int
4085 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
4086 			    const enum surface_pixel_format format,
4087 			    enum dc_color_space *color_space)
4088 {
4089 	bool full_range;
4090 
4091 	*color_space = COLOR_SPACE_SRGB;
4092 
4093 	/* DRM color properties only affect non-RGB formats. */
4094 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
4095 		return 0;
4096 
4097 	full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
4098 
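	/*
	 * Limited range maps 8-bit luma to [16, 235] instead of the full
	 * [0, 255], per the BT.601/BT.709 conventions.
	 */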
4099 	switch (plane_state->color_encoding) {
4100 	case DRM_COLOR_YCBCR_BT601:
4101 		if (full_range)
4102 			*color_space = COLOR_SPACE_YCBCR601;
4103 		else
4104 			*color_space = COLOR_SPACE_YCBCR601_LIMITED;
4105 		break;
4106 
4107 	case DRM_COLOR_YCBCR_BT709:
4108 		if (full_range)
4109 			*color_space = COLOR_SPACE_YCBCR709;
4110 		else
4111 			*color_space = COLOR_SPACE_YCBCR709_LIMITED;
4112 		break;
4113 
4114 	case DRM_COLOR_YCBCR_BT2020:
4115 		if (full_range)
4116 			*color_space = COLOR_SPACE_2020_YCBCR;
4117 		else
4118 			return -EINVAL;
4119 		break;
4120 
4121 	default:
4122 		return -EINVAL;
4123 	}
4124 
4125 	return 0;
4126 }
4127 
4128 static int
4129 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
4130 			    const struct drm_plane_state *plane_state,
4131 			    const uint64_t tiling_flags,
4132 			    struct dc_plane_info *plane_info,
4133 			    struct dc_plane_address *address,
4134 			    bool tmz_surface,
4135 			    bool force_disable_dcc)
4136 {
4137 	const struct drm_framebuffer *fb = plane_state->fb;
4138 	const struct amdgpu_framebuffer *afb =
4139 		to_amdgpu_framebuffer(plane_state->fb);
4140 	struct drm_format_name_buf format_name;
4141 	int ret;
4142 
4143 	memset(plane_info, 0, sizeof(*plane_info));
4144 
4145 	switch (fb->format->format) {
4146 	case DRM_FORMAT_C8:
4147 		plane_info->format =
4148 			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
4149 		break;
4150 	case DRM_FORMAT_RGB565:
4151 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
4152 		break;
4153 	case DRM_FORMAT_XRGB8888:
4154 	case DRM_FORMAT_ARGB8888:
4155 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
4156 		break;
4157 	case DRM_FORMAT_XRGB2101010:
4158 	case DRM_FORMAT_ARGB2101010:
4159 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
4160 		break;
4161 	case DRM_FORMAT_XBGR2101010:
4162 	case DRM_FORMAT_ABGR2101010:
4163 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
4164 		break;
4165 	case DRM_FORMAT_XBGR8888:
4166 	case DRM_FORMAT_ABGR8888:
4167 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
4168 		break;
4169 	case DRM_FORMAT_NV21:
4170 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
4171 		break;
4172 	case DRM_FORMAT_NV12:
4173 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
4174 		break;
4175 	case DRM_FORMAT_P010:
4176 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
4177 		break;
4178 	case DRM_FORMAT_XRGB16161616F:
4179 	case DRM_FORMAT_ARGB16161616F:
4180 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
4181 		break;
4182 	case DRM_FORMAT_XBGR16161616F:
4183 	case DRM_FORMAT_ABGR16161616F:
4184 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
4185 		break;
4186 	default:
4187 		DRM_ERROR(
4188 			"Unsupported screen format %s\n",
4189 			drm_get_format_name(fb->format->format, &format_name));
4190 		return -EINVAL;
4191 	}
4192 
4193 	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
4194 	case DRM_MODE_ROTATE_0:
4195 		plane_info->rotation = ROTATION_ANGLE_0;
4196 		break;
4197 	case DRM_MODE_ROTATE_90:
4198 		plane_info->rotation = ROTATION_ANGLE_90;
4199 		break;
4200 	case DRM_MODE_ROTATE_180:
4201 		plane_info->rotation = ROTATION_ANGLE_180;
4202 		break;
4203 	case DRM_MODE_ROTATE_270:
4204 		plane_info->rotation = ROTATION_ANGLE_270;
4205 		break;
4206 	default:
4207 		plane_info->rotation = ROTATION_ANGLE_0;
4208 		break;
4209 	}
4210 
4211 	plane_info->visible = true;
4212 	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
4213 
4214 	plane_info->layer_index = 0;
4215 
4216 	ret = fill_plane_color_attributes(plane_state, plane_info->format,
4217 					  &plane_info->color_space);
4218 	if (ret)
4219 		return ret;
4220 
4221 	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
4222 					   plane_info->rotation, tiling_flags,
4223 					   &plane_info->tiling_info,
4224 					   &plane_info->plane_size,
4225 					   &plane_info->dcc, address, tmz_surface,
4226 					   force_disable_dcc);
4227 	if (ret)
4228 		return ret;
4229 
4230 	fill_blending_from_plane_state(
4231 		plane_state, &plane_info->per_pixel_alpha,
4232 		&plane_info->global_alpha, &plane_info->global_alpha_value);
4233 
4234 	return 0;
4235 }
4236 
4237 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
4238 				    struct dc_plane_state *dc_plane_state,
4239 				    struct drm_plane_state *plane_state,
4240 				    struct drm_crtc_state *crtc_state)
4241 {
4242 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
4243 	struct dm_plane_state *dm_plane_state = to_dm_plane_state(plane_state);
4244 	struct dc_scaling_info scaling_info;
4245 	struct dc_plane_info plane_info;
4246 	int ret;
4247 	bool force_disable_dcc = false;
4248 
4249 	ret = fill_dc_scaling_info(plane_state, &scaling_info);
4250 	if (ret)
4251 		return ret;
4252 
4253 	dc_plane_state->src_rect = scaling_info.src_rect;
4254 	dc_plane_state->dst_rect = scaling_info.dst_rect;
4255 	dc_plane_state->clip_rect = scaling_info.clip_rect;
4256 	dc_plane_state->scaling_quality = scaling_info.scaling_quality;
4257 
4258 	force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
4259 	ret = fill_dc_plane_info_and_addr(adev, plane_state,
4260 					  dm_plane_state->tiling_flags,
4261 					  &plane_info,
4262 					  &dc_plane_state->address,
4263 					  dm_plane_state->tmz_surface,
4264 					  force_disable_dcc);
4265 	if (ret)
4266 		return ret;
4267 
4268 	dc_plane_state->format = plane_info.format;
4269 	dc_plane_state->color_space = plane_info.color_space;
4271 	dc_plane_state->plane_size = plane_info.plane_size;
4272 	dc_plane_state->rotation = plane_info.rotation;
4273 	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
4274 	dc_plane_state->stereo_format = plane_info.stereo_format;
4275 	dc_plane_state->tiling_info = plane_info.tiling_info;
4276 	dc_plane_state->visible = plane_info.visible;
4277 	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
4278 	dc_plane_state->global_alpha = plane_info.global_alpha;
4279 	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
4280 	dc_plane_state->dcc = plane_info.dcc;
	dc_plane_state->layer_index = plane_info.layer_index; /* always 0 for now */
4282 
4283 	/*
4284 	 * Always set input transfer function, since plane state is refreshed
4285 	 * every time.
4286 	 */
4287 	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
4288 	if (ret)
4289 		return ret;
4290 
4291 	return 0;
4292 }
4293 
4294 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
4295 					   const struct dm_connector_state *dm_state,
4296 					   struct dc_stream_state *stream)
4297 {
4298 	enum amdgpu_rmx_type rmx_type;
4299 
	struct rect src = { 0 }; /* viewport in composition space */
4301 	struct rect dst = { 0 }; /* stream addressable area */
4302 
4303 	/* no mode. nothing to be done */
4304 	if (!mode)
4305 		return;
4306 
4307 	/* Full screen scaling by default */
4308 	src.width = mode->hdisplay;
4309 	src.height = mode->vdisplay;
4310 	dst.width = stream->timing.h_addressable;
4311 	dst.height = stream->timing.v_addressable;
4312 
4313 	if (dm_state) {
4314 		rmx_type = dm_state->scaling;
4315 		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
4316 			if (src.width * dst.height <
4317 					src.height * dst.width) {
4318 				/* height needs less upscaling/more downscaling */
4319 				dst.width = src.width *
4320 						dst.height / src.height;
4321 			} else {
4322 				/* width needs less upscaling/more downscaling */
4323 				dst.height = src.height *
4324 						dst.width / src.width;
4325 			}
4326 		} else if (rmx_type == RMX_CENTER) {
4327 			dst = src;
4328 		}
4329 
4330 		dst.x = (stream->timing.h_addressable - dst.width) / 2;
4331 		dst.y = (stream->timing.v_addressable - dst.height) / 2;
4332 
4333 		if (dm_state->underscan_enable) {
4334 			dst.x += dm_state->underscan_hborder / 2;
4335 			dst.y += dm_state->underscan_vborder / 2;
4336 			dst.width -= dm_state->underscan_hborder;
4337 			dst.height -= dm_state->underscan_vborder;
4338 		}
4339 	}
4340 
4341 	stream->src = src;
4342 	stream->dst = dst;
4343 
	DRM_DEBUG_DRIVER("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
			dst.x, dst.y, dst.width, dst.height);
}
4348 
4349 static enum dc_color_depth
4350 convert_color_depth_from_display_info(const struct drm_connector *connector,
4351 				      bool is_y420, int requested_bpc)
4352 {
4353 	uint8_t bpc;
4354 
4355 	if (is_y420) {
4356 		bpc = 8;
4357 
4358 		/* Cap display bpc based on HDMI 2.0 HF-VSDB */
4359 		if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
4360 			bpc = 16;
4361 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
4362 			bpc = 12;
4363 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
4364 			bpc = 10;
4365 	} else {
4366 		bpc = (uint8_t)connector->display_info.bpc;
4367 		/* Assume 8 bpc by default if no bpc is specified. */
4368 		bpc = bpc ? bpc : 8;
4369 	}
4370 
4371 	if (requested_bpc > 0) {
4372 		/*
4373 		 * Cap display bpc based on the user requested value.
4374 		 *
		 * The value for state->max_bpc may not be correctly updated
4376 		 * depending on when the connector gets added to the state
4377 		 * or if this was called outside of atomic check, so it
4378 		 * can't be used directly.
4379 		 */
4380 		bpc = min_t(u8, bpc, requested_bpc);
4381 
		/*
		 * Round down to the nearest even number, since DC color
		 * depths only come in even bpc steps (6, 8, 10, ..., 16).
		 */
4383 		bpc = bpc - (bpc & 1);
4384 	}
4385 
4386 	switch (bpc) {
4387 	case 0:
4388 		/*
4389 		 * Temporary Work around, DRM doesn't parse color depth for
4390 		 * EDID revision before 1.4
4391 		 * TODO: Fix edid parsing
4392 		 */
4393 		return COLOR_DEPTH_888;
4394 	case 6:
4395 		return COLOR_DEPTH_666;
4396 	case 8:
4397 		return COLOR_DEPTH_888;
4398 	case 10:
4399 		return COLOR_DEPTH_101010;
4400 	case 12:
4401 		return COLOR_DEPTH_121212;
4402 	case 14:
4403 		return COLOR_DEPTH_141414;
4404 	case 16:
4405 		return COLOR_DEPTH_161616;
4406 	default:
4407 		return COLOR_DEPTH_UNDEFINED;
4408 	}
4409 }
4410 
4411 static enum dc_aspect_ratio
4412 get_aspect_ratio(const struct drm_display_mode *mode_in)
4413 {
4414 	/* 1-1 mapping, since both enums follow the HDMI spec. */
4415 	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
4416 }
4417 
4418 static enum dc_color_space
4419 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
4420 {
4421 	enum dc_color_space color_space = COLOR_SPACE_SRGB;
4422 
	switch (dc_crtc_timing->pixel_encoding) {
4424 	case PIXEL_ENCODING_YCBCR422:
4425 	case PIXEL_ENCODING_YCBCR444:
4426 	case PIXEL_ENCODING_YCBCR420:
4427 	{
4428 		/*
4429 		 * 27030khz is the separation point between HDTV and SDTV
4430 		 * according to HDMI spec, we use YCbCr709 and YCbCr601
4431 		 * respectively
4432 		 */
4433 		if (dc_crtc_timing->pix_clk_100hz > 270300) {
4434 			if (dc_crtc_timing->flags.Y_ONLY)
4435 				color_space =
4436 					COLOR_SPACE_YCBCR709_LIMITED;
4437 			else
4438 				color_space = COLOR_SPACE_YCBCR709;
4439 		} else {
4440 			if (dc_crtc_timing->flags.Y_ONLY)
4441 				color_space =
4442 					COLOR_SPACE_YCBCR601_LIMITED;
4443 			else
4444 				color_space = COLOR_SPACE_YCBCR601;
4445 		}
4446 
4447 	}
4448 	break;
4449 	case PIXEL_ENCODING_RGB:
4450 		color_space = COLOR_SPACE_SRGB;
4451 		break;
4452 
4453 	default:
4454 		WARN_ON(1);
4455 		break;
4456 	}
4457 
4458 	return color_space;
4459 }
4460 
4461 static bool adjust_colour_depth_from_display_info(
4462 	struct dc_crtc_timing *timing_out,
4463 	const struct drm_display_info *info)
4464 {
4465 	enum dc_color_depth depth = timing_out->display_color_depth;
4466 	int normalized_clk;
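
	/*
	 * Walk down from the current depth until the depth-adjusted pixel
	 * clock fits under the sink's maximum TMDS clock.
	 */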
4467 	do {
4468 		normalized_clk = timing_out->pix_clk_100hz / 10;
4469 		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
4470 		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
4471 			normalized_clk /= 2;
4472 		/* Adjusting pix clock following on HDMI spec based on colour depth */
4473 		switch (depth) {
4474 		case COLOR_DEPTH_888:
4475 			break;
4476 		case COLOR_DEPTH_101010:
4477 			normalized_clk = (normalized_clk * 30) / 24;
4478 			break;
4479 		case COLOR_DEPTH_121212:
4480 			normalized_clk = (normalized_clk * 36) / 24;
4481 			break;
4482 		case COLOR_DEPTH_161616:
4483 			normalized_clk = (normalized_clk * 48) / 24;
4484 			break;
4485 		default:
4486 			/* The above depths are the only ones valid for HDMI. */
4487 			return false;
4488 		}
4489 		if (normalized_clk <= info->max_tmds_clock) {
4490 			timing_out->display_color_depth = depth;
4491 			return true;
4492 		}
4493 	} while (--depth > COLOR_DEPTH_666);
4494 	return false;
4495 }
4496 
4497 static void fill_stream_properties_from_drm_display_mode(
4498 	struct dc_stream_state *stream,
4499 	const struct drm_display_mode *mode_in,
4500 	const struct drm_connector *connector,
4501 	const struct drm_connector_state *connector_state,
4502 	const struct dc_stream_state *old_stream,
4503 	int requested_bpc)
4504 {
4505 	struct dc_crtc_timing *timing_out = &stream->timing;
4506 	const struct drm_display_info *info = &connector->display_info;
4507 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4508 	struct hdmi_vendor_infoframe hv_frame;
4509 	struct hdmi_avi_infoframe avi_frame;
4510 
4511 	memset(&hv_frame, 0, sizeof(hv_frame));
4512 	memset(&avi_frame, 0, sizeof(avi_frame));
4513 
4514 	timing_out->h_border_left = 0;
4515 	timing_out->h_border_right = 0;
4516 	timing_out->v_border_top = 0;
4517 	timing_out->v_border_bottom = 0;
4518 	/* TODO: un-hardcode */
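	/*
	 * Preference order: 4:2:0 when the mode mandates it (or the
	 * connector forces it), then YCbCr 4:4:4 when the HDMI sink
	 * supports it, otherwise RGB.
	 */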
4519 	if (drm_mode_is_420_only(info, mode_in)
4520 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4521 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4522 	else if (drm_mode_is_420_also(info, mode_in)
4523 			&& aconnector->force_yuv420_output)
4524 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4525 	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
4526 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4527 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
4528 	else
4529 		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
4530 
4531 	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
4532 	timing_out->display_color_depth = convert_color_depth_from_display_info(
4533 		connector,
4534 		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
4535 		requested_bpc);
4536 	timing_out->scan_type = SCANNING_TYPE_NODATA;
4537 	timing_out->hdmi_vic = 0;
4538 
	if (old_stream) {
4540 		timing_out->vic = old_stream->timing.vic;
4541 		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
4542 		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
4543 	} else {
4544 		timing_out->vic = drm_match_cea_mode(mode_in);
4545 		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
4546 			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
4547 		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
4548 			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
4549 	}
4550 
4551 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4552 		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
4553 		timing_out->vic = avi_frame.video_code;
4554 		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
4555 		timing_out->hdmi_vic = hv_frame.vic;
4556 	}
4557 
4558 	timing_out->h_addressable = mode_in->crtc_hdisplay;
4559 	timing_out->h_total = mode_in->crtc_htotal;
4560 	timing_out->h_sync_width =
4561 		mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
4562 	timing_out->h_front_porch =
4563 		mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
4564 	timing_out->v_total = mode_in->crtc_vtotal;
4565 	timing_out->v_addressable = mode_in->crtc_vdisplay;
4566 	timing_out->v_front_porch =
4567 		mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
4568 	timing_out->v_sync_width =
4569 		mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
4570 	timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
4571 	timing_out->aspect_ratio = get_aspect_ratio(mode_in);
4572 
4573 	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
4574 	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
4575 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4576 		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
4577 		    drm_mode_is_420_also(info, mode_in) &&
4578 		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
4579 			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4580 			adjust_colour_depth_from_display_info(timing_out, info);
4581 		}
4582 	}
4583 
4584 	stream->output_color_space = get_output_color_space(timing_out);
4585 }
4586 
4587 static void fill_audio_info(struct audio_info *audio_info,
4588 			    const struct drm_connector *drm_connector,
4589 			    const struct dc_sink *dc_sink)
4590 {
4591 	int i = 0;
4592 	int cea_revision = 0;
4593 	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
4594 
4595 	audio_info->manufacture_id = edid_caps->manufacturer_id;
4596 	audio_info->product_id = edid_caps->product_id;
4597 
4598 	cea_revision = drm_connector->display_info.cea_rev;
4599 
4600 	strscpy(audio_info->display_name,
4601 		edid_caps->display_name,
4602 		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
4603 
4604 	if (cea_revision >= 3) {
4605 		audio_info->mode_count = edid_caps->audio_mode_count;
4606 
4607 		for (i = 0; i < audio_info->mode_count; ++i) {
4608 			audio_info->modes[i].format_code =
4609 					(enum audio_format_code)
4610 					(edid_caps->audio_modes[i].format_code);
4611 			audio_info->modes[i].channel_count =
4612 					edid_caps->audio_modes[i].channel_count;
4613 			audio_info->modes[i].sample_rates.all =
4614 					edid_caps->audio_modes[i].sample_rate;
4615 			audio_info->modes[i].sample_size =
4616 					edid_caps->audio_modes[i].sample_size;
4617 		}
4618 	}
4619 
4620 	audio_info->flags.all = edid_caps->speaker_flags;
4621 
4622 	/* TODO: We only check for the progressive mode, check for interlace mode too */
4623 	if (drm_connector->latency_present[0]) {
4624 		audio_info->video_latency = drm_connector->video_latency[0];
4625 		audio_info->audio_latency = drm_connector->audio_latency[0];
4626 	}
4627 
4628 	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */
4629 
4630 }
4631 
4632 static void
4633 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
4634 				      struct drm_display_mode *dst_mode)
4635 {
4636 	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
4637 	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
4638 	dst_mode->crtc_clock = src_mode->crtc_clock;
4639 	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
4640 	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
4641 	dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
4642 	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
4643 	dst_mode->crtc_htotal = src_mode->crtc_htotal;
4644 	dst_mode->crtc_hskew = src_mode->crtc_hskew;
4645 	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
4646 	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
4647 	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
4648 	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
4649 	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
4650 }
4651 
4652 static void
4653 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
4654 					const struct drm_display_mode *native_mode,
4655 					bool scale_enabled)
4656 {
4657 	if (scale_enabled) {
4658 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4659 	} else if (native_mode->clock == drm_mode->clock &&
4660 			native_mode->htotal == drm_mode->htotal &&
4661 			native_mode->vtotal == drm_mode->vtotal) {
4662 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4663 	} else {
		/* neither scaling nor an amdgpu-inserted mode: nothing to patch */
4665 	}
4666 }
4667 
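/*
 * A "fake" virtual sink lets a stream be constructed for a link with no
 * physical display attached (e.g. a forced-enabled connector), so the
 * modeset can proceed against sensible defaults.
 */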
4668 static struct dc_sink *
4669 create_fake_sink(struct amdgpu_dm_connector *aconnector)
4670 {
	struct dc_sink_init_data sink_init_data = { 0 };
	struct dc_sink *sink = NULL;

	sink_init_data.link = aconnector->dc_link;
4674 	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
4675 
4676 	sink = dc_sink_create(&sink_init_data);
4677 	if (!sink) {
4678 		DRM_ERROR("Failed to create sink!\n");
4679 		return NULL;
4680 	}
4681 	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
4682 
4683 	return sink;
4684 }
4685 
4686 static void set_multisync_trigger_params(
4687 		struct dc_stream_state *stream)
4688 {
4689 	if (stream->triggered_crtc_reset.enabled) {
4690 		stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
4691 		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
4692 	}
4693 }
4694 
4695 static void set_master_stream(struct dc_stream_state *stream_set[],
4696 			      int stream_count)
4697 {
4698 	int j, highest_rfr = 0, master_stream = 0;
4699 
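	/*
	 * Pick the stream with the highest refresh rate as the master;
	 * every synchronized stream then resets its CRTC off the master
	 * stream's trigger event.
	 */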
4700 	for (j = 0;  j < stream_count; j++) {
4701 		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
4702 			int refresh_rate = 0;
4703 
			refresh_rate = (stream_set[j]->timing.pix_clk_100hz * 100) /
				(stream_set[j]->timing.h_total * stream_set[j]->timing.v_total);
4706 			if (refresh_rate > highest_rfr) {
4707 				highest_rfr = refresh_rate;
4708 				master_stream = j;
4709 			}
4710 		}
4711 	}
4712 	for (j = 0;  j < stream_count; j++) {
4713 		if (stream_set[j])
4714 			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
4715 	}
4716 }
4717 
4718 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
4719 {
4720 	int i = 0;
4721 
4722 	if (context->stream_count < 2)
4723 		return;
4724 	for (i = 0; i < context->stream_count ; i++) {
4725 		if (!context->streams[i])
4726 			continue;
4727 		/*
4728 		 * TODO: add a function to read AMD VSDB bits and set
4729 		 * crtc_sync_master.multi_sync_enabled flag
4730 		 * For now it's set to false
4731 		 */
4732 		set_multisync_trigger_params(context->streams[i]);
4733 	}
4734 	set_master_stream(context->streams, context->stream_count);
4735 }
4736 
4737 static struct dc_stream_state *
4738 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
4739 		       const struct drm_display_mode *drm_mode,
4740 		       const struct dm_connector_state *dm_state,
4741 		       const struct dc_stream_state *old_stream,
4742 		       int requested_bpc)
4743 {
4744 	struct drm_display_mode *preferred_mode = NULL;
4745 	struct drm_connector *drm_connector;
4746 	const struct drm_connector_state *con_state =
4747 		dm_state ? &dm_state->base : NULL;
4748 	struct dc_stream_state *stream = NULL;
4749 	struct drm_display_mode mode = *drm_mode;
4750 	bool native_mode_found = false;
4751 	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
4752 	int mode_refresh;
4753 	int preferred_refresh = 0;
4754 #if defined(CONFIG_DRM_AMD_DC_DCN)
4755 	struct dsc_dec_dpcd_caps dsc_caps;
4756 #endif
4757 	uint32_t link_bandwidth_kbps;
4758 
	struct dc_sink *sink = NULL;

	if (aconnector == NULL) {
4761 		DRM_ERROR("aconnector is NULL!\n");
4762 		return stream;
4763 	}
4764 
4765 	drm_connector = &aconnector->base;
4766 
4767 	if (!aconnector->dc_sink) {
4768 		sink = create_fake_sink(aconnector);
4769 		if (!sink)
4770 			return stream;
4771 	} else {
4772 		sink = aconnector->dc_sink;
4773 		dc_sink_retain(sink);
4774 	}
4775 
4776 	stream = dc_create_stream_for_sink(sink);
4777 
4778 	if (stream == NULL) {
4779 		DRM_ERROR("Failed to create stream for sink!\n");
4780 		goto finish;
4781 	}
4782 
4783 	stream->dm_stream_context = aconnector;
4784 
4785 	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
4786 		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
4787 
4788 	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
4789 		/* Search for preferred mode */
4790 		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
4791 			native_mode_found = true;
4792 			break;
4793 		}
4794 	}
4795 	if (!native_mode_found)
4796 		preferred_mode = list_first_entry_or_null(
4797 				&aconnector->base.modes,
4798 				struct drm_display_mode,
4799 				head);
4800 
4801 	mode_refresh = drm_mode_vrefresh(&mode);
4802 
4803 	if (preferred_mode == NULL) {
4804 		/*
4805 		 * This may not be an error, the use case is when we have no
4806 		 * usermode calls to reset and set mode upon hotplug. In this
4807 		 * case, we call set mode ourselves to restore the previous mode
		 * and the mode list may not be filled in yet.
4809 		 */
4810 		DRM_DEBUG_DRIVER("No preferred mode found\n");
4811 	} else {
4812 		decide_crtc_timing_for_drm_display_mode(
4813 				&mode, preferred_mode,
4814 				dm_state ? (dm_state->scaling != RMX_OFF) : false);
4815 		preferred_refresh = drm_mode_vrefresh(preferred_mode);
4816 	}
4817 
4818 	if (!dm_state)
4819 		drm_mode_set_crtcinfo(&mode, 0);
4820 
4821 	/*
4822 	* If scaling is enabled and refresh rate didn't change
4823 	* we copy the vic and polarities of the old timings
4824 	*/
4825 	if (!scale || mode_refresh != preferred_refresh)
4826 		fill_stream_properties_from_drm_display_mode(stream,
4827 			&mode, &aconnector->base, con_state, NULL, requested_bpc);
4828 	else
4829 		fill_stream_properties_from_drm_display_mode(stream,
4830 			&mode, &aconnector->base, con_state, old_stream, requested_bpc);
4831 
4832 	stream->timing.flags.DSC = 0;
4833 
4834 	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
4835 #if defined(CONFIG_DRM_AMD_DC_DCN)
4836 		dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
4837 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
4838 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
4839 				      &dsc_caps);
4840 #endif
4841 		link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
4842 							     dc_link_get_link_cap(aconnector->dc_link));
4843 
4844 #if defined(CONFIG_DRM_AMD_DC_DCN)
4845 		if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported) {
4846 			/* Set DSC policy according to dsc_clock_en */
4847 			dc_dsc_policy_set_enable_dsc_when_not_needed(
4848 				aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
4849 
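			/*
			 * Ask DC for a DSC config (slice count, bpp) that fits
			 * this timing within the available link bandwidth; the
			 * debugfs overrides below may force or adjust it.
			 */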
4850 			if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
4851 						  &dsc_caps,
4852 						  aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
4853 						  link_bandwidth_kbps,
4854 						  &stream->timing,
4855 						  &stream->timing.dsc_cfg))
4856 				stream->timing.flags.DSC = 1;
4857 			/* Overwrite the stream flag if DSC is enabled through debugfs */
4858 			if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
4859 				stream->timing.flags.DSC = 1;
4860 
4861 			if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
4862 				stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
4863 
4864 			if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
4865 				stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
4866 
4867 			if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
4868 				stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
4869 		}
4870 #endif
4871 	}
4872 
4873 	update_stream_scaling_settings(&mode, dm_state, stream);
4874 
4875 	fill_audio_info(
4876 		&stream->audio_info,
4877 		drm_connector,
4878 		sink);
4879 
4880 	update_stream_signal(stream, sink);
4881 
4882 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4883 		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
4884 
4885 	if (stream->link->psr_settings.psr_feature_enabled) {
4886 		//
4887 		// should decide stream support vsc sdp colorimetry capability
4888 		// before building vsc info packet
4889 		//
4890 		stream->use_vsc_sdp_for_colorimetry = false;
4891 		if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
4892 			stream->use_vsc_sdp_for_colorimetry =
4893 				aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
4894 		} else {
4895 			if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
4896 				stream->use_vsc_sdp_for_colorimetry = true;
4897 		}
4898 		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
4899 	}
4900 finish:
4901 	dc_sink_release(sink);
4902 
4903 	return stream;
4904 }
4905 
4906 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
4907 {
4908 	drm_crtc_cleanup(crtc);
4909 	kfree(crtc);
4910 }
4911 
4912 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
4913 				  struct drm_crtc_state *state)
4914 {
4915 	struct dm_crtc_state *cur = to_dm_crtc_state(state);
4916 
	/* TODO: Destroy dc_stream objects once the stream object is flattened */
4918 	if (cur->stream)
		dc_stream_release(cur->stream);

	__drm_atomic_helper_crtc_destroy_state(state);

	kfree(state);
4926 }
4927 
4928 static void dm_crtc_reset_state(struct drm_crtc *crtc)
4929 {
4930 	struct dm_crtc_state *state;
4931 
4932 	if (crtc->state)
4933 		dm_crtc_destroy_state(crtc, crtc->state);
4934 
4935 	state = kzalloc(sizeof(*state), GFP_KERNEL);
4936 	if (WARN_ON(!state))
4937 		return;
4938 
4939 	__drm_atomic_helper_crtc_reset(crtc, &state->base);
4940 }
4941 
4942 static struct drm_crtc_state *
4943 dm_crtc_duplicate_state(struct drm_crtc *crtc)
4944 {
	struct dm_crtc_state *state, *cur;

	if (WARN_ON(!crtc->state))
		return NULL;

	cur = to_dm_crtc_state(crtc->state);
4951 
4952 	state = kzalloc(sizeof(*state), GFP_KERNEL);
4953 	if (!state)
4954 		return NULL;
4955 
4956 	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
4957 
4958 	if (cur->stream) {
4959 		state->stream = cur->stream;
4960 		dc_stream_retain(state->stream);
4961 	}
4962 
4963 	state->active_planes = cur->active_planes;
4964 	state->vrr_infopacket = cur->vrr_infopacket;
4965 	state->abm_level = cur->abm_level;
4966 	state->vrr_supported = cur->vrr_supported;
4967 	state->freesync_config = cur->freesync_config;
4968 	state->crc_src = cur->crc_src;
4969 	state->cm_has_degamma = cur->cm_has_degamma;
4970 	state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
4971 
	/* TODO: Duplicate the dc_stream once the stream object is flattened */
4973 
4974 	return &state->base;
4975 }
4976 
4977 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
4978 {
4979 	enum dc_irq_source irq_source;
4980 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4981 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
4982 	int rc;
4983 
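	/*
	 * VUPDATE interrupt sources are laid out contiguously per OTG
	 * instance, so indexing by otg_inst selects this CRTC's line.
	 */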
4984 	irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
4985 
4986 	rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4987 
4988 	DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
4989 			 acrtc->crtc_id, enable ? "en" : "dis", rc);
4990 	return rc;
4991 }
4992 
4993 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
4994 {
4995 	enum dc_irq_source irq_source;
4996 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4997 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
4998 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
4999 	int rc = 0;
5000 
5001 	if (enable) {
5002 		/* vblank irq on -> Only need vupdate irq in vrr mode */
5003 		if (amdgpu_dm_vrr_active(acrtc_state))
5004 			rc = dm_set_vupdate_irq(crtc, true);
5005 	} else {
5006 		/* vblank irq off -> vupdate irq off */
5007 		rc = dm_set_vupdate_irq(crtc, false);
5008 	}
5009 
5010 	if (rc)
5011 		return rc;
5012 
5013 	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
5014 	return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
5015 }
5016 
5017 static int dm_enable_vblank(struct drm_crtc *crtc)
5018 {
5019 	return dm_set_vblank(crtc, true);
5020 }
5021 
5022 static void dm_disable_vblank(struct drm_crtc *crtc)
5023 {
5024 	dm_set_vblank(crtc, false);
5025 }
5026 
/* Only the options currently available in the driver are implemented */
5028 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
5029 	.reset = dm_crtc_reset_state,
5030 	.destroy = amdgpu_dm_crtc_destroy,
5031 	.gamma_set = drm_atomic_helper_legacy_gamma_set,
5032 	.set_config = drm_atomic_helper_set_config,
5033 	.page_flip = drm_atomic_helper_page_flip,
5034 	.atomic_duplicate_state = dm_crtc_duplicate_state,
5035 	.atomic_destroy_state = dm_crtc_destroy_state,
5036 	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
5037 	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
5038 	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
5039 	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
5040 	.enable_vblank = dm_enable_vblank,
5041 	.disable_vblank = dm_disable_vblank,
5042 	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
5043 };
5044 
5045 static enum drm_connector_status
5046 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
5047 {
5048 	bool connected;
5049 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5050 
5051 	/*
5052 	 * Notes:
5053 	 * 1. This interface is NOT called in the context of the HPD irq.
5054 	 * 2. This interface *is called* in the context of a user-mode ioctl,
5055 	 * which makes it a bad place for *any* MST-related activity.
5056 	 */
5057 
5058 	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
5059 	    !aconnector->fake_enable)
5060 		connected = (aconnector->dc_sink != NULL);
5061 	else
5062 		connected = (aconnector->base.force == DRM_FORCE_ON);
5063 
5064 	update_subconnector_property(aconnector);
5065 
5066 	return (connected ? connector_status_connected :
5067 			connector_status_disconnected);
5068 }
5069 
5070 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
5071 					    struct drm_connector_state *connector_state,
5072 					    struct drm_property *property,
5073 					    uint64_t val)
5074 {
5075 	struct drm_device *dev = connector->dev;
5076 	struct amdgpu_device *adev = drm_to_adev(dev);
5077 	struct dm_connector_state *dm_old_state =
5078 		to_dm_connector_state(connector->state);
5079 	struct dm_connector_state *dm_new_state =
5080 		to_dm_connector_state(connector_state);
5081 
5082 	int ret = -EINVAL;
5083 
5084 	if (property == dev->mode_config.scaling_mode_property) {
5085 		enum amdgpu_rmx_type rmx_type;
5086 
5087 		switch (val) {
5088 		case DRM_MODE_SCALE_CENTER:
5089 			rmx_type = RMX_CENTER;
5090 			break;
5091 		case DRM_MODE_SCALE_ASPECT:
5092 			rmx_type = RMX_ASPECT;
5093 			break;
5094 		case DRM_MODE_SCALE_FULLSCREEN:
5095 			rmx_type = RMX_FULL;
5096 			break;
5097 		case DRM_MODE_SCALE_NONE:
5098 		default:
5099 			rmx_type = RMX_OFF;
5100 			break;
5101 		}
5102 
5103 		if (dm_old_state->scaling == rmx_type)
5104 			return 0;
5105 
5106 		dm_new_state->scaling = rmx_type;
5107 		ret = 0;
5108 	} else if (property == adev->mode_info.underscan_hborder_property) {
5109 		dm_new_state->underscan_hborder = val;
5110 		ret = 0;
5111 	} else if (property == adev->mode_info.underscan_vborder_property) {
5112 		dm_new_state->underscan_vborder = val;
5113 		ret = 0;
5114 	} else if (property == adev->mode_info.underscan_property) {
5115 		dm_new_state->underscan_enable = val;
5116 		ret = 0;
5117 	} else if (property == adev->mode_info.abm_level_property) {
5118 		dm_new_state->abm_level = val;
5119 		ret = 0;
5120 	}
5121 
5122 	return ret;
5123 }
5124 
5125 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
5126 					    const struct drm_connector_state *state,
5127 					    struct drm_property *property,
5128 					    uint64_t *val)
5129 {
5130 	struct drm_device *dev = connector->dev;
5131 	struct amdgpu_device *adev = drm_to_adev(dev);
5132 	struct dm_connector_state *dm_state =
5133 		to_dm_connector_state(state);
5134 	int ret = -EINVAL;
5135 
5136 	if (property == dev->mode_config.scaling_mode_property) {
5137 		switch (dm_state->scaling) {
5138 		case RMX_CENTER:
5139 			*val = DRM_MODE_SCALE_CENTER;
5140 			break;
5141 		case RMX_ASPECT:
5142 			*val = DRM_MODE_SCALE_ASPECT;
5143 			break;
5144 		case RMX_FULL:
5145 			*val = DRM_MODE_SCALE_FULLSCREEN;
5146 			break;
5147 		case RMX_OFF:
5148 		default:
5149 			*val = DRM_MODE_SCALE_NONE;
5150 			break;
5151 		}
5152 		ret = 0;
5153 	} else if (property == adev->mode_info.underscan_hborder_property) {
5154 		*val = dm_state->underscan_hborder;
5155 		ret = 0;
5156 	} else if (property == adev->mode_info.underscan_vborder_property) {
5157 		*val = dm_state->underscan_vborder;
5158 		ret = 0;
5159 	} else if (property == adev->mode_info.underscan_property) {
5160 		*val = dm_state->underscan_enable;
5161 		ret = 0;
5162 	} else if (property == adev->mode_info.abm_level_property) {
5163 		*val = dm_state->abm_level;
5164 		ret = 0;
5165 	}
5166 
5167 	return ret;
5168 }
5169 
5170 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
5171 {
5172 	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
5173 
5174 	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
5175 }
5176 
5177 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
5178 {
5179 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5180 	const struct dc_link *link = aconnector->dc_link;
5181 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
5182 	struct amdgpu_display_manager *dm = &adev->dm;
5183 
5184 	/*
5185 	 * Call this only if mst_mgr was initialized earlier, since it's not done
5186 	 * for all connector types.
5187 	 */
5188 	if (aconnector->mst_mgr.dev)
5189 		drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
5190 
5191 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
5192 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
5193 
5194 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
5195 	    link->type != dc_connection_none &&
5196 	    dm->backlight_dev) {
5197 		backlight_device_unregister(dm->backlight_dev);
5198 		dm->backlight_dev = NULL;
5199 	}
5200 #endif
5201 
5202 	if (aconnector->dc_em_sink)
5203 		dc_sink_release(aconnector->dc_em_sink);
5204 	aconnector->dc_em_sink = NULL;
5205 	if (aconnector->dc_sink)
5206 		dc_sink_release(aconnector->dc_sink);
5207 	aconnector->dc_sink = NULL;
5208 
5209 	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
5210 	drm_connector_unregister(connector);
5211 	drm_connector_cleanup(connector);
5212 	if (aconnector->i2c) {
5213 		i2c_del_adapter(&aconnector->i2c->base);
5214 		kfree(aconnector->i2c);
5215 	}
5216 	kfree(aconnector->dm_dp_aux.aux.name);
5217 
5218 	kfree(connector);
5219 }
5220 
5221 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
5222 {
5223 	struct dm_connector_state *state =
5224 		to_dm_connector_state(connector->state);
5225 
5226 	if (connector->state)
5227 		__drm_atomic_helper_connector_destroy_state(connector->state);
5228 
5229 	kfree(state);
5230 
5231 	state = kzalloc(sizeof(*state), GFP_KERNEL);
5232 
5233 	if (state) {
5234 		state->scaling = RMX_OFF;
5235 		state->underscan_enable = false;
5236 		state->underscan_hborder = 0;
5237 		state->underscan_vborder = 0;
5238 		state->base.max_requested_bpc = 8;
5239 		state->vcpi_slots = 0;
5240 		state->pbn = 0;
5241 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
5242 			state->abm_level = amdgpu_dm_abm_level;
5243 
5244 		__drm_atomic_helper_connector_reset(connector, &state->base);
5245 	}
5246 }
5247 
5248 struct drm_connector_state *
5249 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
5250 {
5251 	struct dm_connector_state *state =
5252 		to_dm_connector_state(connector->state);
5253 
5254 	struct dm_connector_state *new_state =
5255 			kmemdup(state, sizeof(*state), GFP_KERNEL);
5256 
5257 	if (!new_state)
5258 		return NULL;
5259 
5260 	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
5261 
5262 	new_state->freesync_capable = state->freesync_capable;
5263 	new_state->abm_level = state->abm_level;
5264 	new_state->scaling = state->scaling;
5265 	new_state->underscan_enable = state->underscan_enable;
5266 	new_state->underscan_hborder = state->underscan_hborder;
5267 	new_state->underscan_vborder = state->underscan_vborder;
5268 	new_state->vcpi_slots = state->vcpi_slots;
5269 	new_state->pbn = state->pbn;
5270 	return &new_state->base;
5271 }
5272 
5273 static int
5274 amdgpu_dm_connector_late_register(struct drm_connector *connector)
5275 {
5276 	struct amdgpu_dm_connector *amdgpu_dm_connector =
5277 		to_amdgpu_dm_connector(connector);
5278 	int r;
5279 
5280 	if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
5281 	    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
5282 		amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
5283 		r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
5284 		if (r)
5285 			return r;
5286 	}
5287 
5288 #if defined(CONFIG_DEBUG_FS)
5289 	connector_debugfs_init(amdgpu_dm_connector);
5290 #endif
5291 
5292 	return 0;
5293 }
5294 
5295 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
5296 	.reset = amdgpu_dm_connector_funcs_reset,
5297 	.detect = amdgpu_dm_connector_detect,
5298 	.fill_modes = drm_helper_probe_single_connector_modes,
5299 	.destroy = amdgpu_dm_connector_destroy,
5300 	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
5301 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
5302 	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
5303 	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
5304 	.late_register = amdgpu_dm_connector_late_register,
5305 	.early_unregister = amdgpu_dm_connector_unregister
5306 };
5307 
5308 static int get_modes(struct drm_connector *connector)
5309 {
5310 	return amdgpu_dm_connector_get_modes(connector);
5311 }
5312 
5313 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
5314 {
5315 	struct dc_sink_init_data init_params = {
5316 			.link = aconnector->dc_link,
5317 			.sink_signal = SIGNAL_TYPE_VIRTUAL
5318 	};
5319 	struct edid *edid;
5320 
5321 	if (!aconnector->base.edid_blob_ptr) {
5322 		DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
5323 				aconnector->base.name);
5324 
5325 		aconnector->base.force = DRM_FORCE_OFF;
5326 		aconnector->base.override_edid = false;
5327 		return;
5328 	}
5329 
5330 	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
5331 
5332 	aconnector->edid = edid;
5333 
5334 	aconnector->dc_em_sink = dc_link_add_remote_sink(
5335 		aconnector->dc_link,
5336 		(uint8_t *)edid,
5337 		(edid->extensions + 1) * EDID_LENGTH,
5338 		&init_params);
5339 
5340 	if (aconnector->base.force == DRM_FORCE_ON) {
5341 		aconnector->dc_sink = aconnector->dc_link->local_sink ?
5342 		aconnector->dc_link->local_sink :
5343 		aconnector->dc_em_sink;
5344 		dc_sink_retain(aconnector->dc_sink);
5345 	}
5346 }
5347 
5348 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
5349 {
5350 	struct dc_link *link = (struct dc_link *)aconnector->dc_link;
5351 
5352 	/*
5353 	 * In case of a headless boot with force on for a DP managed connector,
5354 	 * these settings have to be != 0 to get an initial modeset.
5355 	 */
5356 	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5357 		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
5358 		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
5359 	}
5360 
5361 
5362 	aconnector->base.override_edid = true;
5363 	create_eml_sink(aconnector);
5364 }
5365 
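/*
 * Create a dc_stream_state for the sink and validate it with DC. If
 * validation fails, retry at progressively lower bpc (down to 6) and,
 * for DC_FAIL_ENC_VALIDATE, once more with YCbCr 4:2:0 output forced.
 */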
5366 static struct dc_stream_state *
5367 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5368 				const struct drm_display_mode *drm_mode,
5369 				const struct dm_connector_state *dm_state,
5370 				const struct dc_stream_state *old_stream)
5371 {
5372 	struct drm_connector *connector = &aconnector->base;
5373 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
5374 	struct dc_stream_state *stream;
5375 	const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
5376 	int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
5377 	enum dc_status dc_result = DC_OK;
5378 
5379 	do {
5380 		stream = create_stream_for_sink(aconnector, drm_mode,
5381 						dm_state, old_stream,
5382 						requested_bpc);
5383 		if (stream == NULL) {
5384 			DRM_ERROR("Failed to create stream for sink!\n");
5385 			break;
5386 		}
5387 
5388 		dc_result = dc_validate_stream(adev->dm.dc, stream);
5389 
5390 		if (dc_result != DC_OK) {
5391 			DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
5392 				      drm_mode->hdisplay,
5393 				      drm_mode->vdisplay,
5394 				      drm_mode->clock,
5395 				      dc_result,
5396 				      dc_status_to_str(dc_result));
5397 
5398 			dc_stream_release(stream);
5399 			stream = NULL;
5400 			requested_bpc -= 2; /* lower bpc to retry validation */
5401 		}
5402 
5403 	} while (stream == NULL && requested_bpc >= 6);
5404 
5405 	if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
5406 		DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
5407 
5408 		aconnector->force_yuv420_output = true;
5409 		stream = create_validate_stream_for_sink(aconnector, drm_mode,
5410 						dm_state, old_stream);
5411 		aconnector->force_yuv420_output = false;
5412 	}
5413 
5414 	return stream;
5415 }
5416 
5417 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
5418 				   struct drm_display_mode *mode)
5419 {
5420 	int result = MODE_ERROR;
5421 	struct dc_sink *dc_sink;
5422 	/* TODO: Unhardcode stream count */
5423 	struct dc_stream_state *stream;
5424 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5425 
5426 	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
5427 			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
5428 		return result;
5429 
5430 	/*
5431 	 * Only run this the first time mode_valid is called, to initialize
5432 	 * EDID management.
5433 	 */
5434 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
5435 		!aconnector->dc_em_sink)
5436 		handle_edid_mgmt(aconnector);
5437 
5438 	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
5439 
5440 	if (dc_sink == NULL) {
5441 		DRM_ERROR("dc_sink is NULL!\n");
5442 		goto fail;
5443 	}
5444 
5445 	stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
5446 	if (stream) {
5447 		dc_stream_release(stream);
5448 		result = MODE_OK;
5449 	}
5450 
5451 fail:
5452 	/* TODO: error handling */
5453 	return result;
5454 }
5455 
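/*
 * Pack the connector state's HDR output metadata into an HDMI DRM
 * (Dynamic Range and Mastering) infoframe and wrap it with the header
 * bytes DC expects: a plain infoframe header for HDMI, or an SDP
 * header for DP/eDP.
 */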
5456 static int fill_hdr_info_packet(const struct drm_connector_state *state,
5457 				struct dc_info_packet *out)
5458 {
5459 	struct hdmi_drm_infoframe frame;
5460 	unsigned char buf[30]; /* 26 + 4 */
5461 	ssize_t len;
5462 	int ret, i;
5463 
5464 	memset(out, 0, sizeof(*out));
5465 
5466 	if (!state->hdr_output_metadata)
5467 		return 0;
5468 
5469 	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
5470 	if (ret)
5471 		return ret;
5472 
5473 	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
5474 	if (len < 0)
5475 		return (int)len;
5476 
5477 	/* Static metadata is a fixed 26 bytes + 4 byte header. */
5478 	if (len != 30)
5479 		return -EINVAL;
5480 
5481 	/* Prepare the infopacket for DC. */
5482 	switch (state->connector->connector_type) {
5483 	case DRM_MODE_CONNECTOR_HDMIA:
5484 		out->hb0 = 0x87; /* type */
5485 		out->hb1 = 0x01; /* version */
5486 		out->hb2 = 0x1A; /* length */
5487 		out->sb[0] = buf[3]; /* checksum */
5488 		i = 1;
5489 		break;
5490 
5491 	case DRM_MODE_CONNECTOR_DisplayPort:
5492 	case DRM_MODE_CONNECTOR_eDP:
5493 		out->hb0 = 0x00; /* sdp id, zero */
5494 		out->hb1 = 0x87; /* type */
5495 		out->hb2 = 0x1D; /* payload len - 1 */
5496 		out->hb3 = (0x13 << 2); /* sdp version */
5497 		out->sb[0] = 0x01; /* version */
5498 		out->sb[1] = 0x1A; /* length */
5499 		i = 2;
5500 		break;
5501 
5502 	default:
5503 		return -EINVAL;
5504 	}
5505 
5506 	memcpy(&out->sb[i], &buf[4], 26);
5507 	out->valid = true;
5508 
5509 	print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
5510 		       sizeof(out->sb), false);
5511 
5512 	return 0;
5513 }
5514 
5515 static bool
5516 is_hdr_metadata_different(const struct drm_connector_state *old_state,
5517 			  const struct drm_connector_state *new_state)
5518 {
5519 	struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
5520 	struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
5521 
5522 	if (old_blob != new_blob) {
5523 		if (old_blob && new_blob &&
5524 		    old_blob->length == new_blob->length)
5525 			return memcmp(old_blob->data, new_blob->data,
5526 				      old_blob->length);
5527 
5528 		return true;
5529 	}
5530 
5531 	return false;
5532 }
5533 
5534 static int
5535 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
5536 				 struct drm_atomic_state *state)
5537 {
5538 	struct drm_connector_state *new_con_state =
5539 		drm_atomic_get_new_connector_state(state, conn);
5540 	struct drm_connector_state *old_con_state =
5541 		drm_atomic_get_old_connector_state(state, conn);
5542 	struct drm_crtc *crtc = new_con_state->crtc;
5543 	struct drm_crtc_state *new_crtc_state;
5544 	int ret;
5545 
5546 	if (!crtc)
5547 		return 0;
5548 
5549 	if (is_hdr_metadata_different(old_con_state, new_con_state)) {
5550 		struct dc_info_packet hdr_infopacket;
5551 
5552 		ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
5553 		if (ret)
5554 			return ret;
5555 
5556 		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
5557 		if (IS_ERR(new_crtc_state))
5558 			return PTR_ERR(new_crtc_state);
5559 
5560 		/*
5561 		 * DC considers the stream backends changed if the
5562 		 * static metadata changes. Forcing the modeset also
5563 		 * gives a simple way for userspace to switch from
5564 		 * 8bpc to 10bpc when setting the metadata to enter
5565 		 * or exit HDR.
5566 		 *
5567 		 * Changing the static metadata after it's been
5568 		 * set is permissible, however. So only force a
5569 		 * modeset if we're entering or exiting HDR.
5570 		 */
5571 		new_crtc_state->mode_changed =
5572 			!old_con_state->hdr_output_metadata ||
5573 			!new_con_state->hdr_output_metadata;
5574 	}
5575 
5576 	return 0;
5577 }
5578 
5579 static const struct drm_connector_helper_funcs
5580 amdgpu_dm_connector_helper_funcs = {
5581 	/*
5582 	 * If a second, bigger display is hotplugged in FB console mode, its higher
5583 	 * resolution modes will be filtered out by drm_mode_validate_size(), and
5584 	 * those modes are missing after the user starts lightdm. So we need to
5585 	 * renew the modes list in the get_modes callback, not just return the count.
5586 	 */
5587 	.get_modes = get_modes,
5588 	.mode_valid = amdgpu_dm_connector_mode_valid,
5589 	.atomic_check = amdgpu_dm_connector_atomic_check,
5590 };
5591 
5592 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
5593 {
5594 }
5595 
5596 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
5597 {
5598 	struct drm_atomic_state *state = new_crtc_state->state;
5599 	struct drm_plane *plane;
5600 	int num_active = 0;
5601 
5602 	drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
5603 		struct drm_plane_state *new_plane_state;
5604 
5605 		/* Cursor planes are "fake". */
5606 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
5607 			continue;
5608 
5609 		new_plane_state = drm_atomic_get_new_plane_state(state, plane);
5610 
5611 		if (!new_plane_state) {
5612 			/*
5613 			 * The plane is enabled on the CRTC and hasn't changed
5614 			 * state. This means that it previously passed
5615 			 * validation and is therefore enabled.
5616 			 */
5617 			num_active += 1;
5618 			continue;
5619 		}
5620 
5621 		/* We need a framebuffer to be considered enabled. */
5622 		num_active += (new_plane_state->fb != NULL);
5623 	}
5624 
5625 	return num_active;
5626 }
5627 
5628 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
5629 					 struct drm_crtc_state *new_crtc_state)
5630 {
5631 	struct dm_crtc_state *dm_new_crtc_state =
5632 		to_dm_crtc_state(new_crtc_state);
5633 
5634 	dm_new_crtc_state->active_planes = 0;
5635 
5636 	if (!dm_new_crtc_state->stream)
5637 		return;
5638 
5639 	dm_new_crtc_state->active_planes =
5640 		count_crtc_active_planes(new_crtc_state);
5641 }
5642 
5643 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
5644 				       struct drm_crtc_state *state)
5645 {
5646 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5647 	struct dc *dc = adev->dm.dc;
5648 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
5649 	int ret = -EINVAL;
5650 
5651 	dm_update_crtc_active_planes(crtc, state);
5652 
5653 	if (unlikely(!dm_crtc_state->stream &&
5654 		     modeset_required(state, NULL, dm_crtc_state->stream))) {
5655 		WARN_ON(1);
5656 		return ret;
5657 	}
5658 
5659 	/*
5660 	 * We require the primary plane to be enabled whenever the CRTC is, otherwise
5661 	 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
5662 	 * planes are disabled, which is not supported by the hardware. And there is legacy
5663 	 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
5664 	 */
5665 	if (state->enable &&
5666 	    !(state->plane_mask & drm_plane_mask(crtc->primary)))
5667 		return -EINVAL;
5668 
5669 	/* In some use cases, like reset, no stream is attached */
5670 	if (!dm_crtc_state->stream)
5671 		return 0;
5672 
5673 	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
5674 		return 0;
5675 
5676 	return ret;
5677 }
5678 
5679 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
5680 				      const struct drm_display_mode *mode,
5681 				      struct drm_display_mode *adjusted_mode)
5682 {
5683 	return true;
5684 }
5685 
5686 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
5687 	.disable = dm_crtc_helper_disable,
5688 	.atomic_check = dm_crtc_helper_atomic_check,
5689 	.mode_fixup = dm_crtc_helper_mode_fixup,
5690 	.get_scanout_position = amdgpu_crtc_get_scanout_position,
5691 };
5692 
5693 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
5694 {
5695 
5696 }
5697 
5698 static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
5699 {
5700 	switch (display_color_depth) {
5701 	case COLOR_DEPTH_666:
5702 		return 6;
5703 	case COLOR_DEPTH_888:
5704 		return 8;
5705 	case COLOR_DEPTH_101010:
5706 		return 10;
5707 	case COLOR_DEPTH_121212:
5708 		return 12;
5709 	case COLOR_DEPTH_141414:
5710 		return 14;
5711 	case COLOR_DEPTH_161616:
5712 		return 16;
5713 	default:
5714 		break;
5715 	}
5716 	return 0;
5717 }
5718 
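/*
 * For MST connectors, convert the adjusted mode's bandwidth into PBN
 * units (54/64 MB/s each, including the spec's ~0.6% margin) and
 * atomically reserve VCPI slots on the topology manager.
 */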
5719 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
5720 					  struct drm_crtc_state *crtc_state,
5721 					  struct drm_connector_state *conn_state)
5722 {
5723 	struct drm_atomic_state *state = crtc_state->state;
5724 	struct drm_connector *connector = conn_state->connector;
5725 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5726 	struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
5727 	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
5728 	struct drm_dp_mst_topology_mgr *mst_mgr;
5729 	struct drm_dp_mst_port *mst_port;
5730 	enum dc_color_depth color_depth;
5731 	int clock, bpp = 0;
5732 	bool is_y420 = false;
5733 
5734 	if (!aconnector->port || !aconnector->dc_sink)
5735 		return 0;
5736 
5737 	mst_port = aconnector->port;
5738 	mst_mgr = &aconnector->mst_port->mst_mgr;
5739 
5740 	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
5741 		return 0;
5742 
5743 	if (!state->duplicated) {
5744 		int max_bpc = conn_state->max_requested_bpc;
5745 		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
5746 				aconnector->force_yuv420_output;
5747 		color_depth = convert_color_depth_from_display_info(connector,
5748 								    is_y420,
5749 								    max_bpc);
5750 		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
5751 		clock = adjusted_mode->clock;
5752 		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
5753 	}
5754 	dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
5755 									   mst_mgr,
5756 									   mst_port,
5757 									   dm_new_connector_state->pbn,
5758 									   dm_mst_get_pbn_divider(aconnector->dc_link));
5759 	if (dm_new_connector_state->vcpi_slots < 0) {
5760 		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
5761 		return dm_new_connector_state->vcpi_slots;
5762 	}
5763 	return 0;
5764 }
5765 
5766 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
5767 	.disable = dm_encoder_helper_disable,
5768 	.atomic_check = dm_encoder_helper_atomic_check
5769 };
5770 
5771 #if defined(CONFIG_DRM_AMD_DC_DCN)
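/*
 * When a stream has DSC enabled, its PBN must be recomputed from the
 * DSC target bits-per-pixel rather than the native bpp. Walk the new
 * connector states and update each MST port's PBN and VCPI allocation
 * accordingly.
 */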
5772 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
5773 					    struct dc_state *dc_state)
5774 {
5775 	struct dc_stream_state *stream = NULL;
5776 	struct drm_connector *connector;
5777 	struct drm_connector_state *new_con_state, *old_con_state;
5778 	struct amdgpu_dm_connector *aconnector;
5779 	struct dm_connector_state *dm_conn_state;
5780 	int i, j, clock, bpp;
5781 	int vcpi, pbn_div, pbn = 0;
5782 
5783 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
5784 
5785 		aconnector = to_amdgpu_dm_connector(connector);
5786 
5787 		if (!aconnector->port)
5788 			continue;
5789 
5790 		if (!new_con_state || !new_con_state->crtc)
5791 			continue;
5792 
5793 		dm_conn_state = to_dm_connector_state(new_con_state);
5794 
5795 		for (j = 0; j < dc_state->stream_count; j++) {
5796 			stream = dc_state->streams[j];
5797 			if (!stream)
5798 				continue;
5799 
5800 			if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
5801 				break;
5802 
5803 			stream = NULL;
5804 		}
5805 
5806 		if (!stream)
5807 			continue;
5808 
5809 		if (stream->timing.flags.DSC != 1) {
5810 			drm_dp_mst_atomic_enable_dsc(state,
5811 						     aconnector->port,
5812 						     dm_conn_state->pbn,
5813 						     0,
5814 						     false);
5815 			continue;
5816 		}
5817 
5818 		pbn_div = dm_mst_get_pbn_divider(stream->link);
5819 		bpp = stream->timing.dsc_cfg.bits_per_pixel;
5820 		clock = stream->timing.pix_clk_100hz / 10;
5821 		pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
5822 		vcpi = drm_dp_mst_atomic_enable_dsc(state,
5823 						    aconnector->port,
5824 						    pbn, pbn_div,
5825 						    true);
5826 		if (vcpi < 0)
5827 			return vcpi;
5828 
5829 		dm_conn_state->pbn = pbn;
5830 		dm_conn_state->vcpi_slots = vcpi;
5831 	}
5832 	return 0;
5833 }
5834 #endif
5835 
5836 static void dm_drm_plane_reset(struct drm_plane *plane)
5837 {
5838 	struct dm_plane_state *amdgpu_state = NULL;
5839 
5840 	if (plane->state)
5841 		plane->funcs->atomic_destroy_state(plane, plane->state);
5842 
5843 	amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
5844 	WARN_ON(amdgpu_state == NULL);
5845 
5846 	if (amdgpu_state)
5847 		__drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
5848 }
5849 
5850 static struct drm_plane_state *
5851 dm_drm_plane_duplicate_state(struct drm_plane *plane)
5852 {
5853 	struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
5854 
5855 	old_dm_plane_state = to_dm_plane_state(plane->state);
5856 	dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
5857 	if (!dm_plane_state)
5858 		return NULL;
5859 
5860 	__drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
5861 
5862 	if (old_dm_plane_state->dc_state) {
5863 		dm_plane_state->dc_state = old_dm_plane_state->dc_state;
5864 		dc_plane_state_retain(dm_plane_state->dc_state);
5865 	}
5866 
5867 	/* Framebuffer hasn't been updated yet, so retain old flags. */
5868 	dm_plane_state->tiling_flags = old_dm_plane_state->tiling_flags;
5869 	dm_plane_state->tmz_surface = old_dm_plane_state->tmz_surface;
5870 
5871 	return &dm_plane_state->base;
5872 }
5873 
5874 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
5875 				struct drm_plane_state *state)
5876 {
5877 	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
5878 
5879 	if (dm_plane_state->dc_state)
5880 		dc_plane_state_release(dm_plane_state->dc_state);
5881 
5882 	drm_atomic_helper_plane_destroy_state(plane, state);
5883 }
5884 
5885 static const struct drm_plane_funcs dm_plane_funcs = {
5886 	.update_plane	= drm_atomic_helper_update_plane,
5887 	.disable_plane	= drm_atomic_helper_disable_plane,
5888 	.destroy	= drm_primary_helper_destroy,
5889 	.reset = dm_drm_plane_reset,
5890 	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
5891 	.atomic_destroy_state = dm_drm_plane_destroy_state,
5892 };
5893 
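/*
 * Reserve and pin the framebuffer BO into a displayable domain (VRAM
 * for cursors, any display-supported domain otherwise) and map it into
 * GART, so that afb->address holds a scanout address DC can program.
 */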
5894 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
5895 				      struct drm_plane_state *new_state)
5896 {
5897 	struct amdgpu_framebuffer *afb;
5898 	struct drm_gem_object *obj;
5899 	struct amdgpu_device *adev;
5900 	struct amdgpu_bo *rbo;
5901 	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
5902 	struct list_head list;
5903 	struct ttm_validate_buffer tv;
5904 	struct ww_acquire_ctx ticket;
5905 	uint32_t domain;
5906 	int r;
5907 
5908 	if (!new_state->fb) {
5909 		DRM_DEBUG_DRIVER("No FB bound\n");
5910 		return 0;
5911 	}
5912 
5913 	afb = to_amdgpu_framebuffer(new_state->fb);
5914 	obj = new_state->fb->obj[0];
5915 	rbo = gem_to_amdgpu_bo(obj);
5916 	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
5917 	INIT_LIST_HEAD(&list);
5918 
5919 	tv.bo = &rbo->tbo;
5920 	tv.num_shared = 1;
5921 	list_add(&tv.head, &list);
5922 
5923 	r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
5924 	if (r) {
5925 		dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
5926 		return r;
5927 	}
5928 
5929 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
5930 		domain = amdgpu_display_supported_domains(adev, rbo->flags);
5931 	else
5932 		domain = AMDGPU_GEM_DOMAIN_VRAM;
5933 
5934 	r = amdgpu_bo_pin(rbo, domain);
5935 	if (unlikely(r != 0)) {
5936 		if (r != -ERESTARTSYS)
5937 			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
5938 		ttm_eu_backoff_reservation(&ticket, &list);
5939 		return r;
5940 	}
5941 
5942 	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
5943 	if (unlikely(r != 0)) {
5944 		amdgpu_bo_unpin(rbo);
5945 		ttm_eu_backoff_reservation(&ticket, &list);
5946 		DRM_ERROR("%p bind failed\n", rbo);
5947 		return r;
5948 	}
5949 
5950 	ttm_eu_backoff_reservation(&ticket, &list);
5951 
5952 	afb->address = amdgpu_bo_gpu_offset(rbo);
5953 
5954 	amdgpu_bo_ref(rbo);
5955 
5956 	/*
5957 	 * We don't do surface updates on planes that have been newly created,
5958 	 * but we also don't have the afb->address during atomic check.
5959 	 *
5960 	 * Fill in buffer attributes depending on the address here, but only on
5961 	 * newly created planes since they're not being used by DC yet and this
5962 	 * won't modify global state.
5963 	 */
5964 	dm_plane_state_old = to_dm_plane_state(plane->state);
5965 	dm_plane_state_new = to_dm_plane_state(new_state);
5966 
5967 	if (dm_plane_state_new->dc_state &&
5968 	    dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
5969 		struct dc_plane_state *plane_state =
5970 			dm_plane_state_new->dc_state;
5971 		bool force_disable_dcc = !plane_state->dcc.enable;
5972 
5973 		fill_plane_buffer_attributes(
5974 			adev, afb, plane_state->format, plane_state->rotation,
5975 			dm_plane_state_new->tiling_flags,
5976 			&plane_state->tiling_info, &plane_state->plane_size,
5977 			&plane_state->dcc, &plane_state->address,
5978 			dm_plane_state_new->tmz_surface, force_disable_dcc);
5979 	}
5980 
5981 	return 0;
5982 }
5983 
5984 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
5985 				       struct drm_plane_state *old_state)
5986 {
5987 	struct amdgpu_bo *rbo;
5988 	int r;
5989 
5990 	if (!old_state->fb)
5991 		return;
5992 
5993 	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
5994 	r = amdgpu_bo_reserve(rbo, false);
5995 	if (unlikely(r)) {
5996 		DRM_ERROR("failed to reserve rbo before unpin\n");
5997 		return;
5998 	}
5999 
6000 	amdgpu_bo_unpin(rbo);
6001 	amdgpu_bo_unreserve(rbo);
6002 	amdgpu_bo_unref(&rbo);
6003 }
6004 
6005 static int dm_plane_helper_check_state(struct drm_plane_state *state,
6006 				       struct drm_crtc_state *new_crtc_state)
6007 {
6008 	int max_downscale = 0;
6009 	int max_upscale = INT_MAX;
6010 
6011 	/* TODO: These should be checked against DC plane caps */
6012 	return drm_atomic_helper_check_plane_state(
6013 		state, new_crtc_state, max_downscale, max_upscale, true, true);
6014 }
6015 
6016 static int dm_plane_atomic_check(struct drm_plane *plane,
6017 				 struct drm_plane_state *state)
6018 {
6019 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
6020 	struct dc *dc = adev->dm.dc;
6021 	struct dm_plane_state *dm_plane_state;
6022 	struct dc_scaling_info scaling_info;
6023 	struct drm_crtc_state *new_crtc_state;
6024 	int ret;
6025 
6026 	dm_plane_state = to_dm_plane_state(state);
6027 
6028 	if (!dm_plane_state->dc_state)
6029 		return 0;
6030 
6031 	new_crtc_state =
6032 		drm_atomic_get_new_crtc_state(state->state, state->crtc);
6033 	if (!new_crtc_state)
6034 		return -EINVAL;
6035 
6036 	ret = dm_plane_helper_check_state(state, new_crtc_state);
6037 	if (ret)
6038 		return ret;
6039 
6040 	ret = fill_dc_scaling_info(state, &scaling_info);
6041 	if (ret)
6042 		return ret;
6043 
6044 	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
6045 		return 0;
6046 
6047 	return -EINVAL;
6048 }
6049 
6050 static int dm_plane_atomic_async_check(struct drm_plane *plane,
6051 				       struct drm_plane_state *new_plane_state)
6052 {
6053 	/* Only support async updates on cursor planes. */
6054 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
6055 		return -EINVAL;
6056 
6057 	return 0;
6058 }
6059 
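/*
 * Async (cursor) updates bypass the full atomic commit: copy the new
 * coordinates into the current plane state and program the cursor
 * position immediately.
 */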
6060 static void dm_plane_atomic_async_update(struct drm_plane *plane,
6061 					 struct drm_plane_state *new_state)
6062 {
6063 	struct drm_plane_state *old_state =
6064 		drm_atomic_get_old_plane_state(new_state->state, plane);
6065 
6066 	swap(plane->state->fb, new_state->fb);
6067 
6068 	plane->state->src_x = new_state->src_x;
6069 	plane->state->src_y = new_state->src_y;
6070 	plane->state->src_w = new_state->src_w;
6071 	plane->state->src_h = new_state->src_h;
6072 	plane->state->crtc_x = new_state->crtc_x;
6073 	plane->state->crtc_y = new_state->crtc_y;
6074 	plane->state->crtc_w = new_state->crtc_w;
6075 	plane->state->crtc_h = new_state->crtc_h;
6076 
6077 	handle_cursor_update(plane, old_state);
6078 }
6079 
6080 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
6081 	.prepare_fb = dm_plane_helper_prepare_fb,
6082 	.cleanup_fb = dm_plane_helper_cleanup_fb,
6083 	.atomic_check = dm_plane_atomic_check,
6084 	.atomic_async_check = dm_plane_atomic_async_check,
6085 	.atomic_async_update = dm_plane_atomic_async_update
6086 };
6087 
6088 /*
6089  * TODO: these are currently initialized to RGB formats only.
6090  * For future use cases we should either initialize them dynamically based on
6091  * plane capabilities, or initialize this array to all formats, so the internal
6092  * DRM check will succeed, and let DC implement the proper check
6093  */
6094 static const uint32_t rgb_formats[] = {
6095 	DRM_FORMAT_XRGB8888,
6096 	DRM_FORMAT_ARGB8888,
6097 	DRM_FORMAT_RGBA8888,
6098 	DRM_FORMAT_XRGB2101010,
6099 	DRM_FORMAT_XBGR2101010,
6100 	DRM_FORMAT_ARGB2101010,
6101 	DRM_FORMAT_ABGR2101010,
6102 	DRM_FORMAT_XBGR8888,
6103 	DRM_FORMAT_ABGR8888,
6104 	DRM_FORMAT_RGB565,
6105 };
6106 
6107 static const uint32_t overlay_formats[] = {
6108 	DRM_FORMAT_XRGB8888,
6109 	DRM_FORMAT_ARGB8888,
6110 	DRM_FORMAT_RGBA8888,
6111 	DRM_FORMAT_XBGR8888,
6112 	DRM_FORMAT_ABGR8888,
6113 	DRM_FORMAT_RGB565
6114 };
6115 
6116 static const u32 cursor_formats[] = {
6117 	DRM_FORMAT_ARGB8888
6118 };
6119 
6120 static int get_plane_formats(const struct drm_plane *plane,
6121 			     const struct dc_plane_cap *plane_cap,
6122 			     uint32_t *formats, int max_formats)
6123 {
6124 	int i, num_formats = 0;
6125 
6126 	/*
6127 	 * TODO: Query support for each group of formats directly from
6128 	 * DC plane caps. This will require adding more formats to the
6129 	 * caps list.
6130 	 */
6131 
6132 	switch (plane->type) {
6133 	case DRM_PLANE_TYPE_PRIMARY:
6134 		for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
6135 			if (num_formats >= max_formats)
6136 				break;
6137 
6138 			formats[num_formats++] = rgb_formats[i];
6139 		}
6140 
6141 		if (plane_cap && plane_cap->pixel_format_support.nv12)
6142 			formats[num_formats++] = DRM_FORMAT_NV12;
6143 		if (plane_cap && plane_cap->pixel_format_support.p010)
6144 			formats[num_formats++] = DRM_FORMAT_P010;
6145 		if (plane_cap && plane_cap->pixel_format_support.fp16) {
6146 			formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
6147 			formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
6148 			formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
6149 			formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
6150 		}
6151 		break;
6152 
6153 	case DRM_PLANE_TYPE_OVERLAY:
6154 		for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
6155 			if (num_formats >= max_formats)
6156 				break;
6157 
6158 			formats[num_formats++] = overlay_formats[i];
6159 		}
6160 		break;
6161 
6162 	case DRM_PLANE_TYPE_CURSOR:
6163 		for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
6164 			if (num_formats >= max_formats)
6165 				break;
6166 
6167 			formats[num_formats++] = cursor_formats[i];
6168 		}
6169 		break;
6170 	}
6171 
6172 	return num_formats;
6173 }
6174 
6175 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
6176 				struct drm_plane *plane,
6177 				unsigned long possible_crtcs,
6178 				const struct dc_plane_cap *plane_cap)
6179 {
6180 	uint32_t formats[32];
6181 	int num_formats;
6182 	int res = -EPERM;
6183 	unsigned int supported_rotations;
6184 
6185 	num_formats = get_plane_formats(plane, plane_cap, formats,
6186 					ARRAY_SIZE(formats));
6187 
6188 	res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
6189 				       &dm_plane_funcs, formats, num_formats,
6190 				       NULL, plane->type, NULL);
6191 	if (res)
6192 		return res;
6193 
6194 	if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
6195 	    plane_cap && plane_cap->per_pixel_alpha) {
6196 		unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
6197 					  BIT(DRM_MODE_BLEND_PREMULTI);
6198 
6199 		drm_plane_create_alpha_property(plane);
6200 		drm_plane_create_blend_mode_property(plane, blend_caps);
6201 	}
6202 
6203 	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
6204 	    plane_cap &&
6205 	    (plane_cap->pixel_format_support.nv12 ||
6206 	     plane_cap->pixel_format_support.p010)) {
6207 		/* This only affects YUV formats. */
6208 		drm_plane_create_color_properties(
6209 			plane,
6210 			BIT(DRM_COLOR_YCBCR_BT601) |
6211 			BIT(DRM_COLOR_YCBCR_BT709) |
6212 			BIT(DRM_COLOR_YCBCR_BT2020),
6213 			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
6214 			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
6215 			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
6216 	}
6217 
6218 	supported_rotations =
6219 		DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
6220 		DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
6221 
6222 	if (dm->adev->asic_type >= CHIP_BONAIRE)
6223 		drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
6224 						   supported_rotations);
6225 
6226 	drm_plane_helper_add(plane, &dm_plane_helper_funcs);
6227 
6228 	/* Create (reset) the plane state */
6229 	if (plane->funcs->reset)
6230 		plane->funcs->reset(plane);
6231 
6232 	return 0;
6233 }
6234 
6235 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
6236 			       struct drm_plane *plane,
6237 			       uint32_t crtc_index)
6238 {
6239 	struct amdgpu_crtc *acrtc = NULL;
6240 	struct drm_plane *cursor_plane;
6241 
6242 	int res = -ENOMEM;
6243 
6244 	cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
6245 	if (!cursor_plane)
6246 		goto fail;
6247 
6248 	cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
6249 	res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
	if (res)
		goto fail;
6250 
6251 	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
6252 	if (!acrtc)
6253 		goto fail;
6254 
6255 	res = drm_crtc_init_with_planes(
6256 			dm->ddev,
6257 			&acrtc->base,
6258 			plane,
6259 			cursor_plane,
6260 			&amdgpu_dm_crtc_funcs, NULL);
6261 
6262 	if (res)
6263 		goto fail;
6264 
6265 	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
6266 
6267 	/* Create (reset) the CRTC state */
6268 	if (acrtc->base.funcs->reset)
6269 		acrtc->base.funcs->reset(&acrtc->base);
6270 
6271 	acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
6272 	acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
6273 
6274 	acrtc->crtc_id = crtc_index;
6275 	acrtc->base.enabled = false;
6276 	acrtc->otg_inst = -1;
6277 
6278 	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
6279 	drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
6280 				   true, MAX_COLOR_LUT_ENTRIES);
6281 	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
6282 
6283 	return 0;
6284 
6285 fail:
6286 	kfree(acrtc);
6287 	kfree(cursor_plane);
6288 	return res;
6289 }
6290 
6291 
6292 static int to_drm_connector_type(enum signal_type st)
6293 {
6294 	switch (st) {
6295 	case SIGNAL_TYPE_HDMI_TYPE_A:
6296 		return DRM_MODE_CONNECTOR_HDMIA;
6297 	case SIGNAL_TYPE_EDP:
6298 		return DRM_MODE_CONNECTOR_eDP;
6299 	case SIGNAL_TYPE_LVDS:
6300 		return DRM_MODE_CONNECTOR_LVDS;
6301 	case SIGNAL_TYPE_RGB:
6302 		return DRM_MODE_CONNECTOR_VGA;
6303 	case SIGNAL_TYPE_DISPLAY_PORT:
6304 	case SIGNAL_TYPE_DISPLAY_PORT_MST:
6305 		return DRM_MODE_CONNECTOR_DisplayPort;
6306 	case SIGNAL_TYPE_DVI_DUAL_LINK:
6307 	case SIGNAL_TYPE_DVI_SINGLE_LINK:
6308 		return DRM_MODE_CONNECTOR_DVID;
6309 	case SIGNAL_TYPE_VIRTUAL:
6310 		return DRM_MODE_CONNECTOR_VIRTUAL;
6311 
6312 	default:
6313 		return DRM_MODE_CONNECTOR_Unknown;
6314 	}
6315 }
6316 
6317 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
6318 {
6319 	struct drm_encoder *encoder;
6320 
6321 	/* There is only one encoder per connector */
6322 	drm_connector_for_each_possible_encoder(connector, encoder)
6323 		return encoder;
6324 
6325 	return NULL;
6326 }
6327 
6328 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
6329 {
6330 	struct drm_encoder *encoder;
6331 	struct amdgpu_encoder *amdgpu_encoder;
6332 
6333 	encoder = amdgpu_dm_connector_to_encoder(connector);
6334 
6335 	if (encoder == NULL)
6336 		return;
6337 
6338 	amdgpu_encoder = to_amdgpu_encoder(encoder);
6339 
6340 	amdgpu_encoder->native_mode.clock = 0;
6341 
6342 	if (!list_empty(&connector->probed_modes)) {
6343 		struct drm_display_mode *preferred_mode = NULL;
6344 
6345 		list_for_each_entry(preferred_mode,
6346 				    &connector->probed_modes,
6347 				    head) {
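			/*
			 * Only the first entry is inspected: the probed list
			 * was sorted by the caller, so the largest mode comes
			 * first (see amdgpu_dm_connector_ddc_get_modes()).
			 */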
6348 			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
6349 				amdgpu_encoder->native_mode = *preferred_mode;
6350 
6351 			break;
6352 		}
6353 
6354 	}
6355 }
6356 
6357 static struct drm_display_mode *
6358 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
6359 			     char *name,
6360 			     int hdisplay, int vdisplay)
6361 {
6362 	struct drm_device *dev = encoder->dev;
6363 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6364 	struct drm_display_mode *mode = NULL;
6365 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6366 
6367 	mode = drm_mode_duplicate(dev, native_mode);
6368 
6369 	if (mode == NULL)
6370 		return NULL;
6371 
6372 	mode->hdisplay = hdisplay;
6373 	mode->vdisplay = vdisplay;
6374 	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
6375 	strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
6376 
6377 	return mode;
6378 
6379 }
6380 
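/*
 * Add a set of common modes that are smaller than the native mode, so
 * fixed-EDID panels still expose the usual lower resolutions. Modes
 * already reported by the EDID are skipped.
 */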
6381 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
6382 						 struct drm_connector *connector)
6383 {
6384 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6385 	struct drm_display_mode *mode = NULL;
6386 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6387 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6388 				to_amdgpu_dm_connector(connector);
6389 	int i;
6390 	int n;
6391 	struct mode_size {
6392 		char name[DRM_DISPLAY_MODE_LEN];
6393 		int w;
6394 		int h;
6395 	} common_modes[] = {
6396 		{  "640x480",  640,  480},
6397 		{  "800x600",  800,  600},
6398 		{ "1024x768", 1024,  768},
6399 		{ "1280x720", 1280,  720},
6400 		{ "1280x800", 1280,  800},
6401 		{"1280x1024", 1280, 1024},
6402 		{ "1440x900", 1440,  900},
6403 		{"1680x1050", 1680, 1050},
6404 		{"1600x1200", 1600, 1200},
6405 		{"1920x1080", 1920, 1080},
6406 		{"1920x1200", 1920, 1200}
6407 	};
6408 
6409 	n = ARRAY_SIZE(common_modes);
6410 
6411 	for (i = 0; i < n; i++) {
6412 		struct drm_display_mode *curmode = NULL;
6413 		bool mode_existed = false;
6414 
6415 		if (common_modes[i].w > native_mode->hdisplay ||
6416 		    common_modes[i].h > native_mode->vdisplay ||
6417 		   (common_modes[i].w == native_mode->hdisplay &&
6418 		    common_modes[i].h == native_mode->vdisplay))
6419 			continue;
6420 
6421 		list_for_each_entry(curmode, &connector->probed_modes, head) {
6422 			if (common_modes[i].w == curmode->hdisplay &&
6423 			    common_modes[i].h == curmode->vdisplay) {
6424 				mode_existed = true;
6425 				break;
6426 			}
6427 		}
6428 
6429 		if (mode_existed)
6430 			continue;
6431 
6432 		mode = amdgpu_dm_create_common_mode(encoder,
6433 				common_modes[i].name, common_modes[i].w,
6434 				common_modes[i].h);
6435 		if (!mode)
6436 			continue;
6437 
6438 		drm_mode_probed_add(connector, mode);
6439 		amdgpu_dm_connector->num_modes++;
6440 	}
6441 }
6442 
6443 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
6444 					      struct edid *edid)
6445 {
6446 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6447 			to_amdgpu_dm_connector(connector);
6448 
6449 	if (edid) {
6450 		/* empty probed_modes */
6451 		INIT_LIST_HEAD(&connector->probed_modes);
6452 		amdgpu_dm_connector->num_modes =
6453 				drm_add_edid_modes(connector, edid);
6454 
6455 		/* Sort the probed modes before calling
6456 		 * amdgpu_dm_get_native_mode(), since an EDID can have
6457 		 * more than one preferred mode. Modes that appear
6458 		 * later in the probed mode list could be of a higher,
6459 		 * preferred resolution: for example, a 3840x2160
6460 		 * preferred timing in the base EDID and a 4096x2160
6461 		 * preferred resolution in a DID extension block later.
6462 		 */
6463 		drm_mode_sort(&connector->probed_modes);
6464 		amdgpu_dm_get_native_mode(connector);
6465 	} else {
6466 		amdgpu_dm_connector->num_modes = 0;
6467 	}
6468 }
6469 
6470 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
6471 {
6472 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6473 			to_amdgpu_dm_connector(connector);
6474 	struct drm_encoder *encoder;
6475 	struct edid *edid = amdgpu_dm_connector->edid;
6476 
6477 	encoder = amdgpu_dm_connector_to_encoder(connector);
6478 
6479 	if (!edid || !drm_edid_is_valid(edid)) {
6480 		amdgpu_dm_connector->num_modes =
6481 				drm_add_modes_noedid(connector, 640, 480);
6482 	} else {
6483 		amdgpu_dm_connector_ddc_get_modes(connector, edid);
6484 		amdgpu_dm_connector_add_common_modes(encoder, connector);
6485 	}
6486 	amdgpu_dm_fbc_init(connector);
6487 
6488 	return amdgpu_dm_connector->num_modes;
6489 }
6490 
6491 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
6492 				     struct amdgpu_dm_connector *aconnector,
6493 				     int connector_type,
6494 				     struct dc_link *link,
6495 				     int link_index)
6496 {
6497 	struct amdgpu_device *adev = drm_to_adev(dm->ddev);
6498 
6499 	/*
6500 	 * Some of the properties below require access to state, like bpc.
6501 	 * Allocate some default initial connector state with our reset helper.
6502 	 */
6503 	if (aconnector->base.funcs->reset)
6504 		aconnector->base.funcs->reset(&aconnector->base);
6505 
6506 	aconnector->connector_id = link_index;
6507 	aconnector->dc_link = link;
6508 	aconnector->base.interlace_allowed = false;
6509 	aconnector->base.doublescan_allowed = false;
6510 	aconnector->base.stereo_allowed = false;
6511 	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
6512 	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
6513 	aconnector->audio_inst = -1;
6514 	mutex_init(&aconnector->hpd_lock);
6515 
6516 	/*
6517 	 * Configure HPD hot-plug support: connector->polled defaults to 0,
6518 	 * which means HPD hot plug is not supported.
6519 	 */
6520 	switch (connector_type) {
6521 	case DRM_MODE_CONNECTOR_HDMIA:
6522 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6523 		aconnector->base.ycbcr_420_allowed =
6524 			link->link_enc->features.hdmi_ycbcr420_supported;
6525 		break;
6526 	case DRM_MODE_CONNECTOR_DisplayPort:
6527 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6528 		aconnector->base.ycbcr_420_allowed =
6529 			link->link_enc->features.dp_ycbcr420_supported;
6530 		break;
6531 	case DRM_MODE_CONNECTOR_DVID:
6532 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6533 		break;
6534 	default:
6535 		break;
6536 	}
6537 
6538 	drm_object_attach_property(&aconnector->base.base,
6539 				dm->ddev->mode_config.scaling_mode_property,
6540 				DRM_MODE_SCALE_NONE);
6541 
6542 	drm_object_attach_property(&aconnector->base.base,
6543 				adev->mode_info.underscan_property,
6544 				UNDERSCAN_OFF);
6545 	drm_object_attach_property(&aconnector->base.base,
6546 				adev->mode_info.underscan_hborder_property,
6547 				0);
6548 	drm_object_attach_property(&aconnector->base.base,
6549 				adev->mode_info.underscan_vborder_property,
6550 				0);
6551 
6552 	if (!aconnector->mst_port)
6553 		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
6554 
6555 	/* This defaults to the max in the range, but we want 8bpc for non-edp. */
6556 	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
6557 	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
6558 
6559 	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
6560 	    (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
6561 		drm_object_attach_property(&aconnector->base.base,
6562 				adev->mode_info.abm_level_property, 0);
6563 	}
6564 
6565 	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
6566 	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
6567 	    connector_type == DRM_MODE_CONNECTOR_eDP) {
6568 		drm_object_attach_property(
6569 			&aconnector->base.base,
6570 			dm->ddev->mode_config.hdr_output_metadata_property, 0);
6571 
6572 		if (!aconnector->mst_port)
6573 			drm_connector_attach_vrr_capable_property(&aconnector->base);
6574 
6575 #ifdef CONFIG_DRM_AMD_DC_HDCP
6576 		if (adev->dm.hdcp_workqueue)
6577 			drm_connector_attach_content_protection_property(&aconnector->base, true);
6578 #endif
6579 	}
6580 }
6581 
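/*
 * Translate a Linux i2c_msg array into a single DC i2c_command and
 * submit it over the link's DDC channel at a 100 kHz bus speed.
 */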
6582 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
6583 			      struct i2c_msg *msgs, int num)
6584 {
6585 	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
6586 	struct ddc_service *ddc_service = i2c->ddc_service;
6587 	struct i2c_command cmd;
6588 	int i;
6589 	int result = -EIO;
6590 
6591 	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
6592 
6593 	if (!cmd.payloads)
6594 		return result;
6595 
6596 	cmd.number_of_payloads = num;
6597 	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
6598 	cmd.speed = 100;
6599 
6600 	for (i = 0; i < num; i++) {
6601 		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
6602 		cmd.payloads[i].address = msgs[i].addr;
6603 		cmd.payloads[i].length = msgs[i].len;
6604 		cmd.payloads[i].data = msgs[i].buf;
6605 	}
6606 
6607 	if (dc_submit_i2c(
6608 			ddc_service->ctx->dc,
6609 			ddc_service->ddc_pin->hw_info.ddc_channel,
6610 			&cmd))
6611 		result = num;
6612 
6613 	kfree(cmd.payloads);
6614 	return result;
6615 }
6616 
6617 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
6618 {
6619 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
6620 }
6621 
6622 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
6623 	.master_xfer = amdgpu_dm_i2c_xfer,
6624 	.functionality = amdgpu_dm_i2c_func,
6625 };
6626 
6627 static struct amdgpu_i2c_adapter *
6628 create_i2c(struct ddc_service *ddc_service,
6629 	   int link_index,
6630 	   int *res)
6631 {
6632 	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
6633 	struct amdgpu_i2c_adapter *i2c;
6634 
6635 	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
6636 	if (!i2c)
6637 		return NULL;
6638 	i2c->base.owner = THIS_MODULE;
6639 	i2c->base.class = I2C_CLASS_DDC;
6640 	i2c->base.dev.parent = &adev->pdev->dev;
6641 	i2c->base.algo = &amdgpu_dm_i2c_algo;
6642 	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
6643 	i2c_set_adapdata(&i2c->base, i2c);
6644 	i2c->ddc_service = ddc_service;
6645 	i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
6646 
6647 	return i2c;
6648 }
6649 
6650 
6651 /*
6652  * Note: this function assumes that dc_link_detect() was called for the
6653  * dc_link which will be represented by this aconnector.
6654  */
6655 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
6656 				    struct amdgpu_dm_connector *aconnector,
6657 				    uint32_t link_index,
6658 				    struct amdgpu_encoder *aencoder)
6659 {
6660 	int res = 0;
6661 	int connector_type;
6662 	struct dc *dc = dm->dc;
6663 	struct dc_link *link = dc_get_link_at_index(dc, link_index);
6664 	struct amdgpu_i2c_adapter *i2c;
6665 
6666 	link->priv = aconnector;
6667 
6668 	DRM_DEBUG_DRIVER("%s()\n", __func__);
6669 
6670 	i2c = create_i2c(link->ddc, link->link_index, &res);
6671 	if (!i2c) {
6672 		DRM_ERROR("Failed to create i2c adapter data\n");
6673 		return -ENOMEM;
6674 	}
6675 
6676 	aconnector->i2c = i2c;
6677 	res = i2c_add_adapter(&i2c->base);
6678 
6679 	if (res) {
6680 		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
6681 		goto out_free;
6682 	}
6683 
6684 	connector_type = to_drm_connector_type(link->connector_signal);
6685 
6686 	res = drm_connector_init_with_ddc(
6687 			dm->ddev,
6688 			&aconnector->base,
6689 			&amdgpu_dm_connector_funcs,
6690 			connector_type,
6691 			&i2c->base);
6692 
6693 	if (res) {
6694 		DRM_ERROR("connector_init failed\n");
6695 		aconnector->connector_id = -1;
6696 		goto out_free;
6697 	}
6698 
6699 	drm_connector_helper_add(
6700 			&aconnector->base,
6701 			&amdgpu_dm_connector_helper_funcs);
6702 
6703 	amdgpu_dm_connector_init_helper(
6704 		dm,
6705 		aconnector,
6706 		connector_type,
6707 		link,
6708 		link_index);
6709 
6710 	drm_connector_attach_encoder(
6711 		&aconnector->base, &aencoder->base);
6712 
6713 	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
6714 		|| connector_type == DRM_MODE_CONNECTOR_eDP)
6715 		amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
6716 
6717 out_free:
6718 	if (res) {
6719 		kfree(i2c);
6720 		aconnector->i2c = NULL;
6721 	}
6722 	return res;
6723 }
6724 
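/*
 * Any encoder can be driven by any CRTC, so possible_crtcs is simply a
 * mask with one bit set per CRTC instance.
 */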
6725 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
6726 {
6727 	switch (adev->mode_info.num_crtc) {
6728 	case 1:
6729 		return 0x1;
6730 	case 2:
6731 		return 0x3;
6732 	case 3:
6733 		return 0x7;
6734 	case 4:
6735 		return 0xf;
6736 	case 5:
6737 		return 0x1f;
6738 	case 6:
6739 	default:
6740 		return 0x3f;
6741 	}
6742 }
6743 
6744 static int amdgpu_dm_encoder_init(struct drm_device *dev,
6745 				  struct amdgpu_encoder *aencoder,
6746 				  uint32_t link_index)
6747 {
6748 	struct amdgpu_device *adev = drm_to_adev(dev);
6749 
6750 	int res = drm_encoder_init(dev,
6751 				   &aencoder->base,
6752 				   &amdgpu_dm_encoder_funcs,
6753 				   DRM_MODE_ENCODER_TMDS,
6754 				   NULL);
6755 
6756 	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
6757 
6758 	if (!res)
6759 		aencoder->encoder_id = link_index;
6760 	else
6761 		aencoder->encoder_id = -1;
6762 
6763 	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
6764 
6765 	return res;
6766 }
6767 
6768 static void manage_dm_interrupts(struct amdgpu_device *adev,
6769 				 struct amdgpu_crtc *acrtc,
6770 				 bool enable)
6771 {
6772 	/*
6773 	 * We have no guarantee that the frontend index maps to the same
6774 	 * backend index - some even map to more than one.
6775 	 *
6776 	 * TODO: Use a different interrupt or check DC itself for the mapping.
6777 	 */
6778 	int irq_type =
6779 		amdgpu_display_crtc_idx_to_irq_type(
6780 			adev,
6781 			acrtc->crtc_id);
6782 
6783 	if (enable) {
6784 		drm_crtc_vblank_on(&acrtc->base);
6785 		amdgpu_irq_get(
6786 			adev,
6787 			&adev->pageflip_irq,
6788 			irq_type);
6789 	} else {
6790 
6791 		amdgpu_irq_put(
6792 			adev,
6793 			&adev->pageflip_irq,
6794 			irq_type);
6795 		drm_crtc_vblank_off(&acrtc->base);
6796 	}
6797 }
6798 
6799 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
6800 				      struct amdgpu_crtc *acrtc)
6801 {
6802 	int irq_type =
6803 		amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
6804 
6805 	/*
6806 	 * This reads the current state of the IRQ and forcibly reapplies
6807 	 * the setting to the hardware.
6808 	 */
6809 	amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
6810 }
6811 
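/*
 * Report whether the new connector state's scaling/underscan settings
 * differ from the old ones in a way that requires a stream update.
 */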
6812 static bool
6813 is_scaling_state_different(const struct dm_connector_state *dm_state,
6814 			   const struct dm_connector_state *old_dm_state)
6815 {
6816 	if (dm_state->scaling != old_dm_state->scaling)
6817 		return true;
6818 	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
6819 		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
6820 			return true;
	} else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
6822 		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
6823 			return true;
6824 	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
6825 		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
6826 		return true;
6827 	return false;
6828 }
6829 
6830 #ifdef CONFIG_DRM_AMD_DC_HDCP
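/*
 * Decide whether HDCP needs to be (re)enabled for a connector based on the
 * old and new content protection states, normalizing the transitions seen
 * on hot-plug, S3 resume and DPMS along the way.
 */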
6831 static bool is_content_protection_different(struct drm_connector_state *state,
6832 					    const struct drm_connector_state *old_state,
6833 					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
6834 {
6835 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6836 
6837 	if (old_state->hdcp_content_type != state->hdcp_content_type &&
6838 	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
6839 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6840 		return true;
6841 	}
6842 
	/* CP is being re-enabled; ignore this. */
6844 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
6845 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
6846 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
6847 		return false;
6848 	}
6849 
	/* S3 resume case: the old state will always be 0 (UNDESIRED) while the restored state will be ENABLED. */
6851 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
6852 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
6853 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6854 
	/* Check that something is connected/enabled; otherwise we would start
	 * HDCP with nothing connected/enabled (hot-plug, headless S3, DPMS).
	 */
6858 	if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && connector->dpms == DRM_MODE_DPMS_ON &&
6859 	    aconnector->dc_sink != NULL)
6860 		return true;
6861 
6862 	if (old_state->content_protection == state->content_protection)
6863 		return false;
6864 
6865 	if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
6866 		return true;
6867 
6868 	return false;
6869 }
6870 
6871 #endif
6872 static void remove_stream(struct amdgpu_device *adev,
6873 			  struct amdgpu_crtc *acrtc,
6874 			  struct dc_stream_state *stream)
6875 {
	/* This is the update-mode case; reset the CRTC's stream bookkeeping. */
6877 
6878 	acrtc->otg_inst = -1;
6879 	acrtc->enabled = false;
6880 }
6881 
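/*
 * Compute the DC cursor position from the cursor plane state, folding
 * negative on-screen coordinates into the hotspot offsets. Returns -EINVAL
 * if the cursor exceeds the maximum supported size.
 */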
6882 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
6883 			       struct dc_cursor_position *position)
6884 {
6885 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6886 	int x, y;
6887 	int xorigin = 0, yorigin = 0;
6888 
6889 	if (!crtc || !plane->state->fb)
6890 		return 0;
6891 
6892 	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
6893 	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
6894 		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
6895 			  __func__,
6896 			  plane->state->crtc_w,
6897 			  plane->state->crtc_h);
6898 		return -EINVAL;
6899 	}
6900 
6901 	x = plane->state->crtc_x;
6902 	y = plane->state->crtc_y;
6903 
6904 	if (x <= -amdgpu_crtc->max_cursor_width ||
6905 	    y <= -amdgpu_crtc->max_cursor_height)
6906 		return 0;
6907 
6908 	if (x < 0) {
6909 		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
6910 		x = 0;
6911 	}
6912 	if (y < 0) {
6913 		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
6914 		y = 0;
6915 	}
6916 	position->enable = true;
6917 	position->translate_by_source = true;
6918 	position->x = x;
6919 	position->y = y;
6920 	position->x_hotspot = xorigin;
6921 	position->y_hotspot = yorigin;
6922 
6923 	return 0;
6924 }
6925 
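/*
 * Program (or disable) the hardware cursor for the CRTC the cursor plane is
 * attached to, updating both its attributes and its position under the DC
 * lock.
 */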
6926 static void handle_cursor_update(struct drm_plane *plane,
6927 				 struct drm_plane_state *old_plane_state)
6928 {
6929 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
6930 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
6931 	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
6932 	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
6933 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6934 	uint64_t address = afb ? afb->address : 0;
6935 	struct dc_cursor_position position = {0};
6936 	struct dc_cursor_attributes attributes;
6937 	int ret;
6938 
6939 	if (!plane->state->fb && !old_plane_state->fb)
6940 		return;
6941 
6942 	DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
6943 			 __func__,
6944 			 amdgpu_crtc->crtc_id,
6945 			 plane->state->crtc_w,
6946 			 plane->state->crtc_h);
6947 
6948 	ret = get_cursor_position(plane, crtc, &position);
6949 	if (ret)
6950 		return;
6951 
6952 	if (!position.enable) {
6953 		/* turn off cursor */
6954 		if (crtc_state && crtc_state->stream) {
6955 			mutex_lock(&adev->dm.dc_lock);
6956 			dc_stream_set_cursor_position(crtc_state->stream,
6957 						      &position);
6958 			mutex_unlock(&adev->dm.dc_lock);
6959 		}
6960 		return;
6961 	}
6962 
6963 	amdgpu_crtc->cursor_width = plane->state->crtc_w;
6964 	amdgpu_crtc->cursor_height = plane->state->crtc_h;
6965 
6966 	memset(&attributes, 0, sizeof(attributes));
6967 	attributes.address.high_part = upper_32_bits(address);
6968 	attributes.address.low_part  = lower_32_bits(address);
6969 	attributes.width             = plane->state->crtc_w;
6970 	attributes.height            = plane->state->crtc_h;
6971 	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
6972 	attributes.rotation_angle    = 0;
6973 	attributes.attribute_flags.value = 0;
6974 
6975 	/* Enable cursor degamma ROM on DCN3+ for implicit sRGB degamma in DRM
6976 	 * legacy gamma setup.
6977 	 */
6978 	if (crtc_state->cm_is_degamma_srgb &&
6979 	    adev->dm.dc->caps.color.dpp.gamma_corr)
6980 		attributes.attribute_flags.bits.ENABLE_CURSOR_DEGAMMA = 1;
6981 
6982 	attributes.pitch = attributes.width;
6983 
6984 	if (crtc_state->stream) {
6985 		mutex_lock(&adev->dm.dc_lock);
6986 		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
6987 							 &attributes))
6988 			DRM_ERROR("DC failed to set cursor attributes\n");
6989 
6990 		if (!dc_stream_set_cursor_position(crtc_state->stream,
6991 						   &position))
6992 			DRM_ERROR("DC failed to set cursor position\n");
6993 		mutex_unlock(&adev->dm.dc_lock);
6994 	}
6995 }
6996 
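/*
 * Hand the pending pageflip event over to the pageflip interrupt handler.
 * Must be called with the DRM event_lock held.
 */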
6997 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
6998 {
6999 
7000 	assert_spin_locked(&acrtc->base.dev->event_lock);
7001 	WARN_ON(acrtc->event);
7002 
7003 	acrtc->event = acrtc->base.state->event;
7004 
7005 	/* Set the flip status */
7006 	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
7007 
7008 	/* Mark this event as consumed */
7009 	acrtc->base.state->event = NULL;
7010 
7011 	DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
7012 						 acrtc->crtc_id);
7013 }
7014 
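/*
 * Update the VRR parameters and infopacket for a stream around a flip, and
 * mirror the results into the CRTC state and the per-CRTC IRQ parameters.
 */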
7015 static void update_freesync_state_on_stream(
7016 	struct amdgpu_display_manager *dm,
7017 	struct dm_crtc_state *new_crtc_state,
7018 	struct dc_stream_state *new_stream,
7019 	struct dc_plane_state *surface,
7020 	u32 flip_timestamp_in_us)
7021 {
7022 	struct mod_vrr_params vrr_params;
7023 	struct dc_info_packet vrr_infopacket = {0};
7024 	struct amdgpu_device *adev = dm->adev;
7025 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
7026 	unsigned long flags;
7027 
7028 	if (!new_stream)
7029 		return;
7030 
7031 	/*
7032 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
7033 	 * For now it's sufficient to just guard against these conditions.
7034 	 */
7035 
7036 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7037 		return;
7038 
7039 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
	vrr_params = acrtc->dm_irq_params.vrr_params;
7041 
7042 	if (surface) {
7043 		mod_freesync_handle_preflip(
7044 			dm->freesync_module,
7045 			surface,
7046 			new_stream,
7047 			flip_timestamp_in_us,
7048 			&vrr_params);
7049 
7050 		if (adev->family < AMDGPU_FAMILY_AI &&
7051 		    amdgpu_dm_vrr_active(new_crtc_state)) {
7052 			mod_freesync_handle_v_update(dm->freesync_module,
7053 						     new_stream, &vrr_params);
7054 
7055 			/* Need to call this before the frame ends. */
7056 			dc_stream_adjust_vmin_vmax(dm->dc,
7057 						   new_crtc_state->stream,
7058 						   &vrr_params.adjust);
7059 		}
7060 	}
7061 
7062 	mod_freesync_build_vrr_infopacket(
7063 		dm->freesync_module,
7064 		new_stream,
7065 		&vrr_params,
7066 		PACKET_TYPE_VRR,
7067 		TRANSFER_FUNC_UNKNOWN,
7068 		&vrr_infopacket);
7069 
7070 	new_crtc_state->freesync_timing_changed |=
7071 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
7072 			&vrr_params.adjust,
7073 			sizeof(vrr_params.adjust)) != 0);
7074 
7075 	new_crtc_state->freesync_vrr_info_changed |=
7076 		(memcmp(&new_crtc_state->vrr_infopacket,
7077 			&vrr_infopacket,
7078 			sizeof(vrr_infopacket)) != 0);
7079 
7080 	acrtc->dm_irq_params.vrr_params = vrr_params;
7081 	new_crtc_state->vrr_infopacket = vrr_infopacket;
7082 
7083 	new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
7084 	new_stream->vrr_infopacket = vrr_infopacket;
7085 
7086 	if (new_crtc_state->freesync_vrr_info_changed)
7087 		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
7088 			      new_crtc_state->base.crtc->base.id,
7089 			      (int)new_crtc_state->base.vrr_enabled,
7090 			      (int)vrr_params.state);
7091 
7092 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
7093 }
7094 
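/*
 * Recompute the freesync/VRR parameters from the new CRTC state and copy
 * them into the per-CRTC IRQ parameters for access from the DM IRQ handlers.
 */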
7095 static void update_stream_irq_parameters(
7096 	struct amdgpu_display_manager *dm,
7097 	struct dm_crtc_state *new_crtc_state)
7098 {
7099 	struct dc_stream_state *new_stream = new_crtc_state->stream;
7100 	struct mod_vrr_params vrr_params;
7101 	struct mod_freesync_config config = new_crtc_state->freesync_config;
7102 	struct amdgpu_device *adev = dm->adev;
7103 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
7104 	unsigned long flags;
7105 
7106 	if (!new_stream)
7107 		return;
7108 
7109 	/*
7110 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
7111 	 * For now it's sufficient to just guard against these conditions.
7112 	 */
7113 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7114 		return;
7115 
7116 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
7117 	vrr_params = acrtc->dm_irq_params.vrr_params;
7118 
7119 	if (new_crtc_state->vrr_supported &&
7120 	    config.min_refresh_in_uhz &&
7121 	    config.max_refresh_in_uhz) {
7122 		config.state = new_crtc_state->base.vrr_enabled ?
7123 			VRR_STATE_ACTIVE_VARIABLE :
7124 			VRR_STATE_INACTIVE;
7125 	} else {
7126 		config.state = VRR_STATE_UNSUPPORTED;
7127 	}
7128 
7129 	mod_freesync_build_vrr_params(dm->freesync_module,
7130 				      new_stream,
7131 				      &config, &vrr_params);
7132 
7133 	new_crtc_state->freesync_timing_changed |=
7134 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
7135 			&vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
7136 
7137 	new_crtc_state->freesync_config = config;
7138 	/* Copy state for access from DM IRQ handler */
7139 	acrtc->dm_irq_params.freesync_config = config;
7140 	acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
7141 	acrtc->dm_irq_params.vrr_params = vrr_params;
7142 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
7143 }
7144 
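/*
 * Handle VRR off->on and on->off transitions by taking or dropping a vblank
 * reference and toggling the vupdate interrupt accordingly.
 */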
7145 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
7146 					    struct dm_crtc_state *new_state)
7147 {
7148 	bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
7149 	bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
7150 
7151 	if (!old_vrr_active && new_vrr_active) {
		/* Transition VRR inactive -> active:
		 * While VRR is active, we must not disable the vblank irq, as
		 * a reenable after a disable would compute bogus vblank/pflip
		 * timestamps if the reenable happens inside the display
		 * front porch.
		 *
		 * We also need the vupdate irq for the actual core vblank
		 * handling at the end of vblank.
		 */
7160 		dm_set_vupdate_irq(new_state->base.crtc, true);
7161 		drm_crtc_vblank_get(new_state->base.crtc);
7162 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
7163 				 __func__, new_state->base.crtc->base.id);
7164 	} else if (old_vrr_active && !new_vrr_active) {
7165 		/* Transition VRR active -> inactive:
7166 		 * Allow vblank irq disable again for fixed refresh rate.
7167 		 */
7168 		dm_set_vupdate_irq(new_state->base.crtc, false);
7169 		drm_crtc_vblank_put(new_state->base.crtc);
7170 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
7171 				 __func__, new_state->base.crtc->base.id);
7172 	}
7173 }
7174 
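/*
 * Commit all cursor plane updates in the atomic state to the hardware.
 */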
7175 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
7176 {
7177 	struct drm_plane *plane;
7178 	struct drm_plane_state *old_plane_state, *new_plane_state;
7179 	int i;
7180 
7181 	/*
7182 	 * TODO: Make this per-stream so we don't issue redundant updates for
7183 	 * commits with multiple streams.
7184 	 */
7185 	for_each_oldnew_plane_in_state(state, plane, old_plane_state,
7186 				       new_plane_state, i)
7187 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
7188 			handle_cursor_update(plane, old_plane_state);
7189 }
7190 
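/*
 * Build a surface update bundle for all planes on a CRTC, wait on their
 * framebuffer fences and for the target vblank, then commit the bundle to DC
 * as a single stream update, managing PSR and pageflip IRQ state on the way.
 */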
7191 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
7192 				    struct dc_state *dc_state,
7193 				    struct drm_device *dev,
7194 				    struct amdgpu_display_manager *dm,
7195 				    struct drm_crtc *pcrtc,
7196 				    bool wait_for_vblank)
7197 {
7198 	uint32_t i;
7199 	uint64_t timestamp_ns;
7200 	struct drm_plane *plane;
7201 	struct drm_plane_state *old_plane_state, *new_plane_state;
7202 	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
7203 	struct drm_crtc_state *new_pcrtc_state =
7204 			drm_atomic_get_new_crtc_state(state, pcrtc);
7205 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
7206 	struct dm_crtc_state *dm_old_crtc_state =
7207 			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
7208 	int planes_count = 0, vpos, hpos;
7209 	long r;
7210 	unsigned long flags;
7211 	struct amdgpu_bo *abo;
7212 	uint32_t target_vblank, last_flip_vblank;
7213 	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
7214 	bool pflip_present = false;
7215 	struct {
7216 		struct dc_surface_update surface_updates[MAX_SURFACES];
7217 		struct dc_plane_info plane_infos[MAX_SURFACES];
7218 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
7219 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
7220 		struct dc_stream_update stream_update;
7221 	} *bundle;
7222 
7223 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
7224 
7225 	if (!bundle) {
7226 		dm_error("Failed to allocate update bundle\n");
7227 		goto cleanup;
7228 	}
7229 
7230 	/*
7231 	 * Disable the cursor first if we're disabling all the planes.
7232 	 * It'll remain on the screen after the planes are re-enabled
7233 	 * if we don't.
7234 	 */
7235 	if (acrtc_state->active_planes == 0)
7236 		amdgpu_dm_commit_cursors(state);
7237 
7238 	/* update planes when needed */
7239 	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
7240 		struct drm_crtc *crtc = new_plane_state->crtc;
7241 		struct drm_crtc_state *new_crtc_state;
7242 		struct drm_framebuffer *fb = new_plane_state->fb;
7243 		bool plane_needs_flip;
7244 		struct dc_plane_state *dc_plane;
7245 		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
7246 
7247 		/* Cursor plane is handled after stream updates */
7248 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
7249 			continue;
7250 
7251 		if (!fb || !crtc || pcrtc != crtc)
7252 			continue;
7253 
7254 		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
7255 		if (!new_crtc_state->active)
7256 			continue;
7257 
7258 		dc_plane = dm_new_plane_state->dc_state;
7259 		if (!dc_plane)
7260 			continue;
7261 
7262 		bundle->surface_updates[planes_count].surface = dc_plane;
7263 		if (new_pcrtc_state->color_mgmt_changed) {
7264 			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
7265 			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
7266 			bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
7267 		}
7268 
7269 		fill_dc_scaling_info(new_plane_state,
7270 				     &bundle->scaling_infos[planes_count]);
7271 
7272 		bundle->surface_updates[planes_count].scaling_info =
7273 			&bundle->scaling_infos[planes_count];
7274 
7275 		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
7276 
7277 		pflip_present = pflip_present || plane_needs_flip;
7278 
7279 		if (!plane_needs_flip) {
7280 			planes_count += 1;
7281 			continue;
7282 		}
7283 
7284 		abo = gem_to_amdgpu_bo(fb->obj[0]);
7285 
7286 		/*
7287 		 * Wait for all fences on this FB. Do limited wait to avoid
7288 		 * deadlock during GPU reset when this fence will not signal
7289 		 * but we hold reservation lock for the BO.
7290 		 */
7291 		r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
7292 							false,
7293 							msecs_to_jiffies(5000));
7294 		if (unlikely(r <= 0))
7295 			DRM_ERROR("Waiting for fences timed out!");
7296 
7297 		fill_dc_plane_info_and_addr(
7298 			dm->adev, new_plane_state,
7299 			dm_new_plane_state->tiling_flags,
7300 			&bundle->plane_infos[planes_count],
7301 			&bundle->flip_addrs[planes_count].address,
7302 			dm_new_plane_state->tmz_surface, false);
7303 
7304 		DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
7305 				 new_plane_state->plane->index,
7306 				 bundle->plane_infos[planes_count].dcc.enable);
7307 
7308 		bundle->surface_updates[planes_count].plane_info =
7309 			&bundle->plane_infos[planes_count];
7310 
7311 		/*
7312 		 * Only allow immediate flips for fast updates that don't
		 * change FB pitch, DCC state, rotation or mirroring.
7314 		 */
7315 		bundle->flip_addrs[planes_count].flip_immediate =
7316 			crtc->state->async_flip &&
7317 			acrtc_state->update_type == UPDATE_TYPE_FAST;
7318 
7319 		timestamp_ns = ktime_get_ns();
7320 		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
7321 		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
7322 		bundle->surface_updates[planes_count].surface = dc_plane;
7323 
7324 		if (!bundle->surface_updates[planes_count].surface) {
7325 			DRM_ERROR("No surface for CRTC: id=%d\n",
7326 					acrtc_attach->crtc_id);
7327 			continue;
7328 		}
7329 
7330 		if (plane == pcrtc->primary)
7331 			update_freesync_state_on_stream(
7332 				dm,
7333 				acrtc_state,
7334 				acrtc_state->stream,
7335 				dc_plane,
7336 				bundle->flip_addrs[planes_count].flip_timestamp_in_us);
7337 
7338 		DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
7339 				 __func__,
7340 				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
7341 				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
7342 
7343 		planes_count += 1;
7344 
7345 	}
7346 
7347 	if (pflip_present) {
7348 		if (!vrr_active) {
7349 			/* Use old throttling in non-vrr fixed refresh rate mode
7350 			 * to keep flip scheduling based on target vblank counts
7351 			 * working in a backwards compatible way, e.g., for
7352 			 * clients using the GLX_OML_sync_control extension or
7353 			 * DRI3/Present extension with defined target_msc.
7354 			 */
7355 			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
7356 		}
7357 		else {
7358 			/* For variable refresh rate mode only:
7359 			 * Get vblank of last completed flip to avoid > 1 vrr
7360 			 * flips per video frame by use of throttling, but allow
7361 			 * flip programming anywhere in the possibly large
7362 			 * variable vrr vblank interval for fine-grained flip
7363 			 * timing control and more opportunity to avoid stutter
7364 			 * on late submission of flips.
7365 			 */
7366 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7367 			last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
7368 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7369 		}
7370 
7371 		target_vblank = last_flip_vblank + wait_for_vblank;
7372 
7373 		/*
7374 		 * Wait until we're out of the vertical blank period before the one
7375 		 * targeted by the flip
7376 		 */
7377 		while ((acrtc_attach->enabled &&
7378 			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
7379 							    0, &vpos, &hpos, NULL,
7380 							    NULL, &pcrtc->hwmode)
7381 			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
7382 			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
7383 			(int)(target_vblank -
7384 			  amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
7385 			usleep_range(1000, 1100);
7386 		}
7387 
7388 		/**
7389 		 * Prepare the flip event for the pageflip interrupt to handle.
7390 		 *
7391 		 * This only works in the case where we've already turned on the
		 * appropriate hardware blocks (e.g. HUBP), so in the transition case
7393 		 * from 0 -> n planes we have to skip a hardware generated event
7394 		 * and rely on sending it from software.
7395 		 */
7396 		if (acrtc_attach->base.state->event &&
7397 		    acrtc_state->active_planes > 0) {
7398 			drm_crtc_vblank_get(pcrtc);
7399 
7400 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7401 
7402 			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
7403 			prepare_flip_isr(acrtc_attach);
7404 
7405 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7406 		}
7407 
7408 		if (acrtc_state->stream) {
7409 			if (acrtc_state->freesync_vrr_info_changed)
7410 				bundle->stream_update.vrr_infopacket =
7411 					&acrtc_state->stream->vrr_infopacket;
7412 		}
7413 	}
7414 
7415 	/* Update the planes if changed or disable if we don't have any. */
7416 	if ((planes_count || acrtc_state->active_planes == 0) &&
7417 		acrtc_state->stream) {
7418 		bundle->stream_update.stream = acrtc_state->stream;
7419 		if (new_pcrtc_state->mode_changed) {
7420 			bundle->stream_update.src = acrtc_state->stream->src;
7421 			bundle->stream_update.dst = acrtc_state->stream->dst;
7422 		}
7423 
7424 		if (new_pcrtc_state->color_mgmt_changed) {
7425 			/*
7426 			 * TODO: This isn't fully correct since we've actually
7427 			 * already modified the stream in place.
7428 			 */
7429 			bundle->stream_update.gamut_remap =
7430 				&acrtc_state->stream->gamut_remap_matrix;
7431 			bundle->stream_update.output_csc_transform =
7432 				&acrtc_state->stream->csc_color_matrix;
7433 			bundle->stream_update.out_transfer_func =
7434 				acrtc_state->stream->out_transfer_func;
7435 		}
7436 
7437 		acrtc_state->stream->abm_level = acrtc_state->abm_level;
7438 		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
7439 			bundle->stream_update.abm_level = &acrtc_state->abm_level;
7440 
7441 		mutex_lock(&dm->dc_lock);
7442 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7443 				acrtc_state->stream->link->psr_settings.psr_allow_active)
7444 			amdgpu_dm_psr_disable(acrtc_state->stream);
7445 		mutex_unlock(&dm->dc_lock);
7446 
7447 		/*
7448 		 * If FreeSync state on the stream has changed then we need to
7449 		 * re-adjust the min/max bounds now that DC doesn't handle this
7450 		 * as part of commit.
7451 		 */
7452 		if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
7453 		    amdgpu_dm_vrr_active(acrtc_state)) {
7454 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7455 			dc_stream_adjust_vmin_vmax(
7456 				dm->dc, acrtc_state->stream,
7457 				&acrtc_attach->dm_irq_params.vrr_params.adjust);
7458 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7459 		}
7460 		mutex_lock(&dm->dc_lock);
7461 
7462 		dc_commit_updates_for_stream(dm->dc,
7463 						     bundle->surface_updates,
7464 						     planes_count,
7465 						     acrtc_state->stream,
7466 						     &bundle->stream_update,
7467 						     dc_state);
7468 
7469 		/**
7470 		 * Enable or disable the interrupts on the backend.
7471 		 *
7472 		 * Most pipes are put into power gating when unused.
7473 		 *
7474 		 * When power gating is enabled on a pipe we lose the
7475 		 * interrupt enablement state when power gating is disabled.
7476 		 *
7477 		 * So we need to update the IRQ control state in hardware
7478 		 * whenever the pipe turns on (since it could be previously
7479 		 * power gated) or off (since some pipes can't be power gated
7480 		 * on some ASICs).
7481 		 */
7482 		if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
7483 			dm_update_pflip_irq_state(drm_to_adev(dev),
7484 						  acrtc_attach);
7485 
7486 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7487 				acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
7488 				!acrtc_state->stream->link->psr_settings.psr_feature_enabled)
7489 			amdgpu_dm_link_setup_psr(acrtc_state->stream);
7490 		else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
7491 				acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
7492 				!acrtc_state->stream->link->psr_settings.psr_allow_active) {
7493 			amdgpu_dm_psr_enable(acrtc_state->stream);
7494 		}
7495 
7496 		mutex_unlock(&dm->dc_lock);
7497 	}
7498 
7499 	/*
7500 	 * Update cursor state *after* programming all the planes.
7501 	 * This avoids redundant programming in the case where we're going
7502 	 * to be disabling a single plane - those pipes are being disabled.
7503 	 */
7504 	if (acrtc_state->active_planes)
7505 		amdgpu_dm_commit_cursors(state);
7506 
7507 cleanup:
7508 	kfree(bundle);
7509 }
7510 
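/*
 * Notify the audio driver about connector removals and additions implied by
 * the atomic state so that its ELD information stays in sync.
 */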
7511 static void amdgpu_dm_commit_audio(struct drm_device *dev,
7512 				   struct drm_atomic_state *state)
7513 {
7514 	struct amdgpu_device *adev = drm_to_adev(dev);
7515 	struct amdgpu_dm_connector *aconnector;
7516 	struct drm_connector *connector;
7517 	struct drm_connector_state *old_con_state, *new_con_state;
7518 	struct drm_crtc_state *new_crtc_state;
7519 	struct dm_crtc_state *new_dm_crtc_state;
7520 	const struct dc_stream_status *status;
7521 	int i, inst;
7522 
	/* Notify audio device removals. */
7524 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7525 		if (old_con_state->crtc != new_con_state->crtc) {
7526 			/* CRTC changes require notification. */
7527 			goto notify;
7528 		}
7529 
7530 		if (!new_con_state->crtc)
7531 			continue;
7532 
7533 		new_crtc_state = drm_atomic_get_new_crtc_state(
7534 			state, new_con_state->crtc);
7535 
7536 		if (!new_crtc_state)
7537 			continue;
7538 
7539 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7540 			continue;
7541 
7542 	notify:
7543 		aconnector = to_amdgpu_dm_connector(connector);
7544 
7545 		mutex_lock(&adev->dm.audio_lock);
7546 		inst = aconnector->audio_inst;
7547 		aconnector->audio_inst = -1;
7548 		mutex_unlock(&adev->dm.audio_lock);
7549 
7550 		amdgpu_dm_audio_eld_notify(adev, inst);
7551 	}
7552 
7553 	/* Notify audio device additions. */
7554 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
7555 		if (!new_con_state->crtc)
7556 			continue;
7557 
7558 		new_crtc_state = drm_atomic_get_new_crtc_state(
7559 			state, new_con_state->crtc);
7560 
7561 		if (!new_crtc_state)
7562 			continue;
7563 
7564 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7565 			continue;
7566 
7567 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
7568 		if (!new_dm_crtc_state->stream)
7569 			continue;
7570 
7571 		status = dc_stream_get_status(new_dm_crtc_state->stream);
7572 		if (!status)
7573 			continue;
7574 
7575 		aconnector = to_amdgpu_dm_connector(connector);
7576 
7577 		mutex_lock(&adev->dm.audio_lock);
7578 		inst = status->audio_inst;
7579 		aconnector->audio_inst = inst;
7580 		mutex_unlock(&adev->dm.audio_lock);
7581 
7582 		amdgpu_dm_audio_eld_notify(adev, inst);
7583 	}
7584 }
7585 
7586 /*
7587  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
7588  * @crtc_state: the DRM CRTC state
7589  * @stream_state: the DC stream state.
7590  *
7591  * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
7592  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
7593  */
7594 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
7595 						struct dc_stream_state *stream_state)
7596 {
7597 	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
7598 }
7599 
7600 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
7601 				   struct drm_atomic_state *state,
7602 				   bool nonblock)
7603 {
7604 	/*
	 * Add a check here for SoCs that support a hardware cursor plane, to
	 * unset legacy_cursor_update.
7607 	 */
7608 
7609 	return drm_atomic_helper_commit(dev, state, nonblock);
7610 
	/* TODO: Handle EINTR, reenable IRQ */
7612 }
7613 
7614 /**
7615  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
7616  * @state: The atomic state to commit
7617  *
7618  * This will tell DC to commit the constructed DC state from atomic_check,
 * programming the hardware. Any failure here implies a hardware failure, since
7620  * atomic check should have filtered anything non-kosher.
7621  */
7622 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
7623 {
7624 	struct drm_device *dev = state->dev;
7625 	struct amdgpu_device *adev = drm_to_adev(dev);
7626 	struct amdgpu_display_manager *dm = &adev->dm;
7627 	struct dm_atomic_state *dm_state;
7628 	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
7629 	uint32_t i, j;
7630 	struct drm_crtc *crtc;
7631 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7632 	unsigned long flags;
7633 	bool wait_for_vblank = true;
7634 	struct drm_connector *connector;
7635 	struct drm_connector_state *old_con_state, *new_con_state;
7636 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
7637 	int crtc_disable_count = 0;
7638 	bool mode_set_reset_required = false;
7639 
7640 	drm_atomic_helper_update_legacy_modeset_state(dev, state);
7641 
7642 	dm_state = dm_atomic_get_new_state(state);
7643 	if (dm_state && dm_state->context) {
7644 		dc_state = dm_state->context;
7645 	} else {
7646 		/* No state changes, retain current state. */
7647 		dc_state_temp = dc_create_state(dm->dc);
7648 		ASSERT(dc_state_temp);
7649 		dc_state = dc_state_temp;
7650 		dc_resource_state_copy_construct_current(dm->dc, dc_state);
7651 	}
7652 
7653 	for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
7654 				       new_crtc_state, i) {
7655 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7656 
7657 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7658 
7659 		if (old_crtc_state->active &&
7660 		    (!new_crtc_state->active ||
7661 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
7662 			manage_dm_interrupts(adev, acrtc, false);
7663 			dc_stream_release(dm_old_crtc_state->stream);
7664 		}
7665 	}
7666 
7667 	drm_atomic_helper_calc_timestamping_constants(state);
7668 
7669 	/* update changed items */
7670 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7671 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7672 
7673 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7674 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7675 
7676 		DRM_DEBUG_DRIVER(
7677 			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
7678 			"planes_changed:%d, mode_changed:%d,active_changed:%d,"
7679 			"connectors_changed:%d\n",
7680 			acrtc->crtc_id,
7681 			new_crtc_state->enable,
7682 			new_crtc_state->active,
7683 			new_crtc_state->planes_changed,
7684 			new_crtc_state->mode_changed,
7685 			new_crtc_state->active_changed,
7686 			new_crtc_state->connectors_changed);
7687 
7688 		/* Copy all transient state flags into dc state */
7689 		if (dm_new_crtc_state->stream) {
7690 			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
7691 							    dm_new_crtc_state->stream);
7692 		}
7693 
		/* Handles the headless hotplug case, updating new_state and
		 * aconnector as needed.
		 */
7697 
7698 		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
7699 
7700 			DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
7701 
7702 			if (!dm_new_crtc_state->stream) {
7703 				/*
7704 				 * this could happen because of issues with
7705 				 * userspace notifications delivery.
7706 				 * In this case userspace tries to set mode on
7707 				 * display which is disconnected in fact.
7708 				 * dc_sink is NULL in this case on aconnector.
7709 				 * We expect reset mode will come soon.
7710 				 *
7711 				 * This can also happen when unplug is done
7712 				 * during resume sequence ended
7713 				 *
7714 				 * In this case, we want to pretend we still
7715 				 * have a sink to keep the pipe running so that
7716 				 * hw state is consistent with the sw state
7717 				 */
7718 				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
7719 						__func__, acrtc->base.base.id);
7720 				continue;
7721 			}
7722 
7723 			if (dm_old_crtc_state->stream)
7724 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7725 
7726 			pm_runtime_get_noresume(dev->dev);
7727 
7728 			acrtc->enabled = true;
7729 			acrtc->hw_mode = new_crtc_state->mode;
7730 			crtc->hwmode = new_crtc_state->mode;
7731 			mode_set_reset_required = true;
7732 		} else if (modereset_required(new_crtc_state)) {
7733 			DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
7734 			/* i.e. reset mode */
7735 			if (dm_old_crtc_state->stream)
7736 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7737 			mode_set_reset_required = true;
7738 		}
7739 	} /* for_each_crtc_in_state() */
7740 
7741 	if (dc_state) {
		/* If there was a mode set or reset, disable eDP PSR. */
7743 		if (mode_set_reset_required)
7744 			amdgpu_dm_psr_disable_all(dm);
7745 
7746 		dm_enable_per_frame_crtc_master_sync(dc_state);
7747 		mutex_lock(&dm->dc_lock);
7748 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
7749 		mutex_unlock(&dm->dc_lock);
7750 	}
7751 
7752 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7753 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7754 
7755 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7756 
7757 		if (dm_new_crtc_state->stream != NULL) {
7758 			const struct dc_stream_status *status =
7759 					dc_stream_get_status(dm_new_crtc_state->stream);
7760 
7761 			if (!status)
7762 				status = dc_stream_get_status_from_state(dc_state,
7763 									 dm_new_crtc_state->stream);
7764 			if (!status)
7765 				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
7766 			else
7767 				acrtc->otg_inst = status->primary_otg_inst;
7768 		}
7769 	}
7770 #ifdef CONFIG_DRM_AMD_DC_HDCP
7771 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7772 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7773 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7774 		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7775 
7776 		new_crtc_state = NULL;
7777 
7778 		if (acrtc)
7779 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7780 
7781 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7782 
7783 		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
7784 		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
7785 			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
7786 			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7787 			continue;
7788 		}
7789 
7790 		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
7791 			hdcp_update_display(
7792 				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
7793 				new_con_state->hdcp_content_type,
7794 				new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED ? true
7795 													 : false);
7796 	}
7797 #endif
7798 
7799 	/* Handle connector state changes */
7800 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7801 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7802 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
7803 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7804 		struct dc_surface_update dummy_updates[MAX_SURFACES];
7805 		struct dc_stream_update stream_update;
7806 		struct dc_info_packet hdr_packet;
7807 		struct dc_stream_status *status = NULL;
7808 		bool abm_changed, hdr_changed, scaling_changed;
7809 
7810 		memset(&dummy_updates, 0, sizeof(dummy_updates));
7811 		memset(&stream_update, 0, sizeof(stream_update));
7812 
7813 		if (acrtc) {
7814 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7815 			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
7816 		}
7817 
7818 		/* Skip any modesets/resets */
7819 		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
7820 			continue;
7821 
7822 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7823 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7824 
7825 		scaling_changed = is_scaling_state_different(dm_new_con_state,
7826 							     dm_old_con_state);
7827 
7828 		abm_changed = dm_new_crtc_state->abm_level !=
7829 			      dm_old_crtc_state->abm_level;
7830 
7831 		hdr_changed =
7832 			is_hdr_metadata_different(old_con_state, new_con_state);
7833 
7834 		if (!scaling_changed && !abm_changed && !hdr_changed)
7835 			continue;
7836 
7837 		stream_update.stream = dm_new_crtc_state->stream;
7838 		if (scaling_changed) {
7839 			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
7840 					dm_new_con_state, dm_new_crtc_state->stream);
7841 
7842 			stream_update.src = dm_new_crtc_state->stream->src;
7843 			stream_update.dst = dm_new_crtc_state->stream->dst;
7844 		}
7845 
7846 		if (abm_changed) {
7847 			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
7848 
7849 			stream_update.abm_level = &dm_new_crtc_state->abm_level;
7850 		}
7851 
7852 		if (hdr_changed) {
7853 			fill_hdr_info_packet(new_con_state, &hdr_packet);
7854 			stream_update.hdr_static_metadata = &hdr_packet;
7855 		}
7856 
7857 		status = dc_stream_get_status(dm_new_crtc_state->stream);
7858 		WARN_ON(!status);
7859 		WARN_ON(!status->plane_count);
7860 
7861 		/*
7862 		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
7863 		 * Here we create an empty update on each plane.
7864 		 * To fix this, DC should permit updating only stream properties.
7865 		 */
7866 		for (j = 0; j < status->plane_count; j++)
7867 			dummy_updates[j].surface = status->plane_states[0];
7868 
7869 
7870 		mutex_lock(&dm->dc_lock);
7871 		dc_commit_updates_for_stream(dm->dc,
7872 						     dummy_updates,
7873 						     status->plane_count,
7874 						     dm_new_crtc_state->stream,
7875 						     &stream_update,
7876 						     dc_state);
7877 		mutex_unlock(&dm->dc_lock);
7878 	}
7879 
7880 	/* Count number of newly disabled CRTCs for dropping PM refs later. */
7881 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
7882 				      new_crtc_state, i) {
7883 		if (old_crtc_state->active && !new_crtc_state->active)
7884 			crtc_disable_count++;
7885 
7886 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7887 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7888 
7889 		/* For freesync config update on crtc state and params for irq */
7890 		update_stream_irq_parameters(dm, dm_new_crtc_state);
7891 
7892 		/* Handle vrr on->off / off->on transitions */
7893 		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
7894 						dm_new_crtc_state);
7895 	}
7896 
7897 	/**
7898 	 * Enable interrupts for CRTCs that are newly enabled or went through
7899 	 * a modeset. It was intentionally deferred until after the front end
7900 	 * state was modified to wait until the OTG was on and so the IRQ
7901 	 * handlers didn't access stale or invalid state.
7902 	 */
7903 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7904 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7905 
7906 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7907 
7908 		if (new_crtc_state->active &&
7909 		    (!old_crtc_state->active ||
7910 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
7911 			dc_stream_retain(dm_new_crtc_state->stream);
7912 			acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
7913 			manage_dm_interrupts(adev, acrtc, true);
7914 
7915 #ifdef CONFIG_DEBUG_FS
7916 			/**
7917 			 * Frontend may have changed so reapply the CRC capture
7918 			 * settings for the stream.
7919 			 */
7920 			dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7921 
7922 			if (amdgpu_dm_is_valid_crc_source(dm_new_crtc_state->crc_src)) {
7923 				amdgpu_dm_crtc_configure_crc_source(
7924 					crtc, dm_new_crtc_state,
7925 					dm_new_crtc_state->crc_src);
7926 			}
7927 #endif
7928 		}
7929 	}
7930 
7931 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
7932 		if (new_crtc_state->async_flip)
7933 			wait_for_vblank = false;
7934 
7935 	/* update planes when needed per crtc*/
7936 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
7937 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7938 
7939 		if (dm_new_crtc_state->stream)
7940 			amdgpu_dm_commit_planes(state, dc_state, dev,
7941 						dm, crtc, wait_for_vblank);
7942 	}
7943 
7944 	/* Update audio instances for each connector. */
7945 	amdgpu_dm_commit_audio(dev, state);
7946 
7947 	/*
	 * Send a vblank event for every event not handled in the flip path,
	 * and mark each event consumed for drm_atomic_helper_commit_hw_done().
7950 	 */
7951 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
7952 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7953 
7954 		if (new_crtc_state->event)
7955 			drm_send_event_locked(dev, &new_crtc_state->event->base);
7956 
7957 		new_crtc_state->event = NULL;
7958 	}
7959 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
7960 
7961 	/* Signal HW programming completion */
7962 	drm_atomic_helper_commit_hw_done(state);
7963 
7964 	if (wait_for_vblank)
7965 		drm_atomic_helper_wait_for_flip_done(dev, state);
7966 
7967 	drm_atomic_helper_cleanup_planes(dev, state);
7968 
7969 	/*
7970 	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
7971 	 * so we can put the GPU into runtime suspend if we're not driving any
7972 	 * displays anymore
7973 	 */
7974 	for (i = 0; i < crtc_disable_count; i++)
7975 		pm_runtime_put_autosuspend(dev->dev);
7976 	pm_runtime_mark_last_busy(dev->dev);
7977 
7978 	if (dc_state_temp)
7979 		dc_release_state(dc_state_temp);
7980 }
7981 
7982 
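/*
 * Construct a minimal atomic state covering the connector, its CRTC and the
 * primary plane, force mode_changed, and commit it to restore the previous
 * display configuration.
 */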
7983 static int dm_force_atomic_commit(struct drm_connector *connector)
7984 {
7985 	int ret = 0;
7986 	struct drm_device *ddev = connector->dev;
7987 	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
7988 	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
7989 	struct drm_plane *plane = disconnected_acrtc->base.primary;
7990 	struct drm_connector_state *conn_state;
7991 	struct drm_crtc_state *crtc_state;
7992 	struct drm_plane_state *plane_state;
7993 
7994 	if (!state)
7995 		return -ENOMEM;
7996 
7997 	state->acquire_ctx = ddev->mode_config.acquire_ctx;
7998 
7999 	/* Construct an atomic state to restore previous display setting */
8000 
8001 	/*
8002 	 * Attach connectors to drm_atomic_state
8003 	 */
8004 	conn_state = drm_atomic_get_connector_state(state, connector);
8005 
8006 	ret = PTR_ERR_OR_ZERO(conn_state);
8007 	if (ret)
8008 		goto out;
8009 
8010 	/* Attach crtc to drm_atomic_state*/
8011 	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
8012 
8013 	ret = PTR_ERR_OR_ZERO(crtc_state);
8014 	if (ret)
8015 		goto out;
8016 
8017 	/* force a restore */
8018 	crtc_state->mode_changed = true;
8019 
8020 	/* Attach plane to drm_atomic_state */
8021 	plane_state = drm_atomic_get_plane_state(state, plane);
8022 
8023 	ret = PTR_ERR_OR_ZERO(plane_state);
8024 	if (ret)
8025 		goto out;
8026 
8027 	/* Call commit internally with the state we just constructed */
8028 	ret = drm_atomic_commit(state);
8029 
8030 out:
8031 	drm_atomic_state_put(state);
8032 	if (ret)
8033 		DRM_ERROR("Restoring old state failed with %i\n", ret);
8034 
8035 	return ret;
8036 }
8037 
8038 /*
 * This function handles all cases when a set mode does not come upon hotplug.
 * This includes when a display is unplugged and then plugged back into the
 * same port, and when running without usermode desktop manager support.
8042  */
8043 void dm_restore_drm_connector_state(struct drm_device *dev,
8044 				    struct drm_connector *connector)
8045 {
8046 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8047 	struct amdgpu_crtc *disconnected_acrtc;
8048 	struct dm_crtc_state *acrtc_state;
8049 
8050 	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
8051 		return;
8052 
8053 	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
8054 	if (!disconnected_acrtc)
8055 		return;
8056 
8057 	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
8058 	if (!acrtc_state->stream)
8059 		return;
8060 
8061 	/*
	 * If the previous sink is not released and differs from the current
	 * one, we deduce we are in a state where we cannot rely on a usermode
	 * call to turn on the display, so we do it here.
8065 	 */
8066 	if (acrtc_state->stream->sink != aconnector->dc_sink)
8067 		dm_force_atomic_commit(&aconnector->base);
8068 }
8069 
8070 /*
 * Grabs all modesetting locks to serialize against any blocking commits,
 * and waits for completion of all non-blocking commits.
8073  */
8074 static int do_aquire_global_lock(struct drm_device *dev,
8075 				 struct drm_atomic_state *state)
8076 {
8077 	struct drm_crtc *crtc;
8078 	struct drm_crtc_commit *commit;
8079 	long ret;
8080 
8081 	/*
	 * Adding all modeset locks to the acquire_ctx will ensure that when
	 * the framework releases it, the extra locks we take here will be
	 * released too.
8085 	 */
8086 	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
8087 	if (ret)
8088 		return ret;
8089 
8090 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
8091 		spin_lock(&crtc->commit_lock);
8092 		commit = list_first_entry_or_null(&crtc->commit_list,
8093 				struct drm_crtc_commit, commit_entry);
8094 		if (commit)
8095 			drm_crtc_commit_get(commit);
8096 		spin_unlock(&crtc->commit_lock);
8097 
8098 		if (!commit)
8099 			continue;
8100 
8101 		/*
8102 		 * Make sure all pending HW programming completed and
8103 		 * page flips done
8104 		 */
8105 		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
8106 
8107 		if (ret > 0)
8108 			ret = wait_for_completion_interruptible_timeout(
8109 					&commit->flip_done, 10*HZ);
8110 
8111 		if (ret == 0)
8112 			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
8113 				  "timed out\n", crtc->base.id, crtc->name);
8114 
8115 		drm_crtc_commit_put(commit);
8116 	}
8117 
8118 	return ret < 0 ? ret : 0;
8119 }
8120 
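/*
 * Derive the freesync configuration for a CRTC from the connector's
 * capabilities and the mode's refresh rate, activating VRR when it is both
 * supported and enabled.
 */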
8121 static void get_freesync_config_for_crtc(
8122 	struct dm_crtc_state *new_crtc_state,
8123 	struct dm_connector_state *new_con_state)
8124 {
8125 	struct mod_freesync_config config = {0};
8126 	struct amdgpu_dm_connector *aconnector =
8127 			to_amdgpu_dm_connector(new_con_state->base.connector);
8128 	struct drm_display_mode *mode = &new_crtc_state->base.mode;
8129 	int vrefresh = drm_mode_vrefresh(mode);
8130 
8131 	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
8132 					vrefresh >= aconnector->min_vfreq &&
8133 					vrefresh <= aconnector->max_vfreq;
8134 
8135 	if (new_crtc_state->vrr_supported) {
8136 		new_crtc_state->stream->ignore_msa_timing_param = true;
8137 		config.state = new_crtc_state->base.vrr_enabled ?
8138 				VRR_STATE_ACTIVE_VARIABLE :
8139 				VRR_STATE_INACTIVE;
8140 		config.min_refresh_in_uhz =
8141 				aconnector->min_vfreq * 1000000;
8142 		config.max_refresh_in_uhz =
8143 				aconnector->max_vfreq * 1000000;
8144 		config.vsif_supported = true;
8145 		config.btr = true;
8146 	}
8147 
8148 	new_crtc_state->freesync_config = config;
8149 }
8150 
8151 static void reset_freesync_config_for_crtc(
8152 	struct dm_crtc_state *new_crtc_state)
8153 {
8154 	new_crtc_state->vrr_supported = false;
8155 
8156 	memset(&new_crtc_state->vrr_infopacket, 0,
8157 	       sizeof(new_crtc_state->vrr_infopacket));
8158 }
8159 
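/*
 * Add, remove or revalidate the DC stream backing a CRTC according to the
 * new atomic state, and apply the stream updates (scaling, ABM, color
 * management, freesync) that do not require a full modeset.
 */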
8160 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
8161 				struct drm_atomic_state *state,
8162 				struct drm_crtc *crtc,
8163 				struct drm_crtc_state *old_crtc_state,
8164 				struct drm_crtc_state *new_crtc_state,
8165 				bool enable,
8166 				bool *lock_and_validation_needed)
8167 {
8168 	struct dm_atomic_state *dm_state = NULL;
8169 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8170 	struct dc_stream_state *new_stream;
8171 	int ret = 0;
8172 
8173 	/*
	 * TODO: Move this code into dm_crtc_atomic_check once we get rid of
	 * dc_validation_set and update changed items there.
8176 	 */
8177 	struct amdgpu_crtc *acrtc = NULL;
8178 	struct amdgpu_dm_connector *aconnector = NULL;
8179 	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
8180 	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
8181 
8182 	new_stream = NULL;
8183 
8184 	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8185 	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8186 	acrtc = to_amdgpu_crtc(crtc);
8187 	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
8188 
8189 	/* TODO This hack should go away */
8190 	if (aconnector && enable) {
8191 		/* Make sure fake sink is created in plug-in scenario */
8192 		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
8193 							    &aconnector->base);
8194 		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
8195 							    &aconnector->base);
8196 
8197 		if (IS_ERR(drm_new_conn_state)) {
8198 			ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
8199 			goto fail;
8200 		}
8201 
8202 		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
8203 		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
8204 
8205 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8206 			goto skip_modeset;
8207 
8208 		new_stream = create_validate_stream_for_sink(aconnector,
8209 							     &new_crtc_state->mode,
8210 							     dm_new_conn_state,
8211 							     dm_old_crtc_state->stream);
8212 
8213 		/*
		 * We can have no stream on ACTION_SET if a display
		 * was disconnected during S3. In this case it is not an
		 * error: the OS will be updated after detection, and
		 * will do the right thing on the next atomic commit.
8218 		 */
8219 
8220 		if (!new_stream) {
8221 			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8222 					__func__, acrtc->base.base.id);
8223 			ret = -ENOMEM;
8224 			goto fail;
8225 		}
8226 
8227 		/*
8228 		 * TODO: Check VSDB bits to decide whether this should
8229 		 * be enabled or not.
8230 		 */
8231 		new_stream->triggered_crtc_reset.enabled =
8232 			dm->force_timing_sync;
8233 
8234 		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8235 
8236 		ret = fill_hdr_info_packet(drm_new_conn_state,
8237 					   &new_stream->hdr_static_metadata);
8238 		if (ret)
8239 			goto fail;
8240 
8241 		/*
8242 		 * If we already removed the old stream from the context
8243 		 * (and set the new stream to NULL) then we can't reuse
8244 		 * the old stream even if the stream and scaling are unchanged.
8245 		 * We'll hit the BUG_ON and black screen.
8246 		 *
8247 		 * TODO: Refactor this function to allow this check to work
8248 		 * in all conditions.
8249 		 */
8250 		if (dm_new_crtc_state->stream &&
8251 		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
8252 		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
8253 			new_crtc_state->mode_changed = false;
8254 			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
8255 					 new_crtc_state->mode_changed);
8256 		}
8257 	}
8258 
8259 	/* mode_changed flag may get updated above, need to check again */
8260 	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8261 		goto skip_modeset;
8262 
8263 	DRM_DEBUG_DRIVER(
8264 		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8265 		"planes_changed:%d, mode_changed:%d,active_changed:%d,"
8266 		"connectors_changed:%d\n",
8267 		acrtc->crtc_id,
8268 		new_crtc_state->enable,
8269 		new_crtc_state->active,
8270 		new_crtc_state->planes_changed,
8271 		new_crtc_state->mode_changed,
8272 		new_crtc_state->active_changed,
8273 		new_crtc_state->connectors_changed);
8274 
8275 	/* Remove stream for any changed/disabled CRTC */
8276 	if (!enable) {
8277 
8278 		if (!dm_old_crtc_state->stream)
8279 			goto skip_modeset;
8280 
8281 		ret = dm_atomic_get_state(state, &dm_state);
8282 		if (ret)
8283 			goto fail;
8284 
8285 		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
8286 				crtc->base.id);
8287 
8288 		/* i.e. reset mode */
8289 		if (dc_remove_stream_from_ctx(
8290 				dm->dc,
8291 				dm_state->context,
8292 				dm_old_crtc_state->stream) != DC_OK) {
8293 			ret = -EINVAL;
8294 			goto fail;
8295 		}
8296 
8297 		dc_stream_release(dm_old_crtc_state->stream);
8298 		dm_new_crtc_state->stream = NULL;
8299 
8300 		reset_freesync_config_for_crtc(dm_new_crtc_state);
8301 
8302 		*lock_and_validation_needed = true;
8303 
8304 	} else {/* Add stream for any updated/enabled CRTC */
8305 		/*
		 * Quick fix to prevent a NULL pointer dereference on
		 * new_stream when MST connectors added in daisy-chained mode
		 * are not found in the existing crtc_state.
		 * TODO: dig out the root cause of this.
8309 		 */
8310 		if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
8311 			goto skip_modeset;
8312 
8313 		if (modereset_required(new_crtc_state))
8314 			goto skip_modeset;
8315 
8316 		if (modeset_required(new_crtc_state, new_stream,
8317 				     dm_old_crtc_state->stream)) {
8318 
8319 			WARN_ON(dm_new_crtc_state->stream);
8320 
8321 			ret = dm_atomic_get_state(state, &dm_state);
8322 			if (ret)
8323 				goto fail;
8324 
8325 			dm_new_crtc_state->stream = new_stream;
8326 
8327 			dc_stream_retain(new_stream);
8328 
8329 			DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
8330 						crtc->base.id);
8331 
8332 			if (dc_add_stream_to_ctx(
8333 					dm->dc,
8334 					dm_state->context,
8335 					dm_new_crtc_state->stream) != DC_OK) {
8336 				ret = -EINVAL;
8337 				goto fail;
8338 			}
8339 
8340 			*lock_and_validation_needed = true;
8341 		}
8342 	}
8343 
8344 skip_modeset:
8345 	/* Release extra reference */
8346 	if (new_stream)
		dc_stream_release(new_stream);
8348 
8349 	/*
8350 	 * We want to do dc stream updates that do not require a
8351 	 * full modeset below.
8352 	 */
8353 	if (!(enable && aconnector && new_crtc_state->active))
8354 		return 0;
8355 	/*
	 * Given the above conditions, the dc state cannot be NULL because:
	 * 1. we're in the process of enabling CRTCs (the stream has just been
	 *    added to the dc context, or is already in it),
	 * 2. the CRTC has a valid connector attached, and
	 * 3. the CRTC is currently active and enabled.
8361 	 * => The dc stream state currently exists.
8362 	 */
8363 	BUG_ON(dm_new_crtc_state->stream == NULL);
8364 
8365 	/* Scaling or underscan settings */
8366 	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
8367 				drm_atomic_crtc_needs_modeset(new_crtc_state))
8368 		update_stream_scaling_settings(
8369 			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
8370 
8371 	/* ABM settings */
8372 	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8373 
8374 	/*
8375 	 * Color management settings. We also update color properties
8376 	 * when a modeset is needed, to ensure it gets reprogrammed.
8377 	 */
8378 	if (dm_new_crtc_state->base.color_mgmt_changed ||
8379 	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8380 		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
8381 		if (ret)
8382 			goto fail;
8383 	}
8384 
8385 	/* Update Freesync settings. */
8386 	get_freesync_config_for_crtc(dm_new_crtc_state,
8387 				     dm_new_conn_state);
8388 
8389 	return ret;
8390 
8391 fail:
8392 	if (new_stream)
8393 		dc_stream_release(new_stream);
8394 	return ret;
8395 }
8396 
8397 static bool should_reset_plane(struct drm_atomic_state *state,
8398 			       struct drm_plane *plane,
8399 			       struct drm_plane_state *old_plane_state,
8400 			       struct drm_plane_state *new_plane_state)
8401 {
8402 	struct drm_plane *other;
8403 	struct drm_plane_state *old_other_state, *new_other_state;
8404 	struct drm_crtc_state *new_crtc_state;
8405 	int i;
8406 
8407 	/*
8408 	 * TODO: Remove this hack once the checks below are sufficient
8409 	 * enough to determine when we need to reset all the planes on
8410 	 * the stream.
8411 	 */
8412 	if (state->allow_modeset)
8413 		return true;
8414 
8415 	/* Exit early if we know that we're adding or removing the plane. */
8416 	if (old_plane_state->crtc != new_plane_state->crtc)
8417 		return true;
8418 
	/* old_crtc == new_crtc == NULL: the plane is not in any context. */
8420 	if (!new_plane_state->crtc)
8421 		return false;
8422 
8423 	new_crtc_state =
8424 		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
8425 
8426 	if (!new_crtc_state)
8427 		return true;
8428 
8429 	/* CRTC Degamma changes currently require us to recreate planes. */
8430 	if (new_crtc_state->color_mgmt_changed)
8431 		return true;
8432 
8433 	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
8434 		return true;
8435 
8436 	/*
8437 	 * If there are any new primary or overlay planes being added or
8438 	 * removed then the z-order can potentially change. To ensure
8439 	 * correct z-order and pipe acquisition the current DC architecture
8440 	 * requires us to remove and recreate all existing planes.
8441 	 *
8442 	 * TODO: Come up with a more elegant solution for this.
8443 	 */
8444 	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
8445 		struct dm_plane_state *old_dm_plane_state, *new_dm_plane_state;
8446 
8447 		if (other->type == DRM_PLANE_TYPE_CURSOR)
8448 			continue;
8449 
8450 		if (old_other_state->crtc != new_plane_state->crtc &&
8451 		    new_other_state->crtc != new_plane_state->crtc)
8452 			continue;
8453 
8454 		if (old_other_state->crtc != new_other_state->crtc)
8455 			return true;
8456 
8457 		/* Src/dst size and scaling updates. */
8458 		if (old_other_state->src_w != new_other_state->src_w ||
8459 		    old_other_state->src_h != new_other_state->src_h ||
8460 		    old_other_state->crtc_w != new_other_state->crtc_w ||
8461 		    old_other_state->crtc_h != new_other_state->crtc_h)
8462 			return true;
8463 
8464 		/* Rotation / mirroring updates. */
8465 		if (old_other_state->rotation != new_other_state->rotation)
8466 			return true;
8467 
8468 		/* Blending updates. */
8469 		if (old_other_state->pixel_blend_mode !=
8470 		    new_other_state->pixel_blend_mode)
8471 			return true;
8472 
8473 		/* Alpha updates. */
8474 		if (old_other_state->alpha != new_other_state->alpha)
8475 			return true;
8476 
8477 		/* Colorspace changes. */
8478 		if (old_other_state->color_range != new_other_state->color_range ||
8479 		    old_other_state->color_encoding != new_other_state->color_encoding)
8480 			return true;
8481 
8482 		/* Framebuffer checks fall at the end. */
8483 		if (!old_other_state->fb || !new_other_state->fb)
8484 			continue;
8485 
8486 		/* Pixel format changes can require bandwidth updates. */
8487 		if (old_other_state->fb->format != new_other_state->fb->format)
8488 			return true;
8489 
8490 		old_dm_plane_state = to_dm_plane_state(old_other_state);
8491 		new_dm_plane_state = to_dm_plane_state(new_other_state);
8492 
8493 		/* Tiling and DCC changes also require bandwidth updates. */
8494 		if (old_dm_plane_state->tiling_flags !=
8495 		    new_dm_plane_state->tiling_flags)
8496 			return true;
8497 	}
8498 
8499 	return false;
8500 }
8501 
8502 static int dm_update_plane_state(struct dc *dc,
8503 				 struct drm_atomic_state *state,
8504 				 struct drm_plane *plane,
8505 				 struct drm_plane_state *old_plane_state,
8506 				 struct drm_plane_state *new_plane_state,
8507 				 bool enable,
8508 				 bool *lock_and_validation_needed)
8509 {
8510 
8511 	struct dm_atomic_state *dm_state = NULL;
8512 	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
8513 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8514 	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
8515 	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
8516 	struct amdgpu_crtc *new_acrtc;
8517 	bool needs_reset;
	int ret = 0;

	new_plane_crtc = new_plane_state->crtc;
8522 	old_plane_crtc = old_plane_state->crtc;
8523 	dm_new_plane_state = to_dm_plane_state(new_plane_state);
8524 	dm_old_plane_state = to_dm_plane_state(old_plane_state);
8525 
	/* TODO: Implement a better atomic check for the cursor plane. */
8527 	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
8528 		if (!enable || !new_plane_crtc ||
8529 			drm_atomic_plane_disabling(plane->state, new_plane_state))
8530 			return 0;
8531 
8532 		new_acrtc = to_amdgpu_crtc(new_plane_crtc);
8533 
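		/*
		 * Note: max_cursor_width/height come from the ASIC
		 * capabilities (typically 128 or 256 pixels, depending on
		 * the hardware generation).
		 */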
8534 		if ((new_plane_state->crtc_w > new_acrtc->max_cursor_width) ||
8535 			(new_plane_state->crtc_h > new_acrtc->max_cursor_height)) {
8536 			DRM_DEBUG_ATOMIC("Bad cursor size %d x %d\n",
8537 							 new_plane_state->crtc_w, new_plane_state->crtc_h);
8538 			return -EINVAL;
8539 		}
8540 
8541 		return 0;
8542 	}
8543 
8544 	needs_reset = should_reset_plane(state, plane, old_plane_state,
8545 					 new_plane_state);
8546 
8547 	/* Remove any changed/removed planes */
8548 	if (!enable) {
8549 		if (!needs_reset)
8550 			return 0;
8551 
8552 		if (!old_plane_crtc)
8553 			return 0;
8554 
8555 		old_crtc_state = drm_atomic_get_old_crtc_state(
8556 				state, old_plane_crtc);
8557 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8558 
8559 		if (!dm_old_crtc_state->stream)
8560 			return 0;
8561 
8562 		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
8563 				plane->base.id, old_plane_crtc->base.id);
8564 
8565 		ret = dm_atomic_get_state(state, &dm_state);
8566 		if (ret)
8567 			return ret;
8568 
8569 		if (!dc_remove_plane_from_context(
8570 				dc,
8571 				dm_old_crtc_state->stream,
8572 				dm_old_plane_state->dc_state,
8573 				dm_state->context)) {
8574 
8575 			return -EINVAL;
8576 		}
8577 
8578 		if (dm_old_plane_state->dc_state)
8579 			dc_plane_state_release(dm_old_plane_state->dc_state);
8580 
8581 		dm_new_plane_state->dc_state = NULL;
8582 
8583 		*lock_and_validation_needed = true;
8584 
8585 	} else { /* Add new planes */
8586 		struct dc_plane_state *dc_new_plane_state;
8587 
8588 		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
8589 			return 0;
8590 
8591 		if (!new_plane_crtc)
8592 			return 0;
8593 
8594 		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
8595 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8596 
8597 		if (!dm_new_crtc_state->stream)
8598 			return 0;
8599 
8600 		if (!needs_reset)
8601 			return 0;
8602 
8603 		ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
8604 		if (ret)
8605 			return ret;
8606 
8607 		WARN_ON(dm_new_plane_state->dc_state);
8608 
8609 		dc_new_plane_state = dc_create_plane_state(dc);
8610 		if (!dc_new_plane_state)
8611 			return -ENOMEM;
8612 
8613 		DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
8614 				plane->base.id, new_plane_crtc->base.id);
8615 
8616 		ret = fill_dc_plane_attributes(
8617 			drm_to_adev(new_plane_crtc->dev),
8618 			dc_new_plane_state,
8619 			new_plane_state,
8620 			new_crtc_state);
8621 		if (ret) {
8622 			dc_plane_state_release(dc_new_plane_state);
8623 			return ret;
8624 		}
8625 
8626 		ret = dm_atomic_get_state(state, &dm_state);
8627 		if (ret) {
8628 			dc_plane_state_release(dc_new_plane_state);
8629 			return ret;
8630 		}
8631 
8632 		/*
8633 		 * Any atomic check errors that occur after this will
8634 		 * not need a release. The plane state will be attached
8635 		 * to the stream, and therefore part of the atomic
8636 		 * state. It'll be released when the atomic state is
8637 		 * cleaned.
8638 		 */
8639 		if (!dc_add_plane_to_context(
8640 				dc,
8641 				dm_new_crtc_state->stream,
8642 				dc_new_plane_state,
8643 				dm_state->context)) {
8644 
8645 			dc_plane_state_release(dc_new_plane_state);
8646 			return -EINVAL;
8647 		}
8648 
8649 		dm_new_plane_state->dc_state = dc_new_plane_state;
8650 
8651 		/* Tell DC to do a full surface update every time there
8652 		 * is a plane change. Inefficient, but works for now.
8653 		 */
8654 		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
8655 
8656 		*lock_and_validation_needed = true;
8657 	}
8658 
8659 
8660 	return ret;
8661 }
8662 
8663 #if defined(CONFIG_DRM_AMD_DC_DCN)
8664 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
8665 {
8666 	struct drm_connector *connector;
8667 	struct drm_connector_state *conn_state, *old_conn_state;
8668 	struct amdgpu_dm_connector *aconnector = NULL;
	int i;

	for_each_oldnew_connector_in_state(state, connector, old_conn_state, conn_state, i) {
8671 		if (!conn_state->crtc)
8672 			conn_state = old_conn_state;
8673 
8674 		if (conn_state->crtc != crtc)
8675 			continue;
8676 
8677 		aconnector = to_amdgpu_dm_connector(connector);
8678 		if (!aconnector->port || !aconnector->mst_port)
8679 			aconnector = NULL;
8680 		else
8681 			break;
8682 	}
8683 
8684 	if (!aconnector)
8685 		return 0;
8686 
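	/*
	 * Changing one stream on an MST link can alter the DSC configuration
	 * of every stream sharing that topology, so pull every CRTC driven
	 * through this connector's MST manager into the atomic state.
	 */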
8687 	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
8688 }
8689 #endif
8690 
8691 static int validate_overlay(struct drm_atomic_state *state)
8692 {
8693 	int i;
8694 	struct drm_plane *plane;
8695 	struct drm_plane_state *old_plane_state, *new_plane_state;
8696 	struct drm_plane_state *primary_state, *overlay_state = NULL;
8697 
	/* Find the overlay plane in the state; bail out if it is being disabled */
8699 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8700 		if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
8701 			if (drm_atomic_plane_disabling(plane->state, new_plane_state))
8702 				return 0;
8703 
8704 			overlay_state = new_plane_state;
8705 			continue;
8706 		}
8707 	}
8708 
8709 	/* check if we're making changes to the overlay plane */
8710 	if (!overlay_state)
8711 		return 0;
8712 
8713 	/* check if overlay plane is enabled */
8714 	if (!overlay_state->crtc)
8715 		return 0;
8716 
8717 	/* find the primary plane for the CRTC that the overlay is enabled on */
8718 	primary_state = drm_atomic_get_plane_state(state, overlay_state->crtc->primary);
8719 	if (IS_ERR(primary_state))
8720 		return PTR_ERR(primary_state);
8721 
8722 	/* check if primary plane is enabled */
8723 	if (!primary_state->crtc)
8724 		return 0;
8725 
8726 	/* Perform the bounds check to ensure the overlay plane covers the primary */
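	/*
	 * For example, a 1920x1080 primary at (0,0) passes against a
	 * 1920x1080 overlay at (0,0), but fails against a 1280x720 overlay,
	 * which would leave part of the primary uncovered.
	 */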
8727 	if (primary_state->crtc_x < overlay_state->crtc_x ||
8728 	    primary_state->crtc_y < overlay_state->crtc_y ||
8729 	    primary_state->crtc_x + primary_state->crtc_w > overlay_state->crtc_x + overlay_state->crtc_w ||
8730 	    primary_state->crtc_y + primary_state->crtc_h > overlay_state->crtc_y + overlay_state->crtc_h) {
8731 		DRM_DEBUG_ATOMIC("Overlay plane is enabled with hardware cursor but does not fully cover primary plane\n");
8732 		return -EINVAL;
8733 	}
8734 
8735 	return 0;
8736 }
8737 
8738 /**
 * amdgpu_dm_atomic_check() - Atomic check implementation for amdgpu DM.
8740  * @dev: The DRM device
8741  * @state: The atomic state to commit
8742  *
8743  * Validate that the given atomic state is programmable by DC into hardware.
8744  * This involves constructing a &struct dc_state reflecting the new hardware
8745  * state we wish to commit, then querying DC to see if it is programmable. It's
8746  * important not to modify the existing DC state. Otherwise, atomic_check
8747  * may unexpectedly commit hardware changes.
8748  *
8749  * When validating the DC state, it's important that the right locks are
 * acquired. In the full-update case, which removes, adds, or updates
 * streams on one CRTC while flipping on another, acquiring the global lock
 * guarantees that any such commit will wait for the completion of any
 * outstanding flip using DRM's synchronization events.
8754  *
8755  * Note that DM adds the affected connectors for all CRTCs in state, when that
8756  * might not seem necessary. This is because DC stream creation requires the
8757  * DC sink, which is tied to the DRM connector state. Cleaning this up should
8758  * be possible but non-trivial - a possible TODO item.
8759  *
 * Return: 0 on success, or a negative error code if validation failed.
8761  */
8762 static int amdgpu_dm_atomic_check(struct drm_device *dev,
8763 				  struct drm_atomic_state *state)
8764 {
8765 	struct amdgpu_device *adev = drm_to_adev(dev);
8766 	struct dm_atomic_state *dm_state = NULL;
8767 	struct dc *dc = adev->dm.dc;
8768 	struct drm_connector *connector;
8769 	struct drm_connector_state *old_con_state, *new_con_state;
8770 	struct drm_crtc *crtc;
8771 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8772 	struct drm_plane *plane;
8773 	struct drm_plane_state *old_plane_state, *new_plane_state;
8774 	enum dc_status status;
8775 	int ret, i;
8776 	bool lock_and_validation_needed = false;
8777 
8778 	amdgpu_check_debugfs_connector_property_change(adev, state);
8779 
8780 	ret = drm_atomic_helper_check_modeset(dev, state);
8781 	if (ret)
8782 		goto fail;
8783 
8784 	/* Check connector changes */
8785 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8786 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8787 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8788 
8789 		/* Skip connectors that are disabled or part of modeset already. */
8790 		if (!old_con_state->crtc && !new_con_state->crtc)
8791 			continue;
8792 
8793 		if (!new_con_state->crtc)
8794 			continue;
8795 
8796 		new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
8797 		if (IS_ERR(new_crtc_state)) {
8798 			ret = PTR_ERR(new_crtc_state);
8799 			goto fail;
8800 		}
8801 
8802 		if (dm_old_con_state->abm_level != dm_new_con_state->abm_level ||
8803 		    dm_old_con_state->scaling != dm_new_con_state->scaling)
8804 			new_crtc_state->connectors_changed = true;
8805 	}
8806 
8807 #if defined(CONFIG_DRM_AMD_DC_DCN)
8808 	if (dc_resource_is_dsc_encoding_supported(dc)) {
8809 		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8810 			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8811 				ret = add_affected_mst_dsc_crtcs(state, crtc);
8812 				if (ret)
8813 					goto fail;
8814 			}
8815 		}
8816 	}
8817 #endif
8818 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8819 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
8820 		    !new_crtc_state->color_mgmt_changed &&
8821 		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled)
8822 			continue;
8823 
8824 		ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
8825 		if (ret)
8826 			goto fail;
8827 
8828 		if (!new_crtc_state->enable)
8829 			continue;
8830 
		ret = drm_atomic_add_affected_connectors(state, crtc);
		if (ret)
			goto fail;
8834 
8835 		ret = drm_atomic_add_affected_planes(state, crtc);
8836 		if (ret)
8837 			goto fail;
8838 	}
8839 
8840 	/*
8841 	 * Add all primary and overlay planes on the CRTC to the state
8842 	 * whenever a plane is enabled to maintain correct z-ordering
8843 	 * and to enable fast surface updates.
8844 	 */
8845 	drm_for_each_crtc(crtc, dev) {
8846 		bool modified = false;
8847 
8848 		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8849 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
8850 				continue;
8851 
8852 			if (new_plane_state->crtc == crtc ||
8853 			    old_plane_state->crtc == crtc) {
8854 				modified = true;
8855 				break;
8856 			}
8857 		}
8858 
8859 		if (!modified)
8860 			continue;
8861 
8862 		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
8863 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
8864 				continue;
8865 
8866 			new_plane_state =
8867 				drm_atomic_get_plane_state(state, plane);
8868 
8869 			if (IS_ERR(new_plane_state)) {
8870 				ret = PTR_ERR(new_plane_state);
8871 				goto fail;
8872 			}
8873 		}
8874 	}
8875 
8876 	/* Prepass for updating tiling flags on new planes. */
8877 	for_each_new_plane_in_state(state, plane, new_plane_state, i) {
8878 		struct dm_plane_state *new_dm_plane_state = to_dm_plane_state(new_plane_state);
8879 		struct amdgpu_framebuffer *new_afb = to_amdgpu_framebuffer(new_plane_state->fb);
8880 
8881 		ret = get_fb_info(new_afb, &new_dm_plane_state->tiling_flags,
8882 				  &new_dm_plane_state->tmz_surface);
8883 		if (ret)
8884 			goto fail;
8885 	}
8886 
	/* Remove existing planes if they are modified */
8888 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8889 		ret = dm_update_plane_state(dc, state, plane,
8890 					    old_plane_state,
8891 					    new_plane_state,
8892 					    false,
8893 					    &lock_and_validation_needed);
8894 		if (ret)
8895 			goto fail;
8896 	}
8897 
8898 	/* Disable all crtcs which require disable */
8899 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8900 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
8901 					   old_crtc_state,
8902 					   new_crtc_state,
8903 					   false,
8904 					   &lock_and_validation_needed);
8905 		if (ret)
8906 			goto fail;
8907 	}
8908 
8909 	/* Enable all crtcs which require enable */
8910 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8911 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
8912 					   old_crtc_state,
8913 					   new_crtc_state,
8914 					   true,
8915 					   &lock_and_validation_needed);
8916 		if (ret)
8917 			goto fail;
8918 	}
8919 
8920 	ret = validate_overlay(state);
8921 	if (ret)
8922 		goto fail;
8923 
8924 	/* Add new/modified planes */
8925 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8926 		ret = dm_update_plane_state(dc, state, plane,
8927 					    old_plane_state,
8928 					    new_plane_state,
8929 					    true,
8930 					    &lock_and_validation_needed);
8931 		if (ret)
8932 			goto fail;
8933 	}
8934 
8935 	/* Run this here since we want to validate the streams we created */
8936 	ret = drm_atomic_helper_check_planes(dev, state);
8937 	if (ret)
8938 		goto fail;
8939 
8940 	if (state->legacy_cursor_update) {
8941 		/*
8942 		 * This is a fast cursor update coming from the plane update
8943 		 * helper, check if it can be done asynchronously for better
8944 		 * performance.
8945 		 */
8946 		state->async_update =
8947 			!drm_atomic_helper_async_check(dev, state);
8948 
8949 		/*
8950 		 * Skip the remaining global validation if this is an async
8951 		 * update. Cursor updates can be done without affecting
8952 		 * state or bandwidth calcs and this avoids the performance
8953 		 * penalty of locking the private state object and
8954 		 * allocating a new dc_state.
8955 		 */
8956 		if (state->async_update)
8957 			return 0;
8958 	}
8959 
	/* Check scaling and underscan changes */
	/*
	 * TODO: Removed scaling changes validation due to inability to commit
	 * new stream into context w/o causing full reset. Need to
	 * decide how to handle.
	 */
8965 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8966 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8967 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8968 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8969 
8970 		/* Skip any modesets/resets */
8971 		if (!acrtc || drm_atomic_crtc_needs_modeset(
8972 				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
8973 			continue;
8974 
		/* Skip anything that is not a scaling or underscan change */
8976 		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
8977 			continue;
8978 
8979 		lock_and_validation_needed = true;
8980 	}
8981 
8982 	/**
8983 	 * Streams and planes are reset when there are changes that affect
8984 	 * bandwidth. Anything that affects bandwidth needs to go through
8985 	 * DC global validation to ensure that the configuration can be applied
8986 	 * to hardware.
8987 	 *
8988 	 * We have to currently stall out here in atomic_check for outstanding
8989 	 * commits to finish in this case because our IRQ handlers reference
8990 	 * DRM state directly - we can end up disabling interrupts too early
8991 	 * if we don't.
8992 	 *
8993 	 * TODO: Remove this stall and drop DM state private objects.
8994 	 */
8995 	if (lock_and_validation_needed) {
8996 		ret = dm_atomic_get_state(state, &dm_state);
8997 		if (ret)
8998 			goto fail;
8999 
9000 		ret = do_aquire_global_lock(dev, state);
9001 		if (ret)
9002 			goto fail;
9003 
9004 #if defined(CONFIG_DRM_AMD_DC_DCN)
		if (!compute_mst_dsc_configs_for_state(state, dm_state->context)) {
			ret = -EINVAL;
			goto fail;
		}
9007 
9008 		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
9009 		if (ret)
9010 			goto fail;
9011 #endif
9012 
9013 		/*
9014 		 * Perform validation of MST topology in the state:
9015 		 * We need to perform MST atomic check before calling
9016 		 * dc_validate_global_state(), or there is a chance
9017 		 * to get stuck in an infinite loop and hang eventually.
9018 		 */
9019 		ret = drm_dp_mst_atomic_check(state);
9020 		if (ret)
9021 			goto fail;
9022 		status = dc_validate_global_state(dc, dm_state->context, false);
9023 		if (status != DC_OK) {
9024 			drm_dbg_atomic(dev,
9025 				       "DC global validation failure: %s (%d)",
9026 				       dc_status_to_str(status), status);
9027 			ret = -EINVAL;
9028 			goto fail;
9029 		}
9030 	} else {
9031 		/*
9032 		 * The commit is a fast update. Fast updates shouldn't change
9033 		 * the DC context, affect global validation, and can have their
9034 		 * commit work done in parallel with other commits not touching
9035 		 * the same resource. If we have a new DC context as part of
9036 		 * the DM atomic state from validation we need to free it and
9037 		 * retain the existing one instead.
9038 		 *
9039 		 * Furthermore, since the DM atomic state only contains the DC
9040 		 * context and can safely be annulled, we can free the state
9041 		 * and clear the associated private object now to free
9042 		 * some memory and avoid a possible use-after-free later.
9043 		 */
9044 
9045 		for (i = 0; i < state->num_private_objs; i++) {
9046 			struct drm_private_obj *obj = state->private_objs[i].ptr;
9047 
9048 			if (obj->funcs == adev->dm.atomic_obj.funcs) {
				int j = state->num_private_objs - 1;
9050 
9051 				dm_atomic_destroy_state(obj,
9052 						state->private_objs[i].state);
9053 
9054 				/* If i is not at the end of the array then the
9055 				 * last element needs to be moved to where i was
9056 				 * before the array can safely be truncated.
9057 				 */
9058 				if (i != j)
9059 					state->private_objs[i] =
9060 						state->private_objs[j];
9061 
9062 				state->private_objs[j].ptr = NULL;
9063 				state->private_objs[j].state = NULL;
9064 				state->private_objs[j].old_state = NULL;
9065 				state->private_objs[j].new_state = NULL;
9066 
9067 				state->num_private_objs = j;
9068 				break;
9069 			}
9070 		}
9071 	}
9072 
9073 	/* Store the overall update type for use later in atomic check. */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9075 		struct dm_crtc_state *dm_new_crtc_state =
9076 			to_dm_crtc_state(new_crtc_state);
9077 
9078 		dm_new_crtc_state->update_type = lock_and_validation_needed ?
9079 							 UPDATE_TYPE_FULL :
9080 							 UPDATE_TYPE_FAST;
9081 	}
9082 
9083 	/* Must be success */
9084 	WARN_ON(ret);
9085 	return ret;
9086 
9087 fail:
9088 	if (ret == -EDEADLK)
9089 		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
9090 	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
9091 		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
9092 	else
9093 		DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
9094 
9095 	return ret;
9096 }
9097 
9098 static bool is_dp_capable_without_timing_msa(struct dc *dc,
9099 					     struct amdgpu_dm_connector *amdgpu_dm_connector)
9100 {
9101 	uint8_t dpcd_data;
9102 	bool capable = false;
9103 
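	/*
	 * Per the DisplayPort specification, a sink sets the
	 * DP_MSA_TIMING_PAR_IGNORED bit in DP_DOWN_STREAM_PORT_COUNT when it
	 * can ignore the MSA video timing parameters - a prerequisite for
	 * variable refresh.
	 */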
9104 	if (amdgpu_dm_connector->dc_link &&
9105 		dm_helpers_dp_read_dpcd(
9106 				NULL,
9107 				amdgpu_dm_connector->dc_link,
9108 				DP_DOWN_STREAM_PORT_COUNT,
9109 				&dpcd_data,
9110 				sizeof(dpcd_data))) {
		capable = !!(dpcd_data & DP_MSA_TIMING_PAR_IGNORED);
9112 	}
9113 
9114 	return capable;
9115 }

void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
9117 					struct edid *edid)
9118 {
9119 	int i;
9120 	bool edid_check_required;
9121 	struct detailed_timing *timing;
9122 	struct detailed_non_pixel *data;
9123 	struct detailed_data_monitor_range *range;
9124 	struct amdgpu_dm_connector *amdgpu_dm_connector =
9125 			to_amdgpu_dm_connector(connector);
9126 	struct dm_connector_state *dm_con_state = NULL;
9127 
9128 	struct drm_device *dev = connector->dev;
9129 	struct amdgpu_device *adev = drm_to_adev(dev);
9130 	bool freesync_capable = false;
9131 
9132 	if (!connector->state) {
9133 		DRM_ERROR("%s - Connector has no state", __func__);
9134 		goto update;
9135 	}
9136 
9137 	if (!edid) {
9138 		dm_con_state = to_dm_connector_state(connector->state);
9139 
9140 		amdgpu_dm_connector->min_vfreq = 0;
9141 		amdgpu_dm_connector->max_vfreq = 0;
9142 		amdgpu_dm_connector->pixel_clock_mhz = 0;
9143 
9144 		goto update;
9145 	}
9146 
9147 	dm_con_state = to_dm_connector_state(connector->state);
9148 
9149 	edid_check_required = false;
9150 	if (!amdgpu_dm_connector->dc_sink) {
9151 		DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
9152 		goto update;
9153 	}
9154 	if (!adev->dm.freesync_module)
9155 		goto update;
9156 	/*
9157 	 * if edid non zero restrict freesync only for dp and edp
9158 	 */
9159 	if (edid) {
9160 		if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
9161 			|| amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
9162 			edid_check_required = is_dp_capable_without_timing_msa(
9163 						adev->dm.dc,
9164 						amdgpu_dm_connector);
9165 		}
9166 	}
	if (edid_check_required && (edid->version > 1 ||
9168 	   (edid->version == 1 && edid->revision > 1))) {
9169 		for (i = 0; i < 4; i++) {
9170 
9171 			timing	= &edid->detailed_timings[i];
9172 			data	= &timing->data.other_data;
9173 			range	= &data->data.range;
9174 			/*
9175 			 * Check if monitor has continuous frequency mode
9176 			 */
9177 			if (data->type != EDID_DETAIL_MONITOR_RANGE)
9178 				continue;
9179 			/*
9180 			 * Check for flag range limits only. If flag == 1 then
9181 			 * no additional timing information provided.
9182 			 * Default GTF, GTF Secondary curve and CVT are not
9183 			 * supported
9184 			 */
9185 			if (range->flags != 1)
9186 				continue;
9187 
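			/*
			 * The EDID range descriptor stores the vertical rates
			 * in Hz and the maximum pixel clock in 10 MHz units,
			 * hence the multiplication by 10 below.
			 */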
9188 			amdgpu_dm_connector->min_vfreq = range->min_vfreq;
9189 			amdgpu_dm_connector->max_vfreq = range->max_vfreq;
9190 			amdgpu_dm_connector->pixel_clock_mhz =
9191 				range->pixel_clock_mhz * 10;
9192 			break;
9193 		}
9194 
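		/*
		 * Only advertise FreeSync when the monitor exposes a usable
		 * refresh window; more than 10 Hz between min and max is the
		 * threshold used here.
		 */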
9195 		if (amdgpu_dm_connector->max_vfreq -
9196 		    amdgpu_dm_connector->min_vfreq > 10) {
9197 
9198 			freesync_capable = true;
9199 		}
9200 	}
9201 
9202 update:
9203 	if (dm_con_state)
9204 		dm_con_state->freesync_capable = freesync_capable;
9205 
9206 	if (connector->vrr_capable_property)
9207 		drm_connector_set_vrr_capable_property(connector,
9208 						       freesync_capable);
9209 }
9210 
9211 static void amdgpu_dm_set_psr_caps(struct dc_link *link)
9212 {
9213 	uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
9214 
9215 	if (!(link->connector_signal & SIGNAL_TYPE_EDP))
9216 		return;
9217 	if (link->type == dc_connection_none)
9218 		return;
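	/*
	 * DPCD register DP_PSR_SUPPORT (0x070) reports the sink's PSR
	 * version; zero means the sink implements no PSR at all.
	 */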
9219 	if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
9220 					dpcd_data, sizeof(dpcd_data))) {
9221 		link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
9222 
9223 		if (dpcd_data[0] == 0) {
9224 			link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
9225 			link->psr_settings.psr_feature_enabled = false;
9226 		} else {
9227 			link->psr_settings.psr_version = DC_PSR_VERSION_1;
9228 			link->psr_settings.psr_feature_enabled = true;
9229 		}
9230 
9231 		DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
9232 	}
9233 }
9234 
9235 /*
9236  * amdgpu_dm_link_setup_psr() - configure psr link
9237  * @stream: stream state
9238  *
9239  * Return: true if success
9240  */
9241 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
9242 {
9243 	struct dc_link *link = NULL;
9244 	struct psr_config psr_config = {0};
9245 	struct psr_context psr_context = {0};
9246 	bool ret = false;
9247 
9248 	if (stream == NULL)
9249 		return false;
9250 
9251 	link = stream->link;
9252 
9253 	psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
9254 
9255 	if (psr_config.psr_version > 0) {
9256 		psr_config.psr_exit_link_training_required = 0x1;
9257 		psr_config.psr_frame_capture_indication_req = 0;
9258 		psr_config.psr_rfb_setup_time = 0x37;
9259 		psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
9260 		psr_config.allow_smu_optimizations = 0x0;
9261 
		ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
	}
	DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled);
9266 
9267 	return ret;
9268 }
9269 
9270 /*
9271  * amdgpu_dm_psr_enable() - enable psr f/w
9272  * @stream: stream state
9273  *
9274  * Return: true if success
9275  */
9276 bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
9277 {
9278 	struct dc_link *link = stream->link;
9279 	unsigned int vsync_rate_hz = 0;
9280 	struct dc_static_screen_params params = {0};
	/*
	 * Calculate the number of static frames before generating an
	 * interrupt to enter PSR. Initialize to a failsafe of 2 static
	 * frames.
	 */
	unsigned int num_frames_static = 2;
9286 
9287 	DRM_DEBUG_DRIVER("Enabling psr...\n");
9288 
	vsync_rate_hz = div64_u64(div64_u64(
			stream->timing.pix_clk_100hz * 100,
			stream->timing.v_total),
			stream->timing.h_total);
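	/*
	 * pix_clk_100hz is in 100 Hz units, so the numerator above is the
	 * pixel clock in Hz. For example, a 1080p60 timing (148.5 MHz,
	 * 2200x1125 total) gives 148500000 / 1125 / 2200 = 60 Hz.
	 */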
9293 
	/*
	 * Calculate the number of frames such that at least 30 ms of time
	 * has passed, rounding up.
	 */
	if (vsync_rate_hz != 0) {
		unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;

		num_frames_static = (30000 / frame_time_microsec) + 1;
	}
9302 
9303 	params.triggers.cursor_update = true;
9304 	params.triggers.overlay_update = true;
9305 	params.triggers.surface_update = true;
9306 	params.num_frames = num_frames_static;
9307 
9308 	dc_stream_set_static_screen_params(link->ctx->dc,
9309 					   &stream, 1,
9310 					   &params);
9311 
9312 	return dc_link_set_psr_allow_active(link, true, false);
9313 }
9314 
9315 /*
9316  * amdgpu_dm_psr_disable() - disable psr f/w
9317  * @stream:  stream state
9318  *
9319  * Return: true if success
9320  */
9321 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
9322 {
9323 
9324 	DRM_DEBUG_DRIVER("Disabling psr...\n");
9325 
9326 	return dc_link_set_psr_allow_active(stream->link, false, true);
9327 }
9328 
9329 /*
9330  * amdgpu_dm_psr_disable() - disable psr f/w
9331  * if psr is enabled on any stream
9332  *
9333  * Return: true if success
9334  */
9335 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm)
9336 {
9337 	DRM_DEBUG_DRIVER("Disabling psr if psr is enabled on any stream\n");
9338 	return dc_set_psr_allow_active(dm->dc, false);
9339 }
9340 
9341 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
9342 {
9343 	struct amdgpu_device *adev = drm_to_adev(dev);
9344 	struct dc *dc = adev->dm.dc;
9345 	int i;
9346 
9347 	mutex_lock(&adev->dm.dc_lock);
9348 	if (dc->current_state) {
9349 		for (i = 0; i < dc->current_state->stream_count; ++i)
9350 			dc->current_state->streams[i]
9351 				->triggered_crtc_reset.enabled =
9352 				adev->dm.force_timing_sync;
9353 
9354 		dm_enable_per_frame_crtc_master_sync(dc->current_state);
9355 		dc_trigger_sync(dc, dc->current_state);
9356 	}
9357 	mutex_unlock(&adev->dm.dc_lock);
9358 }
9359