1/* 2 * Copyright © 2008 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 * IN THE SOFTWARE. 
22 * 23 * Authors: 24 * Keith Packard <keithp@keithp.com> 25 * 26 */ 27 28#include <linux/export.h> 29#include <linux/i2c.h> 30#include <linux/notifier.h> 31#include <linux/reboot.h> 32#include <linux/slab.h> 33#include <linux/types.h> 34 35#include <asm/byteorder.h> 36 37#include <drm/drm_atomic_helper.h> 38#include <drm/drm_crtc.h> 39#include <drm/drm_dp_helper.h> 40#include <drm/drm_edid.h> 41#include <drm/drm_probe_helper.h> 42 43#include "i915_debugfs.h" 44#include "i915_drv.h" 45#include "i915_trace.h" 46#include "intel_atomic.h" 47#include "intel_audio.h" 48#include "intel_connector.h" 49#include "intel_ddi.h" 50#include "intel_display_types.h" 51#include "intel_dp.h" 52#include "intel_dp_link_training.h" 53#include "intel_dp_mst.h" 54#include "intel_dpio_phy.h" 55#include "intel_fifo_underrun.h" 56#include "intel_hdcp.h" 57#include "intel_hdmi.h" 58#include "intel_hotplug.h" 59#include "intel_lspcon.h" 60#include "intel_lvds.h" 61#include "intel_panel.h" 62#include "intel_psr.h" 63#include "intel_sideband.h" 64#include "intel_tc.h" 65#include "intel_vdsc.h" 66 67#define DP_DPRX_ESI_LEN 14 68 69/* DP DSC throughput values used for slice count calculations KPixels/s */ 70#define DP_DSC_PEAK_PIXEL_RATE 2720000 71#define DP_DSC_MAX_ENC_THROUGHPUT_0 340000 72#define DP_DSC_MAX_ENC_THROUGHPUT_1 400000 73 74/* DP DSC FEC Overhead factor = 1/(0.972261) */ 75#define DP_DSC_FEC_OVERHEAD_FACTOR 972261 76 77/* Compliance test status bits */ 78#define INTEL_DP_RESOLUTION_SHIFT_MASK 0 79#define INTEL_DP_RESOLUTION_PREFERRED (1 << INTEL_DP_RESOLUTION_SHIFT_MASK) 80#define INTEL_DP_RESOLUTION_STANDARD (2 << INTEL_DP_RESOLUTION_SHIFT_MASK) 81#define INTEL_DP_RESOLUTION_FAILSAFE (3 << INTEL_DP_RESOLUTION_SHIFT_MASK) 82 83struct dp_link_dpll { 84 int clock; 85 struct dpll dpll; 86}; 87 88static const struct dp_link_dpll g4x_dpll[] = { 89 { 162000, 90 { .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } }, 91 { 270000, 92 { .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } } 93}; 94 
95static const struct dp_link_dpll pch_dpll[] = { 96 { 162000, 97 { .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } }, 98 { 270000, 99 { .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } } 100}; 101 102static const struct dp_link_dpll vlv_dpll[] = { 103 { 162000, 104 { .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } }, 105 { 270000, 106 { .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } } 107}; 108 109/* 110 * CHV supports eDP 1.4 that have more link rates. 111 * Below only provides the fixed rate but exclude variable rate. 112 */ 113static const struct dp_link_dpll chv_dpll[] = { 114 /* 115 * CHV requires to program fractional division for m2. 116 * m2 is stored in fixed point format using formula below 117 * (m2_int << 22) | m2_fraction 118 */ 119 { 162000, /* m2_int = 32, m2_fraction = 1677722 */ 120 { .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } }, 121 { 270000, /* m2_int = 27, m2_fraction = 0 */ 122 { .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }, 123}; 124 125/* Constants for DP DSC configurations */ 126static const u8 valid_dsc_bpp[] = {6, 8, 10, 12, 15}; 127 128/* With Single pipe configuration, HW is capable of supporting maximum 129 * of 4 slices per line. 130 */ 131static const u8 valid_dsc_slicecount[] = {1, 2, 4}; 132 133/** 134 * intel_dp_is_edp - is the given port attached to an eDP panel (either CPU or PCH) 135 * @intel_dp: DP struct 136 * 137 * If a CPU or PCH DP output is attached to an eDP panel, this function 138 * will return true, and false otherwise. 
139 */ 140bool intel_dp_is_edp(struct intel_dp *intel_dp) 141{ 142 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 143 144 return dig_port->base.type == INTEL_OUTPUT_EDP; 145} 146 147static void intel_dp_link_down(struct intel_encoder *encoder, 148 const struct intel_crtc_state *old_crtc_state); 149static bool edp_panel_vdd_on(struct intel_dp *intel_dp); 150static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync); 151static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder, 152 const struct intel_crtc_state *crtc_state); 153static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv, 154 enum pipe pipe); 155static void intel_dp_unset_edid(struct intel_dp *intel_dp); 156 157static void intel_dp_set_default_sink_rates(struct intel_dp *intel_dp) 158{ 159 intel_dp->sink_rates[0] = 162000; 160 intel_dp->num_sink_rates = 1; 161} 162 163/* update sink rates from dpcd */ 164static void intel_dp_set_sink_rates(struct intel_dp *intel_dp) 165{ 166 static const int dp_rates[] = { 167 162000, 270000, 540000, 810000 168 }; 169 int i, max_rate; 170 171 if (drm_dp_has_quirk(&intel_dp->desc, 0, 172 DP_DPCD_QUIRK_CAN_DO_MAX_LINK_RATE_3_24_GBPS)) { 173 /* Needed, e.g., for Apple MBP 2017, 15 inch eDP Retina panel */ 174 static const int quirk_rates[] = { 162000, 270000, 324000 }; 175 176 memcpy(intel_dp->sink_rates, quirk_rates, sizeof(quirk_rates)); 177 intel_dp->num_sink_rates = ARRAY_SIZE(quirk_rates); 178 179 return; 180 } 181 182 max_rate = drm_dp_bw_code_to_link_rate(intel_dp->dpcd[DP_MAX_LINK_RATE]); 183 184 for (i = 0; i < ARRAY_SIZE(dp_rates); i++) { 185 if (dp_rates[i] > max_rate) 186 break; 187 intel_dp->sink_rates[i] = dp_rates[i]; 188 } 189 190 intel_dp->num_sink_rates = i; 191} 192 193/* Get length of rates array potentially limited by max_rate. 
*/ 194static int intel_dp_rate_limit_len(const int *rates, int len, int max_rate) 195{ 196 int i; 197 198 /* Limit results by potentially reduced max rate */ 199 for (i = 0; i < len; i++) { 200 if (rates[len - i - 1] <= max_rate) 201 return len - i; 202 } 203 204 return 0; 205} 206 207/* Get length of common rates array potentially limited by max_rate. */ 208static int intel_dp_common_len_rate_limit(const struct intel_dp *intel_dp, 209 int max_rate) 210{ 211 return intel_dp_rate_limit_len(intel_dp->common_rates, 212 intel_dp->num_common_rates, max_rate); 213} 214 215/* Theoretical max between source and sink */ 216static int intel_dp_max_common_rate(struct intel_dp *intel_dp) 217{ 218 return intel_dp->common_rates[intel_dp->num_common_rates - 1]; 219} 220 221/* Theoretical max between source and sink */ 222static int intel_dp_max_common_lane_count(struct intel_dp *intel_dp) 223{ 224 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 225 int source_max = dig_port->max_lanes; 226 int sink_max = drm_dp_max_lane_count(intel_dp->dpcd); 227 int fia_max = intel_tc_port_fia_max_lane_count(dig_port); 228 229 return min3(source_max, sink_max, fia_max); 230} 231 232int intel_dp_max_lane_count(struct intel_dp *intel_dp) 233{ 234 return intel_dp->max_link_lane_count; 235} 236 237int 238intel_dp_link_required(int pixel_clock, int bpp) 239{ 240 /* pixel_clock is in kHz, divide bpp by 8 for bit to Byte conversion */ 241 return DIV_ROUND_UP(pixel_clock * bpp, 8); 242} 243 244int 245intel_dp_max_data_rate(int max_link_clock, int max_lanes) 246{ 247 /* max_link_clock is the link symbol clock (LS_Clk) in kHz and not the 248 * link rate that is generally expressed in Gbps. Since, 8 bits of data 249 * is transmitted every LS_Clk per lane, there is no need to account for 250 * the channel encoding that is done in the PHY layer here. 
251 */ 252 253 return max_link_clock * max_lanes; 254} 255 256static int cnl_max_source_rate(struct intel_dp *intel_dp) 257{ 258 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 259 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); 260 enum port port = dig_port->base.port; 261 262 u32 voltage = intel_de_read(dev_priv, CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK; 263 264 /* Low voltage SKUs are limited to max of 5.4G */ 265 if (voltage == VOLTAGE_INFO_0_85V) 266 return 540000; 267 268 /* For this SKU 8.1G is supported in all ports */ 269 if (IS_CNL_WITH_PORT_F(dev_priv)) 270 return 810000; 271 272 /* For other SKUs, max rate on ports A and D is 5.4G */ 273 if (port == PORT_A || port == PORT_D) 274 return 540000; 275 276 return 810000; 277} 278 279static int icl_max_source_rate(struct intel_dp *intel_dp) 280{ 281 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 282 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); 283 enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port); 284 285 if (intel_phy_is_combo(dev_priv, phy) && 286 !IS_ELKHARTLAKE(dev_priv) && 287 !intel_dp_is_edp(intel_dp)) 288 return 540000; 289 290 return 810000; 291} 292 293static void 294intel_dp_set_source_rates(struct intel_dp *intel_dp) 295{ 296 /* The values must be in increasing order */ 297 static const int cnl_rates[] = { 298 162000, 216000, 270000, 324000, 432000, 540000, 648000, 810000 299 }; 300 static const int bxt_rates[] = { 301 162000, 216000, 243000, 270000, 324000, 432000, 540000 302 }; 303 static const int skl_rates[] = { 304 162000, 216000, 270000, 324000, 432000, 540000 305 }; 306 static const int hsw_rates[] = { 307 162000, 270000, 540000 308 }; 309 static const int g4x_rates[] = { 310 162000, 270000 311 }; 312 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 313 struct intel_encoder *encoder = &dig_port->base; 314 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); 315 const 
int *source_rates; 316 int size, max_rate = 0, vbt_max_rate; 317 318 /* This should only be done once */ 319 drm_WARN_ON(&dev_priv->drm, 320 intel_dp->source_rates || intel_dp->num_source_rates); 321 322 if (INTEL_GEN(dev_priv) >= 10) { 323 source_rates = cnl_rates; 324 size = ARRAY_SIZE(cnl_rates); 325 if (IS_GEN(dev_priv, 10)) 326 max_rate = cnl_max_source_rate(intel_dp); 327 else 328 max_rate = icl_max_source_rate(intel_dp); 329 } else if (IS_GEN9_LP(dev_priv)) { 330 source_rates = bxt_rates; 331 size = ARRAY_SIZE(bxt_rates); 332 } else if (IS_GEN9_BC(dev_priv)) { 333 source_rates = skl_rates; 334 size = ARRAY_SIZE(skl_rates); 335 } else if ((IS_HASWELL(dev_priv) && !IS_HSW_ULX(dev_priv)) || 336 IS_BROADWELL(dev_priv)) { 337 source_rates = hsw_rates; 338 size = ARRAY_SIZE(hsw_rates); 339 } else { 340 source_rates = g4x_rates; 341 size = ARRAY_SIZE(g4x_rates); 342 } 343 344 vbt_max_rate = intel_bios_dp_max_link_rate(encoder); 345 if (max_rate && vbt_max_rate) 346 max_rate = min(max_rate, vbt_max_rate); 347 else if (vbt_max_rate) 348 max_rate = vbt_max_rate; 349 350 if (max_rate) 351 size = intel_dp_rate_limit_len(source_rates, size, max_rate); 352 353 intel_dp->source_rates = source_rates; 354 intel_dp->num_source_rates = size; 355} 356 357static int intersect_rates(const int *source_rates, int source_len, 358 const int *sink_rates, int sink_len, 359 int *common_rates) 360{ 361 int i = 0, j = 0, k = 0; 362 363 while (i < source_len && j < sink_len) { 364 if (source_rates[i] == sink_rates[j]) { 365 if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES)) 366 return k; 367 common_rates[k] = source_rates[i]; 368 ++k; 369 ++i; 370 ++j; 371 } else if (source_rates[i] < sink_rates[j]) { 372 ++i; 373 } else { 374 ++j; 375 } 376 } 377 return k; 378} 379 380/* return index of rate in rates array, or -1 if not found */ 381static int intel_dp_rate_index(const int *rates, int len, int rate) 382{ 383 int i; 384 385 for (i = 0; i < len; i++) 386 if (rate == rates[i]) 387 return i; 388 389 
return -1; 390} 391 392static void intel_dp_set_common_rates(struct intel_dp *intel_dp) 393{ 394 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 395 396 drm_WARN_ON(&i915->drm, 397 !intel_dp->num_source_rates || !intel_dp->num_sink_rates); 398 399 intel_dp->num_common_rates = intersect_rates(intel_dp->source_rates, 400 intel_dp->num_source_rates, 401 intel_dp->sink_rates, 402 intel_dp->num_sink_rates, 403 intel_dp->common_rates); 404 405 /* Paranoia, there should always be something in common. */ 406 if (drm_WARN_ON(&i915->drm, intel_dp->num_common_rates == 0)) { 407 intel_dp->common_rates[0] = 162000; 408 intel_dp->num_common_rates = 1; 409 } 410} 411 412static bool intel_dp_link_params_valid(struct intel_dp *intel_dp, int link_rate, 413 u8 lane_count) 414{ 415 /* 416 * FIXME: we need to synchronize the current link parameters with 417 * hardware readout. Currently fast link training doesn't work on 418 * boot-up. 419 */ 420 if (link_rate == 0 || 421 link_rate > intel_dp->max_link_rate) 422 return false; 423 424 if (lane_count == 0 || 425 lane_count > intel_dp_max_lane_count(intel_dp)) 426 return false; 427 428 return true; 429} 430 431static bool intel_dp_can_link_train_fallback_for_edp(struct intel_dp *intel_dp, 432 int link_rate, 433 u8 lane_count) 434{ 435 const struct drm_display_mode *fixed_mode = 436 intel_dp->attached_connector->panel.fixed_mode; 437 int mode_rate, max_rate; 438 439 mode_rate = intel_dp_link_required(fixed_mode->clock, 18); 440 max_rate = intel_dp_max_data_rate(link_rate, lane_count); 441 if (mode_rate > max_rate) 442 return false; 443 444 return true; 445} 446 447int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp, 448 int link_rate, u8 lane_count) 449{ 450 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 451 int index; 452 453 /* 454 * TODO: Enable fallback on MST links once MST link compute can handle 455 * the fallback params. 
456 */ 457 if (intel_dp->is_mst) { 458 drm_err(&i915->drm, "Link Training Unsuccessful\n"); 459 return -1; 460 } 461 462 index = intel_dp_rate_index(intel_dp->common_rates, 463 intel_dp->num_common_rates, 464 link_rate); 465 if (index > 0) { 466 if (intel_dp_is_edp(intel_dp) && 467 !intel_dp_can_link_train_fallback_for_edp(intel_dp, 468 intel_dp->common_rates[index - 1], 469 lane_count)) { 470 drm_dbg_kms(&i915->drm, 471 "Retrying Link training for eDP with same parameters\n"); 472 return 0; 473 } 474 intel_dp->max_link_rate = intel_dp->common_rates[index - 1]; 475 intel_dp->max_link_lane_count = lane_count; 476 } else if (lane_count > 1) { 477 if (intel_dp_is_edp(intel_dp) && 478 !intel_dp_can_link_train_fallback_for_edp(intel_dp, 479 intel_dp_max_common_rate(intel_dp), 480 lane_count >> 1)) { 481 drm_dbg_kms(&i915->drm, 482 "Retrying Link training for eDP with same parameters\n"); 483 return 0; 484 } 485 intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp); 486 intel_dp->max_link_lane_count = lane_count >> 1; 487 } else { 488 drm_err(&i915->drm, "Link Training Unsuccessful\n"); 489 return -1; 490 } 491 492 return 0; 493} 494 495u32 intel_dp_mode_to_fec_clock(u32 mode_clock) 496{ 497 return div_u64(mul_u32_u32(mode_clock, 1000000U), 498 DP_DSC_FEC_OVERHEAD_FACTOR); 499} 500 501static int 502small_joiner_ram_size_bits(struct drm_i915_private *i915) 503{ 504 if (INTEL_GEN(i915) >= 11) 505 return 7680 * 8; 506 else 507 return 6144 * 8; 508} 509 510static u16 intel_dp_dsc_get_output_bpp(struct drm_i915_private *i915, 511 u32 link_clock, u32 lane_count, 512 u32 mode_clock, u32 mode_hdisplay) 513{ 514 u32 bits_per_pixel, max_bpp_small_joiner_ram; 515 int i; 516 517 /* 518 * Available Link Bandwidth(Kbits/sec) = (NumberOfLanes)* 519 * (LinkSymbolClock)* 8 * (TimeSlotsPerMTP) 520 * for SST -> TimeSlotsPerMTP is 1, 521 * for MST -> TimeSlotsPerMTP has to be calculated 522 */ 523 bits_per_pixel = (link_clock * lane_count * 8) / 524 
intel_dp_mode_to_fec_clock(mode_clock); 525 drm_dbg_kms(&i915->drm, "Max link bpp: %u\n", bits_per_pixel); 526 527 /* Small Joiner Check: output bpp <= joiner RAM (bits) / Horiz. width */ 528 max_bpp_small_joiner_ram = small_joiner_ram_size_bits(i915) / 529 mode_hdisplay; 530 drm_dbg_kms(&i915->drm, "Max small joiner bpp: %u\n", 531 max_bpp_small_joiner_ram); 532 533 /* 534 * Greatest allowed DSC BPP = MIN (output BPP from available Link BW 535 * check, output bpp from small joiner RAM check) 536 */ 537 bits_per_pixel = min(bits_per_pixel, max_bpp_small_joiner_ram); 538 539 /* Error out if the max bpp is less than smallest allowed valid bpp */ 540 if (bits_per_pixel < valid_dsc_bpp[0]) { 541 drm_dbg_kms(&i915->drm, "Unsupported BPP %u, min %u\n", 542 bits_per_pixel, valid_dsc_bpp[0]); 543 return 0; 544 } 545 546 /* Find the nearest match in the array of known BPPs from VESA */ 547 for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp) - 1; i++) { 548 if (bits_per_pixel < valid_dsc_bpp[i + 1]) 549 break; 550 } 551 bits_per_pixel = valid_dsc_bpp[i]; 552 553 /* 554 * Compressed BPP in U6.4 format so multiply by 16, for Gen 11, 555 * fractional part is 0 556 */ 557 return bits_per_pixel << 4; 558} 559 560static u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp, 561 int mode_clock, int mode_hdisplay) 562{ 563 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 564 u8 min_slice_count, i; 565 int max_slice_width; 566 567 if (mode_clock <= DP_DSC_PEAK_PIXEL_RATE) 568 min_slice_count = DIV_ROUND_UP(mode_clock, 569 DP_DSC_MAX_ENC_THROUGHPUT_0); 570 else 571 min_slice_count = DIV_ROUND_UP(mode_clock, 572 DP_DSC_MAX_ENC_THROUGHPUT_1); 573 574 max_slice_width = drm_dp_dsc_sink_max_slice_width(intel_dp->dsc_dpcd); 575 if (max_slice_width < DP_DSC_MIN_SLICE_WIDTH_VALUE) { 576 drm_dbg_kms(&i915->drm, 577 "Unsupported slice width %d by DP DSC Sink device\n", 578 max_slice_width); 579 return 0; 580 } 581 /* Also take into account max slice width */ 582 min_slice_count = max_t(u8, 
min_slice_count, 583 DIV_ROUND_UP(mode_hdisplay, 584 max_slice_width)); 585 586 /* Find the closest match to the valid slice count values */ 587 for (i = 0; i < ARRAY_SIZE(valid_dsc_slicecount); i++) { 588 if (valid_dsc_slicecount[i] > 589 drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd, 590 false)) 591 break; 592 if (min_slice_count <= valid_dsc_slicecount[i]) 593 return valid_dsc_slicecount[i]; 594 } 595 596 drm_dbg_kms(&i915->drm, "Unsupported Slice Count %d\n", 597 min_slice_count); 598 return 0; 599} 600 601static bool intel_dp_hdisplay_bad(struct drm_i915_private *dev_priv, 602 int hdisplay) 603{ 604 /* 605 * Older platforms don't like hdisplay==4096 with DP. 606 * 607 * On ILK/SNB/IVB the pipe seems to be somewhat running (scanline 608 * and frame counter increment), but we don't get vblank interrupts, 609 * and the pipe underruns immediately. The link also doesn't seem 610 * to get trained properly. 611 * 612 * On CHV the vblank interrupts don't seem to disappear but 613 * otherwise the symptoms are similar. 
614 * 615 * TODO: confirm the behaviour on HSW+ 616 */ 617 return hdisplay == 4096 && !HAS_DDI(dev_priv); 618} 619 620static enum drm_mode_status 621intel_dp_mode_valid_downstream(struct intel_connector *connector, 622 const struct drm_display_mode *mode, 623 int target_clock) 624{ 625 struct intel_dp *intel_dp = intel_attached_dp(connector); 626 const struct drm_display_info *info = &connector->base.display_info; 627 int tmds_clock; 628 629 if (intel_dp->dfp.max_dotclock && 630 target_clock > intel_dp->dfp.max_dotclock) 631 return MODE_CLOCK_HIGH; 632 633 /* Assume 8bpc for the DP++/HDMI/DVI TMDS clock check */ 634 tmds_clock = target_clock; 635 if (drm_mode_is_420_only(info, mode)) 636 tmds_clock /= 2; 637 638 if (intel_dp->dfp.min_tmds_clock && 639 tmds_clock < intel_dp->dfp.min_tmds_clock) 640 return MODE_CLOCK_LOW; 641 if (intel_dp->dfp.max_tmds_clock && 642 tmds_clock > intel_dp->dfp.max_tmds_clock) 643 return MODE_CLOCK_HIGH; 644 645 return MODE_OK; 646} 647 648static enum drm_mode_status 649intel_dp_mode_valid(struct drm_connector *connector, 650 struct drm_display_mode *mode) 651{ 652 struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector)); 653 struct intel_connector *intel_connector = to_intel_connector(connector); 654 struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode; 655 struct drm_i915_private *dev_priv = to_i915(connector->dev); 656 int target_clock = mode->clock; 657 int max_rate, mode_rate, max_lanes, max_link_clock; 658 int max_dotclk = dev_priv->max_dotclk_freq; 659 u16 dsc_max_output_bpp = 0; 660 u8 dsc_slice_count = 0; 661 enum drm_mode_status status; 662 663 if (mode->flags & DRM_MODE_FLAG_DBLSCAN) 664 return MODE_NO_DBLESCAN; 665 666 if (intel_dp_is_edp(intel_dp) && fixed_mode) { 667 if (mode->hdisplay > fixed_mode->hdisplay) 668 return MODE_PANEL; 669 670 if (mode->vdisplay > fixed_mode->vdisplay) 671 return MODE_PANEL; 672 673 target_clock = fixed_mode->clock; 674 } 675 676 max_link_clock = 
intel_dp_max_link_rate(intel_dp); 677 max_lanes = intel_dp_max_lane_count(intel_dp); 678 679 max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes); 680 mode_rate = intel_dp_link_required(target_clock, 18); 681 682 if (intel_dp_hdisplay_bad(dev_priv, mode->hdisplay)) 683 return MODE_H_ILLEGAL; 684 685 /* 686 * Output bpp is stored in 6.4 format so right shift by 4 to get the 687 * integer value since we support only integer values of bpp. 688 */ 689 if ((INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) && 690 drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)) { 691 if (intel_dp_is_edp(intel_dp)) { 692 dsc_max_output_bpp = 693 drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4; 694 dsc_slice_count = 695 drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd, 696 true); 697 } else if (drm_dp_sink_supports_fec(intel_dp->fec_capable)) { 698 dsc_max_output_bpp = 699 intel_dp_dsc_get_output_bpp(dev_priv, 700 max_link_clock, 701 max_lanes, 702 target_clock, 703 mode->hdisplay) >> 4; 704 dsc_slice_count = 705 intel_dp_dsc_get_slice_count(intel_dp, 706 target_clock, 707 mode->hdisplay); 708 } 709 } 710 711 if ((mode_rate > max_rate && !(dsc_max_output_bpp && dsc_slice_count)) || 712 target_clock > max_dotclk) 713 return MODE_CLOCK_HIGH; 714 715 if (mode->clock < 10000) 716 return MODE_CLOCK_LOW; 717 718 if (mode->flags & DRM_MODE_FLAG_DBLCLK) 719 return MODE_H_ILLEGAL; 720 721 status = intel_dp_mode_valid_downstream(intel_connector, 722 mode, target_clock); 723 if (status != MODE_OK) 724 return status; 725 726 return intel_mode_valid_max_plane_size(dev_priv, mode); 727} 728 729u32 intel_dp_pack_aux(const u8 *src, int src_bytes) 730{ 731 int i; 732 u32 v = 0; 733 734 if (src_bytes > 4) 735 src_bytes = 4; 736 for (i = 0; i < src_bytes; i++) 737 v |= ((u32)src[i]) << ((3 - i) * 8); 738 return v; 739} 740 741static void intel_dp_unpack_aux(u32 src, u8 *dst, int dst_bytes) 742{ 743 int i; 744 if (dst_bytes > 4) 745 dst_bytes = 4; 746 for (i = 0; i < dst_bytes; i++) 747 
dst[i] = src >> ((3-i) * 8); 748} 749 750static void 751intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp); 752static void 753intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp, 754 bool force_disable_vdd); 755static void 756intel_dp_pps_init(struct intel_dp *intel_dp); 757 758static intel_wakeref_t 759pps_lock(struct intel_dp *intel_dp) 760{ 761 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 762 intel_wakeref_t wakeref; 763 764 /* 765 * See intel_power_sequencer_reset() why we need 766 * a power domain reference here. 767 */ 768 wakeref = intel_display_power_get(dev_priv, 769 intel_aux_power_domain(dp_to_dig_port(intel_dp))); 770 771 mutex_lock(&dev_priv->pps_mutex); 772 773 return wakeref; 774} 775 776static intel_wakeref_t 777pps_unlock(struct intel_dp *intel_dp, intel_wakeref_t wakeref) 778{ 779 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 780 781 mutex_unlock(&dev_priv->pps_mutex); 782 intel_display_power_put(dev_priv, 783 intel_aux_power_domain(dp_to_dig_port(intel_dp)), 784 wakeref); 785 return 0; 786} 787 788#define with_pps_lock(dp, wf) \ 789 for ((wf) = pps_lock(dp); (wf); (wf) = pps_unlock((dp), (wf))) 790 791static void 792vlv_power_sequencer_kick(struct intel_dp *intel_dp) 793{ 794 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 795 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 796 enum pipe pipe = intel_dp->pps_pipe; 797 bool pll_enabled, release_cl_override = false; 798 enum dpio_phy phy = DPIO_PHY(pipe); 799 enum dpio_channel ch = vlv_pipe_to_channel(pipe); 800 u32 DP; 801 802 if (drm_WARN(&dev_priv->drm, 803 intel_de_read(dev_priv, intel_dp->output_reg) & DP_PORT_EN, 804 "skipping pipe %c power sequencer kick due to [ENCODER:%d:%s] being active\n", 805 pipe_name(pipe), dig_port->base.base.base.id, 806 dig_port->base.base.name)) 807 return; 808 809 drm_dbg_kms(&dev_priv->drm, 810 "kicking pipe %c power sequencer for [ENCODER:%d:%s]\n", 811 pipe_name(pipe), 
dig_port->base.base.base.id, 812 dig_port->base.base.name); 813 814 /* Preserve the BIOS-computed detected bit. This is 815 * supposed to be read-only. 816 */ 817 DP = intel_de_read(dev_priv, intel_dp->output_reg) & DP_DETECTED; 818 DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0; 819 DP |= DP_PORT_WIDTH(1); 820 DP |= DP_LINK_TRAIN_PAT_1; 821 822 if (IS_CHERRYVIEW(dev_priv)) 823 DP |= DP_PIPE_SEL_CHV(pipe); 824 else 825 DP |= DP_PIPE_SEL(pipe); 826 827 pll_enabled = intel_de_read(dev_priv, DPLL(pipe)) & DPLL_VCO_ENABLE; 828 829 /* 830 * The DPLL for the pipe must be enabled for this to work. 831 * So enable temporarily it if it's not already enabled. 832 */ 833 if (!pll_enabled) { 834 release_cl_override = IS_CHERRYVIEW(dev_priv) && 835 !chv_phy_powergate_ch(dev_priv, phy, ch, true); 836 837 if (vlv_force_pll_on(dev_priv, pipe, IS_CHERRYVIEW(dev_priv) ? 838 &chv_dpll[0].dpll : &vlv_dpll[0].dpll)) { 839 drm_err(&dev_priv->drm, 840 "Failed to force on pll for pipe %c!\n", 841 pipe_name(pipe)); 842 return; 843 } 844 } 845 846 /* 847 * Similar magic as in intel_dp_enable_port(). 848 * We _must_ do this port enable + disable trick 849 * to make this power sequencer lock onto the port. 850 * Otherwise even VDD force bit won't work. 
851 */ 852 intel_de_write(dev_priv, intel_dp->output_reg, DP); 853 intel_de_posting_read(dev_priv, intel_dp->output_reg); 854 855 intel_de_write(dev_priv, intel_dp->output_reg, DP | DP_PORT_EN); 856 intel_de_posting_read(dev_priv, intel_dp->output_reg); 857 858 intel_de_write(dev_priv, intel_dp->output_reg, DP & ~DP_PORT_EN); 859 intel_de_posting_read(dev_priv, intel_dp->output_reg); 860 861 if (!pll_enabled) { 862 vlv_force_pll_off(dev_priv, pipe); 863 864 if (release_cl_override) 865 chv_phy_powergate_ch(dev_priv, phy, ch, false); 866 } 867} 868 869static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv) 870{ 871 struct intel_encoder *encoder; 872 unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B); 873 874 /* 875 * We don't have power sequencer currently. 876 * Pick one that's not used by other ports. 877 */ 878 for_each_intel_dp(&dev_priv->drm, encoder) { 879 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 880 881 if (encoder->type == INTEL_OUTPUT_EDP) { 882 drm_WARN_ON(&dev_priv->drm, 883 intel_dp->active_pipe != INVALID_PIPE && 884 intel_dp->active_pipe != 885 intel_dp->pps_pipe); 886 887 if (intel_dp->pps_pipe != INVALID_PIPE) 888 pipes &= ~(1 << intel_dp->pps_pipe); 889 } else { 890 drm_WARN_ON(&dev_priv->drm, 891 intel_dp->pps_pipe != INVALID_PIPE); 892 893 if (intel_dp->active_pipe != INVALID_PIPE) 894 pipes &= ~(1 << intel_dp->active_pipe); 895 } 896 } 897 898 if (pipes == 0) 899 return INVALID_PIPE; 900 901 return ffs(pipes) - 1; 902} 903 904static enum pipe 905vlv_power_sequencer_pipe(struct intel_dp *intel_dp) 906{ 907 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 908 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 909 enum pipe pipe; 910 911 lockdep_assert_held(&dev_priv->pps_mutex); 912 913 /* We should never land here with regular DP ports */ 914 drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp)); 915 916 drm_WARN_ON(&dev_priv->drm, intel_dp->active_pipe != INVALID_PIPE && 917 intel_dp->active_pipe 
!= intel_dp->pps_pipe); 918 919 if (intel_dp->pps_pipe != INVALID_PIPE) 920 return intel_dp->pps_pipe; 921 922 pipe = vlv_find_free_pps(dev_priv); 923 924 /* 925 * Didn't find one. This should not happen since there 926 * are two power sequencers and up to two eDP ports. 927 */ 928 if (drm_WARN_ON(&dev_priv->drm, pipe == INVALID_PIPE)) 929 pipe = PIPE_A; 930 931 vlv_steal_power_sequencer(dev_priv, pipe); 932 intel_dp->pps_pipe = pipe; 933 934 drm_dbg_kms(&dev_priv->drm, 935 "picked pipe %c power sequencer for [ENCODER:%d:%s]\n", 936 pipe_name(intel_dp->pps_pipe), 937 dig_port->base.base.base.id, 938 dig_port->base.base.name); 939 940 /* init power sequencer on this pipe and port */ 941 intel_dp_init_panel_power_sequencer(intel_dp); 942 intel_dp_init_panel_power_sequencer_registers(intel_dp, true); 943 944 /* 945 * Even vdd force doesn't work until we've made 946 * the power sequencer lock in on the port. 947 */ 948 vlv_power_sequencer_kick(intel_dp); 949 950 return intel_dp->pps_pipe; 951} 952 953static int 954bxt_power_sequencer_idx(struct intel_dp *intel_dp) 955{ 956 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 957 int backlight_controller = dev_priv->vbt.backlight.controller; 958 959 lockdep_assert_held(&dev_priv->pps_mutex); 960 961 /* We should never land here with regular DP ports */ 962 drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp)); 963 964 if (!intel_dp->pps_reset) 965 return backlight_controller; 966 967 intel_dp->pps_reset = false; 968 969 /* 970 * Only the HW needs to be reprogrammed, the SW state is fixed and 971 * has been setup during connector init. 
972 */ 973 intel_dp_init_panel_power_sequencer_registers(intel_dp, false); 974 975 return backlight_controller; 976} 977 978typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv, 979 enum pipe pipe); 980 981static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv, 982 enum pipe pipe) 983{ 984 return intel_de_read(dev_priv, PP_STATUS(pipe)) & PP_ON; 985} 986 987static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv, 988 enum pipe pipe) 989{ 990 return intel_de_read(dev_priv, PP_CONTROL(pipe)) & EDP_FORCE_VDD; 991} 992 993static bool vlv_pipe_any(struct drm_i915_private *dev_priv, 994 enum pipe pipe) 995{ 996 return true; 997} 998 999static enum pipe 1000vlv_initial_pps_pipe(struct drm_i915_private *dev_priv, 1001 enum port port, 1002 vlv_pipe_check pipe_check) 1003{ 1004 enum pipe pipe; 1005 1006 for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) { 1007 u32 port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(pipe)) & 1008 PANEL_PORT_SELECT_MASK; 1009 1010 if (port_sel != PANEL_PORT_SELECT_VLV(port)) 1011 continue; 1012 1013 if (!pipe_check(dev_priv, pipe)) 1014 continue; 1015 1016 return pipe; 1017 } 1018 1019 return INVALID_PIPE; 1020} 1021 1022static void 1023vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp) 1024{ 1025 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 1026 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 1027 enum port port = dig_port->base.port; 1028 1029 lockdep_assert_held(&dev_priv->pps_mutex); 1030 1031 /* try to find a pipe with this port selected */ 1032 /* first pick one where the panel is on */ 1033 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port, 1034 vlv_pipe_has_pp_on); 1035 /* didn't find one? pick one where vdd is on */ 1036 if (intel_dp->pps_pipe == INVALID_PIPE) 1037 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port, 1038 vlv_pipe_has_vdd_on); 1039 /* didn't find one? 
 pick one with just the correct port */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_any);

	/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
	if (intel_dp->pps_pipe == INVALID_PIPE) {
		drm_dbg_kms(&dev_priv->drm,
			    "no initial power sequencer for [ENCODER:%d:%s]\n",
			    dig_port->base.base.base.id,
			    dig_port->base.base.name);
		return;
	}

	drm_dbg_kms(&dev_priv->drm,
		    "initial power sequencer for [ENCODER:%d:%s]: pipe %c\n",
		    dig_port->base.base.base.id,
		    dig_port->base.base.name,
		    pipe_name(intel_dp->pps_pipe));

	intel_dp_init_panel_power_sequencer(intel_dp);
	intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
}

/*
 * Invalidate cached PPS state for every DP encoder so it gets
 * re-established on next use (e.g. after a HW reset / resume).
 * Only meaningful on VLV/CHV (per-pipe PPS) and GEN9 LP (PPS reset flag).
 */
void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;

	if (drm_WARN_ON(&dev_priv->drm,
			!(IS_VALLEYVIEW(dev_priv) ||
			  IS_CHERRYVIEW(dev_priv) ||
			  IS_GEN9_LP(dev_priv))))
		return;

	/*
	 * We can't grab pps_mutex here due to deadlock with power_domain
	 * mutex when power_domain functions are called while holding pps_mutex.
	 * That also means that in order to use pps_pipe the code needs to
	 * hold both a power domain reference and pps_mutex, and the power domain
	 * reference get/put must be done while _not_ holding pps_mutex.
	 * pps_{lock,unlock}() do these steps in the correct order, so one
	 * should use them always.
	 */

	for_each_intel_dp(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		drm_WARN_ON(&dev_priv->drm,
			    intel_dp->active_pipe != INVALID_PIPE);

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		if (IS_GEN9_LP(dev_priv))
			intel_dp->pps_reset = true;
		else
			intel_dp->pps_pipe = INVALID_PIPE;
	}
}

/* The set of PPS MMIO registers for one power sequencer instance */
struct pps_registers {
	i915_reg_t pp_ctrl;
	i915_reg_t pp_stat;
	i915_reg_t pp_on;
	i915_reg_t pp_off;
	i915_reg_t pp_div;
};

/*
 * Resolve the PPS register set for this port: index 0 by default, the
 * VBT backlight controller on GEN9 LP, or the bound pipe on VLV/CHV.
 * pp_div does not exist where the cycle delay lives in PP_CONTROL.
 */
static void intel_pps_get_registers(struct intel_dp *intel_dp,
				    struct pps_registers *regs)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	int pps_idx = 0;

	memset(regs, 0, sizeof(*regs));

	if (IS_GEN9_LP(dev_priv))
		pps_idx = bxt_power_sequencer_idx(intel_dp);
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		pps_idx = vlv_power_sequencer_pipe(intel_dp);

	regs->pp_ctrl = PP_CONTROL(pps_idx);
	regs->pp_stat = PP_STATUS(pps_idx);
	regs->pp_on = PP_ON_DELAYS(pps_idx);
	regs->pp_off = PP_OFF_DELAYS(pps_idx);

	/* Cycle delay moved from PP_DIVISOR to PP_CONTROL */
	if (IS_GEN9_LP(dev_priv) || INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
		regs->pp_div = INVALID_MMIO_REG;
	else
		regs->pp_div = PP_DIVISOR(pps_idx);
}

/* Convenience accessor: PP_CONTROL register for this port's PPS */
static i915_reg_t
_pp_ctrl_reg(struct intel_dp *intel_dp)
{
	struct pps_registers regs;

	intel_pps_get_registers(intel_dp, &regs);

	return regs.pp_ctrl;
}

/* Convenience accessor: PP_STATUS register for this port's PPS */
static i915_reg_t
_pp_stat_reg(struct intel_dp *intel_dp)
{
	struct pps_registers regs;

	intel_pps_get_registers(intel_dp, &regs);

	return regs.pp_stat;
}

/* Reboot notifier handler to shutdown panel power to guarantee T12 timing
   This function only applicable when panel PM state is not to be tracked */
static int edp_notify_handler(struct
 notifier_block *this, unsigned long code,
			      void *unused)
{
	struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
						 edp_notifier);
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	intel_wakeref_t wakeref;

	/* Only act on eDP and only for a restart-style reboot */
	if (!intel_dp_is_edp(intel_dp) || code != SYS_RESTART)
		return 0;

	with_pps_lock(intel_dp, wakeref) {
		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
			enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
			i915_reg_t pp_ctrl_reg, pp_div_reg;
			u32 pp_div;

			pp_ctrl_reg = PP_CONTROL(pipe);
			pp_div_reg  = PP_DIVISOR(pipe);
			pp_div = intel_de_read(dev_priv, pp_div_reg);
			pp_div &= PP_REFERENCE_DIVIDER_MASK;

			/* 0x1F write to PP_DIV_REG sets max cycle delay */
			intel_de_write(dev_priv, pp_div_reg, pp_div | 0x1F);
			intel_de_write(dev_priv, pp_ctrl_reg,
				       PANEL_UNLOCK_REGS);
			msleep(intel_dp->panel_power_cycle_delay);
		}
	}

	return 0;
}

/* Is panel power currently on, per the PPS status register? */
static bool edp_have_panel_power(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* On VLV/CHV there is no answer until a PPS pipe has been bound */
	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return (intel_de_read(dev_priv, _pp_stat_reg(intel_dp)) & PP_ON) != 0;
}

/* Is VDD force currently asserted, per the PPS control register? */
static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* On VLV/CHV there is no answer until a PPS pipe has been bound */
	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
}

/*
 * Sanity check before AUX traffic on eDP: warn (once per call site) if
 * neither panel power nor VDD is up, since the transaction will fail.
 */
static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (!intel_dp_is_edp(intel_dp))
		return;

	if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
		drm_WARN(&dev_priv->drm, 1,
			 "eDP powered off while attempting aux channel communication.\n");
		drm_dbg_kms(&dev_priv->drm, "Status 0x%08x Control 0x%08x\n",
			    intel_de_read(dev_priv, _pp_stat_reg(intel_dp)),
			    intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)));
	}
}

/*
 * Wait (up to 10ms) for the AUX channel SEND_BUSY bit to clear and
 * return the final AUX_CH_CTL status value. Logs an error on timeout.
 */
static u32
intel_dp_aux_wait_done(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
	const unsigned int timeout_ms = 10;
	u32 status;
	bool done;

#define C (((status = intel_uncore_read_notrace(&i915->uncore, ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
	done = wait_event_timeout(i915->gmbus_wait_queue, C,
				  msecs_to_jiffies_timeout(timeout_ms));

	/* just trace the final value */
	trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);

	if (!done)
		drm_err(&i915->drm,
			"%s: did not complete or timeout within %ums (status 0x%08x)\n",
			intel_dp->aux.name, timeout_ms, status);
#undef C

	return status;
}

static u32 g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (index)
		return 0;

	/*
	 * The clock divider is based off the hrawclk, and would like to run at
	 * 2MHz.
 So, take the hrawclk value and divide by 2000 and use that
	 */
	return DIV_ROUND_CLOSEST(RUNTIME_INFO(dev_priv)->rawclk_freq, 2000);
}

static u32 ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	u32 freq;

	if (index)
		return 0;

	/*
	 * The clock divider is based off the cdclk or PCH rawclk, and would
	 * like to run at 2MHz. So, take the cdclk or PCH rawclk value and
	 * divide by 2000 and use that
	 */
	if (dig_port->aux_ch == AUX_CH_A)
		freq = dev_priv->cdclk.hw.cdclk;
	else
		freq = RUNTIME_INFO(dev_priv)->rawclk_freq;
	return DIV_ROUND_CLOSEST(freq, 2000);
}

/*
 * HSW variant: non-ULT parts with LPT-H need fixed divider values on the
 * PCH AUX channels (two retry dividers); otherwise same as ILK.
 */
static u32 hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);

	if (dig_port->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(dev_priv)) {
		/* Workaround for non-ULT HSW */
		switch (index) {
		case 0: return 63;
		case 1: return 72;
		default: return 0;
		}
	}

	return ilk_get_aux_clock_divider(intel_dp, index);
}

static u32 skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	/*
	 * SKL doesn't need us to program the AUX clock divider (Hardware will
	 * derive the clock from CDCLK automatically). We still implement the
	 * get_aux_clock_divider vfunc to plug-in into the existing code.
	 */
	return index ? 0 : 1;
}

/*
 * Build the AUX_CH_CTL value for one transaction on pre-SKL hardware:
 * busy/done/error bits, timeout, message size, precharge and bit-clock
 * divider fields.
 */
static u32 g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
				int send_bytes,
				u32 aux_clock_divider)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv =
			to_i915(dig_port->base.base.dev);
	u32 precharge, timeout;

	if (IS_GEN(dev_priv, 6))
		precharge = 3;
	else
		precharge = 5;

	if (IS_BROADWELL(dev_priv))
		timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
	else
		timeout = DP_AUX_CH_CTL_TIME_OUT_400us;

	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       DP_AUX_CH_CTL_INTERRUPT |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       timeout |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
	       (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
}

/*
 * Build the AUX_CH_CTL value on SKL+: no divider field (derived from
 * CDCLK); selects TBT IO when the type-C port is in TBT-alt mode.
 */
static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
				int send_bytes,
				u32 unused)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *i915 =
			to_i915(dig_port->base.base.dev);
	enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
	u32 ret;

	ret = DP_AUX_CH_CTL_SEND_BUSY |
	      DP_AUX_CH_CTL_DONE |
	      DP_AUX_CH_CTL_INTERRUPT |
	      DP_AUX_CH_CTL_TIME_OUT_ERROR |
	      DP_AUX_CH_CTL_TIME_OUT_MAX |
	      DP_AUX_CH_CTL_RECEIVE_ERROR |
	      (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	      DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) |
	      DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);

	if (intel_phy_is_tc(i915, phy) &&
	    dig_port->tc_mode == TC_PORT_TBT_ALT)
		ret |= DP_AUX_CH_CTL_TBT_IO;

	return ret;
}

/*
 * Perform one raw AUX transaction: power/lock bookkeeping, busy-wait,
 * retries per DP spec, then unload any reply bytes. Returns number of
 * bytes received or a negative errno.
 */
static int
intel_dp_aux_xfer(struct intel_dp *intel_dp,
		  const u8 *send, int send_bytes,
		  u8 *recv, int recv_size,
		  u32 aux_send_ctl_flags)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private
 *i915 =
			to_i915(dig_port->base.base.dev);
	struct intel_uncore *uncore = &i915->uncore;
	enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
	bool is_tc_port = intel_phy_is_tc(i915, phy);
	i915_reg_t ch_ctl, ch_data[5];
	u32 aux_clock_divider;
	enum intel_display_power_domain aux_domain;
	intel_wakeref_t aux_wakeref;
	intel_wakeref_t pps_wakeref;
	int i, ret, recv_bytes;
	int try, clock = 0;
	u32 status;
	bool vdd;

	ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
	for (i = 0; i < ARRAY_SIZE(ch_data); i++)
		ch_data[i] = intel_dp->aux_ch_data_reg(intel_dp, i);

	if (is_tc_port)
		intel_tc_port_lock(dig_port);

	aux_domain = intel_aux_power_domain(dig_port);

	aux_wakeref = intel_display_power_get(i915, aux_domain);
	pps_wakeref = pps_lock(intel_dp);

	/*
	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
	 * In such cases we want to leave VDD enabled and it's up to upper layers
	 * to turn it off. But for eg. i2c-dev access we need to turn it on/off
	 * ourselves.
	 */
	vdd = edp_panel_vdd_on(intel_dp);

	/* dp aux is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going into
	 * deep sleep states.
	 */
	cpu_latency_qos_update_request(&intel_dp->pm_qos, 0);

	intel_dp_check_edp(intel_dp);

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = intel_uncore_read_notrace(uncore, ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}
	/* just trace the final value */
	trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);

	if (try == 3) {
		/* Still busy: warn only when the stuck status value changes */
		const u32 status = intel_uncore_read(uncore, ch_ctl);

		if (status != intel_dp->aux_busy_last_status) {
			drm_WARN(&i915->drm, 1,
				 "%s: not started (status 0x%08x)\n",
				 intel_dp->aux.name, status);
			intel_dp->aux_busy_last_status = status;
		}

		ret = -EBUSY;
		goto out;
	}

	/* Only 5 data registers! */
	if (drm_WARN_ON(&i915->drm, send_bytes > 20 || recv_size > 20)) {
		ret = -E2BIG;
		goto out;
	}

	/* Outer loop walks the available clock dividers; 0 terminates */
	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
							  send_bytes,
							  aux_clock_divider);

		send_ctl |= aux_send_ctl_flags;

		/* Must try at least 3 times according to DP spec */
		for (try = 0; try < 5; try++) {
			/* Load the send data into the aux channel data registers */
			for (i = 0; i < send_bytes; i += 4)
				intel_uncore_write(uncore,
						   ch_data[i >> 2],
						   intel_dp_pack_aux(send + i,
								     send_bytes - i));

			/* Send the command and wait for it to complete */
			intel_uncore_write(uncore, ch_ctl, send_ctl);

			status = intel_dp_aux_wait_done(intel_dp);

			/* Clear done status and any errors */
			intel_uncore_write(uncore,
					   ch_ctl,
					   status |
					   DP_AUX_CH_CTL_DONE |
					   DP_AUX_CH_CTL_TIME_OUT_ERROR |
					   DP_AUX_CH_CTL_RECEIVE_ERROR);

			/* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
			 * 400us delay required for errors and timeouts
			 * Timeout errors from the HW already meet this
			 * requirement so skip to next iteration
			 */
			if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
				continue;

			if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
				usleep_range(400, 500);
				continue;
			}
			if (status & DP_AUX_CH_CTL_DONE)
				goto done;
		}
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		drm_err(&i915->drm, "%s: not done (status 0x%08x)\n",
			intel_dp->aux.name, status);
		ret = -EBUSY;
		goto out;
	}

done:
	/* Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		drm_err(&i915->drm, "%s: receive error (status 0x%08x)\n",
			intel_dp->aux.name, status);
		ret = -EIO;
		goto out;
	}

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		drm_dbg_kms(&i915->drm, "%s: timeout (status 0x%08x)\n",
			    intel_dp->aux.name, status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);

	/*
	 * By BSpec: "Message sizes of 0 or >20 are not allowed."
	 * We have no idea of what happened so we return -EBUSY so
	 * drm layer takes care for the necessary retries.
	 */
	if (recv_bytes == 0 || recv_bytes > 20) {
		drm_dbg_kms(&i915->drm,
			    "%s: Forbidden recv_bytes = %d on aux transaction\n",
			    intel_dp->aux.name, recv_bytes);
		ret = -EBUSY;
		goto out;
	}

	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		intel_dp_unpack_aux(intel_uncore_read(uncore, ch_data[i >> 2]),
				    recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	cpu_latency_qos_update_request(&intel_dp->pm_qos, PM_QOS_DEFAULT_VALUE);

	if (vdd)
		edp_panel_vdd_off(intel_dp, false);

	pps_unlock(intel_dp, pps_wakeref);
	intel_display_power_put_async(i915, aux_domain, aux_wakeref);

	if (is_tc_port)
		intel_tc_port_unlock(dig_port);

	return ret;
}

#define BARE_ADDRESS_SIZE	3
#define HEADER_SIZE		(BARE_ADDRESS_SIZE + 1)

/* Pack the 4-byte AUX request header: command, 20-bit address, length-1 */
static void
intel_dp_aux_header(u8 txbuf[HEADER_SIZE],
		    const struct drm_dp_aux_msg *msg)
{
	txbuf[0] = (msg->request << 4) | ((msg->address >> 16) & 0xf);
	txbuf[1] = (msg->address >> 8) & 0xff;
	txbuf[2] = msg->address & 0xff;
	txbuf[3] = msg->size - 1;
}

static u32 intel_dp_aux_xfer_flags(const struct drm_dp_aux_msg *msg)
{
	/*
	 * If we're trying to send the HDCP Aksv, we need to set the Aksv
	 * select bit to inform the hardware to send the Aksv after our header
	 * since we can't access that data from software.
	 */
	if ((msg->request & ~DP_AUX_I2C_MOT) == DP_AUX_NATIVE_WRITE &&
	    msg->address == DP_AUX_HDCP_AKSV)
		return DP_AUX_CH_CTL_AUX_AKSV_SELECT;

	return 0;
}

/*
 * drm_dp_aux .transfer callback: translate a drm_dp_aux_msg into a raw
 * intel_dp_aux_xfer() call and decode the reply/short-write count.
 */
static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
	struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 txbuf[20], rxbuf[20];
	size_t txsize, rxsize;
	u32 flags = intel_dp_aux_xfer_flags(msg);
	int ret;

	intel_dp_aux_header(txbuf, msg);

	switch (msg->request & ~DP_AUX_I2C_MOT) {
	case DP_AUX_NATIVE_WRITE:
	case DP_AUX_I2C_WRITE:
	case DP_AUX_I2C_WRITE_STATUS_UPDATE:
		txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
		rxsize = 2; /* 0 or 1 data bytes */

		if (drm_WARN_ON(&i915->drm, txsize > 20))
			return -E2BIG;

		/* buffer and size must be both set or both unset */
		drm_WARN_ON(&i915->drm, !msg->buffer != !msg->size);

		if (msg->buffer)
			memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);

		ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
					rxbuf, rxsize, flags);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;

			if (ret > 1) {
				/* Number of bytes written in a short write. */
				ret = clamp_t(int, rxbuf[1], 0, msg->size);
			} else {
				/* Return payload size. */
				ret = msg->size;
			}
		}
		break;

	case DP_AUX_NATIVE_READ:
	case DP_AUX_I2C_READ:
		txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
		rxsize = msg->size + 1;

		if (drm_WARN_ON(&i915->drm, rxsize > 20))
			return -E2BIG;

		ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
					rxbuf, rxsize, flags);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;
			/*
			 * Assume happy day, and copy the data. The caller is
			 * expected to check msg->reply before touching it.
			 *
			 * Return payload size.
			 */
			ret--;
			memcpy(msg->buffer, rxbuf + 1, ret);
		}
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}


/* AUX_CH_CTL register for G4X-class hardware (channels B-D only) */
static i915_reg_t g4x_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return DP_AUX_CH_CTL(aux_ch);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_B);
	}
}

/* AUX data register @index for G4X-class hardware (channels B-D only) */
static i915_reg_t g4x_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return DP_AUX_CH_DATA(aux_ch, index);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_B, index);
	}
}

/* AUX_CH_CTL on ILK+: channel A is on the CPU, B-D live on the PCH */
static i915_reg_t ilk_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
		return DP_AUX_CH_CTL(aux_ch);
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return PCH_DP_AUX_CH_CTL(aux_ch);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_A);
	}
}

/* AUX data register @index on ILK+: channel A on CPU, B-D on the PCH */
static i915_reg_t ilk_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
		return
 DP_AUX_CH_DATA(aux_ch, index);
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return PCH_DP_AUX_CH_DATA(aux_ch, index);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_A, index);
	}
}

/* AUX_CH_CTL on SKL+: all channels A-G share one register layout */
static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
	case AUX_CH_E:
	case AUX_CH_F:
	case AUX_CH_G:
		return DP_AUX_CH_CTL(aux_ch);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_A);
	}
}

/* AUX data register @index on SKL+: channels A-G */
static i915_reg_t skl_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
	case AUX_CH_E:
	case AUX_CH_F:
	case AUX_CH_G:
		return DP_AUX_CH_DATA(aux_ch, index);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_A, index);
	}
}

/* Tear down AUX state: drop the PM QoS request and free the aux name */
static void
intel_dp_aux_fini(struct intel_dp *intel_dp)
{
	if (cpu_latency_qos_request_active(&intel_dp->pm_qos))
		cpu_latency_qos_remove_request(&intel_dp->pm_qos);

	kfree(intel_dp->aux.name);
}

/*
 * Select the per-platform AUX register/divider/ctl vfuncs, initialise
 * the drm_dp_aux helper, name the channel and register the PM QoS
 * request used during transfers.
 */
static void
intel_dp_aux_init(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &dig_port->base;

	if (INTEL_GEN(dev_priv) >= 9) {
		intel_dp->aux_ch_ctl_reg = skl_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = skl_aux_data_reg;
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		intel_dp->aux_ch_ctl_reg = ilk_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = ilk_aux_data_reg;
	} else {
		intel_dp->aux_ch_ctl_reg = g4x_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = g4x_aux_data_reg;
	}

	if (INTEL_GEN(dev_priv) >= 9)
		intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
	else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
		intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
	else if (HAS_PCH_SPLIT(dev_priv))
		intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
	else
		intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider;

	if (INTEL_GEN(dev_priv) >= 9)
		intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
	else
		intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl;

	drm_dp_aux_init(&intel_dp->aux);

	/* Failure to allocate our preferred name is not critical */
	intel_dp->aux.name = kasprintf(GFP_KERNEL, "AUX %c/port %c",
				       aux_ch_name(dig_port->aux_ch),
				       port_name(encoder->port));
	intel_dp->aux.transfer = intel_dp_aux_transfer;
	cpu_latency_qos_add_request(&intel_dp->pm_qos, PM_QOS_DEFAULT_VALUE);
}

/* Source supports HBR2 iff its highest source rate is at least 5.4 GHz */
bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
{
	int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1];

	return max_rate >= 540000;
}

/* Source supports HBR3 iff its highest source rate is at least 8.1 GHz */
bool intel_dp_source_supports_hbr3(struct intel_dp *intel_dp)
{
	int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1];

	return max_rate >= 810000;
}

/*
 * For platforms with fixed DP DPLL tables, copy the divider values
 * matching the chosen port clock into the crtc state.
 */
static void
intel_dp_set_clock(struct intel_encoder *encoder,
		   struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	const struct dp_link_dpll *divisor = NULL;
	int i, count = 0;

	if (IS_G4X(dev_priv)) {
		divisor = g4x_dpll;
		count = ARRAY_SIZE(g4x_dpll);
	} else if
 (HAS_PCH_SPLIT(dev_priv)) {
		divisor = pch_dpll;
		count = ARRAY_SIZE(pch_dpll);
	} else if (IS_CHERRYVIEW(dev_priv)) {
		divisor = chv_dpll;
		count = ARRAY_SIZE(chv_dpll);
	} else if (IS_VALLEYVIEW(dev_priv)) {
		divisor = vlv_dpll;
		count = ARRAY_SIZE(vlv_dpll);
	}

	if (divisor && count) {
		for (i = 0; i < count; i++) {
			if (pipe_config->port_clock == divisor[i].clock) {
				pipe_config->dpll = divisor[i].dpll;
				pipe_config->clock_set = true;
				break;
			}
		}
	}
}

/*
 * Format @nelem integers into @str as a comma-separated list, truncating
 * silently when the buffer runs out.
 */
static void snprintf_int_array(char *str, size_t len,
			       const int *array, int nelem)
{
	int i;

	str[0] = '\0';

	for (i = 0; i < nelem; i++) {
		int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);
		if (r >= len)
			return;
		str += r;
		len -= r;
	}
}

/* Debug dump of the source/sink/common link-rate tables */
static void intel_dp_print_rates(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	char str[128]; /* FIXME: too big for stack?
 */
	if (intel_dp->use_rate_select) {
		*link_bw = 0;
		*rate_select =
			intel_dp_rate_select(intel_dp, port_clock);
	} else {
		*link_bw = drm_dp_link_rate_to_bw_code(port_clock);
		*rate_select = 0;
	}
}

/* Can this source transcoder/pipe drive Forward Error Correction? */
static bool intel_dp_source_supports_fec(struct intel_dp *intel_dp,
					 const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	/* On TGL, FEC is supported on all Pipes */
	if (INTEL_GEN(dev_priv) >= 12)
		return true;

	/* Gen11: all transcoders except A */
	if (IS_GEN(dev_priv, 11) && pipe_config->cpu_transcoder != TRANSCODER_A)
		return true;

	return false;
}

/* FEC usable end-to-end: both source and sink must support it */
static bool intel_dp_supports_fec(struct intel_dp *intel_dp,
				  const struct intel_crtc_state *pipe_config)
{
	return intel_dp_source_supports_fec(intel_dp, pipe_config) &&
		drm_dp_sink_supports_fec(intel_dp->fec_capable);
}

/*
 * DSC usable: source and sink must both support it, and non-eDP links
 * additionally require FEC to be enabled in the crtc state.
 */
static bool intel_dp_supports_dsc(struct intel_dp *intel_dp,
				  const struct intel_crtc_state *crtc_state)
{
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;

	if (!intel_dp_is_edp(intel_dp) && !crtc_state->fec_enable)
		return false;

	return intel_dsc_source_support(encoder, crtc_state) &&
		drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd);
}

/*
 * True when the DP->HDMI output will be 4:2:0, either natively or via
 * the protocol converter's 4:4:4 -> 4:2:0 conversion.
 */
static bool intel_dp_hdmi_ycbcr420(struct intel_dp *intel_dp,
				   const struct intel_crtc_state *crtc_state)
{
	return crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
		(crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444 &&
		 intel_dp->dfp.ycbcr_444_to_420);
}

/*
 * Effective TMDS clock on the downstream HDMI link for @bpc; 4:2:0
 * halves the clock.
 */
static int intel_dp_hdmi_tmds_clock(struct intel_dp *intel_dp,
				    const struct intel_crtc_state *crtc_state, int bpc)
{
	int clock = crtc_state->hw.adjusted_mode.crtc_clock * bpc / 8;

	if (intel_dp_hdmi_ycbcr420(intel_dp, crtc_state))
		clock /= 2;

	return clock;
}

/* Check the TMDS clock against the downstream facing port's min/max limits */
static bool intel_dp_hdmi_tmds_clock_valid(struct intel_dp *intel_dp,
					   const struct intel_crtc_state *crtc_state, int bpc)
{
	int tmds_clock = intel_dp_hdmi_tmds_clock(intel_dp, crtc_state, bpc);

	if (intel_dp->dfp.min_tmds_clock &&
	    tmds_clock < intel_dp->dfp.min_tmds_clock)
		return false;

	if (intel_dp->dfp.max_tmds_clock &&
	    tmds_clock > intel_dp->dfp.max_tmds_clock)
		return false;

	return true;
}

/* Deep color feasible on the HDMI DFP: generic check plus TMDS clock limits */
static bool intel_dp_hdmi_deep_color_possible(struct intel_dp *intel_dp,
					      const struct intel_crtc_state *crtc_state,
					      int bpc)
{

	return intel_hdmi_deep_color_possible(crtc_state, bpc,
					      intel_dp->has_hdmi_sink,
					      intel_dp_hdmi_ycbcr420(intel_dp, crtc_state)) &&
		intel_dp_hdmi_tmds_clock_valid(intel_dp, crtc_state, bpc);
}

/*
 * Compute the max link bpp: start from pipe bpp, clamp to the DFP's max
 * bpc and TMDS limits, and for eDP honour a lower VBT-provided bpp when
 * the EDID does not specify one.
 */
static int intel_dp_max_bpp(struct intel_dp *intel_dp,
			    const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	int bpp, bpc;

	bpc = crtc_state->pipe_bpp / 3;

	if (intel_dp->dfp.max_bpc)
		bpc = min_t(int, bpc, intel_dp->dfp.max_bpc);

	if (intel_dp->dfp.min_tmds_clock) {
		for (; bpc >= 10; bpc -= 2) {
			if (intel_dp_hdmi_deep_color_possible(intel_dp, crtc_state, bpc))
				break;
		}
	}

	bpp = bpc * 3;
	if (intel_dp_is_edp(intel_dp)) {
		/* Get bpp from vbt only for panels that dont have bpp in edid */
		if (intel_connector->base.display_info.bpc == 0 &&
		    dev_priv->vbt.edp.bpp && dev_priv->vbt.edp.bpp < bpp) {
			drm_dbg_kms(&dev_priv->drm,
				    "clamping bpp for eDP panel to BIOS-provided %i\n",
				    dev_priv->vbt.edp.bpp);
			bpp = dev_priv->vbt.edp.bpp;
		}
	}

	return bpp;
}

/* Adjust link config limits based on compliance test requests.
 */
void
intel_dp_adjust_compliance_config(struct intel_dp *intel_dp,
				  struct intel_crtc_state *pipe_config,
				  struct link_config_limits *limits)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	/* For DP Compliance we override the computed bpp for the pipe */
	if (intel_dp->compliance.test_data.bpc != 0) {
		int bpp = 3 * intel_dp->compliance.test_data.bpc;

		limits->min_bpp = limits->max_bpp = bpp;
		pipe_config->dither_force_disable = bpp == 6 * 3;

		drm_dbg_kms(&i915->drm, "Setting pipe_bpp to %d\n", bpp);
	}

	/* Use values requested by Compliance Test Request */
	if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) {
		int index;

		/* Validate the compliance test data since max values
		 * might have changed due to link train fallback.
		 */
		if (intel_dp_link_params_valid(intel_dp, intel_dp->compliance.test_link_rate,
					       intel_dp->compliance.test_lane_count)) {
			index = intel_dp_rate_index(intel_dp->common_rates,
						    intel_dp->num_common_rates,
						    intel_dp->compliance.test_link_rate);
			if (index >= 0)
				limits->min_clock = limits->max_clock = index;
			limits->min_lane_count = limits->max_lane_count =
				intel_dp->compliance.test_lane_count;
		}
	}
}

static int intel_dp_output_bpp(const struct intel_crtc_state *crtc_state, int bpp)
{
	/*
	 * bpp value was assumed to RGB format. And YCbCr 4:2:0 output
	 * format of the number of bytes per pixel will be half the number
	 * of bytes of RGB pixel.
	 */
	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
		bpp /= 2;

	return bpp;
}

/* Optimize link config in order: max bpp, min clock, min lanes */
static int
intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
				  struct intel_crtc_state *pipe_config,
				  const struct link_config_limits *limits)
{
	struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
	int bpp, clock, lane_count;
	int mode_rate, link_clock, link_avail;

	/* bpp steps down by one bpc (2 bits * 3 channels) per iteration */
	for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
		int output_bpp = intel_dp_output_bpp(pipe_config, bpp);

		mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
						   output_bpp);

		for (clock = limits->min_clock; clock <= limits->max_clock; clock++) {
			for (lane_count = limits->min_lane_count;
			     lane_count <= limits->max_lane_count;
			     lane_count <<= 1) {
				link_clock = intel_dp->common_rates[clock];
				link_avail = intel_dp_max_data_rate(link_clock,
								    lane_count);

				if (mode_rate <= link_avail) {
					pipe_config->lane_count = lane_count;
					pipe_config->pipe_bpp = bpp;
					pipe_config->port_clock = link_clock;

					return 0;
				}
			}
		}
	}

	return -EINVAL;
}

/*
 * Pick the highest sink-supported DSC input bpc not exceeding
 * @dsc_max_bpc and return it as bpp (bpc * 3); 0 if none fits.
 */
static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc)
{
	int i, num_bpc;
	u8 dsc_bpc[3] = {0};

	num_bpc = drm_dp_dsc_sink_supported_input_bpcs(intel_dp->dsc_dpcd,
						       dsc_bpc);
	for (i = 0; i < num_bpc; i++) {
		if (dsc_max_bpc >= dsc_bpc[i])
			return dsc_bpc[i] * 3;
	}

	return 0;
}

#define DSC_SUPPORTED_VERSION_MIN	1

static int intel_dp_dsc_compute_params(struct intel_encoder *encoder,
				       struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct
drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
	u8 line_buf_depth;
	int ret;

	ret = intel_dsc_compute_params(encoder, crtc_state);
	if (ret)
		return ret;

	/*
	 * Slice Height of 8 works for all currently available panels. So start
	 * with that if pic_height is an integral multiple of 8. Eventually add
	 * logic to try multiple slice heights.
	 */
	if (vdsc_cfg->pic_height % 8 == 0)
		vdsc_cfg->slice_height = 8;
	else if (vdsc_cfg->pic_height % 4 == 0)
		vdsc_cfg->slice_height = 4;
	else
		vdsc_cfg->slice_height = 2;

	/* DSC version as advertised in the sink's DSC DPCD (minor capped). */
	vdsc_cfg->dsc_version_major =
		(intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] &
		 DP_DSC_MAJOR_MASK) >> DP_DSC_MAJOR_SHIFT;
	vdsc_cfg->dsc_version_minor =
		min(DSC_SUPPORTED_VERSION_MIN,
		    (intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] &
		     DP_DSC_MINOR_MASK) >> DP_DSC_MINOR_SHIFT);

	vdsc_cfg->convert_rgb = intel_dp->dsc_dpcd[DP_DSC_DEC_COLOR_FORMAT_CAP - DP_DSC_SUPPORT] &
		DP_DSC_RGB;

	line_buf_depth = drm_dp_dsc_sink_line_buf_depth(intel_dp->dsc_dpcd);
	if (!line_buf_depth) {
		drm_dbg_kms(&i915->drm,
			    "DSC Sink Line Buffer Depth invalid\n");
		return -EINVAL;
	}

	/* Clamp/translate line buffer depth per DSC minor version. */
	if (vdsc_cfg->dsc_version_minor == 2)
		vdsc_cfg->line_buf_depth = (line_buf_depth == DSC_1_2_MAX_LINEBUF_DEPTH_BITS) ?
			DSC_1_2_MAX_LINEBUF_DEPTH_VAL : line_buf_depth;
	else
		vdsc_cfg->line_buf_depth = (line_buf_depth > DSC_1_1_MAX_LINEBUF_DEPTH_BITS) ?
			DSC_1_1_MAX_LINEBUF_DEPTH_BITS : line_buf_depth;

	vdsc_cfg->block_pred_enable =
		intel_dp->dsc_dpcd[DP_DSC_BLK_PREDICTION_SUPPORT - DP_DSC_SUPPORT] &
		DP_DSC_BLK_PREDICTION_IS_SUPPORTED;

	return drm_dsc_compute_rc_parameters(vdsc_cfg);
}

/*
 * Compute a DSC-compressed link configuration: pick the pipe bpp from the
 * sink's DSC capabilities, use the maximum link rate/lane count from
 * @limits, and derive the compressed bpp and slice count.
 */
static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
				       struct intel_crtc_state *pipe_config,
				       struct drm_connector_state *conn_state,
				       struct link_config_limits *limits)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	const struct drm_display_mode *adjusted_mode =
		&pipe_config->hw.adjusted_mode;
	u8 dsc_max_bpc;
	int pipe_bpp;
	int ret;

	/* FEC is enabled only on non-eDP links that support it. */
	pipe_config->fec_enable = !intel_dp_is_edp(intel_dp) &&
		intel_dp_supports_fec(intel_dp, pipe_config);

	if (!intel_dp_supports_dsc(intel_dp, pipe_config))
		return -EINVAL;

	/* Max DSC Input BPC for ICL is 10 and for TGL+ is 12 */
	if (INTEL_GEN(dev_priv) >= 12)
		dsc_max_bpc = min_t(u8, 12, conn_state->max_requested_bpc);
	else
		dsc_max_bpc = min_t(u8, 10,
				    conn_state->max_requested_bpc);

	pipe_bpp = intel_dp_dsc_compute_bpp(intel_dp, dsc_max_bpc);

	/* Min Input BPC for ICL+ is 8 */
	if (pipe_bpp < 8 * 3) {
		drm_dbg_kms(&dev_priv->drm,
			    "No DSC support for less than 8bpc\n");
		return -EINVAL;
	}

	/*
	 * For now enable DSC for max bpp, max link rate, max lane count.
	 * Optimize this later for the minimum possible link rate/lane count
	 * with DSC enabled for the requested mode.
	 */
	pipe_config->pipe_bpp = pipe_bpp;
	pipe_config->port_clock = intel_dp->common_rates[limits->max_clock];
	pipe_config->lane_count = limits->max_lane_count;

	if (intel_dp_is_edp(intel_dp)) {
		/* eDP: compressed bpp/slice count come straight from the DSC DPCD. */
		pipe_config->dsc.compressed_bpp =
			min_t(u16, drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4,
			      pipe_config->pipe_bpp);
		pipe_config->dsc.slice_count =
			drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
							true);
		if (!pipe_config->dsc.slice_count) {
			drm_dbg_kms(&dev_priv->drm, "Unsupported Slice Count %d\n",
				    pipe_config->dsc.slice_count);
			return -EINVAL;
		}
	} else {
		u16 dsc_max_output_bpp;
		u8 dsc_dp_slice_count;

		/* DP: derive compressed bpp/slice count from link and mode. */
		dsc_max_output_bpp =
			intel_dp_dsc_get_output_bpp(dev_priv,
						    pipe_config->port_clock,
						    pipe_config->lane_count,
						    adjusted_mode->crtc_clock,
						    adjusted_mode->crtc_hdisplay);
		dsc_dp_slice_count =
			intel_dp_dsc_get_slice_count(intel_dp,
						     adjusted_mode->crtc_clock,
						     adjusted_mode->crtc_hdisplay);
		if (!dsc_max_output_bpp || !dsc_dp_slice_count) {
			drm_dbg_kms(&dev_priv->drm,
				    "Compressed BPP/Slice Count not supported\n");
			return -EINVAL;
		}
		pipe_config->dsc.compressed_bpp = min_t(u16,
							dsc_max_output_bpp >> 4,
							pipe_config->pipe_bpp);
		pipe_config->dsc.slice_count = dsc_dp_slice_count;
	}
	/*
	 * VDSC engine operates at 1 Pixel per clock, so if peak pixel rate
	 * is greater than the maximum Cdclock and if slice count is even
	 * then we need to use 2 VDSC instances.
	 */
	if (adjusted_mode->crtc_clock > dev_priv->max_cdclk_freq) {
		if (pipe_config->dsc.slice_count > 1) {
			pipe_config->dsc.dsc_split = true;
		} else {
			drm_dbg_kms(&dev_priv->drm,
				    "Cannot split stream to use 2 VDSC instances\n");
			return -EINVAL;
		}
	}

	ret = intel_dp_dsc_compute_params(&dig_port->base, pipe_config);
	if (ret < 0) {
		drm_dbg_kms(&dev_priv->drm,
			    "Cannot compute valid DSC parameters for Input Bpp = %d "
			    "Compressed BPP = %d\n",
			    pipe_config->pipe_bpp,
			    pipe_config->dsc.compressed_bpp);
		return ret;
	}

	pipe_config->dsc.compression_enable = true;
	drm_dbg_kms(&dev_priv->drm, "DP DSC computed with Input Bpp = %d "
		    "Compressed Bpp = %d Slice Count = %d\n",
		    pipe_config->pipe_bpp,
		    pipe_config->dsc.compressed_bpp,
		    pipe_config->dsc.slice_count);

	return 0;
}

/* Minimum link bpp: 6 bpc for RGB output, 8 bpc otherwise (YCbCr). */
int intel_dp_min_bpp(const struct intel_crtc_state *crtc_state)
{
	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_RGB)
		return 6 * 3;
	else
		return 8 * 3;
}

/*
 * Compute the DP link configuration (lane count, link rate, pipe bpp) for
 * @pipe_config, falling back to DSC compression when the mode does not
 * fit the available bandwidth (or when DSC is forced via force_dsc_en).
 */
static int
intel_dp_compute_link_config(struct intel_encoder *encoder,
			     struct intel_crtc_state *pipe_config,
			     struct drm_connector_state *conn_state)
{
	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
	const struct drm_display_mode *adjusted_mode =
		&pipe_config->hw.adjusted_mode;
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct link_config_limits limits;
	int common_len;
	int ret;

	common_len = intel_dp_common_len_rate_limit(intel_dp,
						    intel_dp->max_link_rate);

	/* No common link rates between source and sink */
	drm_WARN_ON(encoder->base.dev, common_len <= 0);

	limits.min_clock = 0;
	limits.max_clock = common_len - 1;

	limits.min_lane_count = 1;
	limits.max_lane_count = intel_dp_max_lane_count(intel_dp);

	limits.min_bpp = intel_dp_min_bpp(pipe_config);
	limits.max_bpp = intel_dp_max_bpp(intel_dp, pipe_config);

	if (intel_dp_is_edp(intel_dp)) {
		/*
		 * Use the maximum clock and number of lanes the eDP panel
		 * advertizes being capable of. The panels are generally
		 * designed to support only a single clock and lane
		 * configuration, and typically these values correspond to the
		 * native resolution of the panel.
		 */
		limits.min_lane_count = limits.max_lane_count;
		limits.min_clock = limits.max_clock;
	}

	intel_dp_adjust_compliance_config(intel_dp, pipe_config, &limits);

	drm_dbg_kms(&i915->drm, "DP link computation with max lane count %i "
		    "max rate %d max bpp %d pixel clock %iKHz\n",
		    limits.max_lane_count,
		    intel_dp->common_rates[limits.max_clock],
		    limits.max_bpp, adjusted_mode->crtc_clock);

	/*
	 * Optimize for slow and wide. This is the place to add alternative
	 * optimization policy.
	 */
	ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits);

	/* enable compression if the mode doesn't fit available BW */
	drm_dbg_kms(&i915->drm, "Force DSC en = %d\n", intel_dp->force_dsc_en);
	if (ret || intel_dp->force_dsc_en) {
		ret = intel_dp_dsc_compute_config(intel_dp, pipe_config,
						  conn_state, &limits);
		if (ret < 0)
			return ret;
	}

	if (pipe_config->dsc.compression_enable) {
		drm_dbg_kms(&i915->drm,
			    "DP lane count %d clock %d Input bpp %d Compressed bpp %d\n",
			    pipe_config->lane_count, pipe_config->port_clock,
			    pipe_config->pipe_bpp,
			    pipe_config->dsc.compressed_bpp);

		drm_dbg_kms(&i915->drm,
			    "DP link rate required %i available %i\n",
			    intel_dp_link_required(adjusted_mode->crtc_clock,
						   pipe_config->dsc.compressed_bpp),
			    intel_dp_max_data_rate(pipe_config->port_clock,
						   pipe_config->lane_count));
	} else {
		drm_dbg_kms(&i915->drm, "DP lane count %d clock %d bpp %d\n",
			    pipe_config->lane_count, pipe_config->port_clock,
			    pipe_config->pipe_bpp);

		drm_dbg_kms(&i915->drm,
			    "DP link rate required %i available %i\n",
			    intel_dp_link_required(adjusted_mode->crtc_clock,
						   pipe_config->pipe_bpp),
			    intel_dp_max_data_rate(pipe_config->port_clock,
						   pipe_config->lane_count));
	}
	return 0;
}

/*
 * For modes the sink supports only in 4:2:0: pick YCbCr 4:2:0 output, or
 * 4:4:4 output when the DFP can do the 4:4:4 -> 4:2:0 conversion itself.
 */
static int
intel_dp_ycbcr420_config(struct intel_dp *intel_dp,
			 struct intel_crtc_state *crtc_state,
			 const struct drm_connector_state *conn_state)
{
	struct drm_connector *connector = conn_state->connector;
	const struct drm_display_info *info = &connector->display_info;
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->hw.adjusted_mode;

	if (!connector->ycbcr_420_allowed)
		return 0;

	if (!drm_mode_is_420_only(info, adjusted_mode))
		return 0;

	if (intel_dp->dfp.ycbcr_444_to_420) {
		/* Output 4:4:4 and let the protocol converter downsample. */
		crtc_state->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
		return 0;
	}

	crtc_state->output_format = INTEL_OUTPUT_FORMAT_YCBCR420;

	return intel_pch_panel_fitting(crtc_state, conn_state);
}

bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state,
				  const struct drm_connector_state *conn_state)
{
	const struct intel_digital_connector_state *intel_conn_state =
		to_intel_digital_connector_state(conn_state);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->hw.adjusted_mode;

	/*
	 * Our YCbCr output is always limited range.
	 * crtc_state->limited_color_range only applies to RGB,
	 * and it must never be set for YCbCr or we risk setting
	 * some conflicting bits in PIPECONF which will mess up
	 * the colors on the monitor.
	 */
	if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
		return false;

	if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) {
		/*
		 * See:
		 * CEA-861-E - 5.1 Default Encoding Parameters
		 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
		 */
		return crtc_state->pipe_bpp != 18 &&
			drm_default_rgb_quant_range(adjusted_mode) ==
			HDMI_QUANTIZATION_RANGE_LIMITED;
	} else {
		return intel_conn_state->broadcast_rgb ==
			INTEL_BROADCAST_RGB_LIMITED;
	}
}

/* No audio on g4x; no audio on port A before gen12. */
static bool intel_dp_port_has_audio(struct drm_i915_private *dev_priv,
				    enum port port)
{
	if (IS_G4X(dev_priv))
		return false;
	if (INTEL_GEN(dev_priv) < 12 && port == PORT_A)
		return false;

	return true;
}

/*
 * Fill the pixel-encoding/colorimetry fields of a revision 0x5 VSC SDP
 * from the crtc and connector state.
 */
static void intel_dp_compute_vsc_colorimetry(const struct intel_crtc_state *crtc_state,
					     const struct drm_connector_state *conn_state,
					     struct drm_dp_vsc_sdp *vsc)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/*
	 * Prepare VSC Header for SU as per DP 1.4 spec, Table 2-118
	 * VSC SDP supporting 3D stereo, PSR2, and Pixel Encoding/
	 * Colorimetry Format indication.
	 */
	vsc->revision = 0x5;
	vsc->length = 0x13;

	/* DP 1.4a spec, Table 2-120 */
	switch (crtc_state->output_format) {
	case INTEL_OUTPUT_FORMAT_YCBCR444:
		vsc->pixelformat = DP_PIXELFORMAT_YUV444;
		break;
	case INTEL_OUTPUT_FORMAT_YCBCR420:
		vsc->pixelformat = DP_PIXELFORMAT_YUV420;
		break;
	case INTEL_OUTPUT_FORMAT_RGB:
	default:
		vsc->pixelformat = DP_PIXELFORMAT_RGB;
	}

	/* Map the connector's colorspace property to the DP colorimetry code. */
	switch (conn_state->colorspace) {
	case DRM_MODE_COLORIMETRY_BT709_YCC:
		vsc->colorimetry = DP_COLORIMETRY_BT709_YCC;
		break;
	case DRM_MODE_COLORIMETRY_XVYCC_601:
		vsc->colorimetry = DP_COLORIMETRY_XVYCC_601;
		break;
	case DRM_MODE_COLORIMETRY_XVYCC_709:
		vsc->colorimetry = DP_COLORIMETRY_XVYCC_709;
		break;
	case DRM_MODE_COLORIMETRY_SYCC_601:
		vsc->colorimetry = DP_COLORIMETRY_SYCC_601;
		break;
	case DRM_MODE_COLORIMETRY_OPYCC_601:
		vsc->colorimetry = DP_COLORIMETRY_OPYCC_601;
		break;
	case DRM_MODE_COLORIMETRY_BT2020_CYCC:
		vsc->colorimetry = DP_COLORIMETRY_BT2020_CYCC;
		break;
	case DRM_MODE_COLORIMETRY_BT2020_RGB:
		vsc->colorimetry = DP_COLORIMETRY_BT2020_RGB;
		break;
	case DRM_MODE_COLORIMETRY_BT2020_YCC:
		vsc->colorimetry = DP_COLORIMETRY_BT2020_YCC;
		break;
	case DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65:
	case DRM_MODE_COLORIMETRY_DCI_P3_RGB_THEATER:
		vsc->colorimetry = DP_COLORIMETRY_DCI_P3_RGB;
		break;
	default:
		/*
		 * RGB->YCBCR color conversion uses the BT.709
		 * color space.
		 */
		if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
			vsc->colorimetry = DP_COLORIMETRY_BT709_YCC;
		else
			vsc->colorimetry = DP_COLORIMETRY_DEFAULT;
		break;
	}

	vsc->bpc = crtc_state->pipe_bpp / 3;

	/* only RGB pixelformat supports 6 bpc */
	drm_WARN_ON(&dev_priv->drm,
		    vsc->bpc == 6 && vsc->pixelformat != DP_PIXELFORMAT_RGB);

	/* all YCbCr are always limited range */
	vsc->dynamic_range = DP_DYNAMIC_RANGE_CTA;
	vsc->content_type = DP_CONTENT_TYPE_NOT_DEFINED;
}

/* Build the VSC SDP for the crtc state, unless PSR owns the VSC SDP. */
static void intel_dp_compute_vsc_sdp(struct intel_dp *intel_dp,
				     struct intel_crtc_state *crtc_state,
				     const struct drm_connector_state *conn_state)
{
	struct drm_dp_vsc_sdp *vsc = &crtc_state->infoframes.vsc;

	/* When a crtc state has PSR, VSC SDP will be handled by PSR routine */
	if (crtc_state->has_psr)
		return;

	if (!intel_dp_needs_vsc_sdp(crtc_state, conn_state))
		return;

	crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
	vsc->sdp_type = DP_SDP_VSC;
	intel_dp_compute_vsc_colorimetry(crtc_state, conn_state,
					 &crtc_state->infoframes.vsc);
}

/*
 * Build the PSR variant of the VSC SDP; the header revision/length depend
 * on whether PSR2 and colorimetry support are active.
 */
void intel_dp_compute_psr_vsc_sdp(struct intel_dp *intel_dp,
				  const struct intel_crtc_state *crtc_state,
				  const struct drm_connector_state *conn_state,
				  struct drm_dp_vsc_sdp *vsc)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	vsc->sdp_type = DP_SDP_VSC;

	if (dev_priv->psr.psr2_enabled) {
		if (dev_priv->psr.colorimetry_support &&
		    intel_dp_needs_vsc_sdp(crtc_state, conn_state)) {
			/* [PSR2, +Colorimetry] */
			intel_dp_compute_vsc_colorimetry(crtc_state, conn_state,
							 vsc);
		} else {
			/*
			 * [PSR2, -Colorimetry]
			 * Prepare VSC Header for SU as per eDP 1.4 spec, Table 6-11
			 * 3D stereo + PSR/PSR2 + Y-coordinate.
			 */
			vsc->revision = 0x4;
			vsc->length = 0xe;
		}
	} else {
		/*
		 * [PSR1]
		 * Prepare VSC Header for SU as per DP 1.4 spec, Table 2-118
		 * VSC SDP supporting 3D stereo + PSR (applies to eDP v1.3 or
		 * higher).
		 */
		vsc->revision = 0x2;
		vsc->length = 0x8;
	}
}

/*
 * Build the HDR metadata (DRM) infoframe SDP from the connector's
 * hdr_output_metadata property, if one is set.
 */
static void
intel_dp_compute_hdr_metadata_infoframe_sdp(struct intel_dp *intel_dp,
					    struct intel_crtc_state *crtc_state,
					    const struct drm_connector_state *conn_state)
{
	int ret;
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct hdmi_drm_infoframe *drm_infoframe = &crtc_state->infoframes.drm.drm;

	if (!conn_state->hdr_output_metadata)
		return;

	ret = drm_hdmi_infoframe_set_hdr_metadata(drm_infoframe, conn_state);

	if (ret) {
		drm_dbg_kms(&dev_priv->drm, "couldn't set HDR metadata in infoframe\n");
		return;
	}

	crtc_state->infoframes.enable |=
		intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA);
}

/* Enable DRRS (seamless downclocking) when supported and PSR is not in use. */
static void
intel_dp_drrs_compute_config(struct intel_dp *intel_dp,
			     struct intel_crtc_state *pipe_config,
			     int output_bpp, bool constant_n)
{
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	/*
	 * DRRS and PSR can't be enable together, so giving preference to PSR
	 * as it allows more power-savings by complete shutting down display,
	 * so to guarantee this, intel_dp_drrs_compute_config() must be called
	 * after intel_psr_compute_config().
	 */
	if (pipe_config->has_psr)
		return;

	if (!intel_connector->panel.downclock_mode ||
	    dev_priv->drrs.type != SEAMLESS_DRRS_SUPPORT)
		return;

	pipe_config->has_drrs = true;
	/* Precompute M/N values (dp_m2_n2) for the downclocked mode. */
	intel_link_compute_m_n(output_bpp, pipe_config->lane_count,
			       intel_connector->panel.downclock_mode->clock,
			       pipe_config->port_clock, &pipe_config->dp_m2_n2,
			       constant_n, pipe_config->fec_enable);
}

/*
 * Main DP .compute_config() hook: determine output format, audio, panel
 * fitting, link configuration, color range, M/N values and SDPs for the
 * new crtc state.
 */
int
intel_dp_compute_config(struct intel_encoder *encoder,
			struct intel_crtc_state *pipe_config,
			struct drm_connector_state *conn_state)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct intel_lspcon *lspcon = enc_to_intel_lspcon(encoder);
	enum port port = encoder->port;
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	struct intel_digital_connector_state *intel_conn_state =
		to_intel_digital_connector_state(conn_state);
	bool constant_n = drm_dp_has_quirk(&intel_dp->desc, 0,
					   DP_DPCD_QUIRK_CONSTANT_N);
	int ret = 0, output_bpp;

	if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && port != PORT_A)
		pipe_config->has_pch_encoder = true;

	pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;

	/* LSPCON handles its own YCbCr 4:2:0 configuration. */
	if (lspcon->active)
		lspcon_ycbcr420_config(&intel_connector->base, pipe_config);
	else
		ret = intel_dp_ycbcr420_config(intel_dp, pipe_config,
					       conn_state);
	if (ret)
		return ret;

	if (!intel_dp_port_has_audio(dev_priv, port))
		pipe_config->has_audio = false;
	else if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO)
		pipe_config->has_audio = intel_dp->has_audio;
	else
		pipe_config->has_audio = intel_conn_state->force_audio == HDMI_AUDIO_ON;

	if (intel_dp_is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
		intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
				       adjusted_mode);

		if (HAS_GMCH(dev_priv))
			ret = intel_gmch_panel_fitting(pipe_config, conn_state);
		else
			ret = intel_pch_panel_fitting(pipe_config, conn_state);
		if (ret)
			return ret;
	}

	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
		return -EINVAL;

	if (HAS_GMCH(dev_priv) &&
	    adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
		return -EINVAL;

	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
		return -EINVAL;

	if (intel_dp_hdisplay_bad(dev_priv, adjusted_mode->crtc_hdisplay))
		return -EINVAL;

	ret = intel_dp_compute_link_config(encoder, pipe_config, conn_state);
	if (ret < 0)
		return ret;

	pipe_config->limited_color_range =
		intel_dp_limited_color_range(pipe_config, conn_state);

	if (pipe_config->dsc.compression_enable)
		output_bpp = pipe_config->dsc.compressed_bpp;
	else
		output_bpp = intel_dp_output_bpp(pipe_config, pipe_config->pipe_bpp);

	intel_link_compute_m_n(output_bpp,
			       pipe_config->lane_count,
			       adjusted_mode->crtc_clock,
			       pipe_config->port_clock,
			       &pipe_config->dp_m_n,
			       constant_n, pipe_config->fec_enable);

	if (!HAS_DDI(dev_priv))
		intel_dp_set_clock(encoder, pipe_config);

	/* PSR must be computed before DRRS; DRRS defers to PSR when enabled. */
	intel_psr_compute_config(intel_dp, pipe_config);
	intel_dp_drrs_compute_config(intel_dp, pipe_config, output_bpp,
				     constant_n);
	intel_dp_compute_vsc_sdp(intel_dp, pipe_config, conn_state);
	intel_dp_compute_hdr_metadata_infoframe_sdp(intel_dp, pipe_config, conn_state);

	return 0;
}

/* Record the negotiated link parameters and mark the link as untrained. */
void intel_dp_set_link_params(struct intel_dp *intel_dp,
			      int link_rate, u8 lane_count,
			      bool link_mst)
{
	intel_dp->link_trained = false;
	intel_dp->link_rate = link_rate;
	intel_dp->lane_count = lane_count;
	intel_dp->link_mst = link_mst;
}

/* Program the DP port register (intel_dp->DP) for the upcoming enable. */
static void intel_dp_prepare(struct intel_encoder *encoder,
			     const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	enum port port = encoder->port;
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
	const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;

	intel_dp_set_link_params(intel_dp, pipe_config->port_clock,
				 pipe_config->lane_count,
				 intel_crtc_has_type(pipe_config,
						     INTEL_OUTPUT_DP_MST));

	/*
	 * There are four kinds of DP registers:
	 *
	 *	IBX PCH
	 *	SNB CPU
	 *	IVB CPU
	 *	CPT PCH
	 *
	 * IBX PCH and CPU are the same for almost everything,
	 * except that the CPU DP PLL is configured in this
	 * register
	 *
	 * CPT PCH is quite different, having many bits moved
	 * to the TRANS_DP_CTL register instead. That
	 * configuration happens (oddly) in ilk_pch_enable
	 */

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg) & DP_DETECTED;

	/* Handle DP bits in common between all three register formats */
	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	intel_dp->DP |= DP_PORT_WIDTH(pipe_config->lane_count);

	/* Split out the IBX/CPU vs CPT settings */

	if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) {
		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		intel_dp->DP |= DP_PIPE_SEL_IVB(crtc->pipe);
	} else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
		u32 trans_dp;

		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		/* On CPT the enhanced framing bit lives in TRANS_DP_CTL. */
		trans_dp = intel_de_read(dev_priv, TRANS_DP_CTL(crtc->pipe));
		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			trans_dp |= TRANS_DP_ENH_FRAMING;
		else
			trans_dp &= ~TRANS_DP_ENH_FRAMING;
		intel_de_write(dev_priv, TRANS_DP_CTL(crtc->pipe), trans_dp);
	} else {
		if (IS_G4X(dev_priv) && pipe_config->limited_color_range)
			intel_dp->DP |= DP_COLOR_RANGE_16_235;

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		if (IS_CHERRYVIEW(dev_priv))
			intel_dp->DP |= DP_PIPE_SEL_CHV(crtc->pipe);
		else
			intel_dp->DP |= DP_PIPE_SEL(crtc->pipe);
	}
}

/* PP_STATUS mask/value pairs describing panel power sequencer states. */
#define IDLE_ON_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
#define IDLE_ON_VALUE		(PP_ON | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)

#define IDLE_OFF_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | 0)
#define IDLE_OFF_VALUE		(0     | PP_SEQUENCE_NONE | 0                     | 0)

#define IDLE_CYCLE_MASK		(PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
#define IDLE_CYCLE_VALUE	(0     | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)

static void intel_pps_verify_state(struct intel_dp *intel_dp);

/*
 * Poll the panel power status register until (status & mask) == value,
 * logging an error on timeout. Caller must hold pps_mutex.
 */
static void wait_panel_status(struct intel_dp *intel_dp,
			      u32 mask,
			      u32 value)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	i915_reg_t pp_stat_reg, pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	intel_pps_verify_state(intel_dp);

	pp_stat_reg = _pp_stat_reg(intel_dp);
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	drm_dbg_kms(&dev_priv->drm,
		    "mask %08x value %08x status %08x control %08x\n",
		    mask, value,
		    intel_de_read(dev_priv, pp_stat_reg),
		    intel_de_read(dev_priv, pp_ctrl_reg));

	if (intel_de_wait_for_register(dev_priv, pp_stat_reg,
				       mask, value, 5000))
		drm_err(&dev_priv->drm,
			"Panel status timeout: status %08x control %08x\n",
			intel_de_read(dev_priv, pp_stat_reg),
			intel_de_read(dev_priv, pp_ctrl_reg));

	drm_dbg_kms(&dev_priv->drm, "Wait complete\n");
}

static void wait_panel_on(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	drm_dbg_kms(&i915->drm, "Wait for panel power on\n");
	wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
}

static void wait_panel_off(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	drm_dbg_kms(&i915->drm, "Wait for panel power off time\n");
	wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
}

static void wait_panel_power_cycle(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	ktime_t panel_power_on_time;
	s64
panel_power_off_duration;

	drm_dbg_kms(&i915->drm, "Wait for panel power cycle\n");

	/* take the difference of currrent time and panel power off time
	 * and then make panel wait for t11_t12 if needed. */
	panel_power_on_time = ktime_get_boottime();
	panel_power_off_duration = ktime_ms_delta(panel_power_on_time, intel_dp->panel_power_off_time);

	/* When we disable the VDD override bit last we have to do the manual
	 * wait. */
	if (panel_power_off_duration < (s64)intel_dp->panel_power_cycle_delay)
		wait_remaining_ms_from_jiffies(jiffies,
					       intel_dp->panel_power_cycle_delay - panel_power_off_duration);

	wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
}

/* Honour the backlight-on delay, measured from the last panel power on. */
static void wait_backlight_on(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
				       intel_dp->backlight_on_delay);
}

/* Honour the backlight-off delay, measured from the last backlight off. */
static void edp_wait_backlight_off(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
				       intel_dp->backlight_off_delay);
}

/* Read the current pp_control value, unlocking the register if it
 * is locked
 */

static u32 ilk_get_pp_control(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 control;

	lockdep_assert_held(&dev_priv->pps_mutex);

	control = intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp));
	/* On non-DDI platforms the register is expected to be unlocked. */
	if (drm_WARN_ON(&dev_priv->drm, !HAS_DDI(dev_priv) &&
			(control & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS)) {
		control &= ~PANEL_UNLOCK_MASK;
		control |= PANEL_UNLOCK_REGS;
	}
	return control;
}

/*
 * Must be paired with edp_panel_vdd_off().
 * Must hold pps_mutex around the whole on/off sequence.
 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
 */
static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	u32 pp;
	i915_reg_t pp_stat_reg, pp_ctrl_reg;
	/* Returned to the caller: whether a matching "off" is required. */
	bool need_to_disable = !intel_dp->want_panel_vdd;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!intel_dp_is_edp(intel_dp))
		return false;

	cancel_delayed_work(&intel_dp->panel_vdd_work);
	intel_dp->want_panel_vdd = true;

	if (edp_have_panel_vdd(intel_dp))
		return need_to_disable;

	intel_display_power_get(dev_priv,
				intel_aux_power_domain(dig_port));

	drm_dbg_kms(&dev_priv->drm, "Turning [ENCODER:%d:%s] VDD on\n",
		    dig_port->base.base.base.id,
		    dig_port->base.base.name);

	if (!edp_have_panel_power(intel_dp))
		wait_panel_power_cycle(intel_dp);

	pp = ilk_get_pp_control(intel_dp);
	pp |= EDP_FORCE_VDD;

	pp_stat_reg = _pp_stat_reg(intel_dp);
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	intel_de_write(dev_priv, pp_ctrl_reg, pp);
	intel_de_posting_read(dev_priv, pp_ctrl_reg);
	drm_dbg_kms(&dev_priv->drm, "PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
		    intel_de_read(dev_priv, pp_stat_reg),
		    intel_de_read(dev_priv, pp_ctrl_reg));
	/*
	 * If the panel wasn't on, delay before accessing aux channel
	 */
	if (!edp_have_panel_power(intel_dp)) {
		drm_dbg_kms(&dev_priv->drm,
			    "[ENCODER:%d:%s] panel power wasn't enabled\n",
			    dig_port->base.base.base.id,
			    dig_port->base.base.name);
		msleep(intel_dp->panel_power_up_delay);
	}

	return need_to_disable;
}

/*
 * Must be paired with intel_edp_panel_vdd_off() or
 * intel_edp_panel_off().
 * Nested calls to these functions are not allowed since
 * we drop the lock. Caller must use some higher level
 * locking to prevent nested calls from other threads.
 */
void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
{
	intel_wakeref_t wakeref;
	bool vdd;

	if (!intel_dp_is_edp(intel_dp))
		return;

	vdd = false;
	with_pps_lock(intel_dp, wakeref)
		vdd = edp_panel_vdd_on(intel_dp);
	/* Catch double "on" requests: VDD must not already be forced on. */
	I915_STATE_WARN(!vdd, "[ENCODER:%d:%s] VDD already requested on\n",
			dp_to_dig_port(intel_dp)->base.base.base.id,
			dp_to_dig_port(intel_dp)->base.base.name);
}

/*
 * Deassert the VDD force bit immediately and release the AUX power domain
 * reference that was taken when VDD was enabled.
 *
 * Caller must hold pps_mutex, and must no longer want VDD on (both are
 * asserted below). No-op if the hardware does not currently have VDD up.
 */
static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port =
		dp_to_dig_port(intel_dp);
	u32 pp;
	i915_reg_t pp_stat_reg, pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	drm_WARN_ON(&dev_priv->drm, intel_dp->want_panel_vdd);

	if (!edp_have_panel_vdd(intel_dp))
		return;

	drm_dbg_kms(&dev_priv->drm, "Turning [ENCODER:%d:%s] VDD off\n",
		    dig_port->base.base.base.id,
		    dig_port->base.base.name);

	pp = ilk_get_pp_control(intel_dp);
	pp &= ~EDP_FORCE_VDD;

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
	pp_stat_reg = _pp_stat_reg(intel_dp);

	intel_de_write(dev_priv, pp_ctrl_reg, pp);
	intel_de_posting_read(dev_priv, pp_ctrl_reg);

	/* Make sure sequencer is idle before allowing subsequent activity */
	drm_dbg_kms(&dev_priv->drm, "PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
		    intel_de_read(dev_priv, pp_stat_reg),
		    intel_de_read(dev_priv, pp_ctrl_reg));

	/*
	 * If panel power was already off, dropping VDD starts the power
	 * cycle window now — record the timestamp for wait_panel_power_cycle().
	 */
	if ((pp & PANEL_POWER_ON) == 0)
		intel_dp->panel_power_off_time = ktime_get_boottime();

	intel_display_power_put_unchecked(dev_priv,
					  intel_aux_power_domain(dig_port));
}

/*
 * Delayed-work callback: turn VDD off unless someone re-requested it
 * while the work was queued.
 */
static void edp_panel_vdd_work(struct work_struct *__work)
{
	struct intel_dp *intel_dp =
		container_of(to_delayed_work(__work),
			     struct intel_dp, panel_vdd_work);
	intel_wakeref_t wakeref;

	with_pps_lock(intel_dp, wakeref) {
		if (!intel_dp->want_panel_vdd)
			edp_panel_vdd_off_sync(intel_dp);
	}
}

/* Schedule a deferred VDD off via edp_panel_vdd_work(). */
static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
{
	unsigned long delay;

	/*
	 * Queue the timer to fire a long time from now (relative to the power
	 * down delay) to keep the panel power up across a sequence of
	 * operations.
	 */
	delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
	schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
}

/*
 * Must be paired with edp_panel_vdd_on().
 * Must hold pps_mutex around the whole on/off sequence.
 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
 */
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!intel_dp_is_edp(intel_dp))
		return;

	I915_STATE_WARN(!intel_dp->want_panel_vdd, "[ENCODER:%d:%s] VDD not forced on",
			dp_to_dig_port(intel_dp)->base.base.base.id,
			dp_to_dig_port(intel_dp)->base.base.name);

	intel_dp->want_panel_vdd = false;

	/* sync=true drops VDD right now; otherwise defer via delayed work. */
	if (sync)
		edp_panel_vdd_off_sync(intel_dp);
	else
		edp_panel_vdd_schedule_off(intel_dp);
}

/*
 * Turn the eDP panel power on and wait for it to come up.
 * Caller must hold pps_mutex (asserted below).
 */
static void edp_panel_on(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 pp;
	i915_reg_t pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!intel_dp_is_edp(intel_dp))
		return;

	drm_dbg_kms(&dev_priv->drm, "Turn [ENCODER:%d:%s] panel power on\n",
		    dp_to_dig_port(intel_dp)->base.base.base.id,
		    dp_to_dig_port(intel_dp)->base.base.name);

	if (drm_WARN(&dev_priv->drm, edp_have_panel_power(intel_dp),
		     "[ENCODER:%d:%s] panel power already on\n",
		     dp_to_dig_port(intel_dp)->base.base.base.id,
		     dp_to_dig_port(intel_dp)->base.base.name))
		return;

	/* Honour the panel's required off->on cycle time before powering up. */
	wait_panel_power_cycle(intel_dp);

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
	pp = ilk_get_pp_control(intel_dp);
	if (IS_GEN(dev_priv, 5)) {
		/* ILK workaround: disable reset around power sequence */
		pp &= ~PANEL_POWER_RESET;
		intel_de_write(dev_priv, pp_ctrl_reg, pp);
		intel_de_posting_read(dev_priv, pp_ctrl_reg);
	}

	pp |= PANEL_POWER_ON;
	if (!IS_GEN(dev_priv, 5))
		pp |= PANEL_POWER_RESET;

	intel_de_write(dev_priv, pp_ctrl_reg, pp);
	intel_de_posting_read(dev_priv, pp_ctrl_reg);

	wait_panel_on(intel_dp);
	/* Timestamp for the backlight-on delay (see wait_backlight_on()). */
	intel_dp->last_power_on = jiffies;

	if (IS_GEN(dev_priv, 5)) {
		pp |= PANEL_POWER_RESET; /* restore panel reset bit */
		intel_de_write(dev_priv, pp_ctrl_reg, pp);
		intel_de_posting_read(dev_priv, pp_ctrl_reg);
	}
}

/* Public wrapper: take the PPS lock and turn the eDP panel power on. */
void intel_edp_panel_on(struct intel_dp *intel_dp)
{
	intel_wakeref_t wakeref;

	if (!intel_dp_is_edp(intel_dp))
		return;

	with_pps_lock(intel_dp, wakeref)
		edp_panel_on(intel_dp);
}

/*
 * Turn the eDP panel power off and wait for it to go down.
 * Caller must hold pps_mutex; VDD is expected to still be forced on
 * (warned below) and its power domain reference is dropped here.
 */
static void edp_panel_off(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	u32 pp;
	i915_reg_t pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!intel_dp_is_edp(intel_dp))
		return;

	drm_dbg_kms(&dev_priv->drm, "Turn [ENCODER:%d:%s] panel power off\n",
		    dig_port->base.base.base.id, dig_port->base.base.name);

	drm_WARN(&dev_priv->drm, !intel_dp->want_panel_vdd,
		 "Need [ENCODER:%d:%s] VDD to turn off panel\n",
		 dig_port->base.base.base.id, dig_port->base.base.name);

	pp = ilk_get_pp_control(intel_dp);
	/* We need to switch off panel power _and_ force vdd, for otherwise some
	 * panels get very unhappy and cease to work.
	 */
	pp &= ~(PANEL_POWER_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
		EDP_BLC_ENABLE);

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	intel_dp->want_panel_vdd = false;

	intel_de_write(dev_priv, pp_ctrl_reg, pp);
	intel_de_posting_read(dev_priv, pp_ctrl_reg);

	wait_panel_off(intel_dp);
	/* Start of the power cycle window for wait_panel_power_cycle(). */
	intel_dp->panel_power_off_time = ktime_get_boottime();

	/* We got a reference when we enabled the VDD. */
	intel_display_power_put_unchecked(dev_priv, intel_aux_power_domain(dig_port));
}

/* Public wrapper: take the PPS lock and turn the eDP panel power off. */
void intel_edp_panel_off(struct intel_dp *intel_dp)
{
	intel_wakeref_t wakeref;

	if (!intel_dp_is_edp(intel_dp))
		return;

	with_pps_lock(intel_dp, wakeref)
		edp_panel_off(intel_dp);
}

/* Enable backlight in the panel power control. */
static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	intel_wakeref_t wakeref;

	/*
	 * If we enable the backlight right away following a panel power
	 * on, we may see slight flicker as the panel syncs with the eDP
	 * link. So delay a bit to make sure the image is solid before
	 * allowing it to appear.
	 */
	wait_backlight_on(intel_dp);

	with_pps_lock(intel_dp, wakeref) {
		i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
		u32 pp;

		pp = ilk_get_pp_control(intel_dp);
		pp |= EDP_BLC_ENABLE;

		intel_de_write(dev_priv, pp_ctrl_reg, pp);
		intel_de_posting_read(dev_priv, pp_ctrl_reg);
	}
}

/* Enable backlight PWM and backlight PP control.
 */
void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
			    const struct drm_connector_state *conn_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(conn_state->best_encoder));
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	if (!intel_dp_is_edp(intel_dp))
		return;

	drm_dbg_kms(&i915->drm, "\n");

	/* PWM first, then the PP control enable bit. */
	intel_panel_enable_backlight(crtc_state, conn_state);
	_intel_edp_backlight_on(intel_dp);
}

/* Disable backlight in the panel power control. */
static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	intel_wakeref_t wakeref;

	if (!intel_dp_is_edp(intel_dp))
		return;

	with_pps_lock(intel_dp, wakeref) {
		i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
		u32 pp;

		pp = ilk_get_pp_control(intel_dp);
		pp &= ~EDP_BLC_ENABLE;

		intel_de_write(dev_priv, pp_ctrl_reg, pp);
		intel_de_posting_read(dev_priv, pp_ctrl_reg);
	}

	/* Timestamp for the panel's required backlight-off delay. */
	intel_dp->last_backlight_off = jiffies;
	edp_wait_backlight_off(intel_dp);
}

/* Disable backlight PP control and backlight PWM. */
void intel_edp_backlight_off(const struct drm_connector_state *old_conn_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(old_conn_state->best_encoder));
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	if (!intel_dp_is_edp(intel_dp))
		return;

	drm_dbg_kms(&i915->drm, "\n");

	/* Reverse order of intel_edp_backlight_on(): PP control, then PWM. */
	_intel_edp_backlight_off(intel_dp);
	intel_panel_disable_backlight(old_conn_state);
}

/*
 * Hook for controlling the panel power control backlight through the bl_power
 * sysfs attribute. Take care to handle multiple calls.
 */
static void intel_edp_backlight_power(struct intel_connector *connector,
				      bool enable)
{
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	intel_wakeref_t wakeref;
	bool is_enabled;

	is_enabled = false;
	with_pps_lock(intel_dp, wakeref)
		is_enabled = ilk_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
	/* Already in the requested state - nothing to do. */
	if (is_enabled == enable)
		return;

	drm_dbg_kms(&i915->drm, "panel power control backlight %s\n",
		    enable ? "enable" : "disable");

	if (enable)
		_intel_edp_backlight_on(intel_dp);
	else
		_intel_edp_backlight_off(intel_dp);
}

/* Warn if the DP port enable bit does not match the expected state. */
static void assert_dp_port(struct intel_dp *intel_dp, bool state)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	bool cur_state = intel_de_read(dev_priv, intel_dp->output_reg) & DP_PORT_EN;

	I915_STATE_WARN(cur_state != state,
			"[ENCODER:%d:%s] state assertion failure (expected %s, current %s)\n",
			dig_port->base.base.base.id, dig_port->base.base.name,
			onoff(state), onoff(cur_state));
}
#define assert_dp_port_disabled(d) assert_dp_port((d), false)

/* Warn if the eDP PLL enable bit (in DP_A) does not match the expected state. */
static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)
{
	bool cur_state = intel_de_read(dev_priv, DP_A) & DP_PLL_ENABLE;

	I915_STATE_WARN(cur_state != state,
			"eDP PLL state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
#define assert_edp_pll_enabled(d) assert_edp_pll((d), true)
#define assert_edp_pll_disabled(d) assert_edp_pll((d), false)

/*
 * Enable the ILK eDP PLL for the requested link rate. Must be called with
 * the pipe, the port and the PLL itself still disabled (asserted below).
 */
static void ilk_edp_pll_on(struct intel_dp *intel_dp,
			   const struct intel_crtc_state *pipe_config)
{
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder);
	assert_dp_port_disabled(intel_dp);
	assert_edp_pll_disabled(dev_priv);

	drm_dbg_kms(&dev_priv->drm, "enabling eDP PLL for clock %d\n",
		    pipe_config->port_clock);

	/* Select the PLL frequency before enabling the PLL. */
	intel_dp->DP &= ~DP_PLL_FREQ_MASK;

	if (pipe_config->port_clock == 162000)
		intel_dp->DP |= DP_PLL_FREQ_162MHZ;
	else
		intel_dp->DP |= DP_PLL_FREQ_270MHZ;

	intel_de_write(dev_priv, DP_A, intel_dp->DP);
	intel_de_posting_read(dev_priv, DP_A);
	udelay(500);

	/*
	 * [DevILK] Work around required when enabling DP PLL
	 * while a pipe is enabled going to FDI:
	 * 1. Wait for the start of vertical blank on the enabled pipe going to FDI
	 * 2. Program DP PLL enable
	 */
	/* NOTE(review): !crtc->pipe picks "the other pipe" — assumes only two pipes on ILK. */
	if (IS_GEN(dev_priv, 5))
		intel_wait_for_vblank_if_active(dev_priv, !crtc->pipe);

	intel_dp->DP |= DP_PLL_ENABLE;

	intel_de_write(dev_priv, DP_A, intel_dp->DP);
	intel_de_posting_read(dev_priv, DP_A);
	udelay(200);
}

/*
 * Disable the ILK eDP PLL. The port must already be disabled and the PLL
 * still enabled (asserted below).
 */
static void ilk_edp_pll_off(struct intel_dp *intel_dp,
			    const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	assert_pipe_disabled(dev_priv, old_crtc_state->cpu_transcoder);
	assert_dp_port_disabled(intel_dp);
	assert_edp_pll_enabled(dev_priv);

	drm_dbg_kms(&dev_priv->drm, "disabling eDP PLL\n");

	intel_dp->DP &= ~DP_PLL_ENABLE;

	intel_de_write(dev_priv, DP_A, intel_dp->DP);
	intel_de_posting_read(dev_priv, DP_A);
	udelay(200);
}

/*
 * Return true if the sink is a DPCD 1.1 branch device with downstream HPD,
 * i.e. it may need to stay in D0 for HPD to keep working.
 */
static bool downstream_hpd_needs_d0(struct intel_dp *intel_dp)
{
	/*
	 * DPCD 1.2+ should support BRANCH_DEVICE_CTRL, and thus
	 * be capable of signalling downstream hpd with a long pulse.
	 * Whether or not that means D3 is safe to use is not clear,
	 * but let's assume so until proven otherwise.
	 *
	 * FIXME should really check all downstream ports...
	 */
	return intel_dp->dpcd[DP_DPCD_REV] == 0x11 &&
		drm_dp_is_branch(intel_dp->dpcd) &&
		intel_dp->downstream_ports[0] & DP_DS_PORT_HPD;
}

/*
 * Enable/disable DSC decompression in the sink via the DP_DSC_ENABLE DPCD
 * register. No-op unless DSC is enabled in the crtc state.
 */
void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp,
					   const struct intel_crtc_state *crtc_state,
					   bool enable)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	int ret;

	if (!crtc_state->dsc.compression_enable)
		return;

	ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_DSC_ENABLE,
				 enable ? DP_DECOMPRESSION_EN : 0);
	if (ret < 0)
		drm_dbg_kms(&i915->drm,
			    "Failed to %s sink decompression state\n",
			    enable ? "enable" : "disable");
}

/* If the device supports it, try to set the power state appropriately */
void intel_dp_set_power(struct intel_dp *intel_dp, u8 mode)
{
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
	int ret, i;

	/* Should have a valid DPCD by this point */
	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
		return;

	if (mode != DP_SET_POWER_D0) {
		/* Going to D3: skip if downstream HPD requires the sink in D0. */
		if (downstream_hpd_needs_d0(intel_dp))
			return;

		ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, mode);
	} else {
		struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);

		/*
		 * When turning on, we need to retry for 1ms to give the sink
		 * time to wake up.
		 */
		for (i = 0; i < 3; i++) {
			ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, mode);
			if (ret == 1)
				break;
			msleep(1);
		}

		/* LSPCON needs extra time to settle back into PCON mode. */
		if (ret == 1 && lspcon->active)
			lspcon_wait_pcon_mode(lspcon);
	}

	/* drm_dp_dpcd_writeb() returns the number of bytes written (1) on success. */
	if (ret != 1)
		drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] Set power to %s failed\n",
			    encoder->base.base.id, encoder->base.name,
			    mode == DP_SET_POWER_D0 ? "D0" : "D3");
}

/*
 * Find which pipe's transcoder has the given CPT DP port selected.
 * Returns false (and *pipe = PIPE_A as a safe default) if none does.
 */
static bool cpt_dp_port_selected(struct drm_i915_private *dev_priv,
				 enum port port, enum pipe *pipe)
{
	enum pipe p;

	for_each_pipe(dev_priv, p) {
		u32 val = intel_de_read(dev_priv, TRANS_DP_CTL(p));

		if ((val & TRANS_DP_PORT_SEL_MASK) == TRANS_DP_PORT_SEL(port)) {
			*pipe = p;
			return true;
		}
	}

	drm_dbg_kms(&dev_priv->drm, "No pipe for DP port %c found\n",
		    port_name(port));

	/* must initialize pipe to something for the asserts */
	*pipe = PIPE_A;

	return false;
}

/*
 * Read the DP port register and report whether the port is enabled;
 * also decode which pipe the port is (or was) attached to, using the
 * platform-specific pipe select encoding.
 */
bool intel_dp_port_enabled(struct drm_i915_private *dev_priv,
			   i915_reg_t dp_reg, enum port port,
			   enum pipe *pipe)
{
	bool ret;
	u32 val;

	val = intel_de_read(dev_priv, dp_reg);

	ret = val & DP_PORT_EN;

	/* asserts want to know the pipe even if the port is disabled */
	if (IS_IVYBRIDGE(dev_priv) && port == PORT_A)
		*pipe = (val & DP_PIPE_SEL_MASK_IVB) >> DP_PIPE_SEL_SHIFT_IVB;
	else if (HAS_PCH_CPT(dev_priv) && port != PORT_A)
		ret &= cpt_dp_port_selected(dev_priv, port, pipe);
	else if (IS_CHERRYVIEW(dev_priv))
		*pipe = (val & DP_PIPE_SEL_MASK_CHV) >> DP_PIPE_SEL_SHIFT_CHV;
	else
		*pipe = (val & DP_PIPE_SEL_MASK) >> DP_PIPE_SEL_SHIFT;

	return ret;
}

/*
 * Encoder ->get_hw_state hook: report whether the DP port is enabled and
 * on which pipe, holding the encoder's power domain only if it is already
 * enabled (no wakeup from here).
 */
static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
				  enum pipe *pipe)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp =
		enc_to_intel_dp(encoder);
	intel_wakeref_t wakeref;
	bool ret;

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     encoder->power_domain);
	if (!wakeref)
		return false;

	ret = intel_dp_port_enabled(dev_priv, intel_dp->output_reg,
				    encoder->port, pipe);

	intel_display_power_put(dev_priv, encoder->power_domain, wakeref);

	return ret;
}

/*
 * Encoder ->get_config hook: read back the DP port/transcoder registers
 * into the crtc state (output type, audio, sync polarity, lane count,
 * port clock and derived dotclock).
 */
static void intel_dp_get_config(struct intel_encoder *encoder,
				struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	u32 tmp, flags = 0;
	enum port port = encoder->port;
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);

	if (encoder->type == INTEL_OUTPUT_EDP)
		pipe_config->output_types |= BIT(INTEL_OUTPUT_EDP);
	else
		pipe_config->output_types |= BIT(INTEL_OUTPUT_DP);

	tmp = intel_de_read(dev_priv, intel_dp->output_reg);

	pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;

	/* On CPT the sync polarity lives in the transcoder, not the port. */
	if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
		u32 trans_dp = intel_de_read(dev_priv,
					     TRANS_DP_CTL(crtc->pipe));

		if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
			flags |= DRM_MODE_FLAG_PHSYNC;
		else
			flags |= DRM_MODE_FLAG_NHSYNC;

		if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH)
			flags |= DRM_MODE_FLAG_PVSYNC;
		else
			flags |= DRM_MODE_FLAG_NVSYNC;
	} else {
		if (tmp & DP_SYNC_HS_HIGH)
			flags |= DRM_MODE_FLAG_PHSYNC;
		else
			flags |= DRM_MODE_FLAG_NHSYNC;

		if (tmp & DP_SYNC_VS_HIGH)
			flags |= DRM_MODE_FLAG_PVSYNC;
		else
			flags |= DRM_MODE_FLAG_NVSYNC;
	}

	pipe_config->hw.adjusted_mode.flags |= flags;

	if (IS_G4X(dev_priv) && tmp & DP_COLOR_RANGE_16_235)
		pipe_config->limited_color_range = true;

	pipe_config->lane_count =
		((tmp & DP_PORT_WIDTH_MASK) >>
		 DP_PORT_WIDTH_SHIFT) + 1;

	intel_dp_get_m_n(crtc, pipe_config);

	/* Port A's link rate comes from the eDP PLL frequency select in DP_A. */
	if (port == PORT_A) {
		if ((intel_de_read(dev_priv, DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ)
			pipe_config->port_clock = 162000;
		else
			pipe_config->port_clock = 270000;
	}

	pipe_config->hw.adjusted_mode.crtc_clock =
		intel_dotclock_calculate(pipe_config->port_clock,
					 &pipe_config->dp_m_n);

	if (intel_dp_is_edp(intel_dp) && dev_priv->vbt.edp.bpp &&
	    pipe_config->pipe_bpp > dev_priv->vbt.edp.bpp) {
		/*
		 * This is a big fat ugly hack.
		 *
		 * Some machines in UEFI boot mode provide us a VBT that has 18
		 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
		 * unknown we fail to light up. Yet the same BIOS boots up with
		 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
		 * max, not what it tells us to use.
		 *
		 * Note: This will still be broken if the eDP panel is not lit
		 * up by the BIOS, and thus we can't get the mode at module
		 * load.
		 */
		drm_dbg_kms(&dev_priv->drm,
			    "pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
			    pipe_config->pipe_bpp, dev_priv->vbt.edp.bpp);
		dev_priv->vbt.edp.bpp = pipe_config->pipe_bpp;
	}
}

/*
 * Common DP disable: stop audio, turn off backlight and panel power
 * (with VDD held across the panel-off), and put the sink into D3.
 */
static void intel_disable_dp(struct intel_atomic_state *state,
			     struct intel_encoder *encoder,
			     const struct intel_crtc_state *old_crtc_state,
			     const struct drm_connector_state *old_conn_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

	intel_dp->link_trained = false;

	if (old_crtc_state->has_audio)
		intel_audio_codec_disable(encoder,
					  old_crtc_state, old_conn_state);

	/* Make sure the panel is off before trying to change the mode. But also
	 * ensure that we have vdd while we switch off the panel.
	 */
	intel_edp_panel_vdd_on(intel_dp);
	intel_edp_backlight_off(old_conn_state);
	intel_dp_set_power(intel_dp, DP_SET_POWER_D3);
	intel_edp_panel_off(intel_dp);
}

/* g4x ->disable hook: just the common DP disable sequence. */
static void g4x_disable_dp(struct intel_atomic_state *state,
			   struct intel_encoder *encoder,
			   const struct intel_crtc_state *old_crtc_state,
			   const struct drm_connector_state *old_conn_state)
{
	intel_disable_dp(state, encoder, old_crtc_state, old_conn_state);
}

/* VLV ->disable hook: just the common DP disable sequence. */
static void vlv_disable_dp(struct intel_atomic_state *state,
			   struct intel_encoder *encoder,
			   const struct intel_crtc_state *old_crtc_state,
			   const struct drm_connector_state *old_conn_state)
{
	intel_disable_dp(state, encoder, old_crtc_state, old_conn_state);
}

static void g4x_post_disable_dp(struct intel_atomic_state *state,
				struct intel_encoder *encoder,
				const struct intel_crtc_state *old_crtc_state,
				const struct drm_connector_state *old_conn_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	enum port port = encoder->port;

	/*
	 * Bspec does not list a specific disable sequence for g4x DP.
	 * Follow the ilk+ sequence (disable pipe before the port) for
	 * g4x DP as it does not suffer from underruns like the normal
	 * g4x modeset sequence (disable pipe after the port).
	 */
	intel_dp_link_down(encoder, old_crtc_state);

	/* Only ilk+ has port A */
	if (port == PORT_A)
		ilk_edp_pll_off(intel_dp, old_crtc_state);
}

/* VLV ->post_disable hook: take the link down after the pipe is off. */
static void vlv_post_disable_dp(struct intel_atomic_state *state,
				struct intel_encoder *encoder,
				const struct intel_crtc_state *old_crtc_state,
				const struct drm_connector_state *old_conn_state)
{
	intel_dp_link_down(encoder, old_crtc_state);
}

/* CHV ->post_disable hook: link down, then reset the PHY data lanes. */
static void chv_post_disable_dp(struct intel_atomic_state *state,
				struct intel_encoder *encoder,
				const struct intel_crtc_state *old_crtc_state,
				const struct drm_connector_state *old_conn_state)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);

	intel_dp_link_down(encoder, old_crtc_state);

	vlv_dpio_get(dev_priv);

	/* Assert data lane reset */
	chv_data_lane_soft_reset(encoder, old_crtc_state, true);

	vlv_dpio_put(dev_priv);
}

/*
 * Program the requested link training pattern into the CPT DP port
 * register. TPS3 is not supported by this hardware; TPS2 is used instead.
 */
static void
cpt_set_link_train(struct intel_dp *intel_dp,
		   u8 dp_train_pat)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 *DP = &intel_dp->DP;

	*DP &= ~DP_LINK_TRAIN_MASK_CPT;

	switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
	case DP_TRAINING_PATTERN_DISABLE:
		*DP |= DP_LINK_TRAIN_OFF_CPT;
		break;
	case DP_TRAINING_PATTERN_1:
		*DP |= DP_LINK_TRAIN_PAT_1_CPT;
		break;
	case DP_TRAINING_PATTERN_2:
		*DP |= DP_LINK_TRAIN_PAT_2_CPT;
		break;
	case DP_TRAINING_PATTERN_3:
		drm_dbg_kms(&dev_priv->drm,
			    "TPS3 not supported, using TPS2 instead\n");
		*DP |= DP_LINK_TRAIN_PAT_2_CPT;
		break;
	}

	intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
	intel_de_posting_read(dev_priv, intel_dp->output_reg);
}

/*
 * Program the requested link training pattern into the g4x DP port
 * register. TPS3 is not supported by this hardware; TPS2 is used instead.
 */
static void
g4x_set_link_train(struct intel_dp *intel_dp,
		   u8 dp_train_pat)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 *DP = &intel_dp->DP;

	*DP &= ~DP_LINK_TRAIN_MASK;

	switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
	case DP_TRAINING_PATTERN_DISABLE:
		*DP |= DP_LINK_TRAIN_OFF;
		break;
	case DP_TRAINING_PATTERN_1:
		*DP |= DP_LINK_TRAIN_PAT_1;
		break;
	case DP_TRAINING_PATTERN_2:
		*DP |= DP_LINK_TRAIN_PAT_2;
		break;
	case DP_TRAINING_PATTERN_3:
		drm_dbg_kms(&dev_priv->drm,
			    "TPS3 not supported, using TPS2 instead\n");
		*DP |= DP_LINK_TRAIN_PAT_2;
		break;
	}

	intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
	intel_de_posting_read(dev_priv, intel_dp->output_reg);
}

/* Enable the DP port with training pattern 1 selected. */
static void intel_dp_enable_port(struct intel_dp *intel_dp,
				 const struct intel_crtc_state *old_crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	/* enable with pattern 1 (as per spec) */

	intel_dp_program_link_training_pattern(intel_dp, DP_TRAINING_PATTERN_1);

	/*
	 * Magic for VLV/CHV. We _must_ first set up the register
	 * without actually enabling the port, and then do another
	 * write to enable the port. Otherwise link training will
	 * fail when the power sequencer is freshly used for this port.
	 */
	intel_dp->DP |= DP_PORT_EN;
	if (old_crtc_state->has_audio)
		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;

	intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
	intel_de_posting_read(dev_priv, intel_dp->output_reg);
}

/*
 * Configure a DPCD 1.3+ branch device's protocol converter: HDMI/DVI
 * output mode and YCbCr 4:4:4->4:2:0 conversion; 4:2:2 conversion is
 * always written as disabled here.
 */
void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp,
					   const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 tmp;

	if (intel_dp->dpcd[DP_DPCD_REV] < 0x13)
		return;

	if (!drm_dp_is_branch(intel_dp->dpcd))
		return;

	tmp = intel_dp->has_hdmi_sink ?
		DP_HDMI_DVI_OUTPUT_CONFIG : 0;

	if (drm_dp_dpcd_writeb(&intel_dp->aux,
			       DP_PROTOCOL_CONVERTER_CONTROL_0, tmp) != 1)
		drm_dbg_kms(&i915->drm, "Failed to set protocol converter HDMI mode to %s\n",
			    enableddisabled(intel_dp->has_hdmi_sink));

	tmp = crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444 &&
		intel_dp->dfp.ycbcr_444_to_420 ? DP_CONVERSION_TO_YCBCR420_ENABLE : 0;

	if (drm_dp_dpcd_writeb(&intel_dp->aux,
			       DP_PROTOCOL_CONVERTER_CONTROL_1, tmp) != 1)
		drm_dbg_kms(&i915->drm,
			    "Failed to set protocol converter YCbCr 4:2:0 conversion mode to %s\n",
			    enableddisabled(intel_dp->dfp.ycbcr_444_to_420));

	/* 4:2:2 conversion is always written as disabled. */
	tmp = 0;

	if (drm_dp_dpcd_writeb(&intel_dp->aux,
			       DP_PROTOCOL_CONVERTER_CONTROL_2, tmp) <= 0)
		drm_dbg_kms(&i915->drm,
			    "Failed to set protocol converter YCbCr 4:2:2 conversion mode to %s\n",
			    enableddisabled(false));
}

/*
 * Common DP enable: bring up the port (and eDP panel power) under the PPS
 * lock, wake the sink (D0), configure any protocol converter, train the
 * link and enable audio.
 */
static void intel_enable_dp(struct intel_atomic_state *state,
			    struct intel_encoder *encoder,
			    const struct intel_crtc_state *pipe_config,
			    const struct drm_connector_state *conn_state)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
	u32 dp_reg = intel_de_read(dev_priv, intel_dp->output_reg);
	enum pipe pipe = crtc->pipe;
	intel_wakeref_t wakeref;

	if (drm_WARN_ON(&dev_priv->drm, dp_reg & DP_PORT_EN))
		return;

	with_pps_lock(intel_dp, wakeref) {
		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
			vlv_init_panel_power_sequencer(encoder, pipe_config);

		intel_dp_enable_port(intel_dp, pipe_config);

		/* VDD held across panel-on, then released synchronously. */
		edp_panel_vdd_on(intel_dp);
		edp_panel_on(intel_dp);
		edp_panel_vdd_off(intel_dp, true);
	}

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		unsigned int lane_mask = 0x0;

		if (IS_CHERRYVIEW(dev_priv))
			lane_mask = intel_dp_unused_lane_mask(pipe_config->lane_count);

		vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
				    lane_mask);
	}

	intel_dp_set_power(intel_dp, DP_SET_POWER_D0);
	intel_dp_configure_protocol_converter(intel_dp, pipe_config);
	intel_dp_start_link_train(intel_dp);
	intel_dp_stop_link_train(intel_dp);

	if (pipe_config->has_audio) {
		drm_dbg(&dev_priv->drm, "Enabling DP audio on pipe %c\n",
			pipe_name(pipe));
		intel_audio_codec_enable(encoder, pipe_config, conn_state);
	}
}

/* g4x ->enable hook: common DP enable, then eDP backlight on. */
static void g4x_enable_dp(struct intel_atomic_state *state,
			  struct intel_encoder *encoder,
			  const struct intel_crtc_state *pipe_config,
			  const struct drm_connector_state *conn_state)
{
	intel_enable_dp(state, encoder, pipe_config, conn_state);
	intel_edp_backlight_on(pipe_config, conn_state);
}

/*
 * VLV ->enable hook: only the backlight — the port itself is enabled
 * earlier from the ->pre_enable hook on this platform.
 */
static void vlv_enable_dp(struct intel_atomic_state *state,
			  struct intel_encoder *encoder,
			  const struct intel_crtc_state *pipe_config,
			  const struct drm_connector_state *conn_state)
{
	intel_edp_backlight_on(pipe_config, conn_state);
}

/* g4x ->pre_enable hook: program the port and enable the eDP PLL (port A). */
static void g4x_pre_enable_dp(struct intel_atomic_state *state,
			      struct intel_encoder *encoder,
			      const struct intel_crtc_state *pipe_config,
			      const struct drm_connector_state *conn_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	enum port port = encoder->port;

	intel_dp_prepare(encoder, pipe_config);

	/* Only ilk+ has port A */
	if (port == PORT_A)
		ilk_edp_pll_on(intel_dp, pipe_config);
}

/*
 * Logically disconnect this port from its power sequencer: drop VDD,
 * clear the PPS port select and forget the pps_pipe assignment.
 * The port must be inactive (asserted below).
 */
static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	enum pipe pipe = intel_dp->pps_pipe;
	i915_reg_t pp_on_reg = PP_ON_DELAYS(pipe);
	drm_WARN_ON(&dev_priv->drm, intel_dp->active_pipe != INVALID_PIPE);

	if (drm_WARN_ON(&dev_priv->drm, pipe != PIPE_A && pipe != PIPE_B))
		return;

	edp_panel_vdd_off_sync(intel_dp);

	/*
	 * VLV seems to get confused when multiple power sequencers
	 * have the same port selected (even if only one has power/vdd
	 * enabled). The failure manifests as vlv_wait_port_ready() failing
	 * CHV on the other hand doesn't seem to mind having the same port
	 * selected in multiple power sequencers, but let's clear the
	 * port select always when logically disconnecting a power sequencer
	 * from a port.
	 */
	drm_dbg_kms(&dev_priv->drm,
		    "detaching pipe %c power sequencer from [ENCODER:%d:%s]\n",
		    pipe_name(pipe), dig_port->base.base.base.id,
		    dig_port->base.base.name);
	intel_de_write(dev_priv, pp_on_reg, 0);
	intel_de_posting_read(dev_priv, pp_on_reg);

	intel_dp->pps_pipe = INVALID_PIPE;
}

/*
 * Detach the given pipe's power sequencer from whichever DP encoder
 * currently owns it, so it can be reassigned. Caller must hold pps_mutex.
 */
static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	struct intel_encoder *encoder;

	lockdep_assert_held(&dev_priv->pps_mutex);

	for_each_intel_dp(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		drm_WARN(&dev_priv->drm, intel_dp->active_pipe == pipe,
			 "stealing pipe %c power sequencer from active [ENCODER:%d:%s]\n",
			 pipe_name(pipe), encoder->base.base.id,
			 encoder->base.name);

		if (intel_dp->pps_pipe != pipe)
			continue;

		drm_dbg_kms(&dev_priv->drm,
			    "stealing pipe %c power sequencer from [ENCODER:%d:%s]\n",
			    pipe_name(pipe), encoder->base.base.id,
			    encoder->base.name);

		/* make sure vdd is off before we steal it */
		vlv_detach_power_sequencer(intel_dp);
	}
}

/*
 * Bind the crtc's pipe power sequencer to this encoder: release any
 * previously used sequencer, steal the target one from other ports if
 * needed, and (for eDP) program the PPS registers for this pipe/port.
 * Caller must hold pps_mutex.
 */
static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
					   const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	lockdep_assert_held(&dev_priv->pps_mutex);

	drm_WARN_ON(&dev_priv->drm, intel_dp->active_pipe != INVALID_PIPE);

	if (intel_dp->pps_pipe != INVALID_PIPE &&
	    intel_dp->pps_pipe != crtc->pipe) {
		/*
		 * If another power sequencer was being used on this
		 * port previously make sure to turn off vdd there while
		 * we still have control of it.
		 */
		vlv_detach_power_sequencer(intel_dp);
	}

	/*
	 * We may be stealing the power
	 * sequencer from another port.
	 */
	vlv_steal_power_sequencer(dev_priv, crtc->pipe);

	intel_dp->active_pipe = crtc->pipe;

	if (!intel_dp_is_edp(intel_dp))
		return;

	/* now it's all ours */
	intel_dp->pps_pipe = crtc->pipe;

	drm_dbg_kms(&dev_priv->drm,
		    "initializing pipe %c power sequencer for [ENCODER:%d:%s]\n",
		    pipe_name(intel_dp->pps_pipe), encoder->base.base.id,
		    encoder->base.name);

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(intel_dp);
	intel_dp_init_panel_power_sequencer_registers(intel_dp, true);
}

/* VLV ->pre_enable hook: bring up the PHY, then do the common DP enable. */
static void vlv_pre_enable_dp(struct intel_atomic_state *state,
			      struct intel_encoder *encoder,
			      const struct intel_crtc_state *pipe_config,
			      const struct drm_connector_state *conn_state)
{
	vlv_phy_pre_encoder_enable(encoder, pipe_config);

	intel_enable_dp(state, encoder, pipe_config, conn_state);
}

/* VLV ->pre_pll_enable hook: program the port, then prep the PHY PLL. */
static void vlv_dp_pre_pll_enable(struct intel_atomic_state *state,
				  struct intel_encoder *encoder,
				  const struct intel_crtc_state *pipe_config,
				  const struct drm_connector_state *conn_state)
{
	intel_dp_prepare(encoder, pipe_config);

	vlv_phy_pre_pll_enable(encoder, pipe_config);
}
/*
 * CHV: power up the PHY and enable the port, then drop the override
 * keeping the second common lane powered.
 */
static void chv_pre_enable_dp(struct intel_atomic_state *state,
			      struct intel_encoder *encoder,
			      const struct intel_crtc_state *pipe_config,
			      const struct drm_connector_state *conn_state)
{
	chv_phy_pre_encoder_enable(encoder, pipe_config);

	intel_enable_dp(state, encoder, pipe_config, conn_state);

	/* Second common lane will stay alive on its own now */
	chv_phy_release_cl2_override(encoder);
}

/* CHV: program the DP port registers before the PLL is enabled. */
static void chv_dp_pre_pll_enable(struct intel_atomic_state *state,
				  struct intel_encoder *encoder,
				  const struct intel_crtc_state *pipe_config,
				  const struct drm_connector_state *conn_state)
{
	intel_dp_prepare(encoder, pipe_config);

	chv_phy_pre_pll_enable(encoder, pipe_config);
}

/* CHV: PHY cleanup after the PLL has been disabled. */
static void chv_dp_post_pll_disable(struct intel_atomic_state *state,
				    struct intel_encoder *encoder,
				    const struct intel_crtc_state *old_crtc_state,
				    const struct drm_connector_state *old_conn_state)
{
	chv_phy_post_pll_disable(encoder, old_crtc_state);
}

/*
 * Fetch AUX CH registers 0x202 - 0x207 which contain
 * link status information
 */
bool
intel_dp_get_link_status(struct intel_dp *intel_dp, u8 *link_status)
{
	return drm_dp_dpcd_read(&intel_dp->aux, DP_LANE0_1_STATUS, link_status,
				DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
}

/* Max. supported voltage swing level: 2. */
static u8 intel_dp_voltage_max_2(struct intel_dp *intel_dp)
{
	return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
}

/* Max. supported voltage swing level: 3. */
static u8 intel_dp_voltage_max_3(struct intel_dp *intel_dp)
{
	return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
}

/* Max. supported pre-emphasis level: 2. */
static u8 intel_dp_pre_empemph_max_2(struct intel_dp *intel_dp)
{
	return DP_TRAIN_PRE_EMPH_LEVEL_2;
}

/* Max. supported pre-emphasis level: 3. */
static u8 intel_dp_pre_empemph_max_3(struct intel_dp *intel_dp)
{
	return DP_TRAIN_PRE_EMPH_LEVEL_3;
}

/*
 * VLV: translate the requested vswing/pre-emphasis from train_set[0]
 * into DPIO PHY register values and program them.
 */
static void vlv_set_signal_levels(struct intel_dp *intel_dp)
{
	struct intel_encoder
*encoder = &dp_to_dig_port(intel_dp)->base;
	unsigned long demph_reg_value, preemph_reg_value,
		uniqtranscale_reg_value;
	u8 train_set = intel_dp->train_set[0];

	/* Outer switch: requested pre-emphasis; inner: requested vswing. */
	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
		preemph_reg_value = 0x0004000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x552AB83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5548B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			demph_reg_value = 0x2B245555;
			uniqtranscale_reg_value = 0x5560B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x5598DA3A;
			break;
		default:
			/* unsupported combination: leave the PHY untouched */
			return;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
		preemph_reg_value = 0x0002000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5552B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B404848;
			uniqtranscale_reg_value = 0x5580B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
		preemph_reg_value = 0x0000000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B305555;
			uniqtranscale_reg_value = 0x5570B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B2B4040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
		preemph_reg_value = 0x0006000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x1B405555;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return;
		}
		break;
	default:
		return;
	}

	vlv_set_phy_signal_level(encoder, demph_reg_value, preemph_reg_value,
				 uniqtranscale_reg_value, 0);
}

/*
 * CHV: translate the requested vswing/pre-emphasis from train_set[0]
 * into de-emphasis/margin values and program the PHY.
 */
static void chv_set_signal_levels(struct intel_dp *intel_dp)
{
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	u32 deemph_reg_value, margin_reg_value;
	bool uniq_trans_scale = false;
	u8 train_set = intel_dp->train_set[0];

	/* Outer switch: requested pre-emphasis; inner: requested vswing. */
	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 128;
			margin_reg_value = 52;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 128;
			margin_reg_value = 77;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			deemph_reg_value = 128;
			margin_reg_value = 102;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			deemph_reg_value = 128;
			margin_reg_value = 154;
			uniq_trans_scale = true;
			break;
		default:
			return;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 85;
			margin_reg_value = 78;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 85;
			margin_reg_value = 116;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			deemph_reg_value = 85;
			margin_reg_value = 154;
			break;
		default:
			return;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 64;
			margin_reg_value = 104;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 64;
			margin_reg_value = 154;
			break;
		default:
			return;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 43;
			margin_reg_value = 154;
			break;
		default:
			return;
		}
		break;
	default:
		return;
	}

	chv_set_phy_signal_level(encoder, deemph_reg_value,
				 margin_reg_value, uniq_trans_scale);
}

/*
 * g4x: map the requested vswing/pre-emphasis levels onto the DP port
 * register's voltage and pre-emphasis bits. Unknown values fall back
 * to the lowest level via the default cases.
 */
static u32 g4x_signal_levels(u8 train_set)
{
	u32 signal_levels = 0;

	switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
	default:
		signal_levels |= DP_VOLTAGE_0_4;
		break;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
		signal_levels |= DP_VOLTAGE_0_6;
		break;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
		signal_levels |= DP_VOLTAGE_0_8;
		break;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
		signal_levels |= DP_VOLTAGE_1_2;
		break;
	}
	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
	default:
		signal_levels |= DP_PRE_EMPHASIS_0;
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
		signal_levels |= DP_PRE_EMPHASIS_3_5;
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
		signal_levels |= DP_PRE_EMPHASIS_6;
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
		signal_levels |= DP_PRE_EMPHASIS_9_5;
		break;
	}
	return signal_levels;
}

/* g4x: update the cached DP port register value and write it out. */
static void
g4x_set_signal_levels(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u8 train_set = intel_dp->train_set[0];
	u32 signal_levels;

	signal_levels = g4x_signal_levels(train_set);

	drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n",
		    signal_levels);

	intel_dp->DP &=
~(DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK);
	intel_dp->DP |= signal_levels;

	intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
	intel_de_posting_read(dev_priv, intel_dp->output_reg);
}

/* SNB CPU eDP voltage swing and pre-emphasis control */
static u32 snb_cpu_edp_signal_levels(u8 train_set)
{
	u8 signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
					DP_TRAIN_PRE_EMPHASIS_MASK);

	switch (signal_levels) {
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
		return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
	default:
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
			      "0x%x\n", signal_levels);
		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
	}
}

/* SNB: update the cached DP port register with new eDP levels and write it. */
static void
snb_cpu_edp_set_signal_levels(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u8 train_set = intel_dp->train_set[0];
	u32 signal_levels;

	signal_levels = snb_cpu_edp_signal_levels(train_set);

	drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n",
		    signal_levels);

	intel_dp->DP &= ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
	intel_dp->DP |= signal_levels;

	intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
	intel_de_posting_read(dev_priv, intel_dp->output_reg);
}

/* IVB CPU eDP voltage swing and pre-emphasis control */
static u32 ivb_cpu_edp_signal_levels(u8 train_set)
{
	u8 signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
					DP_TRAIN_PRE_EMPHASIS_MASK);

	switch (signal_levels) {
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_400MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
		return EDP_LINK_TRAIN_400MV_6DB_IVB;

	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_600MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_600MV_3_5DB_IVB;

	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_800MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_800MV_3_5DB_IVB;

	default:
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
			      "0x%x\n", signal_levels);
		return EDP_LINK_TRAIN_500MV_0DB_IVB;
	}
}

/* IVB: update the cached DP port register with new eDP levels and write it. */
static void
ivb_cpu_edp_set_signal_levels(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u8 train_set = intel_dp->train_set[0];
	u32 signal_levels;

	signal_levels = ivb_cpu_edp_signal_levels(train_set);

	drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n",
		    signal_levels);

	intel_dp->DP &= ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
	intel_dp->DP |= signal_levels;

	intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
	intel_de_posting_read(dev_priv, intel_dp->output_reg);
}

/*
 * Log the vswing/pre-emphasis levels requested in train_set[0] and
 * apply them through the platform-specific set_signal_levels() hook.
 */
void intel_dp_set_signal_levels(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u8 train_set = intel_dp->train_set[0];

	drm_dbg_kms(&dev_priv->drm, "Using vswing level %d%s\n",
		    train_set & DP_TRAIN_VOLTAGE_SWING_MASK,
		    train_set & DP_TRAIN_MAX_SWING_REACHED ? " (max)" : "");
	drm_dbg_kms(&dev_priv->drm, "Using pre-emphasis level %d%s\n",
		    (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
		    DP_TRAIN_PRE_EMPHASIS_SHIFT,
		    train_set & DP_TRAIN_MAX_PRE_EMPHASIS_REACHED ?
		    " (max)" : "");

	intel_dp->set_signal_levels(intel_dp);
}

/*
 * Program a link training pattern through the platform set_link_train()
 * hook, logging the TPS number when an actual pattern is requested.
 */
void
intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
				       u8 dp_train_pat)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u8 train_pat_mask = drm_dp_training_pattern_mask(intel_dp->dpcd);

	if (dp_train_pat & train_pat_mask)
		drm_dbg_kms(&dev_priv->drm,
			    "Using DP training pattern TPS%d\n",
			    dp_train_pat & train_pat_mask);

	intel_dp->set_link_train(intel_dp, dp_train_pat);
}

/* Switch to idle pattern transmission, where the platform provides a hook. */
void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
{
	if (intel_dp->set_idle_link_train)
		intel_dp->set_idle_link_train(intel_dp);
}

/*
 * Take the DP link down: put the port into the idle training state,
 * then clear the port/audio enable bits, applying the IBX pipe-B
 * transcoder workaround where needed.
 */
static void
intel_dp_link_down(struct intel_encoder *encoder,
		   const struct intel_crtc_state *old_crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	enum port port = encoder->port;
	u32 DP = intel_dp->DP;

	/* Nothing to do if the port is already disabled. */
	if (drm_WARN_ON(&dev_priv->drm,
			(intel_de_read(dev_priv, intel_dp->output_reg) &
			 DP_PORT_EN) == 0))
		return;

	drm_dbg_kms(&dev_priv->drm, "\n");

	if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
	    (HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
		DP &= ~DP_LINK_TRAIN_MASK_CPT;
		DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
	} else {
		DP &= ~DP_LINK_TRAIN_MASK;
		DP |= DP_LINK_TRAIN_PAT_IDLE;
	}
	intel_de_write(dev_priv, intel_dp->output_reg, DP);
	intel_de_posting_read(dev_priv, intel_dp->output_reg);

	DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
	intel_de_write(dev_priv, intel_dp->output_reg, DP);
	intel_de_posting_read(dev_priv, intel_dp->output_reg);

	/*
	 * HW workaround for IBX, we need to move the port
	 * to transcoder A after disabling it to allow the
	 * matching HDMI port to be enabled on transcoder A.
	 */
	if (HAS_PCH_IBX(dev_priv) && crtc->pipe == PIPE_B && port != PORT_A) {
		/*
		 * We get CPU/PCH FIFO underruns on the other pipe when
		 * doing the workaround. Sweep them under the rug.
		 */
		intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
		intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);

		/* always enable with pattern 1 (as per spec) */
		DP &= ~(DP_PIPE_SEL_MASK | DP_LINK_TRAIN_MASK);
		DP |= DP_PORT_EN | DP_PIPE_SEL(PIPE_A) |
			DP_LINK_TRAIN_PAT_1;
		intel_de_write(dev_priv, intel_dp->output_reg, DP);
		intel_de_posting_read(dev_priv, intel_dp->output_reg);

		DP &= ~DP_PORT_EN;
		intel_de_write(dev_priv, intel_dp->output_reg, DP);
		intel_de_posting_read(dev_priv, intel_dp->output_reg);

		intel_wait_for_vblank_if_active(dev_priv, PIPE_A);
		intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
		intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
	}

	msleep(intel_dp->panel_power_down_delay);

	intel_dp->DP = DP;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		intel_wakeref_t wakeref;

		with_pps_lock(intel_dp, wakeref)
			intel_dp->active_pipe = INVALID_PIPE;
	}
}

/* Query the DPRX feature list for VSC SDP extension-for-colorimetry support. */
bool intel_dp_get_colorimetry_status(struct intel_dp
*intel_dp)
{
	u8 dprx = 0;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_DPRX_FEATURE_ENUMERATION_LIST,
			      &dprx) != 1)
		return false;
	return dprx & DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED;
}

/* Cache the sink's DSC and FEC capabilities from DPCD (eDP or DP rev >= 1.4). */
static void intel_dp_get_dsc_sink_cap(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	/*
	 * Clear the cached register set to avoid using stale values
	 * for the sinks that do not support DSC.
	 */
	memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd));

	/* Clear fec_capable to avoid using stale values */
	intel_dp->fec_capable = 0;

	/* Cache the DSC DPCD if eDP or DP rev >= 1.4 */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x14 ||
	    intel_dp->edp_dpcd[0] >= DP_EDP_14) {
		if (drm_dp_dpcd_read(&intel_dp->aux, DP_DSC_SUPPORT,
				     intel_dp->dsc_dpcd,
				     sizeof(intel_dp->dsc_dpcd)) < 0)
			drm_err(&i915->drm,
				"Failed to read DPCD register 0x%x\n",
				DP_DSC_SUPPORT);

		drm_dbg_kms(&i915->drm, "DSC DPCD: %*ph\n",
			    (int)sizeof(intel_dp->dsc_dpcd),
			    intel_dp->dsc_dpcd);

		/* FEC is supported only on DP 1.4 */
		if (!intel_dp_is_edp(intel_dp) &&
		    drm_dp_dpcd_readb(&intel_dp->aux, DP_FEC_CAPABILITY,
				      &intel_dp->fec_capable) < 0)
			drm_err(&i915->drm,
				"Failed to read FEC DPCD register\n");

		drm_dbg_kms(&i915->drm, "FEC CAPABILITY: %x\n",
			    intel_dp->fec_capable);
	}
}

/*
 * One-time eDP DPCD initialization: read the sink caps, the eDP
 * display control registers, PSR caps and the eDP 1.4+ link rate
 * table. Returns false if the base DPCD caps cannot be read.
 */
static bool
intel_edp_init_dpcd(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv =
		to_i915(dp_to_dig_port(intel_dp)->base.base.dev);

	/* this function is meant to be called only once */
	drm_WARN_ON(&dev_priv->drm, intel_dp->dpcd[DP_DPCD_REV] != 0);

	if (drm_dp_read_dpcd_caps(&intel_dp->aux, intel_dp->dpcd) != 0)
		return false;

	drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
			 drm_dp_is_branch(intel_dp->dpcd));

	/*
	 * Read the eDP display control registers.
	 *
	 * Do this independent of DP_DPCD_DISPLAY_CONTROL_CAPABLE bit in
	 * DP_EDP_CONFIGURATION_CAP, because some buggy displays do not have it
	 * set, but require eDP 1.4+ detection (e.g. for supported link rates
	 * method). The display control registers should read zero if they're
	 * not supported anyway.
	 */
	if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
			     intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
	    sizeof(intel_dp->edp_dpcd))
		drm_dbg_kms(&dev_priv->drm, "eDP DPCD: %*ph\n",
			    (int)sizeof(intel_dp->edp_dpcd),
			    intel_dp->edp_dpcd);

	/*
	 * This has to be called after intel_dp->edp_dpcd is filled, PSR checks
	 * for SET_POWER_CAPABLE bit in intel_dp->edp_dpcd[1]
	 */
	intel_psr_init_dpcd(intel_dp);

	/* Clear the default sink rates */
	intel_dp->num_sink_rates = 0;

	/* Read the eDP 1.4+ supported link rates. */
	if (intel_dp->edp_dpcd[0] >= DP_EDP_14) {
		__le16 sink_rates[DP_MAX_SUPPORTED_RATES];
		int i;

		drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES,
				 sink_rates, sizeof(sink_rates));

		/* The table is zero-terminated when shorter than the maximum. */
		for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
			int val = le16_to_cpu(sink_rates[i]);

			if (val == 0)
				break;

			/* Value read multiplied by 200kHz gives the per-lane
			 * link rate in kHz. The source rates are, however,
			 * stored in terms of LS_Clk kHz. The full conversion
			 * back to symbols is
			 * (val * 200kHz)*(8/10 ch. encoding)*(1/8 bit to Byte)
			 */
			intel_dp->sink_rates[i] = (val * 200) / 10;
		}
		intel_dp->num_sink_rates = i;
	}

	/*
	 * Use DP_LINK_RATE_SET if DP_SUPPORTED_LINK_RATES are available,
	 * default to DP_MAX_LINK_RATE and DP_LINK_BW_SET otherwise.
	 */
	if (intel_dp->num_sink_rates)
		intel_dp->use_rate_select = true;
	else
		intel_dp_set_sink_rates(intel_dp);

	intel_dp_set_common_rates(intel_dp);

	/* Read the eDP DSC DPCD registers */
	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
		intel_dp_get_dsc_sink_cap(intel_dp);

	return true;
}

/* Does this sink report a usable SINK_COUNT? Requires an attached connector. */
static bool
intel_dp_has_sink_count(struct intel_dp *intel_dp)
{
	if (!intel_dp->attached_connector)
		return false;

	return drm_dp_read_sink_count_cap(&intel_dp->attached_connector->base,
					  intel_dp->dpcd,
					  &intel_dp->desc);
}

/*
 * Refresh the sink's DPCD state: base caps, link rates (non-eDP only,
 * to avoid clobbering cached eDP rates), sink count and downstream
 * port info. Returns false when the sink looks absent/unusable.
 */
static bool
intel_dp_get_dpcd(struct intel_dp *intel_dp)
{
	int ret;

	if (drm_dp_read_dpcd_caps(&intel_dp->aux, intel_dp->dpcd))
		return false;

	/*
	 * Don't clobber cached eDP rates. Also skip re-reading
	 * the OUI/ID since we know it won't change.
	 */
	if (!intel_dp_is_edp(intel_dp)) {
		drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
				 drm_dp_is_branch(intel_dp->dpcd));

		intel_dp_set_sink_rates(intel_dp);
		intel_dp_set_common_rates(intel_dp);
	}

	if (intel_dp_has_sink_count(intel_dp)) {
		ret = drm_dp_read_sink_count(&intel_dp->aux);
		if (ret < 0)
			return false;

		/*
		 * Sink count can change between short pulse hpd hence
		 * a member variable in intel_dp will track any changes
		 * between short pulse interrupts.
		 */
		intel_dp->sink_count = ret;

		/*
		 * SINK_COUNT == 0 and DOWNSTREAM_PORT_PRESENT == 1 implies that
		 * a dongle is present but no display. Unless we require to know
		 * if a dongle is present or not, we don't need to update
		 * downstream port information. So, an early return here saves
		 * time from performing other operations which are not required.
		 */
		if (!intel_dp->sink_count)
			return false;
	}

	return drm_dp_read_downstream_info(&intel_dp->aux, intel_dp->dpcd,
					   intel_dp->downstream_ports) == 0;
}

/* Is MST usable? Requires the modparam, source support and the sink's MST cap. */
static bool
intel_dp_can_mst(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	return i915->params.enable_dp_mst &&
		intel_dp->can_mst &&
		drm_dp_read_mst_cap(&intel_dp->aux, intel_dp->dpcd);
}

/*
 * Decide whether to run this port in MST mode and update the MST
 * topology manager accordingly.
 */
static void
intel_dp_configure_mst(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_encoder *encoder =
		&dp_to_dig_port(intel_dp)->base;
	bool sink_can_mst = drm_dp_read_mst_cap(&intel_dp->aux, intel_dp->dpcd);

	drm_dbg_kms(&i915->drm,
		    "[ENCODER:%d:%s] MST support: port: %s, sink: %s, modparam: %s\n",
		    encoder->base.base.id, encoder->base.name,
		    yesno(intel_dp->can_mst), yesno(sink_can_mst),
		    yesno(i915->params.enable_dp_mst));

	if (!intel_dp->can_mst)
		return;

	intel_dp->is_mst = sink_can_mst &&
		i915->params.enable_dp_mst;

	drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
					intel_dp->is_mst);
}

/* Read the sink's event status indicator (ESI) block starting at SINK_COUNT_ESI. */
static bool
intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
{
	return drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT_ESI,
				sink_irq_vector, DP_DPRX_ESI_LEN) ==
		DP_DPRX_ESI_LEN;
}

bool
intel_dp_needs_vsc_sdp(const struct intel_crtc_state *crtc_state,
		       const struct drm_connector_state *conn_state)
{
	/*
	 * As per DP 1.4a spec section 2.2.4.3 [MSA Field for Indication
	 * of Color Encoding Format and Content Color Gamut], in order to
	 * send YCBCR 420 or HDR BT.2020 signals we should use DP VSC SDP.
	 */
	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
		return true;

	switch (conn_state->colorspace) {
	case DRM_MODE_COLORIMETRY_SYCC_601:
	case DRM_MODE_COLORIMETRY_OPYCC_601:
	case DRM_MODE_COLORIMETRY_BT2020_YCC:
	case DRM_MODE_COLORIMETRY_BT2020_RGB:
	case DRM_MODE_COLORIMETRY_BT2020_CYCC:
		return true;
	default:
		break;
	}

	return false;
}

/*
 * Pack a drm_dp_vsc_sdp into the wire-format dp_sdp buffer.
 * Returns the packed length (sizeof(struct dp_sdp)), or -ENOSPC when
 * the supplied buffer is too small.
 */
static ssize_t intel_dp_vsc_sdp_pack(const struct drm_dp_vsc_sdp *vsc,
				     struct dp_sdp *sdp, size_t size)
{
	size_t length = sizeof(struct dp_sdp);

	if (size < length)
		return -ENOSPC;

	memset(sdp, 0, size);

	/*
	 * Prepare VSC Header for SU as per DP 1.4a spec, Table 2-119
	 * VSC SDP Header Bytes
	 */
	sdp->sdp_header.HB0 = 0; /* Secondary-Data Packet ID = 0 */
	sdp->sdp_header.HB1 = vsc->sdp_type; /* Secondary-data Packet Type */
	sdp->sdp_header.HB2 = vsc->revision; /* Revision Number */
	sdp->sdp_header.HB3 = vsc->length; /* Number of Valid Data Bytes */

	/*
	 * Only revision 0x5 supports Pixel Encoding/Colorimetry Format as
	 * per DP 1.4a spec.
	 */
	if (vsc->revision != 0x5)
		goto out;

	/* VSC SDP Payload for DB16 through DB18 */
	/* Pixel Encoding and Colorimetry Formats */
	sdp->db[16] = (vsc->pixelformat & 0xf) << 4; /* DB16[7:4] */
	sdp->db[16] |= vsc->colorimetry & 0xf; /* DB16[3:0] */

	/* Component bit depth encoded into DB17[3:0] */
	switch (vsc->bpc) {
	case 6:
		/* 6bpc: 0x0 */
		break;
	case 8:
		sdp->db[17] = 0x1; /* DB17[3:0] */
		break;
	case 10:
		sdp->db[17] = 0x2;
		break;
	case 12:
		sdp->db[17] = 0x3;
		break;
	case 16:
		sdp->db[17] = 0x4;
		break;
	default:
		MISSING_CASE(vsc->bpc);
		break;
	}
	/* Dynamic Range and Component Bit Depth */
	if (vsc->dynamic_range == DP_DYNAMIC_RANGE_CTA)
		sdp->db[17] |= 0x80; /* DB17[7] */

	/* Content Type */
	sdp->db[18] = vsc->content_type & 0x7;

out:
	return length;
}

/*
 * Pack an HDR (DRM) static metadata infoframe into a DP SDP.
 * Returns the number of meaningful bytes packed, or -ENOSPC on
 * buffer-size/packing errors.
 */
static ssize_t
intel_dp_hdr_metadata_infoframe_sdp_pack(const struct hdmi_drm_infoframe *drm_infoframe,
					 struct dp_sdp *sdp,
					 size_t size)
{
	size_t length = sizeof(struct dp_sdp);
	const int infoframe_size = HDMI_INFOFRAME_HEADER_SIZE + HDMI_DRM_INFOFRAME_SIZE;
	unsigned char buf[HDMI_INFOFRAME_HEADER_SIZE + HDMI_DRM_INFOFRAME_SIZE];
	ssize_t len;

	if (size < length)
		return -ENOSPC;

	memset(sdp, 0, size);

	len = hdmi_drm_infoframe_pack_only(drm_infoframe, buf, sizeof(buf));
	if (len < 0) {
		DRM_DEBUG_KMS("buffer size is smaller than hdr metadata infoframe\n");
		return -ENOSPC;
	}

	if (len != infoframe_size) {
		DRM_DEBUG_KMS("wrong static hdr metadata size\n");
		return -ENOSPC;
	}

	/*
	 * Set up the infoframe sdp packet for HDR static metadata.
	 * Prepare VSC Header for SU as per DP 1.4a spec,
	 * Table 2-100 and Table 2-101
	 */

	/* Secondary-Data Packet ID, 00h for non-Audio INFOFRAME */
	sdp->sdp_header.HB0 = 0;
	/*
	 * Packet Type 80h + Non-audio INFOFRAME Type value
	 * HDMI_INFOFRAME_TYPE_DRM: 0x87
	 * - 80h + Non-audio INFOFRAME Type value
	 * - InfoFrame Type: 0x07
	 * [CTA-861-G Table-42 Dynamic Range and Mastering InfoFrame]
	 */
	sdp->sdp_header.HB1 = drm_infoframe->type;
	/*
	 * Least Significant Eight Bits of (Data Byte Count – 1)
	 * infoframe_size - 1
	 */
	sdp->sdp_header.HB2 = 0x1D;
	/* INFOFRAME SDP Version Number */
	sdp->sdp_header.HB3 = (0x13 << 2);
	/* CTA Header Byte 2 (INFOFRAME Version Number) */
	sdp->db[0] = drm_infoframe->version;
	/* CTA Header Byte 3 (Length of INFOFRAME): HDMI_DRM_INFOFRAME_SIZE */
	sdp->db[1] = drm_infoframe->length;
	/*
	 * Copy HDMI_DRM_INFOFRAME_SIZE size from a buffer after
	 * HDMI_INFOFRAME_HEADER_SIZE
	 */
	BUILD_BUG_ON(sizeof(sdp->db) < HDMI_DRM_INFOFRAME_SIZE + 2);
	memcpy(&sdp->db[2], &buf[HDMI_INFOFRAME_HEADER_SIZE],
	       HDMI_DRM_INFOFRAME_SIZE);

	/*
	 * Size of DP infoframe sdp packet for HDR static metadata consists of
	 * - DP SDP Header(struct dp_sdp_header): 4 bytes
	 * - Two Data Blocks: 2 bytes
	 *    CTA Header Byte2 (INFOFRAME Version Number)
	 *    CTA Header Byte3 (Length of INFOFRAME)
	 * - HDMI_DRM_INFOFRAME_SIZE: 26 bytes
	 *
	 * Prior to GEN11's GMP register size is identical to DP HDR static metadata
	 * infoframe size. But GEN11+ has larger than that size, write_infoframe
	 * will pad rest of the size.
	 */
	return sizeof(struct dp_sdp_header) + 2 + HDMI_DRM_INFOFRAME_SIZE;
}

/* Pack and write one SDP type (VSC or HDR gamut metadata), if it's enabled. */
static void intel_write_dp_sdp(struct intel_encoder *encoder,
			       const struct intel_crtc_state *crtc_state,
			       unsigned int type)
{
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct dp_sdp sdp = {};
	ssize_t len;

	if ((crtc_state->infoframes.enable &
	     intel_hdmi_infoframe_enable(type)) == 0)
		return;

	switch (type) {
	case DP_SDP_VSC:
		len = intel_dp_vsc_sdp_pack(&crtc_state->infoframes.vsc, &sdp,
					    sizeof(sdp));
		break;
	case HDMI_PACKET_TYPE_GAMUT_METADATA:
		len = intel_dp_hdr_metadata_infoframe_sdp_pack(&crtc_state->infoframes.drm.drm,
							       &sdp, sizeof(sdp));
		break;
	default:
		MISSING_CASE(type);
		return;
	}

	if (drm_WARN_ON(&dev_priv->drm, len < 0))
		return;

	dig_port->write_infoframe(encoder, crtc_state, type, &sdp, len);
}

/*
 * Pack and write the given VSC SDP unconditionally (no
 * infoframes.enable check, unlike intel_write_dp_sdp()).
 */
void intel_write_dp_vsc_sdp(struct intel_encoder *encoder,
			    const struct intel_crtc_state *crtc_state,
			    struct drm_dp_vsc_sdp *vsc)
{
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct dp_sdp sdp = {};
	ssize_t len;

	len = intel_dp_vsc_sdp_pack(vsc, &sdp, sizeof(sdp));

	if (drm_WARN_ON(&dev_priv->drm, len < 0))
		return;

	dig_port->write_infoframe(encoder, crtc_state, DP_SDP_VSC,
				  &sdp, len);
}

/*
 * Enable or disable DIP (SDP) transmission for this transcoder and,
 * when enabling, (re)write the VSC and HDR metadata SDPs.
 */
void intel_dp_set_infoframes(struct intel_encoder *encoder,
			     bool enable,
			     const struct intel_crtc_state *crtc_state,
			     const struct drm_connector_state *conn_state)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	i915_reg_t reg = HSW_TVIDEO_DIP_CTL(crtc_state->cpu_transcoder);
	u32 dip_enable = VIDEO_DIP_ENABLE_AVI_HSW | VIDEO_DIP_ENABLE_GCP_HSW |
		VIDEO_DIP_ENABLE_VS_HSW | VIDEO_DIP_ENABLE_GMP_HSW |
		VIDEO_DIP_ENABLE_SPD_HSW | VIDEO_DIP_ENABLE_DRM_GLK;
	u32 val = intel_de_read(dev_priv, reg);

	/* TODO: Add DSC case (DIP_ENABLE_PPS) */
	/* When PSR is enabled, this routine doesn't disable VSC DIP */
	if (intel_psr_enabled(intel_dp))
		val &= ~dip_enable;
	else
		val &= ~(dip_enable | VIDEO_DIP_ENABLE_VSC_HSW);

	if (!enable) {
		intel_de_write(dev_priv, reg, val);
		intel_de_posting_read(dev_priv, reg);
		return;
	}

	intel_de_write(dev_priv, reg, val);
	intel_de_posting_read(dev_priv, reg);

	/* When PSR is enabled, VSC SDP is handled by PSR routine */
	if (!intel_psr_enabled(intel_dp))
		intel_write_dp_sdp(encoder, crtc_state, DP_SDP_VSC);

	intel_write_dp_sdp(encoder, crtc_state, HDMI_PACKET_TYPE_GAMUT_METADATA);
}

/*
 * Unpack a wire-format VSC SDP into a drm_dp_vsc_sdp. Returns 0 on
 * success, -EINVAL for malformed or unsupported headers/payloads.
 */
static int intel_dp_vsc_sdp_unpack(struct drm_dp_vsc_sdp *vsc,
				   const void *buffer, size_t size)
{
	const struct dp_sdp *sdp = buffer;

	if (size < sizeof(struct dp_sdp))
		return -EINVAL;

	memset(vsc, 0, sizeof(*vsc));

	if (sdp->sdp_header.HB0 != 0)
		return -EINVAL;

	if (sdp->sdp_header.HB1 != DP_SDP_VSC)
		return -EINVAL;

	vsc->sdp_type = sdp->sdp_header.HB1;
	vsc->revision = sdp->sdp_header.HB2;
	vsc->length = sdp->sdp_header.HB3;

	if ((sdp->sdp_header.HB2 == 0x2 && sdp->sdp_header.HB3 == 0x8) ||
	    (sdp->sdp_header.HB2 == 0x4 && sdp->sdp_header.HB3 == 0xe)) {
		/*
		 * - HB2 = 0x2, HB3 = 0x8
		 *   VSC SDP supporting 3D stereo + PSR
		 * - HB2 = 0x4, HB3 = 0xe
		 *   VSC SDP supporting 3D stereo + PSR2 with Y-coordinate of
		 *   first scan line of the SU region (applies to eDP v1.4b
		 *   and higher).
		 */
		return 0;
	} else if (sdp->sdp_header.HB2 == 0x5 && sdp->sdp_header.HB3 == 0x13) {
		/*
		 * - HB2 = 0x5, HB3 = 0x13
		 *   VSC SDP supporting 3D stereo + PSR2 + Pixel Encoding/Colorimetry
		 *   Format.
		 */
		vsc->pixelformat = (sdp->db[16] >> 4) & 0xf;
		vsc->colorimetry = sdp->db[16] & 0xf;
		vsc->dynamic_range = (sdp->db[17] >> 7) & 0x1;

		/* DB17[2:0]: component bit depth (inverse of the pack mapping) */
		switch (sdp->db[17] & 0x7) {
		case 0x0:
			vsc->bpc = 6;
			break;
		case 0x1:
			vsc->bpc = 8;
			break;
		case 0x2:
			vsc->bpc = 10;
			break;
		case 0x3:
			vsc->bpc = 12;
			break;
		case 0x4:
			vsc->bpc = 16;
			break;
		default:
			MISSING_CASE(sdp->db[17] & 0x7);
			return -EINVAL;
		}

		vsc->content_type = sdp->db[18] & 0x7;
	} else {
		return -EINVAL;
	}

	return 0;
}

/*
 * Validate the SDP/CTA headers of an HDR static metadata SDP and
 * unpack the payload into an hdmi_drm_infoframe. Returns 0 or a
 * negative error code.
 */
static int
intel_dp_hdr_metadata_infoframe_sdp_unpack(struct hdmi_drm_infoframe *drm_infoframe,
					   const void *buffer, size_t size)
{
	int ret;

	const struct dp_sdp *sdp = buffer;

	if (size < sizeof(struct dp_sdp))
		return -EINVAL;

	if (sdp->sdp_header.HB0 != 0)
		return -EINVAL;

	if (sdp->sdp_header.HB1 != HDMI_INFOFRAME_TYPE_DRM)
		return -EINVAL;

	/*
	 * Least Significant Eight Bits of (Data Byte Count – 1)
	 * 1Dh (i.e., Data Byte Count = 30 bytes).
	 */
	if (sdp->sdp_header.HB2 != 0x1D)
		return -EINVAL;

	/* Most Significant Two Bits of (Data Byte Count – 1), Clear to 00b. */
	if ((sdp->sdp_header.HB3 & 0x3) != 0)
		return -EINVAL;

	/* INFOFRAME SDP Version Number */
	if (((sdp->sdp_header.HB3 >> 2) & 0x3f) != 0x13)
		return -EINVAL;

	/* CTA Header Byte 2 (INFOFRAME Version Number) */
	if (sdp->db[0] != 1)
		return -EINVAL;

	/* CTA Header Byte 3 (Length of INFOFRAME): HDMI_DRM_INFOFRAME_SIZE */
	if (sdp->db[1] != HDMI_DRM_INFOFRAME_SIZE)
		return -EINVAL;

	ret = hdmi_drm_infoframe_unpack_only(drm_infoframe, &sdp->db[2],
					     HDMI_DRM_INFOFRAME_SIZE);

	return ret;
}

/*
 * Read back and unpack the currently programmed VSC SDP, unless PSR
 * owns the VSC SDP or it is not enabled in the crtc state.
 */
static void intel_read_dp_vsc_sdp(struct intel_encoder *encoder,
				  struct intel_crtc_state *crtc_state,
				  struct drm_dp_vsc_sdp *vsc)
{
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	unsigned int type = DP_SDP_VSC;
	struct dp_sdp sdp = {};
	int ret;

	/* When PSR is enabled, VSC SDP is handled by PSR routine */
	if (intel_psr_enabled(intel_dp))
		return;

	if ((crtc_state->infoframes.enable &
	     intel_hdmi_infoframe_enable(type)) == 0)
		return;

	dig_port->read_infoframe(encoder, crtc_state, type, &sdp, sizeof(sdp));

	ret = intel_dp_vsc_sdp_unpack(vsc, &sdp, sizeof(sdp));

	if (ret)
		drm_dbg_kms(&dev_priv->drm, "Failed to unpack DP VSC SDP\n");
}

/* Read back and unpack the HDR static metadata SDP, if it is enabled. */
static void intel_read_dp_hdr_metadata_infoframe_sdp(struct intel_encoder *encoder,
						     struct intel_crtc_state *crtc_state,
						     struct hdmi_drm_infoframe *drm_infoframe)
{
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	unsigned int type = HDMI_PACKET_TYPE_GAMUT_METADATA;
	struct dp_sdp sdp = {};
	int ret;

	if ((crtc_state->infoframes.enable &
	     intel_hdmi_infoframe_enable(type)) == 0)
5244 return; 5245 5246 dig_port->read_infoframe(encoder, crtc_state, type, &sdp, 5247 sizeof(sdp)); 5248 5249 ret = intel_dp_hdr_metadata_infoframe_sdp_unpack(drm_infoframe, &sdp, 5250 sizeof(sdp)); 5251 5252 if (ret) 5253 drm_dbg_kms(&dev_priv->drm, 5254 "Failed to unpack DP HDR Metadata Infoframe SDP\n"); 5255} 5256 5257void intel_read_dp_sdp(struct intel_encoder *encoder, 5258 struct intel_crtc_state *crtc_state, 5259 unsigned int type) 5260{ 5261 if (encoder->type != INTEL_OUTPUT_DDI) 5262 return; 5263 5264 switch (type) { 5265 case DP_SDP_VSC: 5266 intel_read_dp_vsc_sdp(encoder, crtc_state, 5267 &crtc_state->infoframes.vsc); 5268 break; 5269 case HDMI_PACKET_TYPE_GAMUT_METADATA: 5270 intel_read_dp_hdr_metadata_infoframe_sdp(encoder, crtc_state, 5271 &crtc_state->infoframes.drm.drm); 5272 break; 5273 default: 5274 MISSING_CASE(type); 5275 break; 5276 } 5277} 5278 5279static u8 intel_dp_autotest_link_training(struct intel_dp *intel_dp) 5280{ 5281 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 5282 int status = 0; 5283 int test_link_rate; 5284 u8 test_lane_count, test_link_bw; 5285 /* (DP CTS 1.2) 5286 * 4.3.1.11 5287 */ 5288 /* Read the TEST_LANE_COUNT and TEST_LINK_RTAE fields (DP CTS 3.1.4) */ 5289 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LANE_COUNT, 5290 &test_lane_count); 5291 5292 if (status <= 0) { 5293 drm_dbg_kms(&i915->drm, "Lane count read failed\n"); 5294 return DP_TEST_NAK; 5295 } 5296 test_lane_count &= DP_MAX_LANE_COUNT_MASK; 5297 5298 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LINK_RATE, 5299 &test_link_bw); 5300 if (status <= 0) { 5301 drm_dbg_kms(&i915->drm, "Link Rate read failed\n"); 5302 return DP_TEST_NAK; 5303 } 5304 test_link_rate = drm_dp_bw_code_to_link_rate(test_link_bw); 5305 5306 /* Validate the requested link rate and lane count */ 5307 if (!intel_dp_link_params_valid(intel_dp, test_link_rate, 5308 test_lane_count)) 5309 return DP_TEST_NAK; 5310 5311 intel_dp->compliance.test_lane_count = test_lane_count; 
5312 intel_dp->compliance.test_link_rate = test_link_rate; 5313 5314 return DP_TEST_ACK; 5315} 5316 5317static u8 intel_dp_autotest_video_pattern(struct intel_dp *intel_dp) 5318{ 5319 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 5320 u8 test_pattern; 5321 u8 test_misc; 5322 __be16 h_width, v_height; 5323 int status = 0; 5324 5325 /* Read the TEST_PATTERN (DP CTS 3.1.5) */ 5326 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_PATTERN, 5327 &test_pattern); 5328 if (status <= 0) { 5329 drm_dbg_kms(&i915->drm, "Test pattern read failed\n"); 5330 return DP_TEST_NAK; 5331 } 5332 if (test_pattern != DP_COLOR_RAMP) 5333 return DP_TEST_NAK; 5334 5335 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_H_WIDTH_HI, 5336 &h_width, 2); 5337 if (status <= 0) { 5338 drm_dbg_kms(&i915->drm, "H Width read failed\n"); 5339 return DP_TEST_NAK; 5340 } 5341 5342 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_V_HEIGHT_HI, 5343 &v_height, 2); 5344 if (status <= 0) { 5345 drm_dbg_kms(&i915->drm, "V Height read failed\n"); 5346 return DP_TEST_NAK; 5347 } 5348 5349 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_MISC0, 5350 &test_misc); 5351 if (status <= 0) { 5352 drm_dbg_kms(&i915->drm, "TEST MISC read failed\n"); 5353 return DP_TEST_NAK; 5354 } 5355 if ((test_misc & DP_TEST_COLOR_FORMAT_MASK) != DP_COLOR_FORMAT_RGB) 5356 return DP_TEST_NAK; 5357 if (test_misc & DP_TEST_DYNAMIC_RANGE_CEA) 5358 return DP_TEST_NAK; 5359 switch (test_misc & DP_TEST_BIT_DEPTH_MASK) { 5360 case DP_TEST_BIT_DEPTH_6: 5361 intel_dp->compliance.test_data.bpc = 6; 5362 break; 5363 case DP_TEST_BIT_DEPTH_8: 5364 intel_dp->compliance.test_data.bpc = 8; 5365 break; 5366 default: 5367 return DP_TEST_NAK; 5368 } 5369 5370 intel_dp->compliance.test_data.video_pattern = test_pattern; 5371 intel_dp->compliance.test_data.hdisplay = be16_to_cpu(h_width); 5372 intel_dp->compliance.test_data.vdisplay = be16_to_cpu(v_height); 5373 /* Set test active flag here so userspace doesn't interrupt things */ 5374 
intel_dp->compliance.test_active = true; 5375 5376 return DP_TEST_ACK; 5377} 5378 5379static u8 intel_dp_autotest_edid(struct intel_dp *intel_dp) 5380{ 5381 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 5382 u8 test_result = DP_TEST_ACK; 5383 struct intel_connector *intel_connector = intel_dp->attached_connector; 5384 struct drm_connector *connector = &intel_connector->base; 5385 5386 if (intel_connector->detect_edid == NULL || 5387 connector->edid_corrupt || 5388 intel_dp->aux.i2c_defer_count > 6) { 5389 /* Check EDID read for NACKs, DEFERs and corruption 5390 * (DP CTS 1.2 Core r1.1) 5391 * 4.2.2.4 : Failed EDID read, I2C_NAK 5392 * 4.2.2.5 : Failed EDID read, I2C_DEFER 5393 * 4.2.2.6 : EDID corruption detected 5394 * Use failsafe mode for all cases 5395 */ 5396 if (intel_dp->aux.i2c_nack_count > 0 || 5397 intel_dp->aux.i2c_defer_count > 0) 5398 drm_dbg_kms(&i915->drm, 5399 "EDID read had %d NACKs, %d DEFERs\n", 5400 intel_dp->aux.i2c_nack_count, 5401 intel_dp->aux.i2c_defer_count); 5402 intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_FAILSAFE; 5403 } else { 5404 struct edid *block = intel_connector->detect_edid; 5405 5406 /* We have to write the checksum 5407 * of the last block read 5408 */ 5409 block += intel_connector->detect_edid->extensions; 5410 5411 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_EDID_CHECKSUM, 5412 block->checksum) <= 0) 5413 drm_dbg_kms(&i915->drm, 5414 "Failed to write EDID checksum\n"); 5415 5416 test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE; 5417 intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_PREFERRED; 5418 } 5419 5420 /* Set test active flag here so userspace doesn't interrupt things */ 5421 intel_dp->compliance.test_active = true; 5422 5423 return test_result; 5424} 5425 5426static u8 intel_dp_prepare_phytest(struct intel_dp *intel_dp) 5427{ 5428 struct drm_dp_phy_test_params *data = 5429 &intel_dp->compliance.test_data.phytest; 5430 5431 if (drm_dp_get_phy_test_pattern(&intel_dp->aux, data)) 
{ 5432 DRM_DEBUG_KMS("DP Phy Test pattern AUX read failure\n"); 5433 return DP_TEST_NAK; 5434 } 5435 5436 /* 5437 * link_mst is set to false to avoid executing mst related code 5438 * during compliance testing. 5439 */ 5440 intel_dp->link_mst = false; 5441 5442 return DP_TEST_ACK; 5443} 5444 5445static void intel_dp_phy_pattern_update(struct intel_dp *intel_dp) 5446{ 5447 struct drm_i915_private *dev_priv = 5448 to_i915(dp_to_dig_port(intel_dp)->base.base.dev); 5449 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 5450 struct drm_dp_phy_test_params *data = 5451 &intel_dp->compliance.test_data.phytest; 5452 struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc); 5453 enum pipe pipe = crtc->pipe; 5454 u32 pattern_val; 5455 5456 switch (data->phy_pattern) { 5457 case DP_PHY_TEST_PATTERN_NONE: 5458 DRM_DEBUG_KMS("Disable Phy Test Pattern\n"); 5459 intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 0x0); 5460 break; 5461 case DP_PHY_TEST_PATTERN_D10_2: 5462 DRM_DEBUG_KMS("Set D10.2 Phy Test Pattern\n"); 5463 intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 5464 DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_D10_2); 5465 break; 5466 case DP_PHY_TEST_PATTERN_ERROR_COUNT: 5467 DRM_DEBUG_KMS("Set Error Count Phy Test Pattern\n"); 5468 intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 5469 DDI_DP_COMP_CTL_ENABLE | 5470 DDI_DP_COMP_CTL_SCRAMBLED_0); 5471 break; 5472 case DP_PHY_TEST_PATTERN_PRBS7: 5473 DRM_DEBUG_KMS("Set PRBS7 Phy Test Pattern\n"); 5474 intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 5475 DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_PRBS7); 5476 break; 5477 case DP_PHY_TEST_PATTERN_80BIT_CUSTOM: 5478 /* 5479 * FIXME: Ideally pattern should come from DPCD 0x250. As 5480 * current firmware of DPR-100 could not set it, so hardcoding 5481 * now for complaince test. 
5482 */ 5483 DRM_DEBUG_KMS("Set 80Bit Custom Phy Test Pattern 0x3e0f83e0 0x0f83e0f8 0x0000f83e\n"); 5484 pattern_val = 0x3e0f83e0; 5485 intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 0), pattern_val); 5486 pattern_val = 0x0f83e0f8; 5487 intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 1), pattern_val); 5488 pattern_val = 0x0000f83e; 5489 intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 2), pattern_val); 5490 intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 5491 DDI_DP_COMP_CTL_ENABLE | 5492 DDI_DP_COMP_CTL_CUSTOM80); 5493 break; 5494 case DP_PHY_TEST_PATTERN_CP2520: 5495 /* 5496 * FIXME: Ideally pattern should come from DPCD 0x24A. As 5497 * current firmware of DPR-100 could not set it, so hardcoding 5498 * now for complaince test. 5499 */ 5500 DRM_DEBUG_KMS("Set HBR2 compliance Phy Test Pattern\n"); 5501 pattern_val = 0xFB; 5502 intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 5503 DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_HBR2 | 5504 pattern_val); 5505 break; 5506 default: 5507 WARN(1, "Invalid Phy Test Pattern\n"); 5508 } 5509} 5510 5511static void 5512intel_dp_autotest_phy_ddi_disable(struct intel_dp *intel_dp) 5513{ 5514 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 5515 struct drm_device *dev = dig_port->base.base.dev; 5516 struct drm_i915_private *dev_priv = to_i915(dev); 5517 struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc); 5518 enum pipe pipe = crtc->pipe; 5519 u32 trans_ddi_func_ctl_value, trans_conf_value, dp_tp_ctl_value; 5520 5521 trans_ddi_func_ctl_value = intel_de_read(dev_priv, 5522 TRANS_DDI_FUNC_CTL(pipe)); 5523 trans_conf_value = intel_de_read(dev_priv, PIPECONF(pipe)); 5524 dp_tp_ctl_value = intel_de_read(dev_priv, TGL_DP_TP_CTL(pipe)); 5525 5526 trans_ddi_func_ctl_value &= ~(TRANS_DDI_FUNC_ENABLE | 5527 TGL_TRANS_DDI_PORT_MASK); 5528 trans_conf_value &= ~PIPECONF_ENABLE; 5529 dp_tp_ctl_value &= ~DP_TP_CTL_ENABLE; 5530 5531 intel_de_write(dev_priv, PIPECONF(pipe), trans_conf_value); 5532 intel_de_write(dev_priv, 
TRANS_DDI_FUNC_CTL(pipe), 5533 trans_ddi_func_ctl_value); 5534 intel_de_write(dev_priv, TGL_DP_TP_CTL(pipe), dp_tp_ctl_value); 5535} 5536 5537static void 5538intel_dp_autotest_phy_ddi_enable(struct intel_dp *intel_dp, uint8_t lane_cnt) 5539{ 5540 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 5541 struct drm_device *dev = dig_port->base.base.dev; 5542 struct drm_i915_private *dev_priv = to_i915(dev); 5543 enum port port = dig_port->base.port; 5544 struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc); 5545 enum pipe pipe = crtc->pipe; 5546 u32 trans_ddi_func_ctl_value, trans_conf_value, dp_tp_ctl_value; 5547 5548 trans_ddi_func_ctl_value = intel_de_read(dev_priv, 5549 TRANS_DDI_FUNC_CTL(pipe)); 5550 trans_conf_value = intel_de_read(dev_priv, PIPECONF(pipe)); 5551 dp_tp_ctl_value = intel_de_read(dev_priv, TGL_DP_TP_CTL(pipe)); 5552 5553 trans_ddi_func_ctl_value |= TRANS_DDI_FUNC_ENABLE | 5554 TGL_TRANS_DDI_SELECT_PORT(port); 5555 trans_conf_value |= PIPECONF_ENABLE; 5556 dp_tp_ctl_value |= DP_TP_CTL_ENABLE; 5557 5558 intel_de_write(dev_priv, PIPECONF(pipe), trans_conf_value); 5559 intel_de_write(dev_priv, TGL_DP_TP_CTL(pipe), dp_tp_ctl_value); 5560 intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(pipe), 5561 trans_ddi_func_ctl_value); 5562} 5563 5564void intel_dp_process_phy_request(struct intel_dp *intel_dp) 5565{ 5566 struct drm_dp_phy_test_params *data = 5567 &intel_dp->compliance.test_data.phytest; 5568 u8 link_status[DP_LINK_STATUS_SIZE]; 5569 5570 if (!intel_dp_get_link_status(intel_dp, link_status)) { 5571 DRM_DEBUG_KMS("failed to get link status\n"); 5572 return; 5573 } 5574 5575 /* retrieve vswing & pre-emphasis setting */ 5576 intel_dp_get_adjust_train(intel_dp, link_status); 5577 5578 intel_dp_autotest_phy_ddi_disable(intel_dp); 5579 5580 intel_dp_set_signal_levels(intel_dp); 5581 5582 intel_dp_phy_pattern_update(intel_dp); 5583 5584 intel_dp_autotest_phy_ddi_enable(intel_dp, data->num_lanes); 5585 5586 
drm_dp_set_phy_test_pattern(&intel_dp->aux, data, 5587 intel_dp->dpcd[DP_DPCD_REV]); 5588} 5589 5590static u8 intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp) 5591{ 5592 u8 test_result; 5593 5594 test_result = intel_dp_prepare_phytest(intel_dp); 5595 if (test_result != DP_TEST_ACK) 5596 DRM_ERROR("Phy test preparation failed\n"); 5597 5598 intel_dp_process_phy_request(intel_dp); 5599 5600 return test_result; 5601} 5602 5603static void intel_dp_handle_test_request(struct intel_dp *intel_dp) 5604{ 5605 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 5606 u8 response = DP_TEST_NAK; 5607 u8 request = 0; 5608 int status; 5609 5610 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_REQUEST, &request); 5611 if (status <= 0) { 5612 drm_dbg_kms(&i915->drm, 5613 "Could not read test request from sink\n"); 5614 goto update_status; 5615 } 5616 5617 switch (request) { 5618 case DP_TEST_LINK_TRAINING: 5619 drm_dbg_kms(&i915->drm, "LINK_TRAINING test requested\n"); 5620 response = intel_dp_autotest_link_training(intel_dp); 5621 break; 5622 case DP_TEST_LINK_VIDEO_PATTERN: 5623 drm_dbg_kms(&i915->drm, "TEST_PATTERN test requested\n"); 5624 response = intel_dp_autotest_video_pattern(intel_dp); 5625 break; 5626 case DP_TEST_LINK_EDID_READ: 5627 drm_dbg_kms(&i915->drm, "EDID test requested\n"); 5628 response = intel_dp_autotest_edid(intel_dp); 5629 break; 5630 case DP_TEST_LINK_PHY_TEST_PATTERN: 5631 drm_dbg_kms(&i915->drm, "PHY_PATTERN test requested\n"); 5632 response = intel_dp_autotest_phy_pattern(intel_dp); 5633 break; 5634 default: 5635 drm_dbg_kms(&i915->drm, "Invalid test request '%02x'\n", 5636 request); 5637 break; 5638 } 5639 5640 if (response & DP_TEST_ACK) 5641 intel_dp->compliance.test_type = request; 5642 5643update_status: 5644 status = drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, response); 5645 if (status <= 0) 5646 drm_dbg_kms(&i915->drm, 5647 "Could not write test response to sink\n"); 5648} 5649 5650/** 5651 * intel_dp_check_mst_status - 
service any pending MST interrupts, check link status 5652 * @intel_dp: Intel DP struct 5653 * 5654 * Read any pending MST interrupts, call MST core to handle these and ack the 5655 * interrupts. Check if the main and AUX link state is ok. 5656 * 5657 * Returns: 5658 * - %true if pending interrupts were serviced (or no interrupts were 5659 * pending) w/o detecting an error condition. 5660 * - %false if an error condition - like AUX failure or a loss of link - is 5661 * detected, which needs servicing from the hotplug work. 5662 */ 5663static bool 5664intel_dp_check_mst_status(struct intel_dp *intel_dp) 5665{ 5666 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 5667 bool link_ok = true; 5668 5669 drm_WARN_ON_ONCE(&i915->drm, intel_dp->active_mst_links < 0); 5670 5671 for (;;) { 5672 /* 5673 * The +2 is because DP_DPRX_ESI_LEN is 14, but we then 5674 * pass in "esi+10" to drm_dp_channel_eq_ok(), which 5675 * takes a 6-byte array. So we actually need 16 bytes 5676 * here. 5677 * 5678 * Somebody who knows what the limits actually are 5679 * should check this, but for now this is at least 5680 * harmless and avoids a valid compiler warning about 5681 * using more of the array than we have allocated. 
5682 */ 5683 u8 esi[DP_DPRX_ESI_LEN+2] = {}; 5684 bool handled; 5685 int retry; 5686 5687 if (!intel_dp_get_sink_irq_esi(intel_dp, esi)) { 5688 drm_dbg_kms(&i915->drm, 5689 "failed to get ESI - device may have failed\n"); 5690 link_ok = false; 5691 5692 break; 5693 } 5694 5695 /* check link status - esi[10] = 0x200c */ 5696 if (intel_dp->active_mst_links > 0 && link_ok && 5697 !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) { 5698 drm_dbg_kms(&i915->drm, 5699 "channel EQ not ok, retraining\n"); 5700 link_ok = false; 5701 } 5702 5703 drm_dbg_kms(&i915->drm, "got esi %3ph\n", esi); 5704 5705 drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled); 5706 if (!handled) 5707 break; 5708 5709 for (retry = 0; retry < 3; retry++) { 5710 int wret; 5711 5712 wret = drm_dp_dpcd_write(&intel_dp->aux, 5713 DP_SINK_COUNT_ESI+1, 5714 &esi[1], 3); 5715 if (wret == 3) 5716 break; 5717 } 5718 } 5719 5720 return link_ok; 5721} 5722 5723static bool 5724intel_dp_needs_link_retrain(struct intel_dp *intel_dp) 5725{ 5726 u8 link_status[DP_LINK_STATUS_SIZE]; 5727 5728 if (!intel_dp->link_trained) 5729 return false; 5730 5731 /* 5732 * While PSR source HW is enabled, it will control main-link sending 5733 * frames, enabling and disabling it so trying to do a retrain will fail 5734 * as the link would or not be on or it could mix training patterns 5735 * and frame data at the same time causing retrain to fail. 5736 * Also when exiting PSR, HW will retrain the link anyways fixing 5737 * any link status error. 5738 */ 5739 if (intel_psr_enabled(intel_dp)) 5740 return false; 5741 5742 if (!intel_dp_get_link_status(intel_dp, link_status)) 5743 return false; 5744 5745 /* 5746 * Validate the cached values of intel_dp->link_rate and 5747 * intel_dp->lane_count before attempting to retrain. 
5748 */ 5749 if (!intel_dp_link_params_valid(intel_dp, intel_dp->link_rate, 5750 intel_dp->lane_count)) 5751 return false; 5752 5753 /* Retrain if Channel EQ or CR not ok */ 5754 return !drm_dp_channel_eq_ok(link_status, intel_dp->lane_count); 5755} 5756 5757static bool intel_dp_has_connector(struct intel_dp *intel_dp, 5758 const struct drm_connector_state *conn_state) 5759{ 5760 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 5761 struct intel_encoder *encoder; 5762 enum pipe pipe; 5763 5764 if (!conn_state->best_encoder) 5765 return false; 5766 5767 /* SST */ 5768 encoder = &dp_to_dig_port(intel_dp)->base; 5769 if (conn_state->best_encoder == &encoder->base) 5770 return true; 5771 5772 /* MST */ 5773 for_each_pipe(i915, pipe) { 5774 encoder = &intel_dp->mst_encoders[pipe]->base; 5775 if (conn_state->best_encoder == &encoder->base) 5776 return true; 5777 } 5778 5779 return false; 5780} 5781 5782static int intel_dp_prep_link_retrain(struct intel_dp *intel_dp, 5783 struct drm_modeset_acquire_ctx *ctx, 5784 u32 *crtc_mask) 5785{ 5786 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 5787 struct drm_connector_list_iter conn_iter; 5788 struct intel_connector *connector; 5789 int ret = 0; 5790 5791 *crtc_mask = 0; 5792 5793 if (!intel_dp_needs_link_retrain(intel_dp)) 5794 return 0; 5795 5796 drm_connector_list_iter_begin(&i915->drm, &conn_iter); 5797 for_each_intel_connector_iter(connector, &conn_iter) { 5798 struct drm_connector_state *conn_state = 5799 connector->base.state; 5800 struct intel_crtc_state *crtc_state; 5801 struct intel_crtc *crtc; 5802 5803 if (!intel_dp_has_connector(intel_dp, conn_state)) 5804 continue; 5805 5806 crtc = to_intel_crtc(conn_state->crtc); 5807 if (!crtc) 5808 continue; 5809 5810 ret = drm_modeset_lock(&crtc->base.mutex, ctx); 5811 if (ret) 5812 break; 5813 5814 crtc_state = to_intel_crtc_state(crtc->base.state); 5815 5816 drm_WARN_ON(&i915->drm, !intel_crtc_has_dp_encoder(crtc_state)); 5817 5818 if (!crtc_state->hw.active) 5819 
continue; 5820 5821 if (conn_state->commit && 5822 !try_wait_for_completion(&conn_state->commit->hw_done)) 5823 continue; 5824 5825 *crtc_mask |= drm_crtc_mask(&crtc->base); 5826 } 5827 drm_connector_list_iter_end(&conn_iter); 5828 5829 if (!intel_dp_needs_link_retrain(intel_dp)) 5830 *crtc_mask = 0; 5831 5832 return ret; 5833} 5834 5835static bool intel_dp_is_connected(struct intel_dp *intel_dp) 5836{ 5837 struct intel_connector *connector = intel_dp->attached_connector; 5838 5839 return connector->base.status == connector_status_connected || 5840 intel_dp->is_mst; 5841} 5842 5843int intel_dp_retrain_link(struct intel_encoder *encoder, 5844 struct drm_modeset_acquire_ctx *ctx) 5845{ 5846 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 5847 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 5848 struct intel_crtc *crtc; 5849 u32 crtc_mask; 5850 int ret; 5851 5852 if (!intel_dp_is_connected(intel_dp)) 5853 return 0; 5854 5855 ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex, 5856 ctx); 5857 if (ret) 5858 return ret; 5859 5860 ret = intel_dp_prep_link_retrain(intel_dp, ctx, &crtc_mask); 5861 if (ret) 5862 return ret; 5863 5864 if (crtc_mask == 0) 5865 return 0; 5866 5867 drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] retraining link\n", 5868 encoder->base.base.id, encoder->base.name); 5869 5870 for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) { 5871 const struct intel_crtc_state *crtc_state = 5872 to_intel_crtc_state(crtc->base.state); 5873 5874 /* Suppress underruns caused by re-training */ 5875 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false); 5876 if (crtc_state->has_pch_encoder) 5877 intel_set_pch_fifo_underrun_reporting(dev_priv, 5878 intel_crtc_pch_transcoder(crtc), false); 5879 } 5880 5881 intel_dp_start_link_train(intel_dp); 5882 intel_dp_stop_link_train(intel_dp); 5883 5884 for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) { 5885 const struct intel_crtc_state *crtc_state = 5886 
to_intel_crtc_state(crtc->base.state); 5887 5888 /* Keep underrun reporting disabled until things are stable */ 5889 intel_wait_for_vblank(dev_priv, crtc->pipe); 5890 5891 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true); 5892 if (crtc_state->has_pch_encoder) 5893 intel_set_pch_fifo_underrun_reporting(dev_priv, 5894 intel_crtc_pch_transcoder(crtc), true); 5895 } 5896 5897 return 0; 5898} 5899 5900/* 5901 * If display is now connected check links status, 5902 * there has been known issues of link loss triggering 5903 * long pulse. 5904 * 5905 * Some sinks (eg. ASUS PB287Q) seem to perform some 5906 * weird HPD ping pong during modesets. So we can apparently 5907 * end up with HPD going low during a modeset, and then 5908 * going back up soon after. And once that happens we must 5909 * retrain the link to get a picture. That's in case no 5910 * userspace component reacted to intermittent HPD dip. 5911 */ 5912static enum intel_hotplug_state 5913intel_dp_hotplug(struct intel_encoder *encoder, 5914 struct intel_connector *connector) 5915{ 5916 struct drm_modeset_acquire_ctx ctx; 5917 enum intel_hotplug_state state; 5918 int ret; 5919 5920 state = intel_encoder_hotplug(encoder, connector); 5921 5922 drm_modeset_acquire_init(&ctx, 0); 5923 5924 for (;;) { 5925 ret = intel_dp_retrain_link(encoder, &ctx); 5926 5927 if (ret == -EDEADLK) { 5928 drm_modeset_backoff(&ctx); 5929 continue; 5930 } 5931 5932 break; 5933 } 5934 5935 drm_modeset_drop_locks(&ctx); 5936 drm_modeset_acquire_fini(&ctx); 5937 drm_WARN(encoder->base.dev, ret, 5938 "Acquiring modeset locks failed with %i\n", ret); 5939 5940 /* 5941 * Keeping it consistent with intel_ddi_hotplug() and 5942 * intel_hdmi_hotplug(). 
5943 */ 5944 if (state == INTEL_HOTPLUG_UNCHANGED && !connector->hotplug_retries) 5945 state = INTEL_HOTPLUG_RETRY; 5946 5947 return state; 5948} 5949 5950static void intel_dp_check_service_irq(struct intel_dp *intel_dp) 5951{ 5952 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 5953 u8 val; 5954 5955 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11) 5956 return; 5957 5958 if (drm_dp_dpcd_readb(&intel_dp->aux, 5959 DP_DEVICE_SERVICE_IRQ_VECTOR, &val) != 1 || !val) 5960 return; 5961 5962 drm_dp_dpcd_writeb(&intel_dp->aux, DP_DEVICE_SERVICE_IRQ_VECTOR, val); 5963 5964 if (val & DP_AUTOMATED_TEST_REQUEST) 5965 intel_dp_handle_test_request(intel_dp); 5966 5967 if (val & DP_CP_IRQ) 5968 intel_hdcp_handle_cp_irq(intel_dp->attached_connector); 5969 5970 if (val & DP_SINK_SPECIFIC_IRQ) 5971 drm_dbg_kms(&i915->drm, "Sink specific irq unhandled\n"); 5972} 5973 5974/* 5975 * According to DP spec 5976 * 5.1.2: 5977 * 1. Read DPCD 5978 * 2. Configure link according to Receiver Capabilities 5979 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3 5980 * 4. Check link status on receipt of hot-plug interrupt 5981 * 5982 * intel_dp_short_pulse - handles short pulse interrupts 5983 * when full detection is not required. 5984 * Returns %true if short pulse is handled and full detection 5985 * is NOT required and %false otherwise. 5986 */ 5987static bool 5988intel_dp_short_pulse(struct intel_dp *intel_dp) 5989{ 5990 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 5991 u8 old_sink_count = intel_dp->sink_count; 5992 bool ret; 5993 5994 /* 5995 * Clearing compliance test variables to allow capturing 5996 * of values for next automated test request. 
5997 */ 5998 memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance)); 5999 6000 /* 6001 * Now read the DPCD to see if it's actually running 6002 * If the current value of sink count doesn't match with 6003 * the value that was stored earlier or dpcd read failed 6004 * we need to do full detection 6005 */ 6006 ret = intel_dp_get_dpcd(intel_dp); 6007 6008 if ((old_sink_count != intel_dp->sink_count) || !ret) { 6009 /* No need to proceed if we are going to do full detect */ 6010 return false; 6011 } 6012 6013 intel_dp_check_service_irq(intel_dp); 6014 6015 /* Handle CEC interrupts, if any */ 6016 drm_dp_cec_irq(&intel_dp->aux); 6017 6018 /* defer to the hotplug work for link retraining if needed */ 6019 if (intel_dp_needs_link_retrain(intel_dp)) 6020 return false; 6021 6022 intel_psr_short_pulse(intel_dp); 6023 6024 if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) { 6025 drm_dbg_kms(&dev_priv->drm, 6026 "Link Training Compliance Test requested\n"); 6027 /* Send a Hotplug Uevent to userspace to start modeset */ 6028 drm_kms_helper_hotplug_event(&dev_priv->drm); 6029 } 6030 6031 return true; 6032} 6033 6034/* XXX this is probably wrong for multiple downstream ports */ 6035static enum drm_connector_status 6036intel_dp_detect_dpcd(struct intel_dp *intel_dp) 6037{ 6038 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 6039 struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp); 6040 u8 *dpcd = intel_dp->dpcd; 6041 u8 type; 6042 6043 if (drm_WARN_ON(&i915->drm, intel_dp_is_edp(intel_dp))) 6044 return connector_status_connected; 6045 6046 if (lspcon->active) 6047 lspcon_resume(lspcon); 6048 6049 if (!intel_dp_get_dpcd(intel_dp)) 6050 return connector_status_disconnected; 6051 6052 /* if there's no downstream port, we're done */ 6053 if (!drm_dp_is_branch(dpcd)) 6054 return connector_status_connected; 6055 6056 /* If we're HPD-aware, SINK_COUNT changes dynamically */ 6057 if (intel_dp_has_sink_count(intel_dp) && 6058 intel_dp->downstream_ports[0] & 
	    DP_DS_PORT_HPD) {
		/* Branch device reports per-port HPD: trust its sink count. */
		return intel_dp->sink_count ?
			connector_status_connected : connector_status_disconnected;
	}

	/* An MST-capable branch device is treated as connected. */
	if (intel_dp_can_mst(intel_dp))
		return connector_status_connected;

	/* If no HPD, poke DDC gently */
	if (drm_probe_ddc(&intel_dp->aux.ddc))
		return connector_status_connected;

	/* Well we tried, say unknown for unreliable port types */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
		/* DPCD 1.1+: detailed downstream port type in downstream_ports[0] */
		type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
		if (type == DP_DS_PORT_TYPE_VGA ||
		    type == DP_DS_PORT_TYPE_NON_EDID)
			return connector_status_unknown;
	} else {
		/* Pre-1.1 DPCD: only the coarse type in DOWNSTREAMPORT_PRESENT */
		type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
			DP_DWN_STRM_PORT_TYPE_MASK;
		if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
		    type == DP_DWN_STRM_PORT_TYPE_OTHER)
			return connector_status_unknown;
	}

	/* Anything else is out of spec, warn and ignore */
	drm_dbg_kms(&i915->drm, "Broken DP branch device, ignoring\n");
	return connector_status_disconnected;
}

/* eDP panels are fixed, internal connections: always report connected. */
static enum drm_connector_status
edp_detect(struct intel_dp *intel_dp)
{
	return connector_status_connected;
}

/* Live state from the south (PCH) display interrupt status register. */
static bool ibx_digital_port_connected(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	u32 bit = dev_priv->hotplug.pch_hpd[encoder->hpd_pin];

	return intel_de_read(dev_priv, SDEISR) & bit;
}

/* Live state from PORT_HOTPLUG_STAT using the g4x bit definitions. */
static bool g4x_digital_port_connected(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	u32 bit;

	switch (encoder->hpd_pin) {
	case HPD_PORT_B:
		bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
		break;
	case HPD_PORT_C:
		bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
		break;
	case HPD_PORT_D:
		bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
		break;
	default:
		MISSING_CASE(encoder->hpd_pin);
		return false;
	}

	return intel_de_read(dev_priv, PORT_HOTPLUG_STAT) & bit;
}

/* Same as g4x, but gm45 uses different live status bit layouts. */
static bool gm45_digital_port_connected(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	u32 bit;

	switch (encoder->hpd_pin) {
	case HPD_PORT_B:
		bit = PORTB_HOTPLUG_LIVE_STATUS_GM45;
		break;
	case HPD_PORT_C:
		bit = PORTC_HOTPLUG_LIVE_STATUS_GM45;
		break;
	case HPD_PORT_D:
		bit = PORTD_HOTPLUG_LIVE_STATUS_GM45;
		break;
	default:
		MISSING_CASE(encoder->hpd_pin);
		return false;
	}

	return intel_de_read(dev_priv, PORT_HOTPLUG_STAT) & bit;
}

/* Live state from the north display engine interrupt status register. */
static bool ilk_digital_port_connected(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	u32 bit = dev_priv->hotplug.hpd[encoder->hpd_pin];

	return intel_de_read(dev_priv, DEISR) & bit;
}

/*
 * intel_digital_port_connected - is the specified port connected?
 * @encoder: intel_encoder
 *
 * In cases where there's a connector physically connected but it can't be used
 * by our hardware we also return false, since the rest of the driver should
 * pretty much treat the port as disconnected. This is relevant for type-C
 * (starting on ICL) where there's ownership involved.
 *
 * Return %true if port is connected, %false otherwise.
 */
bool intel_digital_port_connected(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
	bool is_connected = false;
	intel_wakeref_t wakeref;

	/* The platform-specific ->connected() hook needs display power. */
	with_intel_display_power(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref)
		is_connected = dig_port->connected(encoder);

	return is_connected;
}

/*
 * Return a freshly allocated EDID for the attached connector, or NULL.
 * A cached connector->edid takes precedence; an ERR_PTR cache entry marks
 * a known-invalid EDID and yields NULL without touching the DDC bus.
 * Caller owns (and must free) the returned EDID.
 */
static struct edid *
intel_dp_get_edid(struct intel_dp *intel_dp)
{
	struct intel_connector *intel_connector = intel_dp->attached_connector;

	/* use cached edid if we have one */
	if (intel_connector->edid) {
		/* invalid edid */
		if (IS_ERR(intel_connector->edid))
			return NULL;

		return drm_edid_duplicate(intel_connector->edid);
	} else
		return drm_get_edid(&intel_connector->base,
				    &intel_dp->aux.ddc);
}

/*
 * Cache the downstream facing port (DFP) limits — max bpc, max dotclock
 * and the TMDS clock range — from the DPCD downstream port info and the
 * sink's EDID, for later use by mode validation/computation.
 */
static void
intel_dp_update_dfp(struct intel_dp *intel_dp,
		    const struct edid *edid)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_connector *connector = intel_dp->attached_connector;

	intel_dp->dfp.max_bpc =
		drm_dp_downstream_max_bpc(intel_dp->dpcd,
					  intel_dp->downstream_ports, edid);

	intel_dp->dfp.max_dotclock =
		drm_dp_downstream_max_dotclock(intel_dp->dpcd,
					       intel_dp->downstream_ports);

	intel_dp->dfp.min_tmds_clock =
		drm_dp_downstream_min_tmds_clock(intel_dp->dpcd,
						 intel_dp->downstream_ports,
						 edid);
	intel_dp->dfp.max_tmds_clock =
		drm_dp_downstream_max_tmds_clock(intel_dp->dpcd,
						 intel_dp->downstream_ports,
						 edid);

	drm_dbg_kms(&i915->drm,
		    "[CONNECTOR:%d:%s] DFP max bpc %d, max dotclock %d, TMDS clock %d-%d\n",
		    connector->base.base.id, connector->base.name,
		    intel_dp->dfp.max_bpc,
		    intel_dp->dfp.max_dotclock,
		    intel_dp->dfp.min_tmds_clock,
		    intel_dp->dfp.max_tmds_clock);
}

/*
 * Work out whether YCbCr 4:2:0 output is usable on this connector, either
 * via branch-device 4:2:0 passthrough or via 4:4:4->4:2:0 conversion, and
 * record the result in intel_dp->dfp and connector->base.ycbcr_420_allowed.
 */
static void
intel_dp_update_420(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_connector *connector = intel_dp->attached_connector;
	bool is_branch, ycbcr_420_passthrough, ycbcr_444_to_420;

	/* No YCbCr output support on gmch platforms */
	if (HAS_GMCH(i915))
		return;

	/*
	 * ILK doesn't seem capable of DP YCbCr output. The
	 * displayed image is severely corrupted. SNB+ is fine.
	 */
	if (IS_GEN(i915, 5))
		return;

	is_branch = drm_dp_is_branch(intel_dp->dpcd);
	ycbcr_420_passthrough =
		drm_dp_downstream_420_passthrough(intel_dp->dpcd,
						  intel_dp->downstream_ports);
	ycbcr_444_to_420 =
		drm_dp_downstream_444_to_420_conversion(intel_dp->dpcd,
							intel_dp->downstream_ports);

	if (INTEL_GEN(i915) >= 11) {
		/* Prefer 4:2:0 passthrough over 4:4:4->4:2:0 conversion */
		intel_dp->dfp.ycbcr_444_to_420 =
			ycbcr_444_to_420 && !ycbcr_420_passthrough;

		connector->base.ycbcr_420_allowed =
			!is_branch || ycbcr_444_to_420 || ycbcr_420_passthrough;
	} else {
		/* 4:4:4->4:2:0 conversion is the only way */
		intel_dp->dfp.ycbcr_444_to_420 = ycbcr_444_to_420;

		connector->base.ycbcr_420_allowed = ycbcr_444_to_420;
	}

	drm_dbg_kms(&i915->drm,
		    "[CONNECTOR:%d:%s] YCbCr 4:2:0 allowed? %s, YCbCr 4:4:4->4:2:0 conversion? %s\n",
		    connector->base.base.id, connector->base.name,
		    yesno(connector->base.ycbcr_420_allowed),
		    yesno(intel_dp->dfp.ycbcr_444_to_420));
}

/*
 * Read (or fetch from cache) the sink's EDID, store it as the connector's
 * detect_edid, and refresh everything derived from it: DFP limits, 4:2:0
 * support, HDMI-sink/audio flags, CEC address and EDID quirks.
 * Counterpart of intel_dp_unset_edid().
 */
static void
intel_dp_set_edid(struct intel_dp *intel_dp)
{
	struct intel_connector *connector = intel_dp->attached_connector;
	struct edid *edid;

	intel_dp_unset_edid(intel_dp);
	edid = intel_dp_get_edid(intel_dp);
	connector->detect_edid = edid;

	intel_dp_update_dfp(intel_dp, edid);
	intel_dp_update_420(intel_dp);

	if (edid && edid->input & DRM_EDID_INPUT_DIGITAL) {
		intel_dp->has_hdmi_sink = drm_detect_hdmi_monitor(edid);
		intel_dp->has_audio = drm_detect_monitor_audio(edid);
	}

	drm_dp_cec_set_edid(&intel_dp->aux, edid);
	intel_dp->edid_quirks = drm_dp_get_edid_quirks(edid);
}

/*
 * Drop the cached detect-time EDID and reset every piece of state that
 * intel_dp_set_edid() derived from it.
 */
static void
intel_dp_unset_edid(struct intel_dp *intel_dp)
{
	struct intel_connector *connector = intel_dp->attached_connector;

	drm_dp_cec_unset_edid(&intel_dp->aux);
	kfree(connector->detect_edid);
	connector->detect_edid = NULL;

	intel_dp->has_hdmi_sink = false;
	intel_dp->has_audio = false;
	intel_dp->edid_quirks = 0;

	intel_dp->dfp.max_bpc = 0;
	intel_dp->dfp.max_dotclock = 0;
	intel_dp->dfp.min_tmds_clock = 0;
	intel_dp->dfp.max_tmds_clock = 0;

	intel_dp->dfp.ycbcr_444_to_420 = false;
	connector->base.ycbcr_420_allowed = false;
}

/*
 * drm_connector_helper_funcs .detect_ctx hook: full connector detection,
 * including DPCD/EDID readout, MST configuration and link-state handling.
 */
static int
intel_dp_detect(struct drm_connector *connector,
		struct drm_modeset_acquire_ctx *ctx,
		bool force)
{
	struct drm_i915_private *dev_priv = to_i915(connector->dev);
	struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &dig_port->base;
	enum drm_connector_status status;

	drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n",
		    connector->base.id, connector->name);
	drm_WARN_ON(&dev_priv->drm,
		    !drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));

	if (!INTEL_DISPLAY_ENABLED(dev_priv))
		return connector_status_disconnected;

	/* Can't disconnect eDP */
	if (intel_dp_is_edp(intel_dp))
		status = edp_detect(intel_dp);
	else if (intel_digital_port_connected(encoder))
		status = intel_dp_detect_dpcd(intel_dp);
	else
		status = connector_status_disconnected;

	if (status == connector_status_disconnected) {
		/* Sink is gone: wipe compliance/DSC state and tear down MST. */
		memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));
		memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd));

		if (intel_dp->is_mst) {
			drm_dbg_kms(&dev_priv->drm,
				    "MST device may have disappeared %d vs %d\n",
				    intel_dp->is_mst,
				    intel_dp->mst_mgr.mst_state);
			intel_dp->is_mst = false;
			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
							intel_dp->is_mst);
		}

		goto out;
	}

	/* Read DP Sink DSC Cap DPCD regs for DP v1.4 */
	if (INTEL_GEN(dev_priv) >= 11)
		intel_dp_get_dsc_sink_cap(intel_dp);

	intel_dp_configure_mst(intel_dp);

	/*
	 * TODO: Reset link params when switching to MST mode, until MST
	 * supports link training fallback params.
	 */
	if (intel_dp->reset_link_params || intel_dp->is_mst) {
		/* Initial max link lane count */
		intel_dp->max_link_lane_count = intel_dp_max_common_lane_count(intel_dp);

		/* Initial max link rate */
		intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);

		intel_dp->reset_link_params = false;
	}

	intel_dp_print_rates(intel_dp);

	if (intel_dp->is_mst) {
		/*
		 * If we are in MST mode then this connector
		 * won't appear connected or have anything
		 * with EDID on it
		 */
		status = connector_status_disconnected;
		goto out;
	}

	/*
	 * Some external monitors do not signal loss of link synchronization
	 * with an IRQ_HPD, so force a link status check.
	 */
	if (!intel_dp_is_edp(intel_dp)) {
		int ret;

		ret = intel_dp_retrain_link(encoder, ctx);
		if (ret)
			return ret;
	}

	/*
	 * Clearing NACK and defer counts to get their exact values
	 * while reading EDID which are required by Compliance tests
	 * 4.2.2.4 and 4.2.2.5
	 */
	intel_dp->aux.i2c_nack_count = 0;
	intel_dp->aux.i2c_defer_count = 0;

	intel_dp_set_edid(intel_dp);
	if (intel_dp_is_edp(intel_dp) ||
	    to_intel_connector(connector)->detect_edid)
		status = connector_status_connected;

	intel_dp_check_service_irq(intel_dp);

out:
	if (status != connector_status_connected && !intel_dp->is_mst)
		intel_dp_unset_edid(intel_dp);

	/*
	 * Make sure the refs for power wells enabled during detect are
	 * dropped to avoid a new detect cycle triggered by HPD polling.
	 */
	intel_display_power_flush_work(dev_priv);

	if (!intel_dp_is_edp(intel_dp))
		drm_dp_set_subconnector_property(connector,
						 status,
						 intel_dp->dpcd,
						 intel_dp->downstream_ports);
	return status;
}

/*
 * drm_connector_funcs .force hook: re-read the EDID for a connector whose
 * status is forced to connected, taking an AUX power reference for the read.
 */
static void
intel_dp_force(struct drm_connector *connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &dig_port->base;
	struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
	enum intel_display_power_domain aux_domain =
		intel_aux_power_domain(dig_port);
	intel_wakeref_t wakeref;

	drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n",
		    connector->base.id, connector->name);
	intel_dp_unset_edid(intel_dp);

	if (connector->status != connector_status_connected)
		return;

	wakeref = intel_display_power_get(dev_priv, aux_domain);

	intel_dp_set_edid(intel_dp);

	intel_display_power_put(dev_priv, aux_domain, wakeref);
}

/*
 * drm_connector_helper_funcs .get_modes hook: add modes from the cached
 * detect-time EDID, falling back to the eDP fixed mode or a downstream
 * facing port's default mode when no EDID modes are available.
 */
static int intel_dp_get_modes(struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct edid *edid;

	edid = intel_connector->detect_edid;
	if (edid) {
		int ret = intel_connector_update_modes(connector, edid);
		if (ret)
			return ret;
	}

	/* if eDP has no EDID, fall back to fixed mode */
	if (intel_dp_is_edp(intel_attached_dp(intel_connector)) &&
	    intel_connector->panel.fixed_mode) {
		struct drm_display_mode *mode;

		mode = drm_mode_duplicate(connector->dev,
					  intel_connector->panel.fixed_mode);
		if (mode) {
			drm_mode_probed_add(connector, mode);
			return 1;
		}
	}

	if (!edid) {
		struct intel_dp *intel_dp = intel_attached_dp(intel_connector);
		struct drm_display_mode *mode;

		mode =
		       drm_dp_downstream_mode(connector->dev,
					      intel_dp->dpcd,
					      intel_dp->downstream_ports);
		if (mode) {
			drm_mode_probed_add(connector, mode);
			return 1;
		}
	}

	return 0;
}

/*
 * drm_connector_funcs .late_register hook: register the connector, then the
 * DP AUX channel and its CEC adapter under the connector's kernel device.
 */
static int
intel_dp_connector_register(struct drm_connector *connector)
{
	struct drm_i915_private *i915 = to_i915(connector->dev);
	struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
	int ret;

	ret = intel_connector_register(connector);
	if (ret)
		return ret;

	drm_dbg_kms(&i915->drm, "registering %s bus for %s\n",
		    intel_dp->aux.name, connector->kdev->kobj.name);

	intel_dp->aux.dev = connector->kdev;
	ret = drm_dp_aux_register(&intel_dp->aux);
	if (!ret)
		drm_dp_cec_register_connector(&intel_dp->aux, connector);
	return ret;
}

/* Reverse of intel_dp_connector_register(), in the opposite order. */
static void
intel_dp_connector_unregister(struct drm_connector *connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));

	drm_dp_cec_unregister_connector(&intel_dp->aux);
	drm_dp_aux_unregister(&intel_dp->aux);
	intel_connector_unregister(connector);
}

/*
 * Tear down work attached to the encoder: MST state, pending eDP VDD work,
 * the reboot notifier and the AUX channel. Shared by encoder destroy paths.
 */
void intel_dp_encoder_flush_work(struct drm_encoder *encoder)
{
	struct intel_digital_port *dig_port = enc_to_dig_port(to_intel_encoder(encoder));
	struct intel_dp *intel_dp = &dig_port->dp;

	intel_dp_mst_encoder_cleanup(dig_port);
	if (intel_dp_is_edp(intel_dp)) {
		intel_wakeref_t wakeref;

		cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
		/*
		 * vdd might still be enabled due to the delayed vdd off.
		 * Make sure vdd is actually turned off here.
		 */
		with_pps_lock(intel_dp, wakeref)
			edp_panel_vdd_off_sync(intel_dp);

		if (intel_dp->edp_notifier.notifier_call) {
			unregister_reboot_notifier(&intel_dp->edp_notifier);
			intel_dp->edp_notifier.notifier_call = NULL;
		}
	}

	intel_dp_aux_fini(intel_dp);
}

/* drm_encoder_funcs .destroy hook: flush work, then free the digital port. */
static void intel_dp_encoder_destroy(struct drm_encoder *encoder)
{
	intel_dp_encoder_flush_work(encoder);

	drm_encoder_cleanup(encoder);
	kfree(enc_to_dig_port(to_intel_encoder(encoder)));
}

/* Suspend hook: make sure delayed eDP VDD-off has actually happened. */
void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);
	intel_wakeref_t wakeref;

	if (!intel_dp_is_edp(intel_dp))
		return;

	/*
	 * vdd might still be enabled due to the delayed vdd off.
	 * Make sure vdd is actually turned off here.
	 */
	cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
	with_pps_lock(intel_dp, wakeref)
		edp_panel_vdd_off_sync(intel_dp);
}

/*
 * If the BIOS left panel VDD enabled, adopt it into our state tracking:
 * take the matching power domain reference and schedule the usual
 * delayed VDD off. Caller must hold pps_mutex.
 */
static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!edp_have_panel_vdd(intel_dp))
		return;

	/*
	 * The VDD bit needs a power domain reference, so if the bit is
	 * already enabled when we boot or resume, grab this reference and
	 * schedule a vdd off, so we don't hold on to the reference
	 * indefinitely.
	 */
	drm_dbg_kms(&dev_priv->drm,
		    "VDD left on by BIOS, adjusting state tracking\n");
	intel_display_power_get(dev_priv, intel_aux_power_domain(dig_port));

	edp_panel_vdd_schedule_off(intel_dp);
}

/* Which pipe currently drives this DP port, or INVALID_PIPE (VLV/CHV). */
static enum pipe vlv_active_pipe(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	enum pipe pipe;

	if (intel_dp_port_enabled(dev_priv, intel_dp->output_reg,
				  encoder->port, &pipe))
		return pipe;

	return INVALID_PIPE;
}

/*
 * drm_encoder_funcs .reset hook: resync software state (DP register cache,
 * LSPCON, link params, active pipe, PPS) with the hardware after reset/resume.
 */
void intel_dp_encoder_reset(struct drm_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(encoder));
	struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);
	intel_wakeref_t wakeref;

	if (!HAS_DDI(dev_priv))
		intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg);

	if (lspcon->active)
		lspcon_resume(lspcon);

	intel_dp->reset_link_params = true;

	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
	    !intel_dp_is_edp(intel_dp))
		return;

	with_pps_lock(intel_dp, wakeref) {
		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
			intel_dp->active_pipe = vlv_active_pipe(intel_dp);

		if (intel_dp_is_edp(intel_dp)) {
			/*
			 * Reinit the power sequencer, in case BIOS did
			 * something nasty with it.
			 */
			intel_dp_pps_init(intel_dp);
			intel_edp_panel_vdd_sanitize(intel_dp);
		}
	}
}

/*
 * Add every connector belonging to @tile_group_id (and its planes) to the
 * atomic state, flagging each affected CRTC for a full modeset.
 */
static int intel_modeset_tile_group(struct intel_atomic_state *state,
				    int tile_group_id)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct drm_connector_list_iter conn_iter;
	struct drm_connector *connector;
	int ret = 0;

	drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct drm_connector_state *conn_state;
		struct intel_crtc_state *crtc_state;
		struct intel_crtc *crtc;

		if (!connector->has_tile ||
		    connector->tile_group->id != tile_group_id)
			continue;

		conn_state = drm_atomic_get_connector_state(&state->base,
							    connector);
		if (IS_ERR(conn_state)) {
			ret = PTR_ERR(conn_state);
			break;
		}

		crtc = to_intel_crtc(conn_state->crtc);

		if (!crtc)
			continue;

		crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
		crtc_state->uapi.mode_changed = true;

		ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
		if (ret)
			break;
	}
	drm_connector_list_iter_end(&conn_iter);

	return ret;
}

/*
 * Pull every enabled CRTC whose transcoder is in the @transcoders bitmask
 * into the atomic state and flag it for a modeset, clearing each handled
 * bit; warns if any requested transcoder was not found enabled.
 */
static int intel_modeset_affected_transcoders(struct intel_atomic_state *state, u8 transcoders)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc *crtc;

	if (transcoders == 0)
		return 0;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state;
		int ret;

		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		if (!crtc_state->hw.enable)
			continue;

		if (!(transcoders & BIT(crtc_state->cpu_transcoder)))
			continue;

		crtc_state->uapi.mode_changed = true;

		ret = drm_atomic_add_affected_connectors(&state->base, &crtc->base);
		if (ret)
			return ret;

		ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
		if (ret)
			return ret;

		transcoders &= ~BIT(crtc_state->cpu_transcoder);
	}

	drm_WARN_ON(&dev_priv->drm, transcoders != 0);

	return 0;
}

/*
 * For a connector in a port-sync group, collect the master transcoder and
 * all slaves from its old CRTC state and pull them into the modeset.
 */
static int intel_modeset_synced_crtcs(struct intel_atomic_state *state,
				      struct drm_connector *connector)
{
	const struct drm_connector_state *old_conn_state =
		drm_atomic_get_old_connector_state(&state->base, connector);
	const struct intel_crtc_state *old_crtc_state;
	struct intel_crtc *crtc;
	u8 transcoders;

	crtc = to_intel_crtc(old_conn_state->crtc);
	if (!crtc)
		return 0;

	old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc);

	if (!old_crtc_state->hw.active)
		return 0;

	transcoders = old_crtc_state->sync_mode_slaves_mask;
	if (old_crtc_state->master_transcoder != INVALID_TRANSCODER)
		transcoders |= BIT(old_crtc_state->master_transcoder);

	return intel_modeset_affected_transcoders(state,
						  transcoders);
}

/*
 * drm_connector_helper_funcs .atomic_check hook: on top of the generic
 * digital connector check, extend the modeset to tile-group siblings and
 * port-sync partner transcoders on gen9+.
 */
static int intel_dp_connector_atomic_check(struct drm_connector *conn,
					   struct drm_atomic_state *_state)
{
	struct drm_i915_private *dev_priv = to_i915(conn->dev);
	struct intel_atomic_state *state = to_intel_atomic_state(_state);
	int ret;

	ret = intel_digital_connector_atomic_check(conn, &state->base);
	if (ret)
		return ret;

	/*
	 * We don't enable port sync on BDW due to missing w/as and
	 * due to not having adjusted the modeset sequence appropriately.
	 */
	if (INTEL_GEN(dev_priv) < 9)
		return 0;

	if (!intel_connector_needs_modeset(state, conn))
		return 0;

	if (conn->has_tile) {
		ret = intel_modeset_tile_group(state, conn->tile_group->id);
		if (ret)
			return ret;
	}

	return intel_modeset_synced_crtcs(state, conn);
}

static const struct drm_connector_funcs intel_dp_connector_funcs = {
	.force = intel_dp_force,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.atomic_get_property = intel_digital_connector_atomic_get_property,
	.atomic_set_property = intel_digital_connector_atomic_set_property,
	.late_register = intel_dp_connector_register,
	.early_unregister = intel_dp_connector_unregister,
	.destroy = intel_connector_destroy,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_duplicate_state = intel_digital_connector_duplicate_state,
};

static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
	.detect_ctx = intel_dp_detect,
	.get_modes = intel_dp_get_modes,
	.mode_valid = intel_dp_mode_valid,
	.atomic_check = intel_dp_connector_atomic_check,
};

static const struct drm_encoder_funcs intel_dp_enc_funcs = {
	.reset = intel_dp_encoder_reset,
	.destroy = intel_dp_encoder_destroy,
};

/* Both panel power and VDD up, checked under the PPS lock. */
static bool intel_edp_have_power(struct intel_dp *intel_dp)
{
	intel_wakeref_t wakeref;
	bool have_power = false;

	with_pps_lock(intel_dp, wakeref) {
		have_power = edp_have_panel_power(intel_dp) &&
			     edp_have_panel_vdd(intel_dp);
	}

	return have_power;
}

/*
 * HPD pulse handler for this digital port. Returns IRQ_HANDLED when the
 * pulse was consumed (including deliberately ignored eDP pulses), IRQ_NONE
 * when the caller should run full connector detection.
 */
enum irqreturn
intel_dp_hpd_pulse(struct intel_digital_port *dig_port, bool long_hpd)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_dp *intel_dp = &dig_port->dp;

	if (dig_port->base.type == INTEL_OUTPUT_EDP &&
	    (long_hpd || !intel_edp_have_power(intel_dp))) {
		/*
		 * vdd off can generate a long/short pulse on eDP which
		 * would require vdd on to handle it, and thus we
		 * would end up in an endless cycle of
		 * "vdd off -> long/short hpd -> vdd on -> detect -> vdd off -> ..."
		 */
		drm_dbg_kms(&i915->drm,
			    "ignoring %s hpd on eDP [ENCODER:%d:%s]\n",
			    long_hpd ? "long" : "short",
			    dig_port->base.base.base.id,
			    dig_port->base.base.name);
		return IRQ_HANDLED;
	}

	drm_dbg_kms(&i915->drm, "got hpd irq on [ENCODER:%d:%s] - %s\n",
		    dig_port->base.base.base.id,
		    dig_port->base.base.name,
		    long_hpd ? "long" : "short");

	if (long_hpd) {
		/* Long pulse: let full detection redo link parameters. */
		intel_dp->reset_link_params = true;
		return IRQ_NONE;
	}

	if (intel_dp->is_mst) {
		if (!intel_dp_check_mst_status(intel_dp))
			return IRQ_NONE;
	} else if (!intel_dp_short_pulse(intel_dp)) {
		return IRQ_NONE;
	}

	return IRQ_HANDLED;
}

/* check the VBT to see whether the eDP is on another port */
bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port)
{
	/*
	 * eDP not supported on g4x. so bail out early just
	 * for a bit extra safety in case the VBT is bonkers.
	 */
	if (INTEL_GEN(dev_priv) < 5)
		return false;

	if (INTEL_GEN(dev_priv) < 9 && port == PORT_A)
		return true;

	return intel_bios_is_port_edp(dev_priv, port);
}

/*
 * Attach the standard DP connector properties: subconnector, force-audio,
 * broadcast RGB, max bpc, colorspace, HDR metadata and (for eDP) scaling.
 */
static void
intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
{
	struct drm_i915_private *dev_priv = to_i915(connector->dev);
	enum port port = dp_to_dig_port(intel_dp)->base.port;

	if (!intel_dp_is_edp(intel_dp))
		drm_connector_attach_dp_subconnector_property(connector);

	if (!IS_G4X(dev_priv) && port != PORT_A)
		intel_attach_force_audio_property(connector);

	intel_attach_broadcast_rgb_property(connector);
	if (HAS_GMCH(dev_priv))
		drm_connector_attach_max_bpc_property(connector, 6, 10);
	else if (INTEL_GEN(dev_priv) >= 5)
		drm_connector_attach_max_bpc_property(connector, 6, 12);

	intel_attach_colorspace_property(connector);

	if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 11)
		drm_object_attach_property(&connector->base,
					   connector->dev->mode_config.hdr_output_metadata_property,
					   0);

	if (intel_dp_is_edp(intel_dp)) {
		u32 allowed_scalers;

		allowed_scalers = BIT(DRM_MODE_SCALE_ASPECT) | BIT(DRM_MODE_SCALE_FULLSCREEN);
		if (!HAS_GMCH(dev_priv))
			allowed_scalers |= BIT(DRM_MODE_SCALE_CENTER);

		drm_connector_attach_scaling_mode_property(connector, allowed_scalers);

		connector->state->scaling_mode = DRM_MODE_SCALE_ASPECT;

	}
}

/* Seed the panel power timestamps so the first waits behave sanely. */
static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
{
	intel_dp->panel_power_off_time = ktime_get_boottime();
	intel_dp->last_power_on = jiffies;
	intel_dp->last_backlight_off = jiffies;
}

/*
 * Read the panel power sequencer delays currently programmed in the
 * hardware PPS registers into @seq (all values in 100 usec units).
 */
static void
intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 pp_on, pp_off, pp_ctl;
	struct pps_registers regs;

	intel_pps_get_registers(intel_dp, &regs);

	pp_ctl = ilk_get_pp_control(intel_dp);

	/* Ensure PPS is unlocked */
	if (!HAS_DDI(dev_priv))
		intel_de_write(dev_priv, regs.pp_ctrl, pp_ctl);

	pp_on = intel_de_read(dev_priv, regs.pp_on);
	pp_off = intel_de_read(dev_priv, regs.pp_off);

	/* Pull timing values out of registers */
	seq->t1_t3 = REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, pp_on);
	seq->t8 = REG_FIELD_GET(PANEL_LIGHT_ON_DELAY_MASK, pp_on);
	seq->t9 = REG_FIELD_GET(PANEL_LIGHT_OFF_DELAY_MASK, pp_off);
	seq->t10 = REG_FIELD_GET(PANEL_POWER_DOWN_DELAY_MASK, pp_off);

	if (i915_mmio_reg_valid(regs.pp_div)) {
		u32 pp_div;

		pp_div = intel_de_read(dev_priv, regs.pp_div);

		/* pp_div holds t11_t12 in 100 msec units; convert to 100 usec */
		seq->t11_t12 = REG_FIELD_GET(PANEL_POWER_CYCLE_DELAY_MASK, pp_div) * 1000;
	} else {
		/* No pp_div register: t11_t12 lives in pp_ctrl (BXT) */
		seq->t11_t12 = REG_FIELD_GET(BXT_POWER_CYCLE_DELAY_MASK, pp_ctl) * 1000;
	}
}

/* Dump one set of PPS delays, tagged with @state_name ("hw"/"sw"/...). */
static void
intel_pps_dump_state(const char *state_name, const struct edp_power_seq *seq)
{
	DRM_DEBUG_KMS("%s t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      state_name,
		      seq->t1_t3, seq->t8, seq->t9, seq->t10, seq->t11_t12);
}

/* Compare hardware PPS delays against our software copy; log a mismatch. */
static void
intel_pps_verify_state(struct intel_dp *intel_dp)
{
	struct edp_power_seq hw;
	struct edp_power_seq *sw = &intel_dp->pps_delays;

	intel_pps_readout_hw_state(intel_dp, &hw);

	if (hw.t1_t3 != sw->t1_t3 || hw.t8 != sw->t8 || hw.t9 != sw->t9 ||
	    hw.t10 != sw->t10 || hw.t11_t12 != sw->t11_t12) {
		DRM_ERROR("PPS state mismatch\n");
		intel_pps_dump_state("sw", sw);
		intel_pps_dump_state("hw", &hw);
	}
}

/*
 * Compute the final panel power sequencer delays from the max of the
 * current hardware values and the VBT, falling back to eDP spec limits,
 * and derive the driver's wait durations from them. Caller must hold
 * pps_mutex; no-op if the delays were already initialized.
 */
static void
intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct edp_power_seq cur, vbt, spec,
		*final = &intel_dp->pps_delays;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* already initialized? */
	if (final->t11_t12 != 0)
		return;

	intel_pps_readout_hw_state(intel_dp, &cur);

	intel_pps_dump_state("cur", &cur);

	vbt = dev_priv->vbt.edp.pps;
	/* On Toshiba Satellite P50-C-18C system the VBT T12 delay
	 * of 500ms appears to be too short. Occasionally the panel
	 * just fails to power back on. Increasing the delay to 800ms
	 * seems sufficient to avoid this problem.
	 */
	if (dev_priv->quirks & QUIRK_INCREASE_T12_DELAY) {
		vbt.t11_t12 = max_t(u16, vbt.t11_t12, 1300 * 10);
		drm_dbg_kms(&dev_priv->drm,
			    "Increasing T12 panel delay as per the quirk to %d\n",
			    vbt.t11_t12);
	}
	/* T11_T12 delay is special and actually in units of 100ms, but zero
	 * based in the hw (so we need to add 100 ms). But the sw vbt
	 * table multiplies it with 1000 to make it in units of 100usec,
	 * too. */
	vbt.t11_t12 += 100 * 10;

	/* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
	 * our hw here, which are all in 100usec. */
	spec.t1_t3 = 210 * 10;
	spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
	spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
	spec.t10 = 500 * 10;
	/* This one is special and actually in units of 100ms, but zero
	 * based in the hw (so we need to add 100 ms). But the sw vbt
	 * table multiplies it with 1000 to make it in units of 100usec,
	 * too. */
	spec.t11_t12 = (510 + 100) * 10;

	intel_pps_dump_state("vbt", &vbt);

	/* Use the max of the register settings and vbt. If both are
	 * unset, fall back to the spec limits. */
#define assign_final(field)	final->field = (max(cur.field, vbt.field) == 0 ? \
				       spec.field : \
				       max(cur.field, vbt.field))
	assign_final(t1_t3);
	assign_final(t8);
	assign_final(t9);
	assign_final(t10);
	assign_final(t11_t12);
#undef assign_final

	/* Convert from 100 usec register units to msec for our waits. */
#define get_delay(field)	(DIV_ROUND_UP(final->field, 10))
	intel_dp->panel_power_up_delay = get_delay(t1_t3);
	intel_dp->backlight_on_delay = get_delay(t8);
	intel_dp->backlight_off_delay = get_delay(t9);
	intel_dp->panel_power_down_delay = get_delay(t10);
	intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
#undef get_delay

	drm_dbg_kms(&dev_priv->drm,
		    "panel power up delay %d, power down delay %d, power cycle delay %d\n",
		    intel_dp->panel_power_up_delay,
		    intel_dp->panel_power_down_delay,
		    intel_dp->panel_power_cycle_delay);

	drm_dbg_kms(&dev_priv->drm, "backlight on delay %d, off delay %d\n",
		    intel_dp->backlight_on_delay,
		    intel_dp->backlight_off_delay);

	/*
	 * We override the HW backlight delays to 1 because we do manual waits
	 * on them. For T8, even BSpec recommends doing it. For T9, if we
	 * don't do this, we'll end up waiting for the backlight off delay
	 * twice: once when we do the manual sleep, and once when we disable
	 * the panel and wait for the PP_STATUS bit to become zero.
	 */
	final->t8 = 1;
	final->t9 = 1;

	/*
	 * HW has only a 100msec granularity for t11_t12 so round it up
	 * accordingly.
	 */
	final->t11_t12 = roundup(final->t11_t12, 100 * 10);
}

/*
 * Program the computed PPS delays (and the port select / pp divisor)
 * into the hardware registers. @force_disable_vdd clears a BIOS-enabled
 * VDD bit first to keep power domain tracking consistent. Caller must
 * hold pps_mutex.
 */
static void
intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
					      bool force_disable_vdd)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 pp_on, pp_off, port_sel = 0;
	int div = RUNTIME_INFO(dev_priv)->rawclk_freq / 1000;
	struct pps_registers regs;
	enum port port = dp_to_dig_port(intel_dp)->base.port;
	const struct edp_power_seq *seq = &intel_dp->pps_delays;

	lockdep_assert_held(&dev_priv->pps_mutex);

	intel_pps_get_registers(intel_dp, &regs);

	/*
	 * On some VLV machines the BIOS can leave the VDD
	 * enabled even on power sequencers which aren't
	 * hooked up to any port. This would mess up the
	 * power domain tracking the first time we pick
	 * one of these power sequencers for use since
	 * edp_panel_vdd_on() would notice that the VDD was
	 * already on and therefore wouldn't grab the power
	 * domain reference. Disable VDD first to avoid this.
	 * This also avoids spuriously turning the VDD on as
	 * soon as the new power sequencer gets initialized.
	 */
	if (force_disable_vdd) {
		u32 pp = ilk_get_pp_control(intel_dp);

		drm_WARN(&dev_priv->drm, pp & PANEL_POWER_ON,
			 "Panel power already on\n");

		if (pp & EDP_FORCE_VDD)
			drm_dbg_kms(&dev_priv->drm,
				    "VDD already on, disabling first\n");

		pp &= ~EDP_FORCE_VDD;

		intel_de_write(dev_priv, regs.pp_ctrl, pp);
	}

	pp_on = REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, seq->t1_t3) |
		REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, seq->t8);
	pp_off = REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, seq->t9) |
		REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, seq->t10);

	/* Haswell doesn't have any port selection bits for the panel
	 * power sequencer any more. */
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		port_sel = PANEL_PORT_SELECT_VLV(port);
	} else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
		switch (port) {
		case PORT_A:
			port_sel = PANEL_PORT_SELECT_DPA;
			break;
		case PORT_C:
			port_sel = PANEL_PORT_SELECT_DPC;
			break;
		case PORT_D:
			port_sel = PANEL_PORT_SELECT_DPD;
			break;
		default:
			MISSING_CASE(port);
			break;
		}
	}

	pp_on |= port_sel;

	intel_de_write(dev_priv, regs.pp_on, pp_on);
	intel_de_write(dev_priv, regs.pp_off, pp_off);

	/*
	 * Compute the divisor for the pp clock, simply match the Bspec formula.
	 */
	if (i915_mmio_reg_valid(regs.pp_div)) {
		intel_de_write(dev_priv, regs.pp_div,
			       REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, (100 * div) / 2 - 1) | REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000)));
	} else {
		u32 pp_ctl;

		pp_ctl = intel_de_read(dev_priv, regs.pp_ctrl);
		pp_ctl &= ~BXT_POWER_CYCLE_DELAY_MASK;
		pp_ctl |= REG_FIELD_PREP(BXT_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000));
		intel_de_write(dev_priv, regs.pp_ctrl, pp_ctl);
	}

	drm_dbg_kms(&dev_priv->drm,
		    "panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
		    intel_de_read(dev_priv, regs.pp_on),
		    intel_de_read(dev_priv, regs.pp_off),
		    i915_mmio_reg_valid(regs.pp_div) ?
		    intel_de_read(dev_priv, regs.pp_div) :
		    (intel_de_read(dev_priv, regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK));
}

/* Initialize the PPS: VLV/CHV pick a sequencer, others program delays. */
static void intel_dp_pps_init(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		vlv_initial_power_sequencer_setup(intel_dp);
	} else {
		intel_dp_init_panel_power_sequencer(intel_dp);
		intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
	}
}

/**
 * intel_dp_set_drrs_state - program registers for RR switch to take effect
 * @dev_priv: i915 device
 * @crtc_state: a pointer to the active intel_crtc_state
 * @refresh_rate: RR to be programmed
 *
 * This function gets called when refresh rate (RR) has to be changed from
 * one frequency to another. Switches can be between high and low RR
 * supported by the panel or to any other RR based on media playback (in
 * this case, RR value needs to be passed from user space).
 *
 * The caller of this function needs to take a lock on dev_priv->drrs.
 */
static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
				    const struct intel_crtc_state *crtc_state,
				    int refresh_rate)
{
	struct intel_dp *intel_dp = dev_priv->drrs.dp;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
	enum drrs_refresh_rate_type index = DRRS_HIGH_RR;

	if (refresh_rate <= 0) {
		drm_dbg_kms(&dev_priv->drm,
			    "Refresh rate should be positive non-zero.\n");
		return;
	}

	if (intel_dp == NULL) {
		drm_dbg_kms(&dev_priv->drm, "DRRS not supported.\n");
		return;
	}

	if (!intel_crtc) {
		drm_dbg_kms(&dev_priv->drm,
			    "DRRS: intel_crtc not initialized\n");
		return;
	}

	if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
		drm_dbg_kms(&dev_priv->drm, "Only Seamless DRRS supported.\n");
		return;
	}

	/* Requesting exactly the downclock-mode rate means "go to low RR" */
	if (drm_mode_vrefresh(intel_dp->attached_connector->panel.downclock_mode) ==
	    refresh_rate)
		index = DRRS_LOW_RR;

	if (index == dev_priv->drrs.refresh_rate_type) {
		drm_dbg_kms(&dev_priv->drm,
			    "DRRS requested for previously set RR...ignoring\n");
		return;
	}

	if (!crtc_state->hw.active) {
		drm_dbg_kms(&dev_priv->drm,
			    "eDP encoder disabled. CRTC not Active\n");
		return;
	}

	/*
	 * Gen8+ (except CHV): switch RR by selecting between the M1/N1 and
	 * M2/N2 link parameter sets. Gen7/VLV/CHV: toggle the EDP RR mode
	 * switch bit in PIPECONF instead.
	 */
	if (INTEL_GEN(dev_priv) >= 8 && !IS_CHERRYVIEW(dev_priv)) {
		switch (index) {
		case DRRS_HIGH_RR:
			intel_dp_set_m_n(crtc_state, M1_N1);
			break;
		case DRRS_LOW_RR:
			intel_dp_set_m_n(crtc_state, M2_N2);
			break;
		case DRRS_MAX_RR:
		default:
			drm_err(&dev_priv->drm,
				"Unsupported refreshrate type\n");
		}
	} else if (INTEL_GEN(dev_priv) > 6) {
		i915_reg_t reg = PIPECONF(crtc_state->cpu_transcoder);
		u32 val;

		val = intel_de_read(dev_priv, reg);
		if (index > DRRS_HIGH_RR) {
			if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
				val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
			else
				val |= PIPECONF_EDP_RR_MODE_SWITCH;
		} else {
			if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
				val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
			else
				val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
		}
		intel_de_write(dev_priv, reg, val);
	}

	dev_priv->drrs.refresh_rate_type = index;

	drm_dbg_kms(&dev_priv->drm, "eDP Refresh Rate set to : %dHz\n",
		    refresh_rate);
}

/*
 * Attach @intel_dp as the active DRRS port and reset the busy-frontbuffer
 * tracking. Caller must hold dev_priv->drrs.mutex.
 */
static void
intel_edp_drrs_enable_locked(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	dev_priv->drrs.busy_frontbuffer_bits = 0;
	dev_priv->drrs.dp = intel_dp;
}

/**
 * intel_edp_drrs_enable - init drrs struct if supported
 * @intel_dp: DP struct
 * @crtc_state: A pointer to the active crtc state.
 *
 * Initializes frontbuffer_bits and drrs.dp
 */
void intel_edp_drrs_enable(struct intel_dp *intel_dp,
			   const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (!crtc_state->has_drrs)
		return;

	drm_dbg_kms(&dev_priv->drm, "Enabling DRRS\n");

	mutex_lock(&dev_priv->drrs.mutex);

	/* Only one eDP port can own DRRS at a time */
	if (dev_priv->drrs.dp) {
		drm_warn(&dev_priv->drm, "DRRS already enabled\n");
		goto unlock;
	}

	intel_edp_drrs_enable_locked(intel_dp);

unlock:
	mutex_unlock(&dev_priv->drrs.mutex);
}

/*
 * Detach the port from DRRS. If we are currently at the low refresh rate,
 * restore the panel's fixed-mode (high) rate first. Caller must hold
 * dev_priv->drrs.mutex.
 */
static void
intel_edp_drrs_disable_locked(struct intel_dp *intel_dp,
			      const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) {
		int refresh;

		refresh = drm_mode_vrefresh(intel_dp->attached_connector->panel.fixed_mode);
		intel_dp_set_drrs_state(dev_priv, crtc_state, refresh);
	}

	dev_priv->drrs.dp = NULL;
}

/**
 * intel_edp_drrs_disable - Disable DRRS
 * @intel_dp: DP struct
 * @old_crtc_state: Pointer to old crtc_state.
 *
 */
void intel_edp_drrs_disable(struct intel_dp *intel_dp,
			    const struct intel_crtc_state *old_crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (!old_crtc_state->has_drrs)
		return;

	mutex_lock(&dev_priv->drrs.mutex);
	if (!dev_priv->drrs.dp) {
		mutex_unlock(&dev_priv->drrs.mutex);
		return;
	}

	intel_edp_drrs_disable_locked(intel_dp, old_crtc_state);
	mutex_unlock(&dev_priv->drrs.mutex);

	/*
	 * Stop any pending idleness downclock work; done outside the mutex
	 * since the work item itself takes drrs.mutex.
	 */
	cancel_delayed_work_sync(&dev_priv->drrs.work);
}

/**
 * intel_edp_drrs_update - Update DRRS state
 * @intel_dp: Intel DP
 * @crtc_state: new CRTC state
 *
 * This function will update DRRS states, disabling or enabling DRRS when
 * executing fastsets. For full modeset, intel_edp_drrs_disable() and
 * intel_edp_drrs_enable() should be called instead.
 */
void
intel_edp_drrs_update(struct intel_dp *intel_dp,
		      const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (dev_priv->drrs.type != SEAMLESS_DRRS_SUPPORT)
		return;

	mutex_lock(&dev_priv->drrs.mutex);

	/* New state matches current one? */
	if (crtc_state->has_drrs == !!dev_priv->drrs.dp)
		goto unlock;

	if (crtc_state->has_drrs)
		intel_edp_drrs_enable_locked(intel_dp);
	else
		intel_edp_drrs_disable_locked(intel_dp, crtc_state);

unlock:
	mutex_unlock(&dev_priv->drrs.mutex);
}

/*
 * Delayed work scheduled from intel_edp_drrs_flush(): if the screen has
 * stayed idle, drop to the panel's downclock (low) refresh rate.
 */
static void intel_edp_drrs_downclock_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), drrs.work.work);
	struct intel_dp *intel_dp;

	mutex_lock(&dev_priv->drrs.mutex);

	intel_dp = dev_priv->drrs.dp;

	if (!intel_dp)
		goto unlock;

	/*
	 * The delayed work can race with an invalidate hence we need to
	 * recheck.
	 */

	if (dev_priv->drrs.busy_frontbuffer_bits)
		goto unlock;

	if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR) {
		struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;

		/* Screen is idle: switch to the panel's downclock-mode rate */
		intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
					drm_mode_vrefresh(intel_dp->attached_connector->panel.downclock_mode));
	}

unlock:
	mutex_unlock(&dev_priv->drrs.mutex);
}

/**
 * intel_edp_drrs_invalidate - Disable Idleness DRRS
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * This function gets called every time rendering on the given planes starts.
 * Hence DRRS needs to be Upclocked, i.e. (LOW_RR -> HIGH_RR).
 *
 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
 */
void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv,
			       unsigned int frontbuffer_bits)
{
	struct intel_dp *intel_dp;
	struct drm_crtc *crtc;
	enum pipe pipe;

	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
		return;

	cancel_delayed_work(&dev_priv->drrs.work);

	mutex_lock(&dev_priv->drrs.mutex);

	intel_dp = dev_priv->drrs.dp;
	if (!intel_dp) {
		mutex_unlock(&dev_priv->drrs.mutex);
		return;
	}

	crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	/* Only track bits belonging to the DRRS pipe */
	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
	dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;

	/* invalidate means busy screen hence upclock */
	if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
		intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
					drm_mode_vrefresh(intel_dp->attached_connector->panel.fixed_mode));

	mutex_unlock(&dev_priv->drrs.mutex);
}

/**
 * intel_edp_drrs_flush - Restart Idleness DRRS
 * @dev_priv: i915 device
 *
@frontbuffer_bits: frontbuffer plane tracking bits
 *
 * This function gets called every time rendering on the given planes has
 * completed or flip on a crtc is completed. So DRRS should be upclocked
 * (LOW_RR -> HIGH_RR). And also Idleness detection should be started again,
 * if no other planes are dirty.
 *
 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
 */
void intel_edp_drrs_flush(struct drm_i915_private *dev_priv,
			  unsigned int frontbuffer_bits)
{
	struct intel_dp *intel_dp;
	struct drm_crtc *crtc;
	enum pipe pipe;

	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
		return;

	cancel_delayed_work(&dev_priv->drrs.work);

	mutex_lock(&dev_priv->drrs.mutex);

	intel_dp = dev_priv->drrs.dp;
	if (!intel_dp) {
		mutex_unlock(&dev_priv->drrs.mutex);
		return;
	}

	crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	/* Only track bits belonging to the DRRS pipe */
	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
	dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;

	/* flush means busy screen hence upclock */
	if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
		intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
					drm_mode_vrefresh(intel_dp->attached_connector->panel.fixed_mode));

	/*
	 * flush also means no more activity hence schedule downclock, if all
	 * other fbs are quiescent too
	 */
	if (!dev_priv->drrs.busy_frontbuffer_bits)
		schedule_delayed_work(&dev_priv->drrs.work,
				      msecs_to_jiffies(1000));
	mutex_unlock(&dev_priv->drrs.mutex);
}

/**
 * DOC: Display Refresh Rate Switching (DRRS)
 *
 * Display Refresh Rate Switching (DRRS) is a power conservation feature
 * which enables switching between low and high refresh rates,
 * dynamically, based on the usage scenario. This feature is applicable
 * for internal panels.
 *
 * Indication that the panel supports DRRS is given by the panel EDID, which
 * would list multiple refresh rates for one resolution.
 *
 * DRRS is of 2 types - static and seamless.
 * Static DRRS involves changing refresh rate (RR) by doing a full modeset
 * (may appear as a blink on screen) and is used in dock-undock scenario.
 * Seamless DRRS involves changing RR without any visual effect to the user
 * and can be used during normal system usage. This is done by programming
 * certain registers.
 *
 * Support for static/seamless DRRS may be indicated in the VBT based on
 * inputs from the panel spec.
 *
 * DRRS saves power by switching to low RR based on usage scenarios.
 *
 * The implementation is based on frontbuffer tracking implementation. When
 * there is a disturbance on the screen triggered by user activity or a periodic
 * system activity, DRRS is disabled (RR is changed to high RR). When there is
 * no movement on screen, after a timeout of 1 second, a switch to low RR is
 * made.
 *
 * For integration with frontbuffer tracking code, intel_edp_drrs_invalidate()
 * and intel_edp_drrs_flush() are called.
 *
 * DRRS can be further extended to support other internal panels and also
 * the scenario of video playback wherein RR is set based on the rate
 * requested by userspace.
 */

/**
 * intel_dp_drrs_init - Init basic DRRS work and mutex.
 * @connector: eDP connector
 * @fixed_mode: preferred mode of panel
 *
 * This function is called only once at driver load to initialize basic
 * DRRS stuff.
 *
 * Returns:
 * Downclock mode if panel supports it, else return NULL.
 * DRRS support is determined by the presence of downclock mode (apart
 * from VBT setting).
 */
static struct drm_display_mode *
intel_dp_drrs_init(struct intel_connector *connector,
		   struct drm_display_mode *fixed_mode)
{
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct drm_display_mode *downclock_mode = NULL;

	INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
	mutex_init(&dev_priv->drrs.mutex);

	if (INTEL_GEN(dev_priv) <= 6) {
		drm_dbg_kms(&dev_priv->drm,
			    "DRRS supported for Gen7 and above\n");
		return NULL;
	}

	if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
		drm_dbg_kms(&dev_priv->drm, "VBT doesn't support DRRS\n");
		return NULL;
	}

	/* A second, lower-refresh mode in the EDID is what enables DRRS */
	downclock_mode = intel_panel_edid_downclock_mode(connector, fixed_mode);
	if (!downclock_mode) {
		drm_dbg_kms(&dev_priv->drm,
			    "Downclock mode is not found. DRRS not supported\n");
		return NULL;
	}

	dev_priv->drrs.type = dev_priv->vbt.drrs_type;

	dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
	drm_dbg_kms(&dev_priv->drm,
		    "seamless DRRS supported for eDP panel.\n");
	return downclock_mode;
}

/*
 * eDP-specific connector setup: PPS init, DPCD/EDID caching, fixed and
 * downclock mode discovery, and backlight setup. Returns false if eDP
 * registration must be abandoned (no DPCD, or LVDS owns the sequencer).
 */
static bool intel_edp_init_connector(struct intel_dp *intel_dp,
				     struct intel_connector *intel_connector)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_connector *connector = &intel_connector->base;
	struct drm_display_mode *fixed_mode = NULL;
	struct drm_display_mode *downclock_mode = NULL;
	bool has_dpcd;
	enum pipe pipe = INVALID_PIPE;
	intel_wakeref_t wakeref;
	struct edid *edid;

	if (!intel_dp_is_edp(intel_dp))
		return true;

	INIT_DELAYED_WORK(&intel_dp->panel_vdd_work, edp_panel_vdd_work);

	/*
	 * On IBX/CPT we may get here with LVDS already registered.
Since the
	 * driver uses the only internal power sequencer available for both
	 * eDP and LVDS bail out early in this case to prevent interfering
	 * with an already powered-on LVDS power sequencer.
	 */
	if (intel_get_lvds_encoder(dev_priv)) {
		drm_WARN_ON(dev,
			    !(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
		drm_info(&dev_priv->drm,
			 "LVDS was detected, not registering eDP\n");

		return false;
	}

	with_pps_lock(intel_dp, wakeref) {
		intel_dp_init_panel_power_timestamps(intel_dp);
		intel_dp_pps_init(intel_dp);
		intel_edp_panel_vdd_sanitize(intel_dp);
	}

	/* Cache DPCD and EDID for edp. */
	has_dpcd = intel_edp_init_dpcd(intel_dp);

	if (!has_dpcd) {
		/* if this fails, presume the device is a ghost */
		drm_info(&dev_priv->drm,
			 "failed to retrieve link info, disabling eDP\n");
		goto out_vdd_off;
	}

	mutex_lock(&dev->mode_config.mutex);
	edid = drm_get_edid(connector, &intel_dp->aux.ddc);
	if (edid) {
		if (drm_add_edid_modes(connector, edid)) {
			drm_connector_update_edid_property(connector, edid);
			intel_dp->edid_quirks = drm_dp_get_edid_quirks(edid);
		} else {
			/* EDID read but yielded no modes: cache the failure */
			kfree(edid);
			edid = ERR_PTR(-EINVAL);
		}
	} else {
		/* no EDID at all: cache the failure as an ERR_PTR too */
		edid = ERR_PTR(-ENOENT);
	}
	intel_connector->edid = edid;

	fixed_mode = intel_panel_edid_fixed_mode(intel_connector);
	if (fixed_mode)
		downclock_mode = intel_dp_drrs_init(intel_connector, fixed_mode);

	/* fallback to VBT if available for eDP */
	if (!fixed_mode)
		fixed_mode = intel_panel_vbt_fixed_mode(intel_connector);
	mutex_unlock(&dev->mode_config.mutex);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		intel_dp->edp_notifier.notifier_call = edp_notify_handler;
		register_reboot_notifier(&intel_dp->edp_notifier);

		/*
		 * Figure out the current pipe for the initial backlight setup.
		 * If the current pipe isn't valid, try the PPS pipe, and if that
		 * fails just assume pipe A.
		 */
		pipe = vlv_active_pipe(intel_dp);

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = intel_dp->pps_pipe;

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = PIPE_A;

		drm_dbg_kms(&dev_priv->drm,
			    "using pipe %c for initial backlight setup\n",
			    pipe_name(pipe));
	}

	intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
	intel_connector->panel.backlight.power = intel_edp_backlight_power;
	intel_panel_setup_backlight(connector, pipe);

	if (fixed_mode) {
		drm_connector_set_panel_orientation_with_quirk(connector,
				dev_priv->vbt.orientation,
				fixed_mode->hdisplay, fixed_mode->vdisplay);
	}

	return true;

out_vdd_off:
	cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
	/*
	 * vdd might still be enabled due to the delayed vdd off.
	 * Make sure vdd is actually turned off here.
	 */
	with_pps_lock(intel_dp, wakeref)
		edp_panel_vdd_off_sync(intel_dp);

	return false;
}

/*
 * Work item queued after link-training failure: mark the connector link
 * status BAD and send a hotplug uevent so userspace retries the modeset.
 */
static void intel_dp_modeset_retry_work_fn(struct work_struct *work)
{
	struct intel_connector *intel_connector;
	struct drm_connector *connector;

	intel_connector = container_of(work, typeof(*intel_connector),
				       modeset_retry_work);
	connector = &intel_connector->base;
	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id,
		      connector->name);

	/* Grab the locks before changing connector property*/
	mutex_lock(&connector->dev->mode_config.mutex);
	/* Set connector link status to BAD and send a Uevent to notify
	 * userspace to do a modeset.
	 */
	drm_connector_set_link_status_property(connector,
					       DRM_MODE_LINK_STATUS_BAD);
	mutex_unlock(&connector->dev->mode_config.mutex);
	/* Send Hotplug uevent so userspace can reprobe */
	drm_kms_helper_hotplug_event(connector->dev);
}

/*
 * Create and register the DRM connector for @dig_port: set up link rates,
 * AUX channel, MST, hw-state hooks and (for eDP) the panel. Returns false
 * on failure, in which case the connector is cleaned up.
 */
bool
intel_dp_init_connector(struct intel_digital_port *dig_port,
			struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_dp *intel_dp = &dig_port->dp;
	struct intel_encoder *intel_encoder = &dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum port port = intel_encoder->port;
	enum phy phy = intel_port_to_phy(dev_priv, port);
	int type;

	/* Initialize the work for modeset in case of link train failure */
	INIT_WORK(&intel_connector->modeset_retry_work,
		  intel_dp_modeset_retry_work_fn);

	if (drm_WARN(dev, dig_port->max_lanes < 1,
		     "Not enough lanes (%d) for DP on [ENCODER:%d:%s]\n",
		     dig_port->max_lanes, intel_encoder->base.base.id,
		     intel_encoder->base.name))
		return false;

	intel_dp_set_source_rates(intel_dp);
	intel_dp_set_default_sink_rates(intel_dp);
	intel_dp_set_common_rates(intel_dp);

	intel_dp->reset_link_params = true;
	intel_dp->pps_pipe = INVALID_PIPE;
	intel_dp->active_pipe = INVALID_PIPE;

	/* Preserve the current hw state. */
	intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg);
	intel_dp->attached_connector = intel_connector;

	if (intel_dp_is_port_edp(dev_priv, port)) {
		/*
		 * Currently we don't support eDP on TypeC ports, although in
		 * theory it could work on TypeC legacy ports.
		 */
		drm_WARN_ON(dev, intel_phy_is_tc(dev_priv, phy));
		type = DRM_MODE_CONNECTOR_eDP;
	} else {
		type = DRM_MODE_CONNECTOR_DisplayPort;
	}

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		intel_dp->active_pipe = vlv_active_pipe(intel_dp);

	/*
	 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
	 * for DP the encoder type can be set by the caller to
	 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
	 */
	if (type == DRM_MODE_CONNECTOR_eDP)
		intel_encoder->type = INTEL_OUTPUT_EDP;

	/* eDP only on port B and/or C on vlv/chv */
	if (drm_WARN_ON(dev, (IS_VALLEYVIEW(dev_priv) ||
			      IS_CHERRYVIEW(dev_priv)) &&
			intel_dp_is_edp(intel_dp) &&
			port != PORT_B && port != PORT_C))
		return false;

	drm_dbg_kms(&dev_priv->drm,
		    "Adding %s connector on [ENCODER:%d:%s]\n",
		    type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
		    intel_encoder->base.base.id, intel_encoder->base.name);

	drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
	drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);

	if (!HAS_GMCH(dev_priv))
		connector->interlace_allowed = true;
	connector->doublescan_allowed = 0;

	intel_connector->polled = DRM_CONNECTOR_POLL_HPD;

	intel_dp_aux_init(intel_dp);

	intel_connector_attach_encoder(intel_connector, intel_encoder);

	if (HAS_DDI(dev_priv))
		intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
	else
		intel_connector->get_hw_state = intel_connector_get_hw_state;

	/* init MST on ports that can support it */
	intel_dp_mst_encoder_init(dig_port,
				  intel_connector->base.base.id);

	if (!intel_edp_init_connector(intel_dp, intel_connector)) {
		intel_dp_aux_fini(intel_dp);
		intel_dp_mst_encoder_cleanup(dig_port);
		goto fail;
	}

	intel_dp_add_properties(intel_dp, connector);

	/* HDCP is set up only for external (non-eDP) DP ports here */
	if (is_hdcp_supported(dev_priv, port) && !intel_dp_is_edp(intel_dp)) {
		int ret = intel_dp_init_hdcp(dig_port, intel_connector);
		if (ret)
			drm_dbg_kms(&dev_priv->drm,
				    "HDCP init failed, skipping.\n");
	}

	/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
	 * 0xd. Failure to do so will result in spurious interrupts being
	 * generated on the port when a cable is not attached.
	 */
	if (IS_G45(dev_priv)) {
		u32 temp = intel_de_read(dev_priv, PEG_BAND_GAP_DATA);
		intel_de_write(dev_priv, PEG_BAND_GAP_DATA,
			       (temp & ~0xf) | 0xd);
	}

	return true;

fail:
	drm_connector_cleanup(connector);

	return false;
}

/*
 * Allocate and register a DP digital port + connector for @port.
 * Platform-specific enable/disable and link-training hooks are selected
 * below. Returns false (with everything freed) on any failure.
 */
bool intel_dp_init(struct drm_i915_private *dev_priv,
		   i915_reg_t output_reg,
		   enum port port)
{
	struct intel_digital_port *dig_port;
	struct intel_encoder *intel_encoder;
	struct drm_encoder *encoder;
	struct intel_connector *intel_connector;

	dig_port = kzalloc(sizeof(*dig_port), GFP_KERNEL);
	if (!dig_port)
		return false;

	intel_connector = intel_connector_alloc();
	if (!intel_connector)
		goto err_connector_alloc;

	intel_encoder = &dig_port->base;
	encoder = &intel_encoder->base;

	mutex_init(&dig_port->hdcp_mutex);

	if (drm_encoder_init(&dev_priv->drm, &intel_encoder->base,
			     &intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS,
			     "DP %c", port_name(port)))
		goto err_encoder_init;

	intel_encoder->hotplug = intel_dp_hotplug;
	intel_encoder->compute_config = intel_dp_compute_config;
	intel_encoder->get_hw_state = intel_dp_get_hw_state;
	intel_encoder->get_config = intel_dp_get_config;
	intel_encoder->update_pipe = intel_panel_update_backlight;
	intel_encoder->suspend = intel_dp_encoder_suspend;
	if (IS_CHERRYVIEW(dev_priv)) {
		intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
		intel_encoder->pre_enable = chv_pre_enable_dp;
		intel_encoder->enable = vlv_enable_dp;
		intel_encoder->disable = vlv_disable_dp;
		intel_encoder->post_disable = chv_post_disable_dp;
		intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
		intel_encoder->pre_enable = vlv_pre_enable_dp;
		intel_encoder->enable = vlv_enable_dp;
		intel_encoder->disable = vlv_disable_dp;
		intel_encoder->post_disable = vlv_post_disable_dp;
	} else {
		intel_encoder->pre_enable = g4x_pre_enable_dp;
		intel_encoder->enable = g4x_enable_dp;
		intel_encoder->disable = g4x_disable_dp;
		intel_encoder->post_disable = g4x_post_disable_dp;
	}

	/* Select the platform-specific link-training register programming */
	if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
	    (HAS_PCH_CPT(dev_priv) && port != PORT_A))
		dig_port->dp.set_link_train = cpt_set_link_train;
	else
		dig_port->dp.set_link_train = g4x_set_link_train;

	/* ...and the matching vswing/pre-emphasis signal-level helper */
	if (IS_CHERRYVIEW(dev_priv))
		dig_port->dp.set_signal_levels = chv_set_signal_levels;
	else if (IS_VALLEYVIEW(dev_priv))
		dig_port->dp.set_signal_levels = vlv_set_signal_levels;
	else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A)
		dig_port->dp.set_signal_levels = ivb_cpu_edp_set_signal_levels;
	else if (IS_GEN(dev_priv, 6) && port == PORT_A)
		dig_port->dp.set_signal_levels = snb_cpu_edp_set_signal_levels;
	else
		dig_port->dp.set_signal_levels = g4x_set_signal_levels;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv) ||
	    (HAS_PCH_SPLIT(dev_priv) && port != PORT_A)) {
		dig_port->dp.preemph_max = intel_dp_pre_empemph_max_3;
		dig_port->dp.voltage_max = intel_dp_voltage_max_3;
	} else {
		dig_port->dp.preemph_max = intel_dp_pre_empemph_max_2;
		dig_port->dp.voltage_max = intel_dp_voltage_max_2;
	}

	dig_port->dp.output_reg = output_reg;
	dig_port->max_lanes = 4;
	dig_port->dp.regs.dp_tp_ctl = DP_TP_CTL(port);
	dig_port->dp.regs.dp_tp_status = DP_TP_STATUS(port);

	intel_encoder->type = INTEL_OUTPUT_DP;
	intel_encoder->power_domain = intel_port_to_power_domain(port);
	if (IS_CHERRYVIEW(dev_priv)) {
		/* On CHV port D is tied to pipe C; B/C can use pipes A/B */
		if (port == PORT_D)
			intel_encoder->pipe_mask = BIT(PIPE_C);
		else
			intel_encoder->pipe_mask = BIT(PIPE_A) | BIT(PIPE_B);
	} else {
		intel_encoder->pipe_mask = ~0;
	}
	intel_encoder->cloneable = 0;
	intel_encoder->port = port;
	intel_encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port);

	dig_port->hpd_pulse = intel_dp_hpd_pulse;

	/* Pick the platform-specific live-status (connected) probe */
	if (HAS_GMCH(dev_priv)) {
		if (IS_GM45(dev_priv))
			dig_port->connected = gm45_digital_port_connected;
		else
			dig_port->connected = g4x_digital_port_connected;
	} else {
		if (port == PORT_A)
			dig_port->connected = ilk_digital_port_connected;
		else
			dig_port->connected = ibx_digital_port_connected;
	}

	if (port != PORT_A)
		intel_infoframe_init(dig_port);

	dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
	if (!intel_dp_init_connector(dig_port, intel_connector))
		goto err_init_connector;

	return true;

err_init_connector:
	drm_encoder_cleanup(encoder);
err_encoder_init:
	kfree(intel_connector);
err_connector_alloc:
	kfree(dig_port);
	return false;
}

/* Suspend the MST topology manager on every active DDI MST encoder. */
void intel_dp_mst_suspend(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;

	for_each_intel_encoder(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp;

		if (encoder->type != INTEL_OUTPUT_DDI)
			continue;

		intel_dp = enc_to_intel_dp(encoder);

		if (!intel_dp->can_mst)
			continue;

		if (intel_dp->is_mst)
			drm_dp_mst_topology_mgr_suspend(&intel_dp->mst_mgr);
	}
}

/*
 * Resume MST topology managers on all MST-capable DDI encoders; if resume
 * fails for one, drop that encoder back out of MST mode.
 */
void intel_dp_mst_resume(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;

	for_each_intel_encoder(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp;
		int ret;

		if (encoder->type != INTEL_OUTPUT_DDI)
			continue;

		intel_dp = enc_to_intel_dp(encoder);

		if (!intel_dp->can_mst)
			continue;

		ret = drm_dp_mst_topology_mgr_resume(&intel_dp->mst_mgr,
						     true);
		if (ret) {
			intel_dp->is_mst = false;
			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
							false);
		}
	}
}