1/* 2 * Copyright © 2013 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 * IN THE SOFTWARE. 22 * 23 */ 24 25#include <asm/iosf_mbi.h> 26 27#include "i915_drv.h" 28#include "intel_sideband.h" 29 30/* 31 * IOSF sideband, see VLV2_SidebandMsg_HAS.docx and 32 * VLV_VLV2_PUNIT_HAS_0.8.docx 33 */ 34 35/* Standard MMIO read, non-posted */ 36#define SB_MRD_NP 0x00 37/* Standard MMIO write, non-posted */ 38#define SB_MWR_NP 0x01 39/* Private register read, double-word addressing, non-posted */ 40#define SB_CRRDDA_NP 0x06 41/* Private register write, double-word addressing, non-posted */ 42#define SB_CRWRDA_NP 0x07 43 44static void ping(void *info) 45{ 46} 47 48static void __vlv_punit_get(struct drm_i915_private *i915) 49{ 50 iosf_mbi_punit_acquire(); 51 52 /* 53 * Prevent the cpu from sleeping while we use this sideband, otherwise 54 * the punit may cause a machine hang. The issue appears to be isolated 55 * with changing the power state of the CPU package while changing 56 * the power state via the punit, and we have only observed it 57 * reliably on 4-core Baytail systems suggesting the issue is in the 58 * power delivery mechanism and likely to be be board/function 59 * specific. Hence we presume the workaround needs only be applied 60 * to the Valleyview P-unit and not all sideband communications. 61 */ 62 if (IS_VALLEYVIEW(i915)) { 63 cpu_latency_qos_update_request(&i915->sb_qos, 0); 64 on_each_cpu(ping, NULL, 1); 65 } 66} 67 68static void __vlv_punit_put(struct drm_i915_private *i915) 69{ 70 if (IS_VALLEYVIEW(i915)) 71 cpu_latency_qos_update_request(&i915->sb_qos, 72 PM_QOS_DEFAULT_VALUE); 73 74 iosf_mbi_punit_release(); 75} 76 77void vlv_iosf_sb_get(struct drm_i915_private *i915, unsigned long ports) 78{ 79 if (ports & BIT(VLV_IOSF_SB_PUNIT)) 80 __vlv_punit_get(i915); 81 82 mutex_lock(&i915->sb_lock); 83} 84 85void vlv_iosf_sb_put(struct drm_i915_private *i915, unsigned long ports) 86{ 87 mutex_unlock(&i915->sb_lock); 88 89 if (ports & BIT(VLV_IOSF_SB_PUNIT)) 90 __vlv_punit_put(i915); 91} 92 93static int vlv_sideband_rw(struct drm_i915_private *i915, 94 u32 devfn, u32 port, u32 opcode, 95 u32 addr, u32 *val) 96{ 97 struct intel_uncore *uncore = &i915->uncore; 98 const bool is_read = (opcode == SB_MRD_NP || opcode == SB_CRRDDA_NP); 99 int err; 100 101 lockdep_assert_held(&i915->sb_lock); 102 if (port == IOSF_PORT_PUNIT) 103 iosf_mbi_assert_punit_acquired(); 104 105 /* Flush the previous comms, just in case it failed last time. */ 106 if (intel_wait_for_register(uncore, 107 VLV_IOSF_DOORBELL_REQ, IOSF_SB_BUSY, 0, 108 5)) { 109 drm_dbg(&i915->drm, "IOSF sideband idle wait (%s) timed out\n", 110 is_read ? "read" : "write"); 111 return -EAGAIN; 112 } 113 114 preempt_disable(); 115 116 intel_uncore_write_fw(uncore, VLV_IOSF_ADDR, addr); 117 intel_uncore_write_fw(uncore, VLV_IOSF_DATA, is_read ? 0 : *val); 118 intel_uncore_write_fw(uncore, VLV_IOSF_DOORBELL_REQ, 119 (devfn << IOSF_DEVFN_SHIFT) | 120 (opcode << IOSF_OPCODE_SHIFT) | 121 (port << IOSF_PORT_SHIFT) | 122 (0xf << IOSF_BYTE_ENABLES_SHIFT) | 123 (0 << IOSF_BAR_SHIFT) | 124 IOSF_SB_BUSY); 125 126 if (__intel_wait_for_register_fw(uncore, 127 VLV_IOSF_DOORBELL_REQ, IOSF_SB_BUSY, 0, 128 10000, 0, NULL) == 0) { 129 if (is_read) 130 *val = intel_uncore_read_fw(uncore, VLV_IOSF_DATA); 131 err = 0; 132 } else { 133 drm_dbg(&i915->drm, "IOSF sideband finish wait (%s) timed out\n", 134 is_read ? "read" : "write"); 135 err = -ETIMEDOUT; 136 } 137 138 preempt_enable(); 139 140 return err; 141} 142 143u32 vlv_punit_read(struct drm_i915_private *i915, u32 addr) 144{ 145 u32 val = 0; 146 147 vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_PUNIT, 148 SB_CRRDDA_NP, addr, &val); 149 150 return val; 151} 152 153int vlv_punit_write(struct drm_i915_private *i915, u32 addr, u32 val) 154{ 155 return vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_PUNIT, 156 SB_CRWRDA_NP, addr, &val); 157} 158 159u32 vlv_bunit_read(struct drm_i915_private *i915, u32 reg) 160{ 161 u32 val = 0; 162 163 vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_BUNIT, 164 SB_CRRDDA_NP, reg, &val); 165 166 return val; 167} 168 169void vlv_bunit_write(struct drm_i915_private *i915, u32 reg, u32 val) 170{ 171 vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_BUNIT, 172 SB_CRWRDA_NP, reg, &val); 173} 174 175u32 vlv_nc_read(struct drm_i915_private *i915, u8 addr) 176{ 177 u32 val = 0; 178 179 vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_NC, 180 SB_CRRDDA_NP, addr, &val); 181 182 return val; 183} 184 185u32 vlv_iosf_sb_read(struct drm_i915_private *i915, u8 port, u32 reg) 186{ 187 u32 val = 0; 188 189 vlv_sideband_rw(i915, PCI_DEVFN(0, 0), port, 190 SB_CRRDDA_NP, reg, &val); 191 192 return val; 193} 194 195void vlv_iosf_sb_write(struct drm_i915_private *i915, 196 u8 port, u32 reg, u32 val) 197{ 198 vlv_sideband_rw(i915, PCI_DEVFN(0, 0), port, 199 SB_CRWRDA_NP, reg, &val); 200} 201 202u32 vlv_cck_read(struct drm_i915_private *i915, u32 reg) 203{ 204 u32 val = 0; 205 206 vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_CCK, 207 SB_CRRDDA_NP, reg, &val); 208 209 return val; 210} 211 212void vlv_cck_write(struct drm_i915_private *i915, u32 reg, u32 val) 213{ 214 vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_CCK, 215 SB_CRWRDA_NP, reg, &val); 216} 217 218u32 vlv_ccu_read(struct drm_i915_private *i915, u32 reg) 219{ 220 u32 val = 0; 221 222 vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_CCU, 223 SB_CRRDDA_NP, reg, &val); 224 225 return val; 226} 227 228void vlv_ccu_write(struct drm_i915_private *i915, u32 reg, u32 val) 229{ 230 vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_CCU, 231 SB_CRWRDA_NP, reg, &val); 232} 233 234static u32 vlv_dpio_phy_iosf_port(struct drm_i915_private *i915, enum dpio_phy phy) 235{ 236 /* 237 * IOSF_PORT_DPIO: VLV x2 PHY (DP/HDMI B and C), CHV x1 PHY (DP/HDMI D) 238 * IOSF_PORT_DPIO_2: CHV x2 PHY (DP/HDMI B and C) 239 */ 240 if (IS_CHERRYVIEW(i915)) 241 return phy == DPIO_PHY0 ? IOSF_PORT_DPIO_2 : IOSF_PORT_DPIO; 242 else 243 return IOSF_PORT_DPIO; 244} 245 246u32 vlv_dpio_read(struct drm_i915_private *i915, enum pipe pipe, int reg) 247{ 248 u32 port = vlv_dpio_phy_iosf_port(i915, DPIO_PHY(pipe)); 249 u32 val = 0; 250 251 vlv_sideband_rw(i915, DPIO_DEVFN, port, SB_MRD_NP, reg, &val); 252 253 /* 254 * FIXME: There might be some registers where all 1's is a valid value, 255 * so ideally we should check the register offset instead... 256 */ 257 drm_WARN(&i915->drm, val == 0xffffffff, 258 "DPIO read pipe %c reg 0x%x == 0x%x\n", 259 pipe_name(pipe), reg, val); 260 261 return val; 262} 263 264void vlv_dpio_write(struct drm_i915_private *i915, 265 enum pipe pipe, int reg, u32 val) 266{ 267 u32 port = vlv_dpio_phy_iosf_port(i915, DPIO_PHY(pipe)); 268 269 vlv_sideband_rw(i915, DPIO_DEVFN, port, SB_MWR_NP, reg, &val); 270} 271 272u32 vlv_flisdsi_read(struct drm_i915_private *i915, u32 reg) 273{ 274 u32 val = 0; 275 276 vlv_sideband_rw(i915, DPIO_DEVFN, IOSF_PORT_FLISDSI, SB_CRRDDA_NP, 277 reg, &val); 278 return val; 279} 280 281void vlv_flisdsi_write(struct drm_i915_private *i915, u32 reg, u32 val) 282{ 283 vlv_sideband_rw(i915, DPIO_DEVFN, IOSF_PORT_FLISDSI, SB_CRWRDA_NP, 284 reg, &val); 285} 286 287/* SBI access */ 288static int intel_sbi_rw(struct drm_i915_private *i915, u16 reg, 289 enum intel_sbi_destination destination, 290 u32 *val, bool is_read) 291{ 292 struct intel_uncore *uncore = &i915->uncore; 293 u32 cmd; 294 295 lockdep_assert_held(&i915->sb_lock); 296 297 if (intel_wait_for_register_fw(uncore, 298 SBI_CTL_STAT, SBI_BUSY, 0, 299 100)) { 300 drm_err(&i915->drm, 301 "timeout waiting for SBI to become ready\n"); 302 return -EBUSY; 303 } 304 305 intel_uncore_write_fw(uncore, SBI_ADDR, (u32)reg << 16); 306 intel_uncore_write_fw(uncore, SBI_DATA, is_read ? 0 : *val); 307 308 if (destination == SBI_ICLK) 309 cmd = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRRD; 310 else 311 cmd = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IORD; 312 if (!is_read) 313 cmd |= BIT(8); 314 intel_uncore_write_fw(uncore, SBI_CTL_STAT, cmd | SBI_BUSY); 315 316 if (__intel_wait_for_register_fw(uncore, 317 SBI_CTL_STAT, SBI_BUSY, 0, 318 100, 100, &cmd)) { 319 drm_err(&i915->drm, 320 "timeout waiting for SBI to complete read\n"); 321 return -ETIMEDOUT; 322 } 323 324 if (cmd & SBI_RESPONSE_FAIL) { 325 drm_err(&i915->drm, "error during SBI read of reg %x\n", reg); 326 return -ENXIO; 327 } 328 329 if (is_read) 330 *val = intel_uncore_read_fw(uncore, SBI_DATA); 331 332 return 0; 333} 334 335u32 intel_sbi_read(struct drm_i915_private *i915, u16 reg, 336 enum intel_sbi_destination destination) 337{ 338 u32 result = 0; 339 340 intel_sbi_rw(i915, reg, destination, &result, true); 341 342 return result; 343} 344 345void intel_sbi_write(struct drm_i915_private *i915, u16 reg, u32 value, 346 enum intel_sbi_destination destination) 347{ 348 intel_sbi_rw(i915, reg, destination, &value, false); 349} 350 351static int gen6_check_mailbox_status(u32 mbox) 352{ 353 switch (mbox & GEN6_PCODE_ERROR_MASK) { 354 case GEN6_PCODE_SUCCESS: 355 return 0; 356 case GEN6_PCODE_UNIMPLEMENTED_CMD: 357 return -ENODEV; 358 case GEN6_PCODE_ILLEGAL_CMD: 359 return -ENXIO; 360 case GEN6_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE: 361 case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE: 362 return -EOVERFLOW; 363 case GEN6_PCODE_TIMEOUT: 364 return -ETIMEDOUT; 365 default: 366 MISSING_CASE(mbox & GEN6_PCODE_ERROR_MASK); 367 return 0; 368 } 369} 370 371static int gen7_check_mailbox_status(u32 mbox) 372{ 373 switch (mbox & GEN6_PCODE_ERROR_MASK) { 374 case GEN6_PCODE_SUCCESS: 375 return 0; 376 case GEN6_PCODE_ILLEGAL_CMD: 377 return -ENXIO; 378 case GEN7_PCODE_TIMEOUT: 379 return -ETIMEDOUT; 380 case GEN7_PCODE_ILLEGAL_DATA: 381 return -EINVAL; 382 case GEN11_PCODE_ILLEGAL_SUBCOMMAND: 383 return -ENXIO; 384 case GEN11_PCODE_LOCKED: 385 return -EBUSY; 386 case GEN11_PCODE_REJECTED: 387 return -EACCES; 388 case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE: 389 return -EOVERFLOW; 390 default: 391 MISSING_CASE(mbox & GEN6_PCODE_ERROR_MASK); 392 return 0; 393 } 394} 395 396static int __sandybridge_pcode_rw(struct drm_i915_private *i915, 397 u32 mbox, u32 *val, u32 *val1, 398 int fast_timeout_us, 399 int slow_timeout_ms, 400 bool is_read) 401{ 402 struct intel_uncore *uncore = &i915->uncore; 403 404 lockdep_assert_held(&i915->sb_lock); 405 406 /* 407 * GEN6_PCODE_* are outside of the forcewake domain, we can 408 * use te fw I915_READ variants to reduce the amount of work 409 * required when reading/writing. 410 */ 411 412 if (intel_uncore_read_fw(uncore, GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) 413 return -EAGAIN; 414 415 intel_uncore_write_fw(uncore, GEN6_PCODE_DATA, *val); 416 intel_uncore_write_fw(uncore, GEN6_PCODE_DATA1, val1 ? *val1 : 0); 417 intel_uncore_write_fw(uncore, 418 GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox); 419 420 if (__intel_wait_for_register_fw(uncore, 421 GEN6_PCODE_MAILBOX, 422 GEN6_PCODE_READY, 0, 423 fast_timeout_us, 424 slow_timeout_ms, 425 &mbox)) 426 return -ETIMEDOUT; 427 428 if (is_read) 429 *val = intel_uncore_read_fw(uncore, GEN6_PCODE_DATA); 430 if (is_read && val1) 431 *val1 = intel_uncore_read_fw(uncore, GEN6_PCODE_DATA1); 432 433 if (INTEL_GEN(i915) > 6) 434 return gen7_check_mailbox_status(mbox); 435 else 436 return gen6_check_mailbox_status(mbox); 437} 438 439int sandybridge_pcode_read(struct drm_i915_private *i915, u32 mbox, 440 u32 *val, u32 *val1) 441{ 442 int err; 443 444 mutex_lock(&i915->sb_lock); 445 err = __sandybridge_pcode_rw(i915, mbox, val, val1, 446 500, 20, 447 true); 448 mutex_unlock(&i915->sb_lock); 449 450 if (err) { 451 drm_dbg(&i915->drm, 452 "warning: pcode (read from mbox %x) mailbox access failed for %ps: %d\n", 453 mbox, __builtin_return_address(0), err); 454 } 455 456 return err; 457} 458 459int sandybridge_pcode_write_timeout(struct drm_i915_private *i915, 460 u32 mbox, u32 val, 461 int fast_timeout_us, 462 int slow_timeout_ms) 463{ 464 int err; 465 466 mutex_lock(&i915->sb_lock); 467 err = __sandybridge_pcode_rw(i915, mbox, &val, NULL, 468 fast_timeout_us, slow_timeout_ms, 469 false); 470 mutex_unlock(&i915->sb_lock); 471 472 if (err) { 473 drm_dbg(&i915->drm, 474 "warning: pcode (write of 0x%08x to mbox %x) mailbox access failed for %ps: %d\n", 475 val, mbox, __builtin_return_address(0), err); 476 } 477 478 return err; 479} 480 481static bool skl_pcode_try_request(struct drm_i915_private *i915, u32 mbox, 482 u32 request, u32 reply_mask, u32 reply, 483 u32 *status) 484{ 485 *status = __sandybridge_pcode_rw(i915, mbox, &request, NULL, 486 500, 0, 487 true); 488 489 return *status || ((request & reply_mask) == reply); 490} 491 492/** 493 * skl_pcode_request - send PCODE request until acknowledgment 494 * @i915: device private 495 * @mbox: PCODE mailbox ID the request is targeted for 496 * @request: request ID 497 * @reply_mask: mask used to check for request acknowledgment 498 * @reply: value used to check for request acknowledgment 499 * @timeout_base_ms: timeout for polling with preemption enabled 500 * 501 * Keep resending the @request to @mbox until PCODE acknowledges it, PCODE 502 * reports an error or an overall timeout of @timeout_base_ms+50 ms expires. 503 * The request is acknowledged once the PCODE reply dword equals @reply after 504 * applying @reply_mask. Polling is first attempted with preemption enabled 505 * for @timeout_base_ms and if this times out for another 50 ms with 506 * preemption disabled. 507 * 508 * Returns 0 on success, %-ETIMEDOUT in case of a timeout, <0 in case of some 509 * other error as reported by PCODE. 510 */ 511int skl_pcode_request(struct drm_i915_private *i915, u32 mbox, u32 request, 512 u32 reply_mask, u32 reply, int timeout_base_ms) 513{ 514 u32 status; 515 int ret; 516 517 mutex_lock(&i915->sb_lock); 518 519#define COND \ 520 skl_pcode_try_request(i915, mbox, request, reply_mask, reply, &status) 521 522 /* 523 * Prime the PCODE by doing a request first. Normally it guarantees 524 * that a subsequent request, at most @timeout_base_ms later, succeeds. 525 * _wait_for() doesn't guarantee when its passed condition is evaluated 526 * first, so send the first request explicitly. 527 */ 528 if (COND) { 529 ret = 0; 530 goto out; 531 } 532 ret = _wait_for(COND, timeout_base_ms * 1000, 10, 10); 533 if (!ret) 534 goto out; 535 536 /* 537 * The above can time out if the number of requests was low (2 in the 538 * worst case) _and_ PCODE was busy for some reason even after a 539 * (queued) request and @timeout_base_ms delay. As a workaround retry 540 * the poll with preemption disabled to maximize the number of 541 * requests. Increase the timeout from @timeout_base_ms to 50ms to 542 * account for interrupts that could reduce the number of these 543 * requests, and for any quirks of the PCODE firmware that delays 544 * the request completion. 545 */ 546 drm_dbg_kms(&i915->drm, 547 "PCODE timeout, retrying with preemption disabled\n"); 548 drm_WARN_ON_ONCE(&i915->drm, timeout_base_ms > 3); 549 preempt_disable(); 550 ret = wait_for_atomic(COND, 50); 551 preempt_enable(); 552 553out: 554 mutex_unlock(&i915->sb_lock); 555 return ret ? ret : status; 556#undef COND 557} 558 559void intel_pcode_init(struct drm_i915_private *i915) 560{ 561 int ret; 562 563 if (!IS_DGFX(i915)) 564 return; 565 566 ret = skl_pcode_request(i915, DG1_PCODE_STATUS, 567 DG1_UNCORE_GET_INIT_STATUS, 568 DG1_UNCORE_INIT_STATUS_COMPLETE, 569 DG1_UNCORE_INIT_STATUS_COMPLETE, 50); 570 if (ret) 571 drm_err(&i915->drm, "Pcode did not report uncore initialization completion!\n"); 572} 573