// SPDX-License-Identifier: GPL-2.0
/*
 * USB4 specific functionality
 *
 * Copyright (C) 2019, Intel Corporation
 * Authors: Mika Westerberg <mika.westerberg@linux.intel.com>
 *	    Rajmohan Mani <rajmohan.mani@intel.com>
 */

#include <linux/delay.h>
#include <linux/ktime.h>
#include <linux/units.h>

#include "sb_regs.h"
#include "tb.h"

/* How many times a data transfer is retried and max payload in dwords */
#define USB4_DATA_RETRIES		3
#define USB4_DATA_DWORDS		16

/* Sideband register access targets (encoded into PORT_CS_1) */
enum usb4_sb_target {
	USB4_SB_TARGET_ROUTER,
	USB4_SB_TARGET_PARTNER,
	USB4_SB_TARGET_RETIMER,
};

/* NVM_READ metadata layout: dword offset and length in dwords */
#define USB4_NVM_READ_OFFSET_MASK	GENMASK(23, 2)
#define USB4_NVM_READ_OFFSET_SHIFT	2
#define USB4_NVM_READ_LENGTH_MASK	GENMASK(27, 24)
#define USB4_NVM_READ_LENGTH_SHIFT	24

/* NVM_SET_OFFSET re-uses the NVM_READ offset field encoding */
#define USB4_NVM_SET_OFFSET_MASK	USB4_NVM_READ_OFFSET_MASK
#define USB4_NVM_SET_OFFSET_SHIFT	USB4_NVM_READ_OFFSET_SHIFT

/* DROM_READ metadata layout: dword address and size in dwords */
#define USB4_DROM_ADDRESS_MASK		GENMASK(14, 2)
#define USB4_DROM_ADDRESS_SHIFT		2
#define USB4_DROM_SIZE_MASK		GENMASK(19, 15)
#define USB4_DROM_SIZE_SHIFT		15

#define USB4_NVM_SECTOR_SIZE_MASK	GENMASK(23, 0)

/* BUFFER_ALLOC metadata/data layout */
#define USB4_BA_LENGTH_MASK		GENMASK(7, 0)
#define USB4_BA_INDEX_MASK		GENMASK(15, 0)

/* Buffer allocation parameter indices reported by the router */
enum usb4_ba_index {
	USB4_BA_MAX_USB3 = 0x1,
	USB4_BA_MIN_DP_AUX = 0x2,
	USB4_BA_MIN_DP_MAIN = 0x3,
	USB4_BA_MAX_PCIE = 0x4,
	USB4_BA_MAX_HI = 0x5,
};

#define USB4_BA_VALUE_MASK		GENMASK(31, 16)
#define USB4_BA_VALUE_SHIFT		16

/*
 * Run a USB4 router operation natively through the router config
 * space: write metadata (ROUTER_CS_25) and data (ROUTER_CS_9..) if
 * given, kick the opcode with the OV bit in ROUTER_CS_26, then poll
 * for completion and read status/metadata/data back.
 */
static int usb4_native_switch_op(struct tb_switch *sw, u16 opcode,
				 u32 *metadata, u8 *status,
				 const void *tx_data, size_t tx_dwords,
				 void *rx_data, size_t rx_dwords)
{
	u32 val;
	int ret;

	if (metadata) {
		ret = tb_sw_write(sw, metadata, TB_CFG_SWITCH, ROUTER_CS_25, 1);
		if (ret)
			return ret;
	}
	if (tx_dwords) {
		ret = tb_sw_write(sw, tx_data, TB_CFG_SWITCH, ROUTER_CS_9,
				  tx_dwords);
		if (ret)
			return ret;
	}

	/* Setting OV starts the operation; router clears it when done */
	val = opcode | ROUTER_CS_26_OV;
	ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_26, 1);
	if (ret)
		return ret;

	ret = tb_switch_wait_for_bit(sw, ROUTER_CS_26, ROUTER_CS_26_OV, 0, 500);
	if (ret)
		return ret;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_26, 1);
	if (ret)
		return ret;

	/* ONS set means the router does not support this opcode */
	if (val & ROUTER_CS_26_ONS)
		return -EOPNOTSUPP;

	if (status)
		*status = (val & ROUTER_CS_26_STATUS_MASK) >>
			ROUTER_CS_26_STATUS_SHIFT;

	if (metadata) {
		ret = tb_sw_read(sw, metadata, TB_CFG_SWITCH, ROUTER_CS_25, 1);
		if (ret)
			return ret;
	}
	if (rx_dwords) {
		ret = tb_sw_read(sw, rx_data, TB_CFG_SWITCH, ROUTER_CS_9,
				 rx_dwords);
		if (ret)
			return ret;
	}

	return 0;
}

/*
 * Common entry for router operations: prefers the connection manager
 * proxy callback when available, falling back to the native register
 * based implementation.
 */
static int __usb4_switch_op(struct tb_switch *sw, u16 opcode, u32 *metadata,
			    u8 *status, const void *tx_data, size_t tx_dwords,
			    void *rx_data, size_t rx_dwords)
{
	const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;

	if (tx_dwords > USB4_DATA_DWORDS || rx_dwords > USB4_DATA_DWORDS)
		return -EINVAL;

	/*
	 * If the connection manager implementation provides USB4 router
	 * operation proxy callback, call it here instead of running the
	 * operation natively.
	 */
	if (cm_ops->usb4_switch_op) {
		int ret;

		ret = cm_ops->usb4_switch_op(sw, opcode, metadata, status,
					     tx_data, tx_dwords, rx_data,
					     rx_dwords);
		if (ret != -EOPNOTSUPP)
			return ret;

		/*
		 * If the proxy was not supported then run the native
		 * router operation instead.
		 */
	}

	return usb4_native_switch_op(sw, opcode, metadata, status, tx_data,
				     tx_dwords, rx_data, rx_dwords);
}

/* Router operation with no data payload in either direction */
static inline int usb4_switch_op(struct tb_switch *sw, u16 opcode,
				 u32 *metadata, u8 *status)
{
	return __usb4_switch_op(sw, opcode, metadata, status, NULL, 0, NULL, 0);
}

/* Router operation carrying tx and/or rx data payload */
static inline int usb4_switch_op_data(struct tb_switch *sw, u16 opcode,
				      u32 *metadata, u8 *status,
				      const void *tx_data, size_t tx_dwords,
				      void *rx_data, size_t rx_dwords)
{
	return __usb4_switch_op(sw, opcode, metadata, status, tx_data,
				tx_dwords, rx_data, rx_dwords);
}

/*
 * Checks the wake status bits of the router and its USB4 ports after
 * resume and reports wakeup events to the PM core accordingly.
 */
static void usb4_switch_check_wakes(struct tb_switch *sw)
{
	bool wakeup_usb4 = false;
	struct usb4_port *usb4;
	struct tb_port *port;
	bool wakeup = false;
	u32 val;

	if (!device_may_wakeup(&sw->dev))
		return;

	/* PCIe/USB3 wake status is only present on device routers */
	if (tb_route(sw)) {
		if (tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_6, 1))
			return;

		tb_sw_dbg(sw, "PCIe wake: %s, USB3 wake: %s\n",
			  (val & ROUTER_CS_6_WOPS) ? "yes" : "no",
			  (val & ROUTER_CS_6_WOUS) ? "yes" : "no");

		wakeup = val & (ROUTER_CS_6_WOPS | ROUTER_CS_6_WOUS);
	}

	/*
	 * Check for any downstream ports for USB4 wake,
	 * connection wake and disconnection wake.
	 */
	tb_switch_for_each_port(sw, port) {
		if (!port->cap_usb4)
			continue;

		if (tb_port_read(port, &val, TB_CFG_PORT,
				 port->cap_usb4 + PORT_CS_18, 1))
			break;

		tb_port_dbg(port, "USB4 wake: %s, connection wake: %s, disconnection wake: %s\n",
			    (val & PORT_CS_18_WOU4S) ? "yes" : "no",
			    (val & PORT_CS_18_WOCS) ? "yes" : "no",
			    (val & PORT_CS_18_WODS) ? "yes" : "no");

		wakeup_usb4 = val & (PORT_CS_18_WOU4S | PORT_CS_18_WOCS |
				     PORT_CS_18_WODS);

		usb4 = port->usb4;
		if (device_may_wakeup(&usb4->dev) && wakeup_usb4)
			pm_wakeup_event(&usb4->dev, 0);

		wakeup |= wakeup_usb4;
	}

	if (wakeup)
		pm_wakeup_event(&sw->dev, 0);
}

/*
 * Returns true if the link of the given USB4 port operates in USB4
 * mode (TBT compatibility mode bit is clear in PORT_CS_18).
 */
static bool link_is_usb4(struct tb_port *port)
{
	u32 val;

	if (!port->cap_usb4)
		return false;

	if (tb_port_read(port, &val, TB_CFG_PORT,
			 port->cap_usb4 + PORT_CS_18, 1))
		return false;

	return !(val & PORT_CS_18_TCM);
}

/**
 * usb4_switch_setup() - Additional setup for USB4 device
 * @sw: USB4 router to setup
 *
 * USB4 routers need additional settings in order to enable all the
 * tunneling. This function enables USB and PCIe tunneling if it can be
 * enabled (e.g the parent switch also supports them). If USB tunneling
 * is not available for some reason (like that there is Thunderbolt 3
 * switch upstream) then the internal xHCI controller is enabled
 * instead.
 *
 * This does not set the configuration valid bit of the router. To do
 * that call usb4_switch_configuration_valid().
 */
int usb4_switch_setup(struct tb_switch *sw)
{
	struct tb_switch *parent = tb_switch_parent(sw);
	struct tb_port *down;
	bool tbt3, xhci;
	u32 val = 0;
	int ret;

	usb4_switch_check_wakes(sw);

	/* Host routers need no additional setup here */
	if (!tb_route(sw))
		return 0;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_6, 1);
	if (ret)
		return ret;

	down = tb_switch_downstream_port(sw);
	sw->link_usb4 = link_is_usb4(down);
	tb_sw_dbg(sw, "link: %s\n", sw->link_usb4 ? "USB4" : "TBT");

	xhci = val & ROUTER_CS_6_HCI;
	tbt3 = !(val & ROUTER_CS_6_TNS);

	tb_sw_dbg(sw, "TBT3 support: %s, xHCI: %s\n",
		  tbt3 ? "yes" : "no", xhci ? "yes" : "no");

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
	if (ret)
		return ret;

	/* USB3 tunneling supersedes the internal xHCI */
	if (tb_acpi_may_tunnel_usb3() && sw->link_usb4 &&
	    tb_switch_find_port(parent, TB_TYPE_USB3_DOWN)) {
		val |= ROUTER_CS_5_UTO;
		xhci = false;
	}

	/*
	 * Only enable PCIe tunneling if the parent router supports it
	 * and it is not disabled.
	 */
	if (tb_acpi_may_tunnel_pcie() &&
	    tb_switch_find_port(parent, TB_TYPE_PCIE_DOWN)) {
		val |= ROUTER_CS_5_PTO;
		/*
		 * xHCI can be enabled if PCIe tunneling is supported
		 * and the parent does not have any USB3 downstream
		 * adapters (so we cannot do USB 3.x tunneling).
		 */
		if (xhci)
			val |= ROUTER_CS_5_HCO;
	}

	/* TBT3 supported by the CM */
	val &= ~ROUTER_CS_5_CNS;

	return tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
}

/**
 * usb4_switch_configuration_valid() - Set tunneling configuration to be valid
 * @sw: USB4 router
 *
 * Sets configuration valid bit for the router. Must be called before
 * any tunnels can be set through the router and after
 * usb4_switch_setup() has been called. Can be called to host and device
 * routers (does nothing for the latter).
 *
 * Returns %0 in success and negative errno otherwise.
 */
int usb4_switch_configuration_valid(struct tb_switch *sw)
{
	u32 val;
	int ret;

	if (!tb_route(sw))
		return 0;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
	if (ret)
		return ret;

	val |= ROUTER_CS_5_CV;

	ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
	if (ret)
		return ret;

	/* Wait for the router to acknowledge with the CR bit */
	return tb_switch_wait_for_bit(sw, ROUTER_CS_6, ROUTER_CS_6_CR,
				      ROUTER_CS_6_CR, 50);
}

/**
 * usb4_switch_read_uid() - Read UID from USB4 router
 * @sw: USB4 router
 * @uid: UID is stored here
 *
 * Reads 64-bit UID from USB4 router config space.
 */
int usb4_switch_read_uid(struct tb_switch *sw, u64 *uid)
{
	return tb_sw_read(sw, uid, TB_CFG_SWITCH, ROUTER_CS_7, 2);
}

/*
 * tb_nvm_read_data() block callback: reads @dwords dwords starting at
 * dword address @dwaddress from the router DROM using the DROM_READ
 * router operation.
 */
static int usb4_switch_drom_read_block(void *data,
				       unsigned int dwaddress, void *buf,
				       size_t dwords)
{
	struct tb_switch *sw = data;
	u8 status = 0;
	u32 metadata;
	int ret;

	metadata = (dwords << USB4_DROM_SIZE_SHIFT) & USB4_DROM_SIZE_MASK;
	metadata |= (dwaddress << USB4_DROM_ADDRESS_SHIFT) &
		USB4_DROM_ADDRESS_MASK;

	ret = usb4_switch_op_data(sw, USB4_SWITCH_OP_DROM_READ, &metadata,
				  &status, NULL, 0, buf, dwords);
	if (ret)
		return ret;

	return status ? -EIO : 0;
}

/**
 * usb4_switch_drom_read() - Read arbitrary bytes from USB4 router DROM
 * @sw: USB4 router
 * @address: Byte address inside DROM to start reading
 * @buf: Buffer where the DROM content is stored
 * @size: Number of bytes to read from DROM
 *
 * Uses USB4 router operations to read router DROM. For devices this
 * should always work but for hosts it may return %-EOPNOTSUPP in which
 * case the host router does not have DROM.
 */
int usb4_switch_drom_read(struct tb_switch *sw, unsigned int address, void *buf,
			  size_t size)
{
	return tb_nvm_read_data(address, buf, size, USB4_DATA_RETRIES,
				usb4_switch_drom_read_block, sw);
}

/**
 * usb4_switch_lane_bonding_possible() - Are conditions met for lane bonding
 * @sw: USB4 router
 *
 * Checks whether conditions are met so that lane bonding can be
 * established with the upstream router. Call only for device routers.
 */
bool usb4_switch_lane_bonding_possible(struct tb_switch *sw)
{
	struct tb_port *up;
	int ret;
	u32 val;

	up = tb_upstream_port(sw);
	ret = tb_port_read(up, &val, TB_CFG_PORT, up->cap_usb4 + PORT_CS_18, 1);
	if (ret)
		return false;

	return !!(val & PORT_CS_18_BE);
}

/**
 * usb4_switch_set_wake() - Enabled/disable wake
 * @sw: USB4 router
 * @flags: Wakeup flags (%0 to disable)
 *
 * Enables/disables router to wake up from sleep.
 */
int usb4_switch_set_wake(struct tb_switch *sw, unsigned int flags)
{
	struct usb4_port *usb4;
	struct tb_port *port;
	u64 route = tb_route(sw);
	u32 val;
	int ret;

	/*
	 * Enable wakes coming from all USB4 downstream ports (from
	 * child routers). For device routers do this also for the
	 * upstream USB4 port.
	 */
	tb_switch_for_each_port(sw, port) {
		if (!tb_port_is_null(port))
			continue;
		if (!route && tb_is_upstream_port(port))
			continue;
		if (!port->cap_usb4)
			continue;

		ret = tb_port_read(port, &val, TB_CFG_PORT,
				   port->cap_usb4 + PORT_CS_19, 1);
		if (ret)
			return ret;

		val &= ~(PORT_CS_19_WOC | PORT_CS_19_WOD | PORT_CS_19_WOU4);

		if (tb_is_upstream_port(port)) {
			val |= PORT_CS_19_WOU4;
		} else {
			bool configured = val & PORT_CS_19_PC;
			usb4 = port->usb4;

			/*
			 * NOTE(review): bitwise | below mixes a flag
			 * mask with the bool from device_may_wakeup();
			 * works because only zero/non-zero matters but
			 * logical || would be clearer — confirm intent.
			 */
			if (((flags & TB_WAKE_ON_CONNECT) |
			    device_may_wakeup(&usb4->dev)) && !configured)
				val |= PORT_CS_19_WOC;
			if (((flags & TB_WAKE_ON_DISCONNECT) |
			    device_may_wakeup(&usb4->dev)) && configured)
				val |= PORT_CS_19_WOD;
			if ((flags & TB_WAKE_ON_USB4) && configured)
				val |= PORT_CS_19_WOU4;
		}

		ret = tb_port_write(port, &val, TB_CFG_PORT,
				    port->cap_usb4 + PORT_CS_19, 1);
		if (ret)
			return ret;
	}

	/*
	 * Enable wakes from PCIe, USB 3.x and DP on this router. Only
	 * needed for device routers.
	 */
	if (route) {
		ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
		if (ret)
			return ret;

		val &= ~(ROUTER_CS_5_WOP | ROUTER_CS_5_WOU | ROUTER_CS_5_WOD);
		if (flags & TB_WAKE_ON_USB3)
			val |= ROUTER_CS_5_WOU;
		if (flags & TB_WAKE_ON_PCIE)
			val |= ROUTER_CS_5_WOP;
		if (flags & TB_WAKE_ON_DP)
			val |= ROUTER_CS_5_WOD;

		ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
		if (ret)
			return ret;
	}

	return 0;
}

/**
 * usb4_switch_set_sleep() - Prepare the router to enter sleep
 * @sw: USB4 router
 *
 * Sets sleep bit for the router. Returns when the router sleep ready
 * bit has been asserted.
 */
int usb4_switch_set_sleep(struct tb_switch *sw)
{
	int ret;
	u32 val;

	/* Set sleep bit and wait for sleep ready to be asserted */
	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
	if (ret)
		return ret;

	val |= ROUTER_CS_5_SLP;

	ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
	if (ret)
		return ret;

	return tb_switch_wait_for_bit(sw, ROUTER_CS_6, ROUTER_CS_6_SLPR,
				      ROUTER_CS_6_SLPR, 500);
}

/**
 * usb4_switch_nvm_sector_size() - Return router NVM sector size
 * @sw: USB4 router
 *
 * If the router supports NVM operations this function returns the NVM
 * sector size in bytes. If NVM operations are not supported returns
 * %-EOPNOTSUPP.
 */
int usb4_switch_nvm_sector_size(struct tb_switch *sw)
{
	u32 metadata;
	u8 status;
	int ret;

	ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_SECTOR_SIZE, &metadata,
			     &status);
	if (ret)
		return ret;

	/* Status 0x2 means the operation is not supported */
	if (status)
		return status == 0x2 ? -EOPNOTSUPP : -EIO;

	return metadata & USB4_NVM_SECTOR_SIZE_MASK;
}

/*
 * tb_nvm_read_data() block callback: reads @dwords dwords starting at
 * dword address @dwaddress from the router NVM using the NVM_READ
 * router operation.
 */
static int usb4_switch_nvm_read_block(void *data,
	unsigned int dwaddress, void *buf, size_t dwords)
{
	struct tb_switch *sw = data;
	u8 status = 0;
	u32 metadata;
	int ret;

	metadata = (dwords << USB4_NVM_READ_LENGTH_SHIFT) &
		   USB4_NVM_READ_LENGTH_MASK;
	metadata |= (dwaddress << USB4_NVM_READ_OFFSET_SHIFT) &
		   USB4_NVM_READ_OFFSET_MASK;

	ret = usb4_switch_op_data(sw, USB4_SWITCH_OP_NVM_READ, &metadata,
				  &status, NULL, 0, buf, dwords);
	if (ret)
		return ret;

	return status ? -EIO : 0;
}

/**
 * usb4_switch_nvm_read() - Read arbitrary bytes from router NVM
 * @sw: USB4 router
 * @address: Starting address in bytes
 * @buf: Read data is placed here
 * @size: How many bytes to read
 *
 * Reads NVM contents of the router. If NVM is not supported returns
 * %-EOPNOTSUPP.
 */
int usb4_switch_nvm_read(struct tb_switch *sw, unsigned int address, void *buf,
			 size_t size)
{
	return tb_nvm_read_data(address, buf, size, USB4_DATA_RETRIES,
				usb4_switch_nvm_read_block, sw);
}

/**
 * usb4_switch_nvm_set_offset() - Set NVM write offset
 * @sw: USB4 router
 * @address: Start offset
 *
 * Explicitly sets NVM write offset. Normally when writing to NVM this
 * is done automatically by usb4_switch_nvm_write().
 *
 * Returns %0 in success and negative errno if there was a failure.
 */
int usb4_switch_nvm_set_offset(struct tb_switch *sw, unsigned int address)
{
	u32 metadata, dwaddress;
	u8 status = 0;
	int ret;

	/* The operation takes a dword address, not a byte address */
	dwaddress = address / 4;
	metadata = (dwaddress << USB4_NVM_SET_OFFSET_SHIFT) &
		   USB4_NVM_SET_OFFSET_MASK;

	ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_SET_OFFSET, &metadata,
			     &status);
	if (ret)
		return ret;

	return status ? -EIO : 0;
}

/*
 * tb_nvm_write_data() block callback: writes the next @dwords dwords
 * to the router NVM. The write offset advances implicitly from the
 * previously set offset (see usb4_switch_nvm_set_offset()).
 */
static int usb4_switch_nvm_write_next_block(void *data, unsigned int dwaddress,
					    const void *buf, size_t dwords)
{
	struct tb_switch *sw = data;
	u8 status;
	int ret;

	ret = usb4_switch_op_data(sw, USB4_SWITCH_OP_NVM_WRITE, NULL, &status,
				  buf, dwords, NULL, 0);
	if (ret)
		return ret;

	return status ? -EIO : 0;
}

/**
 * usb4_switch_nvm_write() - Write to the router NVM
 * @sw: USB4 router
 * @address: Start address where to write in bytes
 * @buf: Pointer to the data to write
 * @size: Size of @buf in bytes
 *
 * Writes @buf to the router NVM using USB4 router operations. If NVM
 * write is not supported returns %-EOPNOTSUPP.
 */
int usb4_switch_nvm_write(struct tb_switch *sw, unsigned int address,
			  const void *buf, size_t size)
{
	int ret;

	ret = usb4_switch_nvm_set_offset(sw, address);
	if (ret)
		return ret;

	return tb_nvm_write_data(address, buf, size, USB4_DATA_RETRIES,
				 usb4_switch_nvm_write_next_block, sw);
}

/**
 * usb4_switch_nvm_authenticate() - Authenticate new NVM
 * @sw: USB4 router
 *
 * After the new NVM has been written via usb4_switch_nvm_write(), this
 * function triggers NVM authentication process. The router gets power
 * cycled and if the authentication is successful the new NVM starts
 * running. In case of failure returns negative errno.
 *
 * The caller should call usb4_switch_nvm_authenticate_status() to read
 * the status of the authentication after power cycle. It should be the
 * first router operation to avoid the status being lost.
 */
int usb4_switch_nvm_authenticate(struct tb_switch *sw)
{
	int ret;

	ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_AUTH, NULL, NULL);
	switch (ret) {
	/*
	 * The router is power cycled once NVM_AUTH is started so it is
	 * expected to get any of the following errors back.
	 */
	case -EACCES:
	case -ENOTCONN:
	case -ETIMEDOUT:
		return 0;

	default:
		return ret;
	}
}

/**
 * usb4_switch_nvm_authenticate_status() - Read status of last NVM authenticate
 * @sw: USB4 router
 * @status: Status code of the operation
 *
 * The function checks if there is status available from the last NVM
 * authenticate router operation. If there is status then %0 is returned
 * and the status code is placed in @status. Returns negative errno in case
 * of failure.
 *
 * Must be called before any other router operation.
 */
int usb4_switch_nvm_authenticate_status(struct tb_switch *sw, u32 *status)
{
	const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;
	u16 opcode;
	u32 val;
	int ret;

	/* Let the connection manager proxy handle this if it can */
	if (cm_ops->usb4_switch_nvm_authenticate_status) {
		ret = cm_ops->usb4_switch_nvm_authenticate_status(sw, status);
		if (ret != -EOPNOTSUPP)
			return ret;
	}

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_26, 1);
	if (ret)
		return ret;

	/* Check that the opcode is correct */
	opcode = val & ROUTER_CS_26_OPCODE_MASK;
	if (opcode == USB4_SWITCH_OP_NVM_AUTH) {
		if (val & ROUTER_CS_26_OV)
			return -EBUSY;
		if (val & ROUTER_CS_26_ONS)
			return -EOPNOTSUPP;

		*status = (val & ROUTER_CS_26_STATUS_MASK) >>
			ROUTER_CS_26_STATUS_SHIFT;
	} else {
		*status = 0;
	}

	return 0;
}

/**
 * usb4_switch_credits_init() - Read buffer allocation parameters
 * @sw: USB4 router
 *
 * Reads @sw buffer allocation parameters and initializes @sw buffer
 * allocation fields accordingly. Specifically @sw->credits_allocation
 * is set to %true if these parameters can be used in tunneling.
 *
 * Returns %0 on success and negative errno otherwise.
 */
int usb4_switch_credits_init(struct tb_switch *sw)
{
	int max_usb3, min_dp_aux, min_dp_main, max_pcie, max_dma;
	int ret, length, i, nports;
	const struct tb_port *port;
	u32 data[USB4_DATA_DWORDS];
	u32 metadata = 0;
	u8 status = 0;

	memset(data, 0, sizeof(data));
	ret = usb4_switch_op_data(sw, USB4_SWITCH_OP_BUFFER_ALLOC, &metadata,
				  &status, NULL, 0, data, ARRAY_SIZE(data));
	if (ret)
		return ret;
	if (status)
		return -EIO;

	length = metadata & USB4_BA_LENGTH_MASK;
	if (WARN_ON(length > ARRAY_SIZE(data)))
		return -EMSGSIZE;

	/* -1 marks "not reported by the router" */
	max_usb3 = -1;
	min_dp_aux = -1;
	min_dp_main = -1;
	max_pcie = -1;
	max_dma = -1;

	tb_sw_dbg(sw, "credit allocation parameters:\n");

	for (i = 0; i < length; i++) {
		u16 index, value;

		index = data[i] & USB4_BA_INDEX_MASK;
		value = (data[i] & USB4_BA_VALUE_MASK) >> USB4_BA_VALUE_SHIFT;

		switch (index) {
		case USB4_BA_MAX_USB3:
			tb_sw_dbg(sw, " USB3: %u\n", value);
			max_usb3 = value;
			break;
		case USB4_BA_MIN_DP_AUX:
			tb_sw_dbg(sw, " DP AUX: %u\n", value);
			min_dp_aux = value;
			break;
		case USB4_BA_MIN_DP_MAIN:
			tb_sw_dbg(sw, " DP main: %u\n", value);
			min_dp_main = value;
			break;
		case USB4_BA_MAX_PCIE:
			tb_sw_dbg(sw, " PCIe: %u\n", value);
			max_pcie = value;
			break;
		case USB4_BA_MAX_HI:
			tb_sw_dbg(sw, " DMA: %u\n", value);
			max_dma = value;
			break;
		default:
			tb_sw_dbg(sw, " unknown credit allocation index %#x, skipping\n",
				  index);
			break;
		}
	}

	/*
	 * Validate the buffer allocation preferences. If we find
	 * issues, log a warning and fall back using the hard-coded
	 * values.
	 */

	/* Host router must report baMaxHI */
	if (!tb_route(sw) && max_dma < 0) {
		tb_sw_warn(sw, "host router is missing baMaxHI\n");
		goto err_invalid;
	}

	nports = 0;
	tb_switch_for_each_port(sw, port) {
		if (tb_port_is_null(port))
			nports++;
	}

	/* Must have DP buffer allocation (multiple USB4 ports) */
	if (nports > 2 && (min_dp_aux < 0 || min_dp_main < 0)) {
		tb_sw_warn(sw, "multiple USB4 ports require baMinDPaux/baMinDPmain\n");
		goto err_invalid;
	}

	tb_switch_for_each_port(sw, port) {
		if (tb_port_is_dpout(port) && min_dp_main < 0) {
			tb_sw_warn(sw, "missing baMinDPmain");
			goto err_invalid;
		}
		if ((tb_port_is_dpin(port) || tb_port_is_dpout(port)) &&
		    min_dp_aux < 0) {
			tb_sw_warn(sw, "missing baMinDPaux");
			goto err_invalid;
		}
		if ((tb_port_is_usb3_down(port) || tb_port_is_usb3_up(port)) &&
		    max_usb3 < 0) {
			tb_sw_warn(sw, "missing baMaxUSB3");
			goto err_invalid;
		}
		if ((tb_port_is_pcie_down(port) || tb_port_is_pcie_up(port)) &&
		    max_pcie < 0) {
			tb_sw_warn(sw, "missing baMaxPCIe");
			goto err_invalid;
		}
	}

	/*
	 * Buffer allocation passed the validation so we can use it in
	 * path creation.
	 */
	sw->credit_allocation = true;
	if (max_usb3 > 0)
		sw->max_usb3_credits = max_usb3;
	if (min_dp_aux > 0)
		sw->min_dp_aux_credits = min_dp_aux;
	if (min_dp_main > 0)
		sw->min_dp_main_credits = min_dp_main;
	if (max_pcie > 0)
		sw->max_pcie_credits = max_pcie;
	if (max_dma > 0)
		sw->max_dma_credits = max_dma;

	return 0;

err_invalid:
	return -EINVAL;
}

/**
 * usb4_switch_query_dp_resource() - Query availability of DP IN resource
 * @sw: USB4 router
 * @in: DP IN adapter
 *
 * For DP tunneling this function can be used to query availability of
 * DP IN resource. Returns true if the resource is available for DP
 * tunneling, false otherwise.
869 */ 870bool usb4_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in) 871{ 872 u32 metadata = in->port; 873 u8 status; 874 int ret; 875 876 ret = usb4_switch_op(sw, USB4_SWITCH_OP_QUERY_DP_RESOURCE, &metadata, 877 &status); 878 /* 879 * If DP resource allocation is not supported assume it is 880 * always available. 881 */ 882 if (ret == -EOPNOTSUPP) 883 return true; 884 if (ret) 885 return false; 886 887 return !status; 888} 889 890/** 891 * usb4_switch_alloc_dp_resource() - Allocate DP IN resource 892 * @sw: USB4 router 893 * @in: DP IN adapter 894 * 895 * Allocates DP IN resource for DP tunneling using USB4 router 896 * operations. If the resource was allocated returns %0. Otherwise 897 * returns negative errno, in particular %-EBUSY if the resource is 898 * already allocated. 899 */ 900int usb4_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in) 901{ 902 u32 metadata = in->port; 903 u8 status; 904 int ret; 905 906 ret = usb4_switch_op(sw, USB4_SWITCH_OP_ALLOC_DP_RESOURCE, &metadata, 907 &status); 908 if (ret == -EOPNOTSUPP) 909 return 0; 910 if (ret) 911 return ret; 912 913 return status ? -EBUSY : 0; 914} 915 916/** 917 * usb4_switch_dealloc_dp_resource() - Releases allocated DP IN resource 918 * @sw: USB4 router 919 * @in: DP IN adapter 920 * 921 * Releases the previously allocated DP IN resource. 922 */ 923int usb4_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in) 924{ 925 u32 metadata = in->port; 926 u8 status; 927 int ret; 928 929 ret = usb4_switch_op(sw, USB4_SWITCH_OP_DEALLOC_DP_RESOURCE, &metadata, 930 &status); 931 if (ret == -EOPNOTSUPP) 932 return 0; 933 if (ret) 934 return ret; 935 936 return status ? 
-EIO : 0; 937} 938 939static int usb4_port_idx(const struct tb_switch *sw, const struct tb_port *port) 940{ 941 struct tb_port *p; 942 int usb4_idx = 0; 943 944 /* Assume port is primary */ 945 tb_switch_for_each_port(sw, p) { 946 if (!tb_port_is_null(p)) 947 continue; 948 if (tb_is_upstream_port(p)) 949 continue; 950 if (!p->link_nr) { 951 if (p == port) 952 break; 953 usb4_idx++; 954 } 955 } 956 957 return usb4_idx; 958} 959 960/** 961 * usb4_switch_map_pcie_down() - Map USB4 port to a PCIe downstream adapter 962 * @sw: USB4 router 963 * @port: USB4 port 964 * 965 * USB4 routers have direct mapping between USB4 ports and PCIe 966 * downstream adapters where the PCIe topology is extended. This 967 * function returns the corresponding downstream PCIe adapter or %NULL 968 * if no such mapping was possible. 969 */ 970struct tb_port *usb4_switch_map_pcie_down(struct tb_switch *sw, 971 const struct tb_port *port) 972{ 973 int usb4_idx = usb4_port_idx(sw, port); 974 struct tb_port *p; 975 int pcie_idx = 0; 976 977 /* Find PCIe down port matching usb4_port */ 978 tb_switch_for_each_port(sw, p) { 979 if (!tb_port_is_pcie_down(p)) 980 continue; 981 982 if (pcie_idx == usb4_idx) 983 return p; 984 985 pcie_idx++; 986 } 987 988 return NULL; 989} 990 991/** 992 * usb4_switch_map_usb3_down() - Map USB4 port to a USB3 downstream adapter 993 * @sw: USB4 router 994 * @port: USB4 port 995 * 996 * USB4 routers have direct mapping between USB4 ports and USB 3.x 997 * downstream adapters where the USB 3.x topology is extended. This 998 * function returns the corresponding downstream USB 3.x adapter or 999 * %NULL if no such mapping was possible. 
1000 */ 1001struct tb_port *usb4_switch_map_usb3_down(struct tb_switch *sw, 1002 const struct tb_port *port) 1003{ 1004 int usb4_idx = usb4_port_idx(sw, port); 1005 struct tb_port *p; 1006 int usb_idx = 0; 1007 1008 /* Find USB3 down port matching usb4_port */ 1009 tb_switch_for_each_port(sw, p) { 1010 if (!tb_port_is_usb3_down(p)) 1011 continue; 1012 1013 if (usb_idx == usb4_idx) 1014 return p; 1015 1016 usb_idx++; 1017 } 1018 1019 return NULL; 1020} 1021 1022/** 1023 * usb4_switch_add_ports() - Add USB4 ports for this router 1024 * @sw: USB4 router 1025 * 1026 * For USB4 router finds all USB4 ports and registers devices for each. 1027 * Can be called to any router. 1028 * 1029 * Return %0 in case of success and negative errno in case of failure. 1030 */ 1031int usb4_switch_add_ports(struct tb_switch *sw) 1032{ 1033 struct tb_port *port; 1034 1035 if (tb_switch_is_icm(sw) || !tb_switch_is_usb4(sw)) 1036 return 0; 1037 1038 tb_switch_for_each_port(sw, port) { 1039 struct usb4_port *usb4; 1040 1041 if (!tb_port_is_null(port)) 1042 continue; 1043 if (!port->cap_usb4) 1044 continue; 1045 1046 usb4 = usb4_port_device_add(port); 1047 if (IS_ERR(usb4)) { 1048 usb4_switch_remove_ports(sw); 1049 return PTR_ERR(usb4); 1050 } 1051 1052 port->usb4 = usb4; 1053 } 1054 1055 return 0; 1056} 1057 1058/** 1059 * usb4_switch_remove_ports() - Removes USB4 ports from this router 1060 * @sw: USB4 router 1061 * 1062 * Unregisters previously registered USB4 ports. 1063 */ 1064void usb4_switch_remove_ports(struct tb_switch *sw) 1065{ 1066 struct tb_port *port; 1067 1068 tb_switch_for_each_port(sw, port) { 1069 if (port->usb4) { 1070 usb4_port_device_remove(port->usb4); 1071 port->usb4 = NULL; 1072 } 1073 } 1074} 1075 1076/** 1077 * usb4_port_unlock() - Unlock USB4 downstream port 1078 * @port: USB4 port to unlock 1079 * 1080 * Unlocks USB4 downstream port so that the connection manager can 1081 * access the router below this port. 
1082 */ 1083int usb4_port_unlock(struct tb_port *port) 1084{ 1085 int ret; 1086 u32 val; 1087 1088 ret = tb_port_read(port, &val, TB_CFG_PORT, ADP_CS_4, 1); 1089 if (ret) 1090 return ret; 1091 1092 val &= ~ADP_CS_4_LCK; 1093 return tb_port_write(port, &val, TB_CFG_PORT, ADP_CS_4, 1); 1094} 1095 1096/** 1097 * usb4_port_hotplug_enable() - Enables hotplug for a port 1098 * @port: USB4 port to operate on 1099 * 1100 * Enables hot plug events on a given port. This is only intended 1101 * to be used on lane, DP-IN, and DP-OUT adapters. 1102 */ 1103int usb4_port_hotplug_enable(struct tb_port *port) 1104{ 1105 int ret; 1106 u32 val; 1107 1108 ret = tb_port_read(port, &val, TB_CFG_PORT, ADP_CS_5, 1); 1109 if (ret) 1110 return ret; 1111 1112 val &= ~ADP_CS_5_DHP; 1113 return tb_port_write(port, &val, TB_CFG_PORT, ADP_CS_5, 1); 1114} 1115 1116static int usb4_port_set_configured(struct tb_port *port, bool configured) 1117{ 1118 int ret; 1119 u32 val; 1120 1121 if (!port->cap_usb4) 1122 return -EINVAL; 1123 1124 ret = tb_port_read(port, &val, TB_CFG_PORT, 1125 port->cap_usb4 + PORT_CS_19, 1); 1126 if (ret) 1127 return ret; 1128 1129 if (configured) 1130 val |= PORT_CS_19_PC; 1131 else 1132 val &= ~PORT_CS_19_PC; 1133 1134 return tb_port_write(port, &val, TB_CFG_PORT, 1135 port->cap_usb4 + PORT_CS_19, 1); 1136} 1137 1138/** 1139 * usb4_port_configure() - Set USB4 port configured 1140 * @port: USB4 router 1141 * 1142 * Sets the USB4 link to be configured for power management purposes. 1143 */ 1144int usb4_port_configure(struct tb_port *port) 1145{ 1146 return usb4_port_set_configured(port, true); 1147} 1148 1149/** 1150 * usb4_port_unconfigure() - Set USB4 port unconfigured 1151 * @port: USB4 router 1152 * 1153 * Sets the USB4 link to be unconfigured for power management purposes. 
1154 */ 1155void usb4_port_unconfigure(struct tb_port *port) 1156{ 1157 usb4_port_set_configured(port, false); 1158} 1159 1160static int usb4_set_xdomain_configured(struct tb_port *port, bool configured) 1161{ 1162 int ret; 1163 u32 val; 1164 1165 if (!port->cap_usb4) 1166 return -EINVAL; 1167 1168 ret = tb_port_read(port, &val, TB_CFG_PORT, 1169 port->cap_usb4 + PORT_CS_19, 1); 1170 if (ret) 1171 return ret; 1172 1173 if (configured) 1174 val |= PORT_CS_19_PID; 1175 else 1176 val &= ~PORT_CS_19_PID; 1177 1178 return tb_port_write(port, &val, TB_CFG_PORT, 1179 port->cap_usb4 + PORT_CS_19, 1); 1180} 1181 1182/** 1183 * usb4_port_configure_xdomain() - Configure port for XDomain 1184 * @port: USB4 port connected to another host 1185 * @xd: XDomain that is connected to the port 1186 * 1187 * Marks the USB4 port as being connected to another host and updates 1188 * the link type. Returns %0 in success and negative errno in failure. 1189 */ 1190int usb4_port_configure_xdomain(struct tb_port *port, struct tb_xdomain *xd) 1191{ 1192 xd->link_usb4 = link_is_usb4(port); 1193 return usb4_set_xdomain_configured(port, true); 1194} 1195 1196/** 1197 * usb4_port_unconfigure_xdomain() - Unconfigure port for XDomain 1198 * @port: USB4 port that was connected to another host 1199 * 1200 * Clears USB4 port from being marked as XDomain. 
 */
void usb4_port_unconfigure_xdomain(struct tb_port *port)
{
	usb4_set_xdomain_configured(port, false);
}

/*
 * Polls @offset until (val & bit) == value or @timeout_msec expires.
 * Returns %0 on match, %-ETIMEDOUT on timeout and negative errno on a
 * failed register read.
 */
static int usb4_port_wait_for_bit(struct tb_port *port, u32 offset, u32 bit,
				  u32 value, int timeout_msec)
{
	ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);

	do {
		u32 val;
		int ret;

		ret = tb_port_read(port, &val, TB_CFG_PORT, offset, 1);
		if (ret)
			return ret;

		if ((val & bit) == value)
			return 0;

		usleep_range(50, 100);
	} while (ktime_before(ktime_get(), timeout));

	return -ETIMEDOUT;
}

/* Reads up to USB4_DATA_DWORDS of sideband data starting at PORT_CS_2 */
static int usb4_port_read_data(struct tb_port *port, void *data, size_t dwords)
{
	if (dwords > USB4_DATA_DWORDS)
		return -EINVAL;

	return tb_port_read(port, data, TB_CFG_PORT, port->cap_usb4 + PORT_CS_2,
			    dwords);
}

/* Writes up to USB4_DATA_DWORDS of sideband data starting at PORT_CS_2 */
static int usb4_port_write_data(struct tb_port *port, const void *data,
				size_t dwords)
{
	if (dwords > USB4_DATA_DWORDS)
		return -EINVAL;

	return tb_port_write(port, data, TB_CFG_PORT, port->cap_usb4 + PORT_CS_2,
			     dwords);
}

/*
 * Issues a sideband register read to @target (router, partner or
 * retimer at @index). Returns %-ENODEV if the target did not respond,
 * %-EIO if the transaction completed with an error, and copies the read
 * data to @buf (when non-NULL) on success.
 */
static int usb4_port_sb_read(struct tb_port *port, enum usb4_sb_target target,
			     u8 index, u8 reg, void *buf, u8 size)
{
	size_t dwords = DIV_ROUND_UP(size, 4);
	int ret;
	u32 val;

	if (!port->cap_usb4)
		return -EINVAL;

	/* Build the sideband access descriptor and mark it pending */
	val = reg;
	val |= size << PORT_CS_1_LENGTH_SHIFT;
	val |= (target << PORT_CS_1_TARGET_SHIFT) & PORT_CS_1_TARGET_MASK;
	if (target == USB4_SB_TARGET_RETIMER)
		val |= (index << PORT_CS_1_RETIMER_INDEX_SHIFT);
	val |= PORT_CS_1_PND;

	ret = tb_port_write(port, &val, TB_CFG_PORT,
			    port->cap_usb4 + PORT_CS_1, 1);
	if (ret)
		return ret;

	/* Wait for the pending bit to clear */
	ret = usb4_port_wait_for_bit(port, port->cap_usb4 + PORT_CS_1,
				     PORT_CS_1_PND, 0, 500);
	if (ret)
		return ret;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_usb4 + PORT_CS_1, 1);
	if (ret)
		return ret;

	if (val & PORT_CS_1_NR)
		return -ENODEV;
	if (val & PORT_CS_1_RC)
		return -EIO;

	return buf ? usb4_port_read_data(port, buf, dwords) : 0;
}

/*
 * Issues a sideband register write to @target. Same error semantics as
 * usb4_port_sb_read().
 */
static int usb4_port_sb_write(struct tb_port *port, enum usb4_sb_target target,
			      u8 index, u8 reg, const void *buf, u8 size)
{
	size_t dwords = DIV_ROUND_UP(size, 4);
	int ret;
	u32 val;

	if (!port->cap_usb4)
		return -EINVAL;

	/* Stage the data before triggering the write transaction */
	if (buf) {
		ret = usb4_port_write_data(port, buf, dwords);
		if (ret)
			return ret;
	}

	val = reg;
	val |= size << PORT_CS_1_LENGTH_SHIFT;
	val |= PORT_CS_1_WNR_WRITE;
	val |= (target << PORT_CS_1_TARGET_SHIFT) & PORT_CS_1_TARGET_MASK;
	if (target == USB4_SB_TARGET_RETIMER)
		val |= (index << PORT_CS_1_RETIMER_INDEX_SHIFT);
	val |= PORT_CS_1_PND;

	ret = tb_port_write(port, &val, TB_CFG_PORT,
			    port->cap_usb4 + PORT_CS_1, 1);
	if (ret)
		return ret;

	ret = usb4_port_wait_for_bit(port, port->cap_usb4 + PORT_CS_1,
				     PORT_CS_1_PND, 0, 500);
	if (ret)
		return ret;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_usb4 + PORT_CS_1, 1);
	if (ret)
		return ret;

	if (val & PORT_CS_1_NR)
		return -ENODEV;
	if (val & PORT_CS_1_RC)
		return -EIO;

	return 0;
}

/* Maps a sideband opcode completion value to a kernel errno */
static int usb4_port_sb_opcode_err_to_errno(u32 val)
{
	switch (val) {
	case 0:
		return 0;
	case USB4_SB_OPCODE_ERR:
		return -EAGAIN;
	case USB4_SB_OPCODE_ONS:
		return -EOPNOTSUPP;
	default:
		return -EIO;
	}
}

/*
 * Runs a sideband operation: writes @opcode and polls the opcode
 * register until it changes (completion) or @timeout_msec expires.
 */
static int usb4_port_sb_op(struct tb_port *port, enum usb4_sb_target target,
			   u8 index, enum usb4_sb_opcode opcode, int timeout_msec)
{
	ktime_t timeout;
	u32 val;
	int ret;

	val = opcode;
	ret = usb4_port_sb_write(port, target, index, USB4_SB_OPCODE, &val,
				 sizeof(val));
	if (ret)
		return ret;

	timeout = ktime_add_ms(ktime_get(), timeout_msec);

	do {
		/* Check results */
		ret = usb4_port_sb_read(port, target, index, USB4_SB_OPCODE,
					&val, sizeof(val));
		if (ret)
			return ret;

		/* The opcode field changes once the operation completes */
		if (val != opcode)
			return usb4_port_sb_opcode_err_to_errno(val);
	} while (ktime_before(ktime_get(), timeout));

	return -ETIMEDOUT;
}

/* Writes the online/offline state via the ROUTER_OFFLINE sideband opcode */
static int usb4_port_set_router_offline(struct tb_port *port, bool offline)
{
	u32 val = !offline;
	int ret;

	ret = usb4_port_sb_write(port, USB4_SB_TARGET_ROUTER, 0,
				 USB4_SB_METADATA, &val, sizeof(val));
	if (ret)
		return ret;

	val = USB4_SB_OPCODE_ROUTER_OFFLINE;
	return usb4_port_sb_write(port, USB4_SB_TARGET_ROUTER, 0,
				  USB4_SB_OPCODE, &val, sizeof(val));
}

/**
 * usb4_port_router_offline() - Put the USB4 port to offline mode
 * @port: USB4 port
 *
 * This function puts the USB4 port into offline mode. In this mode the
 * port does not react on hotplug events anymore. This needs to be
 * called before retimer access is done when the USB4 links is not up.
 *
 * Returns %0 in case of success and negative errno if there was an
 * error.
 */
int usb4_port_router_offline(struct tb_port *port)
{
	return usb4_port_set_router_offline(port, true);
}

/**
 * usb4_port_router_online() - Put the USB4 port back to online
 * @port: USB4 port
 *
 * Makes the USB4 port functional again.
 */
int usb4_port_router_online(struct tb_port *port)
{
	return usb4_port_set_router_offline(port, false);
}

/**
 * usb4_port_enumerate_retimers() - Send RT broadcast transaction
 * @port: USB4 port
 *
 * This forces the USB4 port to send broadcast RT transaction which
 * makes the retimers on the link to assign index to themselves.
 * Returns %0 in case of success and negative errno if there was an error.
 */
int usb4_port_enumerate_retimers(struct tb_port *port)
{
	u32 val;

	val = USB4_SB_OPCODE_ENUMERATE_RETIMERS;
	return usb4_port_sb_write(port, USB4_SB_TARGET_ROUTER, 0,
				  USB4_SB_OPCODE, &val, sizeof(val));
}

/**
 * usb4_port_clx_supported() - Check if CLx is supported by the link
 * @port: Port to check for CLx support for
 *
 * PORT_CS_18_CPS bit reflects if the link supports CLx including
 * active cables (if connected on the link).
 */
bool usb4_port_clx_supported(struct tb_port *port)
{
	int ret;
	u32 val;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_usb4 + PORT_CS_18, 1);
	if (ret)
		return false;

	return !!(val & PORT_CS_18_CPS);
}

/**
 * usb4_port_margining_caps() - Read USB4 port margining capabilities
 * @port: USB4 port
 * @caps: Array with at least two elements to hold the results
 *
 * Reads the USB4 port lane margining capabilities into @caps.
 * Returns %0 in case of success and negative errno otherwise.
 */
int usb4_port_margining_caps(struct tb_port *port, u32 *caps)
{
	int ret;

	ret = usb4_port_sb_op(port, USB4_SB_TARGET_ROUTER, 0,
			      USB4_SB_OPCODE_READ_LANE_MARGINING_CAP, 500);
	if (ret)
		return ret;

	return usb4_port_sb_read(port, USB4_SB_TARGET_ROUTER, 0,
				 USB4_SB_DATA, caps, sizeof(*caps) * 2);
}

/**
 * usb4_port_hw_margin() - Run hardware lane margining on port
 * @port: USB4 port
 * @lanes: Which lanes to run (must match the port capabilities). Can be
 *	   %0, %1 or %7.
 * @ber_level: BER level contour value
 * @timing: Perform timing margining instead of voltage
 * @right_high: Use Right/high margin instead of left/low
 * @results: Array with at least two elements to hold the results
 *
 * Runs hardware lane margining on USB4 port and returns the result in
 * @results.
 */
int usb4_port_hw_margin(struct tb_port *port, unsigned int lanes,
			unsigned int ber_level, bool timing, bool right_high,
			u32 *results)
{
	u32 val;
	int ret;

	/* Encode the margining parameters into the metadata register */
	val = lanes;
	if (timing)
		val |= USB4_MARGIN_HW_TIME;
	if (right_high)
		val |= USB4_MARGIN_HW_RH;
	if (ber_level)
		val |= (ber_level << USB4_MARGIN_HW_BER_SHIFT) &
			USB4_MARGIN_HW_BER_MASK;

	ret = usb4_port_sb_write(port, USB4_SB_TARGET_ROUTER, 0,
				 USB4_SB_METADATA, &val, sizeof(val));
	if (ret)
		return ret;

	ret = usb4_port_sb_op(port, USB4_SB_TARGET_ROUTER, 0,
			      USB4_SB_OPCODE_RUN_HW_LANE_MARGINING, 2500);
	if (ret)
		return ret;

	return usb4_port_sb_read(port, USB4_SB_TARGET_ROUTER, 0,
				 USB4_SB_DATA, results, sizeof(*results) * 2);
}

/**
 * usb4_port_sw_margin() - Run software lane margining on port
 * @port: USB4 port
 * @lanes: Which lanes to run (must match the port capabilities). Can be
 *	   %0, %1 or %7.
 * @timing: Perform timing margining instead of voltage
 * @right_high: Use Right/high margin instead of left/low
 * @counter: What to do with the error counter
 *
 * Runs software lane margining on USB4 port. Read back the error
 * counters by calling usb4_port_sw_margin_errors(). Returns %0 in
 * success and negative errno otherwise.
 */
int usb4_port_sw_margin(struct tb_port *port, unsigned int lanes, bool timing,
			bool right_high, u32 counter)
{
	u32 val;
	int ret;

	/* Encode the margining parameters into the metadata register */
	val = lanes;
	if (timing)
		val |= USB4_MARGIN_SW_TIME;
	if (right_high)
		val |= USB4_MARGIN_SW_RH;
	val |= (counter << USB4_MARGIN_SW_COUNTER_SHIFT) &
		USB4_MARGIN_SW_COUNTER_MASK;

	ret = usb4_port_sb_write(port, USB4_SB_TARGET_ROUTER, 0,
				 USB4_SB_METADATA, &val, sizeof(val));
	if (ret)
		return ret;

	return usb4_port_sb_op(port, USB4_SB_TARGET_ROUTER, 0,
			       USB4_SB_OPCODE_RUN_SW_LANE_MARGINING, 2500);
}

/**
 * usb4_port_sw_margin_errors() - Read the software margining error counters
 * @port: USB4 port
 * @errors: Error metadata is copied here.
 *
 * This reads back the software margining error counters from the port.
 * Returns %0 in success and negative errno otherwise.
 */
int usb4_port_sw_margin_errors(struct tb_port *port, u32 *errors)
{
	int ret;

	ret = usb4_port_sb_op(port, USB4_SB_TARGET_ROUTER, 0,
			      USB4_SB_OPCODE_READ_SW_MARGIN_ERR, 150);
	if (ret)
		return ret;

	return usb4_port_sb_read(port, USB4_SB_TARGET_ROUTER, 0,
				 USB4_SB_METADATA, errors, sizeof(*errors));
}

/* Convenience wrapper for sideband operations targeting a retimer */
static inline int usb4_port_retimer_op(struct tb_port *port, u8 index,
				       enum usb4_sb_opcode opcode,
				       int timeout_msec)
{
	return usb4_port_sb_op(port, USB4_SB_TARGET_RETIMER, index, opcode,
			       timeout_msec);
}

/**
 * usb4_port_retimer_set_inbound_sbtx() - Enable sideband channel transactions
 * @port: USB4 port
 * @index: Retimer index
 *
 * Enables sideband channel transactions on SBTX. Can be used when USB4
 * link does not go up, for example if there is no device connected.
 */
int usb4_port_retimer_set_inbound_sbtx(struct tb_port *port, u8 index)
{
	int ret;

	ret = usb4_port_retimer_op(port, index, USB4_SB_OPCODE_SET_INBOUND_SBTX,
				   500);

	if (ret != -ENODEV)
		return ret;

	/*
	 * Per the USB4 retimer spec, the retimer is not required to
	 * send an RT (Retimer Transaction) response for the first
	 * SET_INBOUND_SBTX command
	 */
	return usb4_port_retimer_op(port, index, USB4_SB_OPCODE_SET_INBOUND_SBTX,
				    500);
}

/**
 * usb4_port_retimer_unset_inbound_sbtx() - Disable sideband channel transactions
 * @port: USB4 port
 * @index: Retimer index
 *
 * Disables sideband channel transactions on SBTX. The reverse of
 * usb4_port_retimer_set_inbound_sbtx().
 */
int usb4_port_retimer_unset_inbound_sbtx(struct tb_port *port, u8 index)
{
	return usb4_port_retimer_op(port, index,
				    USB4_SB_OPCODE_UNSET_INBOUND_SBTX, 500);
}

/**
 * usb4_port_retimer_read() - Read from retimer sideband registers
 * @port: USB4 port
 * @index: Retimer index
 * @reg: Sideband register to read
 * @buf: Data from @reg is stored here
 * @size: Number of bytes to read
 *
 * Function reads retimer sideband registers starting from @reg. The
 * retimer is connected to @port at @index. Returns %0 in case of
 * success, and read data is copied to @buf. If there is no retimer
 * present at given @index returns %-ENODEV. In any other failure
 * returns negative errno.
 */
int usb4_port_retimer_read(struct tb_port *port, u8 index, u8 reg, void *buf,
			   u8 size)
{
	return usb4_port_sb_read(port, USB4_SB_TARGET_RETIMER, index, reg, buf,
				 size);
}

/**
 * usb4_port_retimer_write() - Write to retimer sideband registers
 * @port: USB4 port
 * @index: Retimer index
 * @reg: Sideband register to write
 * @buf: Data that is written starting from @reg
 * @size: Number of bytes to write
 *
 * Writes retimer sideband registers starting from @reg. The retimer is
 * connected to @port at @index. Returns %0 in case of success. If there
 * is no retimer present at given @index returns %-ENODEV. In any other
 * failure returns negative errno.
 */
int usb4_port_retimer_write(struct tb_port *port, u8 index, u8 reg,
			    const void *buf, u8 size)
{
	return usb4_port_sb_write(port, USB4_SB_TARGET_RETIMER, index, reg, buf,
				  size);
}

/**
 * usb4_port_retimer_is_last() - Is the retimer last on-board retimer
 * @port: USB4 port
 * @index: Retimer index
 *
 * If the retimer at @index is last one (connected directly to the
 * Type-C port) this function returns %1. If it is not returns %0. If
 * the retimer is not present returns %-ENODEV. Otherwise returns
 * negative errno.
 */
int usb4_port_retimer_is_last(struct tb_port *port, u8 index)
{
	u32 metadata;
	int ret;

	ret = usb4_port_retimer_op(port, index, USB4_SB_OPCODE_QUERY_LAST_RETIMER,
				   500);
	if (ret)
		return ret;

	ret = usb4_port_retimer_read(port, index, USB4_SB_METADATA, &metadata,
				     sizeof(metadata));
	/* Bit 0 of the metadata carries the "is last" answer */
	return ret ? ret : metadata & 1;
}

/**
 * usb4_port_retimer_nvm_sector_size() - Read retimer NVM sector size
 * @port: USB4 port
 * @index: Retimer index
 *
 * Reads NVM sector size (in bytes) of a retimer at @index. This
 * operation can be used to determine whether the retimer supports NVM
 * upgrade for example. Returns sector size in bytes or negative errno
 * in case of error. Specifically returns %-ENODEV if there is no
 * retimer at @index.
 */
int usb4_port_retimer_nvm_sector_size(struct tb_port *port, u8 index)
{
	u32 metadata;
	int ret;

	ret = usb4_port_retimer_op(port, index, USB4_SB_OPCODE_GET_NVM_SECTOR_SIZE,
				   500);
	if (ret)
		return ret;

	ret = usb4_port_retimer_read(port, index, USB4_SB_METADATA, &metadata,
				     sizeof(metadata));
	return ret ? ret : metadata & USB4_NVM_SECTOR_SIZE_MASK;
}

/**
 * usb4_port_retimer_nvm_set_offset() - Set NVM write offset
 * @port: USB4 port
 * @index: Retimer index
 * @address: Start offset
 *
 * Explicitly sets NVM write offset. Normally when writing to NVM this is
 * done automatically by usb4_port_retimer_nvm_write().
 *
 * Returns %0 in success and negative errno if there was a failure.
 */
int usb4_port_retimer_nvm_set_offset(struct tb_port *port, u8 index,
				     unsigned int address)
{
	u32 metadata, dwaddress;
	int ret;

	/* The offset is communicated as a dword address in the metadata */
	dwaddress = address / 4;
	metadata = (dwaddress << USB4_NVM_SET_OFFSET_SHIFT) &
		  USB4_NVM_SET_OFFSET_MASK;

	ret = usb4_port_retimer_write(port, index, USB4_SB_METADATA, &metadata,
				      sizeof(metadata));
	if (ret)
		return ret;

	return usb4_port_retimer_op(port, index, USB4_SB_OPCODE_NVM_SET_OFFSET,
				    500);
}

/* Identifies the retimer that an NVM block transfer callback operates on */
struct retimer_info {
	struct tb_port *port;
	u8 index;
};

/* Writes one block of NVM data; used as callback for tb_nvm_write_data() */
static int usb4_port_retimer_nvm_write_next_block(void *data,
	unsigned int dwaddress, const void *buf, size_t dwords)

{
	const struct retimer_info *info = data;
	struct tb_port *port = info->port;
	u8 index = info->index;
	int ret;

	ret = usb4_port_retimer_write(port, index, USB4_SB_DATA,
				      buf, dwords * 4);
	if (ret)
		return ret;

	return usb4_port_retimer_op(port, index,
				    USB4_SB_OPCODE_NVM_BLOCK_WRITE, 1000);
}

/**
 * usb4_port_retimer_nvm_write() - Write to retimer NVM
 * @port: USB4 port
 * @index: Retimer index
 * @address: Byte address where to start the write
 * @buf: Data to write
 * @size: Size in bytes how much to write
 *
 * Writes @size bytes from @buf to the retimer NVM. Used for NVM
 * upgrade. Returns %0 if the data was written successfully and negative
 * errno in case of failure. Specifically returns %-ENODEV if there is
 * no retimer at @index.
 */
int usb4_port_retimer_nvm_write(struct tb_port *port, u8 index, unsigned int address,
				const void *buf, size_t size)
{
	struct retimer_info info = { .port = port, .index = index };
	int ret;

	ret = usb4_port_retimer_nvm_set_offset(port, index, address);
	if (ret)
		return ret;

	return tb_nvm_write_data(address, buf, size, USB4_DATA_RETRIES,
				 usb4_port_retimer_nvm_write_next_block, &info);
}

/**
 * usb4_port_retimer_nvm_authenticate() - Start retimer NVM upgrade
 * @port: USB4 port
 * @index: Retimer index
 *
 * After the new NVM image has been written via usb4_port_retimer_nvm_write()
 * this function can be used to trigger the NVM upgrade process. If
 * successful the retimer restarts with the new NVM and may not have the
 * index set so one needs to call usb4_port_enumerate_retimers() to
 * force index to be assigned.
 */
int usb4_port_retimer_nvm_authenticate(struct tb_port *port, u8 index)
{
	u32 val;

	/*
	 * We need to use the raw operation here because once the
	 * authentication completes the retimer index is not set anymore
	 * so we do not get back the status now.
	 */
	val = USB4_SB_OPCODE_NVM_AUTH_WRITE;
	return usb4_port_sb_write(port, USB4_SB_TARGET_RETIMER, index,
				  USB4_SB_OPCODE, &val, sizeof(val));
}

/**
 * usb4_port_retimer_nvm_authenticate_status() - Read status of NVM upgrade
 * @port: USB4 port
 * @index: Retimer index
 * @status: Raw status code read from metadata
 *
 * This can be called after usb4_port_retimer_nvm_authenticate() and
 * usb4_port_enumerate_retimers() to fetch status of the NVM upgrade.
 *
 * Returns %0 if the authentication status was successfully read. The
 * completion metadata (the result) is then stored into @status. If
 * reading the status fails, returns negative errno.
 */
int usb4_port_retimer_nvm_authenticate_status(struct tb_port *port, u8 index,
					      u32 *status)
{
	u32 metadata, val;
	int ret;

	ret = usb4_port_retimer_read(port, index, USB4_SB_OPCODE, &val,
				     sizeof(val));
	if (ret)
		return ret;

	ret = usb4_port_sb_opcode_err_to_errno(val);
	switch (ret) {
	case 0:
		*status = 0;
		return 0;

	case -EAGAIN:
		/* Operation failed; the actual result is in the metadata */
		ret = usb4_port_retimer_read(port, index, USB4_SB_METADATA,
					     &metadata, sizeof(metadata));
		if (ret)
			return ret;

		*status = metadata & USB4_SB_METADATA_NVM_AUTH_WRITE_MASK;
		return 0;

	default:
		return ret;
	}
}

/* Reads one block of NVM data; used as callback for tb_nvm_read_data() */
static int usb4_port_retimer_nvm_read_block(void *data, unsigned int dwaddress,
					    void *buf, size_t dwords)
{
	const struct retimer_info *info = data;
	struct tb_port *port = info->port;
	u8 index = info->index;
	u32 metadata;
	int ret;

	/* Zero length field means a full USB4_DATA_DWORDS read */
	metadata = dwaddress << USB4_NVM_READ_OFFSET_SHIFT;
	if (dwords < USB4_DATA_DWORDS)
		metadata |= dwords << USB4_NVM_READ_LENGTH_SHIFT;

	ret = usb4_port_retimer_write(port, index, USB4_SB_METADATA, &metadata,
				      sizeof(metadata));
	if (ret)
		return ret;

	ret = usb4_port_retimer_op(port, index, USB4_SB_OPCODE_NVM_READ, 500);
	if (ret)
		return ret;

	return usb4_port_retimer_read(port, index, USB4_SB_DATA, buf,
				      dwords * 4);
}

/**
 * usb4_port_retimer_nvm_read() - Read contents of retimer NVM
 * @port: USB4 port
 * @index: Retimer index
 * @address: NVM address (in bytes) to start reading
 * @buf: Data read from NVM is stored here
 * @size: Number of bytes to read
 *
 * Reads retimer NVM and copies the contents to @buf. Returns %0 if the
 * read was successful and negative errno in case of failure.
 * Specifically returns %-ENODEV if there is no retimer at @index.
 */
int usb4_port_retimer_nvm_read(struct tb_port *port, u8 index,
			       unsigned int address, void *buf, size_t size)
{
	struct retimer_info info = { .port = port, .index = index };

	return tb_nvm_read_data(address, buf, size, USB4_DATA_RETRIES,
				usb4_port_retimer_nvm_read_block, &info);
}

/* Clamps @bw to the optional per-port bandwidth limitation, if set */
static inline unsigned int
usb4_usb3_port_max_bandwidth(const struct tb_port *port, unsigned int bw)
{
	/* Take the possible bandwidth limitation into account */
	if (port->max_bw)
		return min(bw, port->max_bw);
	return bw;
}

/**
 * usb4_usb3_port_max_link_rate() - Maximum support USB3 link rate
 * @port: USB3 adapter port
 *
 * Return maximum supported link rate of a USB3 adapter in Mb/s.
 * Negative errno in case of error.
 */
int usb4_usb3_port_max_link_rate(struct tb_port *port)
{
	int ret, lr;
	u32 val;

	if (!tb_port_is_usb3_down(port) && !tb_port_is_usb3_up(port))
		return -EINVAL;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_adap + ADP_USB3_CS_4, 1);
	if (ret)
		return ret;

	lr = (val & ADP_USB3_CS_4_MSLR_MASK) >> ADP_USB3_CS_4_MSLR_SHIFT;
	ret = lr == ADP_USB3_CS_4_MSLR_20G ? 20000 : 10000;

	return usb4_usb3_port_max_bandwidth(port, ret);
}

/**
 * usb4_usb3_port_actual_link_rate() - Established USB3 link rate
 * @port: USB3 adapter port
 *
 * Return actual established link rate of a USB3 adapter in Mb/s. If the
 * link is not up returns %0 and negative errno in case of failure.
 */
int usb4_usb3_port_actual_link_rate(struct tb_port *port)
{
	int ret, lr;
	u32 val;

	if (!tb_port_is_usb3_down(port) && !tb_port_is_usb3_up(port))
		return -EINVAL;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_adap + ADP_USB3_CS_4, 1);
	if (ret)
		return ret;

	/* Link not valid -> no established rate */
	if (!(val & ADP_USB3_CS_4_ULV))
		return 0;

	lr = val & ADP_USB3_CS_4_ALR_MASK;
	ret = lr == ADP_USB3_CS_4_ALR_20G ? 20000 : 10000;

	return usb4_usb3_port_max_bandwidth(port, ret);
}

/*
 * Sets or clears the CM request (CMR) bit on a host router USB3 down
 * adapter and waits for the adapter to acknowledge via HCA.
 */
static int usb4_usb3_port_cm_request(struct tb_port *port, bool request)
{
	int ret;
	u32 val;

	if (!tb_port_is_usb3_down(port))
		return -EINVAL;
	if (tb_route(port->sw))
		return -EINVAL;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_adap + ADP_USB3_CS_2, 1);
	if (ret)
		return ret;

	if (request)
		val |= ADP_USB3_CS_2_CMR;
	else
		val &= ~ADP_USB3_CS_2_CMR;

	ret = tb_port_write(port, &val, TB_CFG_PORT,
			    port->cap_adap + ADP_USB3_CS_2, 1);
	if (ret)
		return ret;

	/*
	 * We can use val here directly as the CMR bit is in the same place
	 * as HCA. Just mask out others.
	 */
	val &= ADP_USB3_CS_2_CMR;
	return usb4_port_wait_for_bit(port, port->cap_adap + ADP_USB3_CS_1,
				      ADP_USB3_CS_1_HCA, val, 1500);
}

static inline int usb4_usb3_port_set_cm_request(struct tb_port *port)
{
	return usb4_usb3_port_cm_request(port, true);
}

static inline int usb4_usb3_port_clear_cm_request(struct tb_port *port)
{
	return usb4_usb3_port_cm_request(port, false);
}

/* Converts adapter bandwidth register units (scaled bytes/uframe) to Mb/s */
static unsigned int usb3_bw_to_mbps(u32 bw, u8 scale)
{
	unsigned long uframes;

	uframes = bw * 512UL << scale;
	return DIV_ROUND_CLOSEST(uframes * 8000, MEGA);
}

/* Converts Mb/s to adapter bandwidth register units, rounding up */
static u32 mbps_to_usb3_bw(unsigned int mbps, u8 scale)
{
	unsigned long uframes;

	/* 1 uframe is 1/8 ms (125 us) -> 1 / 8000 s */
	uframes = ((unsigned long)mbps * MEGA) / 8000;
	return DIV_ROUND_UP(uframes, 512UL << scale);
}

/* Reads allocated up/downstream bandwidth (in Mb/s) from ADP_USB3_CS_2 */
static int usb4_usb3_port_read_allocated_bandwidth(struct tb_port *port,
						   int *upstream_bw,
						   int *downstream_bw)
{
	u32 val, bw, scale;
	int ret;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_adap + ADP_USB3_CS_2, 1);
	if (ret)
		return ret;

	ret = tb_port_read(port, &scale, TB_CFG_PORT,
			   port->cap_adap + ADP_USB3_CS_3, 1);
	if (ret)
		return ret;

	scale &= ADP_USB3_CS_3_SCALE_MASK;

	bw = val & ADP_USB3_CS_2_AUBW_MASK;
	*upstream_bw = usb3_bw_to_mbps(bw, scale);

	bw = (val & ADP_USB3_CS_2_ADBW_MASK) >> ADP_USB3_CS_2_ADBW_SHIFT;
	*downstream_bw = usb3_bw_to_mbps(bw, scale);

	return 0;
}

/**
 * usb4_usb3_port_allocated_bandwidth() - Bandwidth allocated for USB3
 * @port: USB3 adapter port
 * @upstream_bw: Allocated upstream bandwidth is stored here
 * @downstream_bw: Allocated downstream bandwidth is stored here
 *
 * Stores currently allocated USB3 bandwidth into @upstream_bw and
 * @downstream_bw in Mb/s.
 * Returns %0 in case of success and negative errno in failure.
 */
int usb4_usb3_port_allocated_bandwidth(struct tb_port *port, int *upstream_bw,
				       int *downstream_bw)
{
	int ret;

	ret = usb4_usb3_port_set_cm_request(port);
	if (ret)
		return ret;

	ret = usb4_usb3_port_read_allocated_bandwidth(port, upstream_bw,
						      downstream_bw);
	/* Always release the CM request, even when the read failed */
	usb4_usb3_port_clear_cm_request(port);

	return ret;
}

/* Reads consumed up/downstream bandwidth (in Mb/s) from ADP_USB3_CS_1 */
static int usb4_usb3_port_read_consumed_bandwidth(struct tb_port *port,
						  int *upstream_bw,
						  int *downstream_bw)
{
	u32 val, bw, scale;
	int ret;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_adap + ADP_USB3_CS_1, 1);
	if (ret)
		return ret;

	ret = tb_port_read(port, &scale, TB_CFG_PORT,
			   port->cap_adap + ADP_USB3_CS_3, 1);
	if (ret)
		return ret;

	scale &= ADP_USB3_CS_3_SCALE_MASK;

	bw = val & ADP_USB3_CS_1_CUBW_MASK;
	*upstream_bw = usb3_bw_to_mbps(bw, scale);

	bw = (val & ADP_USB3_CS_1_CDBW_MASK) >> ADP_USB3_CS_1_CDBW_SHIFT;
	*downstream_bw = usb3_bw_to_mbps(bw, scale);

	return 0;
}

/* Programs the allocated up/downstream bandwidth registers in Mb/s */
static int usb4_usb3_port_write_allocated_bandwidth(struct tb_port *port,
						    int upstream_bw,
						    int downstream_bw)
{
	u32 val, ubw, dbw, scale;
	int ret, max_bw;

	/* Figure out suitable scale */
	scale = 0;
	max_bw = max(upstream_bw, downstream_bw);
	while (scale < 64) {
		/* The bandwidth fields are 12 bits wide, so values must fit below 4096 */
		if (mbps_to_usb3_bw(max_bw, scale) < 4096)
			break;
		scale++;
	}

	if (WARN_ON(scale >= 64))
		return -EINVAL;

	ret = tb_port_write(port, &scale, TB_CFG_PORT,
			    port->cap_adap + ADP_USB3_CS_3, 1);
	if (ret)
		return ret;

	ubw = mbps_to_usb3_bw(upstream_bw, scale);
	dbw = mbps_to_usb3_bw(downstream_bw, scale);

	tb_port_dbg(port, "scaled bandwidth %u/%u, scale %u\n", ubw, dbw, scale);

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_adap + ADP_USB3_CS_2, 1);
	if (ret)
		return ret;

	val &= ~(ADP_USB3_CS_2_AUBW_MASK | ADP_USB3_CS_2_ADBW_MASK);
	val |= dbw << ADP_USB3_CS_2_ADBW_SHIFT;
	val |= ubw;

	return tb_port_write(port, &val, TB_CFG_PORT,
			     port->cap_adap + ADP_USB3_CS_2, 1);
}

/**
 * usb4_usb3_port_allocate_bandwidth() - Allocate bandwidth for USB3
 * @port: USB3 adapter port
 * @upstream_bw: New upstream bandwidth
 * @downstream_bw: New downstream bandwidth
 *
 * This can be used to set how much bandwidth is allocated for the USB3
 * tunneled isochronous traffic. @upstream_bw and @downstream_bw are the
 * new values programmed to the USB3 adapter allocation registers. If
 * the values are lower than what is currently consumed the allocation
 * is set to what is currently consumed instead (consumed bandwidth
 * cannot be taken away by CM). The actual new values are returned in
 * @upstream_bw and @downstream_bw.
 *
 * Returns %0 in case of success and negative errno if there was a
 * failure.
 */
int usb4_usb3_port_allocate_bandwidth(struct tb_port *port, int *upstream_bw,
				      int *downstream_bw)
{
	int ret, consumed_up, consumed_down, allocate_up, allocate_down;

	ret = usb4_usb3_port_set_cm_request(port);
	if (ret)
		return ret;

	ret = usb4_usb3_port_read_consumed_bandwidth(port, &consumed_up,
						     &consumed_down);
	if (ret)
		goto err_request;

	/* Don't allow it go lower than what is consumed */
	allocate_up = max(*upstream_bw, consumed_up);
	allocate_down = max(*downstream_bw, consumed_down);

	ret = usb4_usb3_port_write_allocated_bandwidth(port, allocate_up,
						       allocate_down);
	if (ret)
		goto err_request;

	*upstream_bw = allocate_up;
	*downstream_bw = allocate_down;

err_request:
	/* The CM request is cleared on both success and failure paths */
	usb4_usb3_port_clear_cm_request(port);
	return ret;
}

/**
 * usb4_usb3_port_release_bandwidth() - Release allocated USB3 bandwidth
 * @port: USB3 adapter port
 * @upstream_bw: New allocated upstream bandwidth
 * @downstream_bw: New allocated downstream bandwidth
 *
 * Releases USB3 allocated bandwidth down to what is actually consumed.
 * The new bandwidth is returned in @upstream_bw and @downstream_bw.
 *
 * Returns %0 in success and negative errno in case of failure.
 */
int usb4_usb3_port_release_bandwidth(struct tb_port *port, int *upstream_bw,
				     int *downstream_bw)
{
	int ret, consumed_up, consumed_down;

	ret = usb4_usb3_port_set_cm_request(port);
	if (ret)
		return ret;

	ret = usb4_usb3_port_read_consumed_bandwidth(port, &consumed_up,
						     &consumed_down);
	if (ret)
		goto err_request;

	/*
	 * Always keep 1000 Mb/s to make sure xHCI has at least some
	 * bandwidth available for isochronous traffic.
	 */
	if (consumed_up < 1000)
		consumed_up = 1000;
	if (consumed_down < 1000)
		consumed_down = 1000;

	ret = usb4_usb3_port_write_allocated_bandwidth(port, consumed_up,
						       consumed_down);
	if (ret)
		goto err_request;

	*upstream_bw = consumed_up;
	*downstream_bw = consumed_down;

err_request:
	usb4_usb3_port_clear_cm_request(port);
	return ret;
}

/* True only for DP IN adapters that sit on a USB4 (not TBT) router */
static bool is_usb4_dpin(const struct tb_port *port)
{
	if (!tb_port_is_dpin(port))
		return false;
	if (!tb_switch_is_usb4(port->sw))
		return false;
	return true;
}

/**
 * usb4_dp_port_set_cm_id() - Assign CM ID to the DP IN adapter
 * @port: DP IN adapter
 * @cm_id: CM ID to assign
 *
 * Sets CM ID for the @port. Returns %0 on success and negative errno
 * otherwise. Specifically returns %-EOPNOTSUPP if the @port does not
 * support this.
 */
int usb4_dp_port_set_cm_id(struct tb_port *port, int cm_id)
{
	u32 val;
	int ret;

	if (!is_usb4_dpin(port))
		return -EOPNOTSUPP;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_2, 1);
	if (ret)
		return ret;

	val &= ~ADP_DP_CS_2_CM_ID_MASK;
	val |= cm_id << ADP_DP_CS_2_CM_ID_SHIFT;

	return tb_port_write(port, &val, TB_CFG_PORT,
			     port->cap_adap + ADP_DP_CS_2, 1);
}

/**
 * usb4_dp_port_bandwidth_mode_supported() - Is the bandwidth allocation mode
 *					     supported
 * @port: DP IN adapter to check
 *
 * Can be called to any DP IN adapter. Returns true if the adapter
 * supports USB4 bandwidth allocation mode, false otherwise.
2303 */ 2304bool usb4_dp_port_bandwidth_mode_supported(struct tb_port *port) 2305{ 2306 int ret; 2307 u32 val; 2308 2309 if (!is_usb4_dpin(port)) 2310 return false; 2311 2312 ret = tb_port_read(port, &val, TB_CFG_PORT, 2313 port->cap_adap + DP_LOCAL_CAP, 1); 2314 if (ret) 2315 return false; 2316 2317 return !!(val & DP_COMMON_CAP_BW_MODE); 2318} 2319 2320/** 2321 * usb4_dp_port_bandwidth_mode_enabled() - Is the bandwidth allocation mode 2322 * enabled 2323 * @port: DP IN adapter to check 2324 * 2325 * Can be called to any DP IN adapter. Returns true if the bandwidth 2326 * allocation mode has been enabled, false otherwise. 2327 */ 2328bool usb4_dp_port_bandwidth_mode_enabled(struct tb_port *port) 2329{ 2330 int ret; 2331 u32 val; 2332 2333 if (!is_usb4_dpin(port)) 2334 return false; 2335 2336 ret = tb_port_read(port, &val, TB_CFG_PORT, 2337 port->cap_adap + ADP_DP_CS_8, 1); 2338 if (ret) 2339 return false; 2340 2341 return !!(val & ADP_DP_CS_8_DPME); 2342} 2343 2344/** 2345 * usb4_dp_port_set_cm_bandwidth_mode_supported() - Set/clear CM support for 2346 * bandwidth allocation mode 2347 * @port: DP IN adapter 2348 * @supported: Does the CM support bandwidth allocation mode 2349 * 2350 * Can be called to any DP IN adapter. Sets or clears the CM support bit 2351 * of the DP IN adapter. Returns %0 in success and negative errno 2352 * otherwise. Specifically returns %-OPNOTSUPP if the passed in adapter 2353 * does not support this. 
 */
int usb4_dp_port_set_cm_bandwidth_mode_supported(struct tb_port *port,
						 bool supported)
{
	u32 val;
	int ret;

	if (!is_usb4_dpin(port))
		return -EOPNOTSUPP;

	/* Read-modify-write of the CMMS bit in ADP_DP_CS_2 */
	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_2, 1);
	if (ret)
		return ret;

	if (supported)
		val |= ADP_DP_CS_2_CMMS;
	else
		val &= ~ADP_DP_CS_2_CMMS;

	return tb_port_write(port, &val, TB_CFG_PORT,
			     port->cap_adap + ADP_DP_CS_2, 1);
}

/**
 * usb4_dp_port_group_id() - Return Group ID assigned for the adapter
 * @port: DP IN adapter
 *
 * Reads bandwidth allocation Group ID from the DP IN adapter and
 * returns it. If the adapter does not support setting Group_ID
 * %-EOPNOTSUPP is returned.
 */
int usb4_dp_port_group_id(struct tb_port *port)
{
	u32 val;
	int ret;

	if (!is_usb4_dpin(port))
		return -EOPNOTSUPP;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_2, 1);
	if (ret)
		return ret;

	/* Extract the Group ID field from ADP_DP_CS_2 */
	return (val & ADP_DP_CS_2_GROUP_ID_MASK) >> ADP_DP_CS_2_GROUP_ID_SHIFT;
}

/**
 * usb4_dp_port_set_group_id() - Set adapter Group ID
 * @port: DP IN adapter
 * @group_id: Group ID for the adapter
 *
 * Sets bandwidth allocation mode Group ID for the DP IN adapter.
 * Returns %0 in case of success and negative errno otherwise.
 * Specifically returns %-EOPNOTSUPP if the adapter does not support
 * this.
 */
int usb4_dp_port_set_group_id(struct tb_port *port, int group_id)
{
	u32 val;
	int ret;

	if (!is_usb4_dpin(port))
		return -EOPNOTSUPP;

	/* Read-modify-write of the Group ID field in ADP_DP_CS_2 */
	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_2, 1);
	if (ret)
		return ret;

	val &= ~ADP_DP_CS_2_GROUP_ID_MASK;
	val |= group_id << ADP_DP_CS_2_GROUP_ID_SHIFT;

	return tb_port_write(port, &val, TB_CFG_PORT,
			     port->cap_adap + ADP_DP_CS_2, 1);
}

/**
 * usb4_dp_port_nrd() - Read non-reduced rate and lanes
 * @port: DP IN adapter
 * @rate: Non-reduced rate in Mb/s is placed here
 * @lanes: Non-reduced lanes are placed here
 *
 * Reads the non-reduced rate and lanes from the DP IN adapter. Returns
 * %0 in success and negative errno otherwise. Specifically returns
 * %-EOPNOTSUPP if the adapter does not support this.
 */
int usb4_dp_port_nrd(struct tb_port *port, int *rate, int *lanes)
{
	u32 val, tmp;
	int ret;

	if (!is_usb4_dpin(port))
		return -EOPNOTSUPP;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_2, 1);
	if (ret)
		return ret;

	/*
	 * Decode the NRD Maximum Link Rate field into Mb/s.
	 * NOTE(review): a reserved encoding leaves *rate (and below
	 * *lanes) untouched — callers presumably pass in initialized
	 * values; confirm against callers.
	 */
	tmp = (val & ADP_DP_CS_2_NRD_MLR_MASK) >> ADP_DP_CS_2_NRD_MLR_SHIFT;
	switch (tmp) {
	case DP_COMMON_CAP_RATE_RBR:
		*rate = 1620;
		break;
	case DP_COMMON_CAP_RATE_HBR:
		*rate = 2700;
		break;
	case DP_COMMON_CAP_RATE_HBR2:
		*rate = 5400;
		break;
	case DP_COMMON_CAP_RATE_HBR3:
		*rate = 8100;
		break;
	}

	/* Decode the NRD Maximum Lane Count field into a lane count */
	tmp = val & ADP_DP_CS_2_NRD_MLC_MASK;
	switch (tmp) {
	case DP_COMMON_CAP_1_LANE:
		*lanes = 1;
		break;
	case DP_COMMON_CAP_2_LANES:
		*lanes = 2;
		break;
	case DP_COMMON_CAP_4_LANES:
		*lanes = 4;
		break;
	}

	return 0;
}

/**
 * usb4_dp_port_set_nrd() - Set non-reduced rate and lanes
 * @port: DP IN adapter
 * @rate: Non-reduced rate in Mb/s
 * @lanes: Non-reduced lanes
 *
 * Before the capabilities reduction this function can be used to set
 * the non-reduced values for the DP IN adapter. Returns %0 in success
 * and negative errno otherwise. If the adapter does not support this
 * %-EOPNOTSUPP is returned.
 */
int usb4_dp_port_set_nrd(struct tb_port *port, int rate, int lanes)
{
	u32 val;
	int ret;

	if (!is_usb4_dpin(port))
		return -EOPNOTSUPP;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_2, 1);
	if (ret)
		return ret;

	val &= ~ADP_DP_CS_2_NRD_MLR_MASK;

	/* 1620 (RBR) encodes as 0 so only the mask clear above is needed */
	switch (rate) {
	case 1620:
		break;
	case 2700:
		val |= (DP_COMMON_CAP_RATE_HBR << ADP_DP_CS_2_NRD_MLR_SHIFT)
			& ADP_DP_CS_2_NRD_MLR_MASK;
		break;
	case 5400:
		val |= (DP_COMMON_CAP_RATE_HBR2 << ADP_DP_CS_2_NRD_MLR_SHIFT)
			& ADP_DP_CS_2_NRD_MLR_MASK;
		break;
	case 8100:
		val |= (DP_COMMON_CAP_RATE_HBR3 << ADP_DP_CS_2_NRD_MLR_SHIFT)
			& ADP_DP_CS_2_NRD_MLR_MASK;
		break;
	default:
		return -EINVAL;
	}

	val &= ~ADP_DP_CS_2_NRD_MLC_MASK;

	/* 1 lane (DP_COMMON_CAP_1_LANE) encodes as 0 */
	switch (lanes) {
	case 1:
		break;
	case 2:
		val |= DP_COMMON_CAP_2_LANES;
		break;
	case 4:
		val |= DP_COMMON_CAP_4_LANES;
		break;
	default:
		return -EINVAL;
	}

	return tb_port_write(port, &val, TB_CFG_PORT,
			     port->cap_adap + ADP_DP_CS_2, 1);
}

/**
 * usb4_dp_port_granularity() - Return granularity for the bandwidth values
 * @port: DP IN adapter
 *
 * Reads the programmed granularity from @port. If the DP IN adapter does
 * not support bandwidth allocation mode returns %-EOPNOTSUPP and negative
 * errno in other error cases.
 */
int usb4_dp_port_granularity(struct tb_port *port)
{
	u32 val;
	int ret;

	if (!is_usb4_dpin(port))
		return -EOPNOTSUPP;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_2, 1);
	if (ret)
		return ret;

	val &= ADP_DP_CS_2_GR_MASK;
	val >>= ADP_DP_CS_2_GR_SHIFT;

	/* Translate the register encoding into Mb/s */
	switch (val) {
	case ADP_DP_CS_2_GR_0_25G:
		return 250;
	case ADP_DP_CS_2_GR_0_5G:
		return 500;
	case ADP_DP_CS_2_GR_1G:
		return 1000;
	}

	/* Reserved/unknown granularity encoding */
	return -EINVAL;
}

/**
 * usb4_dp_port_set_granularity() - Set granularity for the bandwidth values
 * @port: DP IN adapter
 * @granularity: Granularity in Mb/s. Supported values: 1000, 500 and 250.
 *
 * Sets the granularity used with the estimated, allocated and requested
 * bandwidth. Returns %0 in success and negative errno otherwise. If the
 * adapter does not support this %-EOPNOTSUPP is returned.
 */
int usb4_dp_port_set_granularity(struct tb_port *port, int granularity)
{
	u32 val;
	int ret;

	if (!is_usb4_dpin(port))
		return -EOPNOTSUPP;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_2, 1);
	if (ret)
		return ret;

	val &= ~ADP_DP_CS_2_GR_MASK;

	switch (granularity) {
	case 250:
		val |= ADP_DP_CS_2_GR_0_25G << ADP_DP_CS_2_GR_SHIFT;
		break;
	case 500:
		val |= ADP_DP_CS_2_GR_0_5G << ADP_DP_CS_2_GR_SHIFT;
		break;
	case 1000:
		val |= ADP_DP_CS_2_GR_1G << ADP_DP_CS_2_GR_SHIFT;
		break;
	default:
		return -EINVAL;
	}

	return tb_port_write(port, &val, TB_CFG_PORT,
			     port->cap_adap + ADP_DP_CS_2, 1);
}

/**
 * usb4_dp_port_set_estimated_bandwidth() - Set estimated bandwidth
 * @port: DP IN adapter
 * @bw: Estimated bandwidth in Mb/s.
 *
 * Sets the estimated bandwidth to @bw. Set the granularity by calling
 * usb4_dp_port_set_granularity() before calling this. The @bw is round
 * down to the closest granularity multiplier. Returns %0 in success
 * and negative errno otherwise. Specifically returns %-EOPNOTSUPP if
 * the adapter does not support this.
 */
int usb4_dp_port_set_estimated_bandwidth(struct tb_port *port, int bw)
{
	u32 val, granularity;
	int ret;

	if (!is_usb4_dpin(port))
		return -EOPNOTSUPP;

	/* The register field is expressed in granularity units */
	ret = usb4_dp_port_granularity(port);
	if (ret < 0)
		return ret;
	granularity = ret;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_2, 1);
	if (ret)
		return ret;

	val &= ~ADP_DP_CS_2_ESTIMATED_BW_MASK;
	val |= (bw / granularity) << ADP_DP_CS_2_ESTIMATED_BW_SHIFT;

	return tb_port_write(port, &val, TB_CFG_PORT,
			     port->cap_adap + ADP_DP_CS_2, 1);
}

/**
 * usb4_dp_port_allocated_bandwidth() - Return allocated bandwidth
 * @port: DP IN adapter
 *
 * Reads and returns allocated bandwidth for @port in Mb/s (taking into
 * account the programmed granularity). Returns negative errno in case
 * of error.
 */
int usb4_dp_port_allocated_bandwidth(struct tb_port *port)
{
	u32 val, granularity;
	int ret;

	if (!is_usb4_dpin(port))
		return -EOPNOTSUPP;

	/* The register field is expressed in granularity units */
	ret = usb4_dp_port_granularity(port);
	if (ret < 0)
		return ret;
	granularity = ret;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_adap + DP_STATUS, 1);
	if (ret)
		return ret;

	val &= DP_STATUS_ALLOCATED_BW_MASK;
	val >>= DP_STATUS_ALLOCATED_BW_SHIFT;

	return val * granularity;
}

/* Set or clear the Clear Ack (CA) bit in ADP_DP_CS_2 */
static int __usb4_dp_port_set_cm_ack(struct tb_port *port, bool ack)
{
	u32 val;
	int ret;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_2, 1);
	if (ret)
		return ret;

	if (ack)
		val |= ADP_DP_CS_2_CA;
	else
		val &= ~ADP_DP_CS_2_CA;

	return tb_port_write(port, &val, TB_CFG_PORT,
			     port->cap_adap + ADP_DP_CS_2, 1);
}

static inline int usb4_dp_port_set_cm_ack(struct tb_port *port)
{
	return __usb4_dp_port_set_cm_ack(port, true);
}

/*
 * Clears the CM ack bit and then polls ADP_DP_CS_8 until the DR
 * (request) bit is de-asserted or @timeout_msec expires. Finally
 * clears CA once more via a read-modify-write of ADP_DP_CS_2.
 */
static int usb4_dp_port_wait_and_clear_cm_ack(struct tb_port *port,
					      int timeout_msec)
{
	ktime_t end;
	u32 val;
	int ret;

	ret = __usb4_dp_port_set_cm_ack(port, false);
	if (ret)
		return ret;

	end = ktime_add_ms(ktime_get(), timeout_msec);
	do {
		ret = tb_port_read(port, &val, TB_CFG_PORT,
				   port->cap_adap + ADP_DP_CS_8, 1);
		if (ret)
			return ret;

		if (!(val & ADP_DP_CS_8_DR))
			break;

		usleep_range(50, 100);
	} while (ktime_before(ktime_get(), end));

	if (val & ADP_DP_CS_8_DR)
		return -ETIMEDOUT;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_2, 1);
	if (ret)
		return ret;

	val &= ~ADP_DP_CS_2_CA;
	return tb_port_write(port, &val, TB_CFG_PORT,
			     port->cap_adap + ADP_DP_CS_2, 1);
}

/**
 * usb4_dp_port_allocate_bandwidth() - Set allocated bandwidth
 * @port: DP IN adapter
 * @bw: New allocated bandwidth in Mb/s
 *
 * Communicates the new allocated bandwidth with the DPCD (graphics
 * driver). Takes into account the programmed granularity. Returns %0 in
 * success and negative errno in case of error.
 */
int usb4_dp_port_allocate_bandwidth(struct tb_port *port, int bw)
{
	u32 val, granularity;
	int ret;

	if (!is_usb4_dpin(port))
		return -EOPNOTSUPP;

	/* The register field is expressed in granularity units */
	ret = usb4_dp_port_granularity(port);
	if (ret < 0)
		return ret;
	granularity = ret;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_adap + DP_STATUS, 1);
	if (ret)
		return ret;

	val &= ~DP_STATUS_ALLOCATED_BW_MASK;
	val |= (bw / granularity) << DP_STATUS_ALLOCATED_BW_SHIFT;

	ret = tb_port_write(port, &val, TB_CFG_PORT,
			    port->cap_adap + DP_STATUS, 1);
	if (ret)
		return ret;

	/* Ack the request and wait up to 500 ms for the other end to react */
	ret = usb4_dp_port_set_cm_ack(port);
	if (ret)
		return ret;

	return usb4_dp_port_wait_and_clear_cm_ack(port, 500);
}

/**
 * usb4_dp_port_requested_bandwidth() - Read requested bandwidth
 * @port: DP IN adapter
 *
 * Reads the DPCD (graphics driver) requested bandwidth and returns it
 * in Mb/s. Takes the programmed granularity into account. In case of
 * error returns negative errno. Specifically returns %-EOPNOTSUPP if
 * the adapter does not support bandwidth allocation mode, and %-ENODATA
 * if there is no active bandwidth request from the graphics driver.
2809 */ 2810int usb4_dp_port_requested_bandwidth(struct tb_port *port) 2811{ 2812 u32 val, granularity; 2813 int ret; 2814 2815 if (!is_usb4_dpin(port)) 2816 return -EOPNOTSUPP; 2817 2818 ret = usb4_dp_port_granularity(port); 2819 if (ret < 0) 2820 return ret; 2821 granularity = ret; 2822 2823 ret = tb_port_read(port, &val, TB_CFG_PORT, 2824 port->cap_adap + ADP_DP_CS_8, 1); 2825 if (ret) 2826 return ret; 2827 2828 if (!(val & ADP_DP_CS_8_DR)) 2829 return -ENODATA; 2830 2831 return (val & ADP_DP_CS_8_REQUESTED_BW_MASK) * granularity; 2832} 2833 2834/** 2835 * usb4_pci_port_set_ext_encapsulation() - Enable/disable extended encapsulation 2836 * @port: PCIe adapter 2837 * @enable: Enable/disable extended encapsulation 2838 * 2839 * Enables or disables extended encapsulation used in PCIe tunneling. Caller 2840 * needs to make sure both adapters support this before enabling. Returns %0 on 2841 * success and negative errno otherwise. 2842 */ 2843int usb4_pci_port_set_ext_encapsulation(struct tb_port *port, bool enable) 2844{ 2845 u32 val; 2846 int ret; 2847 2848 if (!tb_port_is_pcie_up(port) && !tb_port_is_pcie_down(port)) 2849 return -EINVAL; 2850 2851 ret = tb_port_read(port, &val, TB_CFG_PORT, 2852 port->cap_adap + ADP_PCIE_CS_1, 1); 2853 if (ret) 2854 return ret; 2855 2856 if (enable) 2857 val |= ADP_PCIE_CS_1_EE; 2858 else 2859 val &= ~ADP_PCIE_CS_1_EE; 2860 2861 return tb_port_write(port, &val, TB_CFG_PORT, 2862 port->cap_adap + ADP_PCIE_CS_1, 1); 2863} 2864