Lines Matching refs:xhci

22 #include "xhci.h"
23 #include "xhci-trace.h"
24 #include "xhci-debugfs.h"
25 #include "xhci-dbgcap.h"
87 void xhci_quiesce(struct xhci_hcd *xhci)
94 halted = readl(&xhci->op_regs->status) & STS_HALT;
98 cmd = readl(&xhci->op_regs->command);
100 writel(cmd, &xhci->op_regs->command);
111 int xhci_halt(struct xhci_hcd *xhci)
115 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Halt the HC");
116 xhci_quiesce(xhci);
118 ret = xhci_handshake(&xhci->op_regs->status,
121 xhci_warn(xhci, "Host halt failed, %d\n", ret);
125 xhci->xhc_state |= XHCI_STATE_HALTED;
126 xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
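The halt path above clears the run bit in xhci_quiesce() and then polls the status register through xhci_handshake() until STS_HALT latches. A minimal user-space model of that handshake pattern, assuming a fake in-memory register in place of the real MMIO op_regs (handshake(), fake_status and the 16 ms budget are illustrative, not the driver's code):

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the STS_HALT bit and the memory-mapped
 * status register declared in xhci.h. */
#define STS_HALT (1u << 0)

static volatile uint32_t fake_status = STS_HALT;

/* Minimal model of the xhci_handshake() idea: spin until the masked
 * register value equals the expected one, or the budget runs out. */
static int handshake(volatile uint32_t *reg, uint32_t mask, uint32_t done,
                     uint64_t timeout_us)
{
        while (timeout_us--) {
                if ((*reg & mask) == done)
                        return 0;               /* condition reached */
                /* the driver delays here via readl_poll_timeout_atomic() */
        }
        return -1;                              /* -ETIMEDOUT in the kernel */
}

int main(void)
{
        /* xhci_halt(): after xhci_quiesce() clears the run bit, wait for
         * the controller to report STS_HALT before declaring it stopped. */
        if (handshake(&fake_status, STS_HALT, STS_HALT, 16000) == 0)
                printf("host halted\n");
        else
                printf("host halt timed out\n");
        return 0;
}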
134 int xhci_start(struct xhci_hcd *xhci)
139 temp = readl(&xhci->op_regs->command);
141 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Turn on HC, cmd = 0x%x.",
143 writel(temp, &xhci->op_regs->command);
149 ret = xhci_handshake(&xhci->op_regs->status,
152 xhci_err(xhci, "Host took too long to start, "
157 xhci->xhc_state = 0;
158 xhci->run_graceperiod = jiffies + msecs_to_jiffies(500);
171 int xhci_reset(struct xhci_hcd *xhci, u64 timeout_us)
177 state = readl(&xhci->op_regs->status);
180 xhci_warn(xhci, "Host not accessible, reset failed.\n");
185 xhci_warn(xhci, "Host controller not halted, aborting reset.\n");
189 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Reset the HC");
190 command = readl(&xhci->op_regs->command);
192 writel(command, &xhci->op_regs->command);
201 if (xhci->quirks & XHCI_INTEL_HOST)
204 ret = xhci_handshake(&xhci->op_regs->command, CMD_RESET, 0, timeout_us);
208 if (xhci->quirks & XHCI_ASMEDIA_MODIFY_FLOWCONTROL)
209 usb_asmedia_modifyflowcontrol(to_pci_dev(xhci_to_hcd(xhci)->self.controller));
211 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
217 ret = xhci_handshake(&xhci->op_regs->status, STS_CNR, 0, timeout_us);
219 xhci->usb2_rhub.bus_state.port_c_suspend = 0;
220 xhci->usb2_rhub.bus_state.suspended_ports = 0;
221 xhci->usb2_rhub.bus_state.resuming_ports = 0;
222 xhci->usb3_rhub.bus_state.port_c_suspend = 0;
223 xhci->usb3_rhub.bus_state.suspended_ports = 0;
224 xhci->usb3_rhub.bus_state.resuming_ports = 0;
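xhci_reset() as listed above sets CMD_RESET and then waits twice: first for the self-clearing reset bit to read back as zero, then for STS_CNR to drop before any other register is touched, after which the cached root-hub bus_state fields are wiped. A compressed sketch of that ordering, reusing the handshake() model from the previous sketch; cmd_reg and status_reg are hypothetical stand-ins and the bit positions follow the xHCI spec (HCRST is USBCMD bit 1, CNR is USBSTS bit 11):

#include <stdint.h>

#define CMD_RESET (1u << 1)     /* USBCMD.HCRST */
#define STS_CNR   (1u << 11)    /* USBSTS.CNR   */

/* Defined in the previous sketch. */
extern int handshake(volatile uint32_t *reg, uint32_t mask, uint32_t done,
                     uint64_t timeout_us);

/* Hypothetical stand-ins for op_regs->command and op_regs->status. */
static volatile uint32_t cmd_reg, status_reg;

static int reset_controller(uint64_t timeout_us)
{
        cmd_reg |= CMD_RESET;                   /* start the HC reset */

        /* Wait for the controller to clear HCRST on its own ... */
        if (handshake(&cmd_reg, CMD_RESET, 0, timeout_us))
                return -1;

        /* ... and then for CNR to drop before touching other registers,
         * matching the second handshake in xhci_reset(). */
        return handshake(&status_reg, STS_CNR, 0, timeout_us);
}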
229 static void xhci_zero_64b_regs(struct xhci_hcd *xhci)
231 struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
252 if (!(xhci->quirks & XHCI_ZERO_64B_REGS) || !domain ||
256 xhci_info(xhci, "Zeroing 64bit base registers, expecting fault\n");
259 val = readl(&xhci->op_regs->command);
261 writel(val, &xhci->op_regs->command);
264 val = readl(&xhci->op_regs->status);
266 writel(val, &xhci->op_regs->status);
269 val = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
271 xhci_write_64(xhci, 0, &xhci->op_regs->dcbaa_ptr);
272 val = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
274 xhci_write_64(xhci, 0, &xhci->op_regs->cmd_ring);
276 intrs = min_t(u32, HCS_MAX_INTRS(xhci->hcs_params1),
277 ARRAY_SIZE(xhci->run_regs->ir_set));
282 ir = &xhci->run_regs->ir_set[i];
283 val = xhci_read_64(xhci, &ir->erst_base);
285 xhci_write_64(xhci, 0, &ir->erst_base);
286 val = xhci_read_64(xhci, &ir->erst_dequeue);
288 xhci_write_64(xhci, 0, &ir->erst_dequeue);
292 err = xhci_handshake(&xhci->op_regs->status,
296 xhci_info(xhci, "Fault detected\n");
327 struct xhci_hcd *xhci;
333 xhci = from_timer(xhci, t, comp_mode_recovery_timer);
334 rhub = &xhci->usb3_rhub;
347 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
350 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
360 if (xhci->port_status_u0 != ((1 << rhub->num_ports) - 1))
361 mod_timer(&xhci->comp_mode_recovery_timer,
372 * status event is generated when entering compliance mode (per xhci spec),
375 static void compliance_mode_recovery_timer_init(struct xhci_hcd *xhci)
377 xhci->port_status_u0 = 0;
378 timer_setup(&xhci->comp_mode_recovery_timer, compliance_mode_recovery,
380 xhci->comp_mode_recovery_timer.expires = jiffies +
383 add_timer(&xhci->comp_mode_recovery_timer);
384 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
415 static int xhci_all_ports_seen_u0(struct xhci_hcd *xhci)
417 return (xhci->port_status_u0 == ((1 << xhci->usb3_rhub.num_ports) - 1));
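The compliance-mode quirk keeps a one-bit-per-port mask, port_status_u0, and the recovery timer re-arms until every USB3 root-hub port has been seen in U0, which is exactly the test on line 417 above. A small model of that bookkeeping (struct u0_tracker and the helper names are invented for illustration):

#include <stdbool.h>
#include <stdint.h>

/* One bit per USB3 root-hub port, set once that port has been observed
 * in the U0 link state. */
struct u0_tracker {
        uint32_t port_status_u0;
        unsigned int num_ports;
};

static void mark_port_u0(struct u0_tracker *t, unsigned int port)
{
        t->port_status_u0 |= 1u << port;
}

/* Mirrors the xhci_all_ports_seen_u0() test: all num_ports low bits set. */
static bool all_ports_seen_u0(const struct u0_tracker *t)
{
        return t->port_status_u0 == ((1u << t->num_ports) - 1);
}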
430 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
433 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_init");
434 spin_lock_init(&xhci->lock);
435 if (xhci->hci_version == 0x95 && link_quirk) {
436 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
438 xhci->quirks |= XHCI_LINK_TRB_QUIRK;
440 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
443 retval = xhci_mem_init(xhci, GFP_KERNEL);
444 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Finished xhci_init");
448 xhci->quirks |= XHCI_COMP_MODE_QUIRK;
449 compliance_mode_recovery_timer_init(xhci);
457 static int xhci_run_finished(struct xhci_hcd *xhci)
459 struct xhci_interrupter *ir = xhci->interrupter;
464 * Enable interrupts before starting the host (xhci 4.2 and 5.5.2).
467 spin_lock_irqsave(&xhci->lock, flags);
469 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Enable interrupts");
470 temp = readl(&xhci->op_regs->command);
472 writel(temp, &xhci->op_regs->command);
474 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Enable primary interrupter");
477 if (xhci_start(xhci)) {
478 xhci_halt(xhci);
479 spin_unlock_irqrestore(&xhci->lock, flags);
483 xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;
485 if (xhci->quirks & XHCI_NEC_HOST)
486 xhci_ring_cmd_db(xhci);
488 spin_unlock_irqrestore(&xhci->lock, flags);
510 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
511 struct xhci_interrupter *ir = xhci->interrupter;
518 return xhci_run_finished(xhci);
520 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_run");
522 temp_64 = xhci_read_64(xhci, &ir->ir_set->erst_dequeue);
524 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
527 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
531 temp |= (xhci->imod_interval / 250) & ER_IRQ_INTERVAL_MASK;
534 if (xhci->quirks & XHCI_NEC_HOST) {
537 command = xhci_alloc_command(xhci, false, GFP_KERNEL);
541 ret = xhci_queue_vendor_command(xhci, command, 0, 0, 0,
544 xhci_free_command(xhci, command);
546 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
549 xhci_create_dbc_dev(xhci);
551 xhci_debugfs_init(xhci);
553 if (xhci_has_one_roothub(xhci))
554 return xhci_run_finished(xhci);
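One detail in the xhci_run() fragment above (line 531) is interrupt moderation: imod_interval is kept in nanoseconds while the IMOD register field counts 250 ns ticks, hence the divide by 250 before masking. A small illustration of the conversion; ER_IRQ_INTERVAL_MASK mirrors the driver's 16-bit field mask and the 40 us figure is just an example value:

#include <assert.h>
#include <stdint.h>

#define ER_IRQ_INTERVAL_MASK 0xffff

/* Convert a moderation interval given in nanoseconds into the 250 ns
 * units the interrupter moderation register expects, preserving the
 * other bits of the register value. */
static uint32_t set_imod_interval(uint32_t reg, uint32_t interval_ns)
{
        reg &= ~(uint32_t)ER_IRQ_INTERVAL_MASK;
        reg |= (interval_ns / 250) & ER_IRQ_INTERVAL_MASK;
        return reg;
}

int main(void)
{
        /* A 40 us interval becomes 160 register ticks. */
        assert((set_imod_interval(0, 40000) & ER_IRQ_INTERVAL_MASK) == 160);
        return 0;
}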
574 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
575 struct xhci_interrupter *ir = xhci->interrupter;
577 mutex_lock(&xhci->mutex);
581 mutex_unlock(&xhci->mutex);
585 xhci_remove_dbc_dev(xhci);
587 spin_lock_irq(&xhci->lock);
588 xhci->xhc_state |= XHCI_STATE_HALTED;
589 xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
590 xhci_halt(xhci);
591 xhci_reset(xhci, XHCI_RESET_SHORT_USEC);
592 spin_unlock_irq(&xhci->lock);
595 if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
596 (!(xhci_all_ports_seen_u0(xhci)))) {
597 del_timer_sync(&xhci->comp_mode_recovery_timer);
598 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
603 if (xhci->quirks & XHCI_AMD_PLL_FIX)
606 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
608 temp = readl(&xhci->op_regs->status);
609 writel((temp & ~0x1fff) | STS_EINT, &xhci->op_regs->status);
612 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "cleaning up memory");
613 xhci_mem_cleanup(xhci);
614 xhci_debugfs_exit(xhci);
615 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
617 readl(&xhci->op_regs->status));
618 mutex_unlock(&xhci->mutex);
633 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
635 if (xhci->quirks & XHCI_SPURIOUS_REBOOT)
639 xhci_dbg(xhci, "%s: stopping usb%d port polling.\n",
644 if (xhci->shared_hcd) {
645 clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
646 del_timer_sync(&xhci->shared_hcd->rh_timer);
649 spin_lock_irq(&xhci->lock);
650 xhci_halt(xhci);
656 if (xhci->quirks & XHCI_SPURIOUS_WAKEUP ||
657 xhci->quirks & XHCI_RESET_TO_DEFAULT)
658 xhci_reset(xhci, XHCI_RESET_SHORT_USEC);
660 spin_unlock_irq(&xhci->lock);
662 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
664 readl(&xhci->op_regs->status));
669 static void xhci_save_registers(struct xhci_hcd *xhci)
671 struct xhci_interrupter *ir = xhci->interrupter;
673 xhci->s3.command = readl(&xhci->op_regs->command);
674 xhci->s3.dev_nt = readl(&xhci->op_regs->dev_notification);
675 xhci->s3.dcbaa_ptr = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
676 xhci->s3.config_reg = readl(&xhci->op_regs->config_reg);
682 ir->s3_erst_base = xhci_read_64(xhci, &ir->ir_set->erst_base);
683 ir->s3_erst_dequeue = xhci_read_64(xhci, &ir->ir_set->erst_dequeue);
688 static void xhci_restore_registers(struct xhci_hcd *xhci)
690 struct xhci_interrupter *ir = xhci->interrupter;
692 writel(xhci->s3.command, &xhci->op_regs->command);
693 writel(xhci->s3.dev_nt, &xhci->op_regs->dev_notification);
694 xhci_write_64(xhci, xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr);
695 writel(xhci->s3.config_reg, &xhci->op_regs->config_reg);
697 xhci_write_64(xhci, ir->s3_erst_base, &ir->ir_set->erst_base);
698 xhci_write_64(xhci, ir->s3_erst_dequeue, &ir->ir_set->erst_dequeue);
703 static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci)
708 val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
710 (xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
711 xhci->cmd_ring->dequeue) &
713 xhci->cmd_ring->cycle_state;
714 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
717 xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
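xhci_set_cmd_ring_deq() rebuilds the 64-bit command ring control (CRCR) value from three pieces: the reserved low bits of the old value, the DMA address of the current dequeue TRB with its low six bits masked off (the CRCR pointer field starts at bit 6), and the ring cycle state in bit 0. A sketch of that composition; compose_cmd_ring_deq() is an invented helper and CMD_RING_RSVD_BITS mirrors the driver's 0x3f mask:

#include <stdint.h>

#define CMD_RING_RSVD_BITS 0x3fULL

/* Combine the old reserved bits, the dequeue TRB's DMA address and the
 * ring cycle state into a value suitable for the CRCR register. */
static uint64_t compose_cmd_ring_deq(uint64_t old_val, uint64_t deq_dma,
                                      unsigned int cycle_state)
{
        return (old_val & CMD_RING_RSVD_BITS) |
               (deq_dma & ~CMD_RING_RSVD_BITS) |
               (uint64_t)cycle_state;
}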
729 static void xhci_clear_command_ring(struct xhci_hcd *xhci)
734 ring = xhci->cmd_ring;
764 xhci_set_cmd_ring_deq(xhci);
776 static void xhci_disable_hub_port_wake(struct xhci_hcd *xhci,
784 spin_lock_irqsave(&xhci->lock, flags);
801 xhci_dbg(xhci, "config port %d-%d wake bits, portsc: 0x%x, write: 0x%x\n",
805 spin_unlock_irqrestore(&xhci->lock, flags);
808 static bool xhci_pending_portevent(struct xhci_hcd *xhci)
815 status = readl(&xhci->op_regs->status);
821 * being written to the Event Ring. See note in xhci 1.1 section 4.19.2.
824 port_index = xhci->usb2_rhub.num_ports;
825 ports = xhci->usb2_rhub.ports;
832 port_index = xhci->usb3_rhub.num_ports;
833 ports = xhci->usb3_rhub.ports;
849 int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup)
853 struct usb_hcd *hcd = xhci_to_hcd(xhci);
861 (xhci->shared_hcd && xhci->shared_hcd->state != HC_STATE_SUSPENDED))
865 xhci_disable_hub_port_wake(xhci, &xhci->usb3_rhub, do_wakeup);
866 xhci_disable_hub_port_wake(xhci, &xhci->usb2_rhub, do_wakeup);
871 xhci_dbc_suspend(xhci);
874 xhci_dbg(xhci, "%s: stopping usb%d port polling.\n",
878 if (xhci->shared_hcd) {
879 clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
880 del_timer_sync(&xhci->shared_hcd->rh_timer);
883 if (xhci->quirks & XHCI_SUSPEND_DELAY)
886 spin_lock_irq(&xhci->lock);
888 if (xhci->shared_hcd)
889 clear_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
894 command = readl(&xhci->op_regs->command);
896 writel(command, &xhci->op_regs->command);
899 delay *= (xhci->quirks & XHCI_SLOW_SUSPEND) ? 10 : 1;
901 if (xhci_handshake(&xhci->op_regs->status,
903 xhci_warn(xhci, "WARN: xHC CMD_RUN timeout\n");
904 spin_unlock_irq(&xhci->lock);
907 xhci_clear_command_ring(xhci);
910 xhci_save_registers(xhci);
913 command = readl(&xhci->op_regs->command);
915 writel(command, &xhci->op_regs->command);
916 xhci->broken_suspend = 0;
917 if (xhci_handshake(&xhci->op_regs->status,
925 * if SRE and HCE bits are not set (as per xhci
928 res = readl(&xhci->op_regs->status);
929 if ((xhci->quirks & XHCI_SNPS_BROKEN_SUSPEND) &&
932 xhci->broken_suspend = 1;
934 xhci_warn(xhci, "WARN: xHC save state timeout\n");
935 spin_unlock_irq(&xhci->lock);
939 spin_unlock_irq(&xhci->lock);
945 if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
946 (!(xhci_all_ports_seen_u0(xhci)))) {
947 del_timer_sync(&xhci->comp_mode_recovery_timer);
948 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
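The suspend path above stops the controller, waits for it to halt (scaling the wait tenfold for XHCI_SLOW_SUSPEND), saves the operational registers, then sets the controller-save-state bit and waits for the save to complete. A sketch of that ordering only; the timeout value, save_registers() stub and handshake() helper are illustrative, and the CSS/SSS bit positions are taken from the xHCI spec rather than from this listing:

#include <stdint.h>

#define CMD_RUN  (1u << 0)      /* USBCMD.R/S */
#define CMD_CSS  (1u << 8)      /* USBCMD.CSS */
#define STS_HALT (1u << 0)      /* USBSTS.HCH */
#define STS_SAVE (1u << 8)      /* USBSTS.SSS */

extern int handshake(volatile uint32_t *reg, uint32_t mask, uint32_t done,
                     uint64_t timeout_us);
extern void save_registers(void);   /* stands in for xhci_save_registers() */

static volatile uint32_t cmd_reg, status_reg;

/* Ordering of the suspend path: stop the controller, wait for the halt,
 * save the operational registers, then ask the controller to save its
 * internal state and wait for that handshake too. */
static int suspend_controller(int slow_suspend_quirk)
{
        uint64_t delay_us = 16 * 1000;          /* illustrative budget */

        if (slow_suspend_quirk)
                delay_us *= 10;

        cmd_reg &= ~CMD_RUN;
        if (handshake(&status_reg, STS_HALT, STS_HALT, delay_us))
                return -1;                      /* "xHC CMD_RUN timeout" */

        save_registers();

        cmd_reg |= CMD_CSS;
        if (handshake(&status_reg, STS_SAVE, 0, delay_us))
                return -1;                      /* "xHC save state timeout" */

        return 0;
}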
963 int xhci_resume(struct xhci_hcd *xhci, pm_message_t msg)
967 struct usb_hcd *hcd = xhci_to_hcd(xhci);
981 if (time_before(jiffies, xhci->usb2_rhub.bus_state.next_statechange) ||
982 time_before(jiffies, xhci->usb3_rhub.bus_state.next_statechange))
986 if (xhci->shared_hcd)
987 set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
989 spin_lock_irq(&xhci->lock);
991 if (hibernated || xhci->quirks & XHCI_RESET_ON_RESUME || xhci->broken_suspend)
999 retval = xhci_handshake(&xhci->op_regs->status,
1002 xhci_warn(xhci, "Controller not ready at resume %d\n",
1004 spin_unlock_irq(&xhci->lock);
1008 xhci_restore_registers(xhci);
1010 xhci_set_cmd_ring_deq(xhci);
1013 command = readl(&xhci->op_regs->command);
1015 writel(command, &xhci->op_regs->command);
1021 if (xhci_handshake(&xhci->op_regs->status,
1023 xhci_warn(xhci, "WARN: xHC restore state timeout\n");
1024 spin_unlock_irq(&xhci->lock);
1029 temp = readl(&xhci->op_regs->status);
1033 !(xhci->xhc_state & XHCI_STATE_REMOVING)) {
1035 if (!xhci->broken_suspend)
1036 xhci_warn(xhci, "xHC error in resume, USBSTS 0x%x, Reinit\n", temp);
1040 if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
1041 !(xhci_all_ports_seen_u0(xhci))) {
1042 del_timer_sync(&xhci->comp_mode_recovery_timer);
1043 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
1048 usb_root_hub_lost_power(xhci->main_hcd->self.root_hub);
1049 if (xhci->shared_hcd)
1050 usb_root_hub_lost_power(xhci->shared_hcd->self.root_hub);
1052 xhci_dbg(xhci, "Stop HCD\n");
1053 xhci_halt(xhci);
1054 xhci_zero_64b_regs(xhci);
1055 retval = xhci_reset(xhci, XHCI_RESET_LONG_USEC);
1056 spin_unlock_irq(&xhci->lock);
1060 xhci_dbg(xhci, "// Disabling event ring interrupts\n");
1061 temp = readl(&xhci->op_regs->status);
1062 writel((temp & ~0x1fff) | STS_EINT, &xhci->op_regs->status);
1063 xhci_disable_interrupter(xhci->interrupter);
1065 xhci_dbg(xhci, "cleaning up memory\n");
1066 xhci_mem_cleanup(xhci);
1067 xhci_debugfs_exit(xhci);
1068 xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
1069 readl(&xhci->op_regs->status));
1075 xhci_dbg(xhci, "Initialize the xhci_hcd\n");
1081 xhci_dbg(xhci, "Start the primary HCD\n");
1083 if (!retval && xhci->shared_hcd) {
1084 xhci_dbg(xhci, "Start the secondary HCD\n");
1085 retval = xhci_run(xhci->shared_hcd);
1089 if (xhci->shared_hcd)
1090 xhci->shared_hcd->state = HC_STATE_SUSPENDED;
1095 command = readl(&xhci->op_regs->command);
1097 writel(command, &xhci->op_regs->command);
1098 xhci_handshake(&xhci->op_regs->status, STS_HALT,
1110 spin_unlock_irq(&xhci->lock);
1112 xhci_dbc_resume(xhci);
1122 if (xhci->usb3_rhub.bus_state.suspended_ports ||
1123 xhci->usb3_rhub.bus_state.bus_suspended)
1126 pending_portevent = xhci_pending_portevent(xhci);
1131 pending_portevent = xhci_pending_portevent(xhci);
1135 if (xhci->shared_hcd)
1136 usb_hcd_resume_root_hub(xhci->shared_hcd);
1146 if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && !comp_timer_running)
1147 compliance_mode_recovery_timer_init(xhci);
1149 if (xhci->quirks & XHCI_ASMEDIA_MODIFY_FLOWCONTROL)
1153 xhci_dbg(xhci, "%s: starting usb%d port polling.\n",
1155 if (xhci->shared_hcd) {
1156 set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
1157 usb_hcd_poll_rh_status(xhci->shared_hcd);
1287 struct xhci_hcd *xhci;
1289 xhci = hcd_to_xhci(hcd);
1294 if (xhci->quirks & XHCI_SG_TRB_CACHE_SIZE_QUIRK) {
1303 struct xhci_hcd *xhci;
1306 xhci = hcd_to_xhci(hcd);
1311 if ((xhci->quirks & XHCI_SG_TRB_CACHE_SIZE_QUIRK) && unmap_temp_buf)
1375 struct xhci_hcd *xhci;
1387 xhci = hcd_to_xhci(hcd);
1389 if (!udev->slot_id || !xhci->devs[udev->slot_id]) {
1390 xhci_dbg(xhci, "xHCI %s called with unaddressed device\n",
1395 virt_dev = xhci->devs[udev->slot_id];
1397 xhci_dbg(xhci, "xHCI %s called with udev and "
1403 if (xhci->xhc_state & XHCI_STATE_HALTED)
1409 static int xhci_configure_endpoint(struct xhci_hcd *xhci,
1419 static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
1430 out_ctx = xhci->devs[slot_id]->out_ctx;
1431 ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
1435 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
1437 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
1440 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
1443 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
1451 command = xhci_alloc_command(xhci, true, mem_flags);
1455 command->in_ctx = xhci->devs[slot_id]->in_ctx;
1458 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
1464 xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
1465 xhci->devs[slot_id]->out_ctx, ep_index);
1467 ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index);
1475 ret = xhci_configure_endpoint(xhci, urb->dev, command,
1495 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
1512 ep_state = &xhci->devs[slot_id]->eps[ep_index].ep_state;
1517 if (xhci->devs[slot_id]->flags & VDEV_PORT_ERROR) {
1518 xhci_dbg(xhci, "Can't queue urb, port error, link inactive\n");
1547 ret = xhci_check_maxpacket(xhci, slot_id,
1557 spin_lock_irqsave(&xhci->lock, flags);
1559 if (xhci->xhc_state & XHCI_STATE_DYING) {
1560 xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for non-responsive xHCI host.\n",
1566 xhci_warn(xhci, "WARN: Can't enqueue URB, ep in streams transition state %x\n",
1572 xhci_warn(xhci, "Can't enqueue URB while manually clearing toggle\n");
1580 ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb,
1584 ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
1588 ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb,
1592 ret = xhci_queue_isoc_tx_prepare(xhci, GFP_ATOMIC, urb,
1601 spin_unlock_irqrestore(&xhci->lock, flags);
1641 struct xhci_hcd *xhci;
1650 xhci = hcd_to_xhci(hcd);
1651 spin_lock_irqsave(&xhci->lock, flags);
1661 vdev = xhci->devs[urb->dev->slot_id];
1668 ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
1673 temp = readl(&xhci->op_regs->status);
1674 if (temp == ~(u32)0 || xhci->xhc_state & XHCI_STATE_DYING) {
1675 xhci_hc_died(xhci);
1685 xhci_err(xhci, "Canceled URB td not found on endpoint ring");
1694 if (xhci->xhc_state & XHCI_STATE_HALTED) {
1695 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
1711 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
1734 command = xhci_alloc_command(xhci, false, GFP_ATOMIC);
1740 xhci_queue_stop_endpoint(xhci, command, urb->dev->slot_id,
1742 xhci_ring_cmd_db(xhci);
1745 spin_unlock_irqrestore(&xhci->lock, flags);
1752 spin_unlock_irqrestore(&xhci->lock, flags);
1768 * the xhci->devs[slot_id] structure.
1773 struct xhci_hcd *xhci;
1785 xhci = hcd_to_xhci(hcd);
1786 if (xhci->xhc_state & XHCI_STATE_DYING)
1789 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
1792 xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n",
1797 in_ctx = xhci->devs[udev->slot_id]->in_ctx;
1798 out_ctx = xhci->devs[udev->slot_id]->out_ctx;
1801 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
1807 ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
1815 if (xhci->devs[udev->slot_id]->eps[ep_index].ring != NULL)
1816 xhci_warn(xhci, "xHCI %s called with disabled ep %p\n",
1827 xhci_debugfs_remove_endpoint(xhci, xhci->devs[udev->slot_id], ep_index);
1829 xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep);
1831 xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n",
1851 * for mutual exclusion to protect the xhci->devs[slot_id] structure.
1856 struct xhci_hcd *xhci;
1872 xhci = hcd_to_xhci(hcd);
1873 if (xhci->xhc_state & XHCI_STATE_DYING)
1882 xhci_dbg(xhci, "xHCI %s - can't add slot or ep 0 %#x\n",
1887 virt_dev = xhci->devs[udev->slot_id];
1891 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
1902 xhci_warn(xhci, "Trying to add endpoint 0x%x "
1912 xhci_warn(xhci, "xHCI %s called with enabled ep %p\n",
1922 if (xhci_endpoint_init(xhci, virt_dev, udev, ep, GFP_NOIO) < 0) {
1942 ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
1945 xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n",
1954 static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev)
1963 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
1975 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
1980 ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i);
1988 static int xhci_configure_endpoint_result(struct xhci_hcd *xhci,
1996 xhci_warn(xhci, "Timeout while waiting for configure endpoint command\n");
2025 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
2030 xhci_err(xhci, "ERROR: unexpected command completion code 0x%x.\n",
2038 static int xhci_evaluate_context_result(struct xhci_hcd *xhci,
2046 xhci_warn(xhci, "Timeout while waiting for evaluate context command\n");
2075 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
2080 xhci_err(xhci, "ERROR: unexpected command completion code 0x%x.\n",
2088 static u32 xhci_count_num_new_endpoints(struct xhci_hcd *xhci,
2109 static unsigned int xhci_count_num_dropped_endpoints(struct xhci_hcd *xhci,
2133 * Must be called with xhci->lock held.
2135 static int xhci_reserve_host_resources(struct xhci_hcd *xhci,
2140 added_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx);
2141 if (xhci->num_active_eps + added_eps > xhci->limit_active_eps) {
2142 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2145 xhci->num_active_eps, added_eps,
2146 xhci->limit_active_eps);
2149 xhci->num_active_eps += added_eps;
2150 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2152 xhci->num_active_eps);
2160 * Must be called with xhci->lock held.
2162 static void xhci_free_host_resources(struct xhci_hcd *xhci,
2167 num_failed_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx);
2168 xhci->num_active_eps -= num_failed_eps;
2169 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2172 xhci->num_active_eps);
2179 * Must be called with xhci->lock held.
2181 static void xhci_finish_resource_reservation(struct xhci_hcd *xhci,
2186 num_dropped_eps = xhci_count_num_dropped_endpoints(xhci, ctrl_ctx);
2187 xhci->num_active_eps -= num_dropped_eps;
2189 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2192 xhci->num_active_eps);
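The XHCI_EP_LIMIT_QUIRK helpers above reserve host endpoint resources before a Configure Endpoint command and give them back on failure or once dropped endpoints complete. The count is derived from the input control context's add/drop flag words. A simplified model, assuming the >> 2 skips the slot and default control endpoint flags and using __builtin_popcount() in place of hweight32():

#include <stdint.h>

/* Endpoints that are both added and dropped ("changed") do not consume
 * additional resources, so only bits set in add but not in drop count. */
static unsigned int count_new_endpoints(uint32_t add_flags, uint32_t drop_flags)
{
        uint32_t add  = add_flags  >> 2;
        uint32_t drop = drop_flags >> 2;

        return (unsigned int)__builtin_popcount(add & ~drop);
}

/* Reserve the new endpoints against a global limit, mirroring
 * xhci_reserve_host_resources(); the caller holds the host lock. */
static int reserve_host_eps(unsigned int *num_active_eps,
                            unsigned int limit_active_eps,
                            uint32_t add_flags, uint32_t drop_flags)
{
        unsigned int added = count_new_endpoints(add_flags, drop_flags);

        if (*num_active_eps + added > limit_active_eps)
                return -1;                      /* -ENOMEM in the driver */
        *num_active_eps += added;
        return 0;
}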
2227 static int xhci_check_tt_bw_table(struct xhci_hcd *xhci,
2235 bw_table = &xhci->rh_bw[virt_dev->real_port - 1].bw_table;
2257 static int xhci_check_ss_bw(struct xhci_hcd *xhci,
2314 static int xhci_check_bw_table(struct xhci_hcd *xhci,
2330 return xhci_check_ss_bw(xhci, virt_dev);
2351 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2354 if (xhci_check_tt_bw_table(xhci, virt_dev, old_active_eps)) {
2355 xhci_warn(xhci, "Not enough bandwidth on HS bus for "
2359 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2364 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2447 xhci_warn(xhci, "Not enough bandwidth. "
2470 xhci->rh_bw[port_index].num_active_tts;
2473 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2482 xhci_warn(xhci, "Not enough bandwidth. Proposed: %u, Max: %u\n",
2517 static void xhci_drop_ep_from_interval_table(struct xhci_hcd *xhci,
2532 xhci->devs[udev->slot_id]->bw_table->ss_bw_in -=
2535 xhci->devs[udev->slot_id]->bw_table->ss_bw_out -=
2578 static void xhci_add_ep_to_interval_table(struct xhci_hcd *xhci,
2594 xhci->devs[udev->slot_id]->bw_table->ss_bw_in +=
2597 xhci->devs[udev->slot_id]->bw_table->ss_bw_out +=
2649 void xhci_update_tt_active_eps(struct xhci_hcd *xhci,
2657 rh_bw_info = &xhci->rh_bw[virt_dev->real_port - 1];
2669 static int xhci_reserve_bandwidth(struct xhci_hcd *xhci,
2683 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
2699 xhci_drop_ep_from_interval_table(xhci,
2707 xhci_update_bw_info(xhci, virt_dev->in_ctx, ctrl_ctx, virt_dev);
2711 xhci_add_ep_to_interval_table(xhci,
2719 if (!xhci_check_bw_table(xhci, virt_dev, old_active_eps)) {
2723 xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps);
2736 xhci_drop_ep_from_interval_table(xhci,
2748 xhci_add_ep_to_interval_table(xhci,
2762 static int xhci_configure_endpoint(struct xhci_hcd *xhci,
2776 spin_lock_irqsave(&xhci->lock, flags);
2778 if (xhci->xhc_state & XHCI_STATE_DYING) {
2779 spin_unlock_irqrestore(&xhci->lock, flags);
2783 virt_dev = xhci->devs[udev->slot_id];
2787 spin_unlock_irqrestore(&xhci->lock, flags);
2788 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
2793 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK) &&
2794 xhci_reserve_host_resources(xhci, ctrl_ctx)) {
2795 spin_unlock_irqrestore(&xhci->lock, flags);
2796 xhci_warn(xhci, "Not enough host resources, "
2798 xhci->num_active_eps);
2801 if ((xhci->quirks & XHCI_SW_BW_CHECKING) &&
2802 xhci_reserve_bandwidth(xhci, virt_dev, command->in_ctx)) {
2803 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
2804 xhci_free_host_resources(xhci, ctrl_ctx);
2805 spin_unlock_irqrestore(&xhci->lock, flags);
2806 xhci_warn(xhci, "Not enough bandwidth\n");
2810 slot_ctx = xhci_get_slot_ctx(xhci, command->in_ctx);
2816 ret = xhci_queue_configure_endpoint(xhci, command,
2820 ret = xhci_queue_evaluate_context(xhci, command,
2824 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
2825 xhci_free_host_resources(xhci, ctrl_ctx);
2826 spin_unlock_irqrestore(&xhci->lock, flags);
2827 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
2831 xhci_ring_cmd_db(xhci);
2832 spin_unlock_irqrestore(&xhci->lock, flags);
2838 ret = xhci_configure_endpoint_result(xhci, udev,
2841 ret = xhci_evaluate_context_result(xhci, udev,
2844 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
2845 spin_lock_irqsave(&xhci->lock, flags);
2850 xhci_free_host_resources(xhci, ctrl_ctx);
2852 xhci_finish_resource_reservation(xhci, ctrl_ctx);
2853 spin_unlock_irqrestore(&xhci->lock, flags);
2858 static void xhci_check_bw_drop_ep_streams(struct xhci_hcd *xhci,
2864 xhci_warn(xhci, "WARN: endpoint 0x%02x has streams on set_interface, freeing streams.\n",
2866 xhci_free_stream_info(xhci, ep->stream_info);
2879 * else should be touching the xhci->devs[slot_id] structure, so we
2880 * don't need to take the xhci->lock for manipulating that.
2886 struct xhci_hcd *xhci;
2895 xhci = hcd_to_xhci(hcd);
2896 if ((xhci->xhc_state & XHCI_STATE_DYING) ||
2897 (xhci->xhc_state & XHCI_STATE_REMOVING))
2900 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
2901 virt_dev = xhci->devs[udev->slot_id];
2903 command = xhci_alloc_command(xhci, true, GFP_KERNEL);
2912 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
2928 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
2940 ret = xhci_configure_endpoint(xhci, udev, command,
2950 xhci_free_endpoint_ring(xhci, virt_dev, i);
2951 xhci_check_bw_drop_ep_streams(xhci, virt_dev, i);
2954 xhci_zero_in_ctx(xhci, virt_dev);
2966 xhci_free_endpoint_ring(xhci, virt_dev, i);
2968 xhci_check_bw_drop_ep_streams(xhci, virt_dev, i);
2971 xhci_debugfs_create_endpoint(xhci, virt_dev, i);
2983 struct xhci_hcd *xhci;
2990 xhci = hcd_to_xhci(hcd);
2992 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
2993 virt_dev = xhci->devs[udev->slot_id];
2997 xhci_debugfs_remove_endpoint(xhci, virt_dev, i);
2998 xhci_ring_free(xhci, virt_dev->eps[i].new_ring);
3002 xhci_zero_in_ctx(xhci, virt_dev);
3006 static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci,
3014 xhci_slot_copy(xhci, in_ctx, out_ctx);
3021 struct xhci_hcd *xhci;
3028 xhci = hcd_to_xhci(hcd);
3030 spin_lock_irqsave(&xhci->lock, flags);
3036 vdev = xhci->devs[udev->slot_id];
3045 spin_unlock_irqrestore(&xhci->lock, flags);
3051 xhci_dbg(xhci, "endpoint disable with ep_state 0x%x\n",
3055 spin_unlock_irqrestore(&xhci->lock, flags);
3067 * endpoint. Refer to the additional note in xhci specification section 4.6.8.
3073 struct xhci_hcd *xhci;
3084 xhci = hcd_to_xhci(hcd);
3088 vdev = xhci->devs[udev->slot_id];
3101 spin_lock_irqsave(&xhci->lock, flags);
3104 spin_unlock_irqrestore(&xhci->lock, flags);
3107 spin_unlock_irqrestore(&xhci->lock, flags);
3118 stop_cmd = xhci_alloc_command(xhci, true, GFP_NOWAIT);
3122 cfg_cmd = xhci_alloc_command_with_ctx(xhci, true, GFP_NOWAIT);
3126 spin_lock_irqsave(&xhci->lock, flags);
3139 spin_unlock_irqrestore(&xhci->lock, flags);
3140 xhci_free_command(xhci, cfg_cmd);
3144 err = xhci_queue_stop_endpoint(xhci, stop_cmd, udev->slot_id,
3147 spin_unlock_irqrestore(&xhci->lock, flags);
3148 xhci_free_command(xhci, cfg_cmd);
3149 xhci_dbg(xhci, "%s: Failed to queue stop ep command, %d ",
3154 xhci_ring_cmd_db(xhci);
3155 spin_unlock_irqrestore(&xhci->lock, flags);
3159 spin_lock_irqsave(&xhci->lock, flags);
3164 spin_unlock_irqrestore(&xhci->lock, flags);
3165 xhci_free_command(xhci, cfg_cmd);
3166 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
3171 xhci_setup_input_ctx_for_config_ep(xhci, cfg_cmd->in_ctx, vdev->out_ctx,
3173 xhci_endpoint_copy(xhci, cfg_cmd->in_ctx, vdev->out_ctx, ep_index);
3175 err = xhci_queue_configure_endpoint(xhci, cfg_cmd, cfg_cmd->in_ctx->dma,
3178 spin_unlock_irqrestore(&xhci->lock, flags);
3179 xhci_free_command(xhci, cfg_cmd);
3180 xhci_dbg(xhci, "%s: Failed to queue config ep command, %d ",
3185 xhci_ring_cmd_db(xhci);
3186 spin_unlock_irqrestore(&xhci->lock, flags);
3190 xhci_free_command(xhci, cfg_cmd);
3192 xhci_free_command(xhci, stop_cmd);
3193 spin_lock_irqsave(&xhci->lock, flags);
3196 spin_unlock_irqrestore(&xhci->lock, flags);
3199 static int xhci_check_streams_endpoint(struct xhci_hcd *xhci,
3209 ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, true, __func__);
3213 xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion"
3220 ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
3223 xhci_warn(xhci, "WARN: SuperSpeed bulk endpoint 0x%x "
3226 xhci_warn(xhci, "Send email to xHCI maintainer and ask for "
3230 if (!list_empty(&xhci->devs[slot_id]->eps[ep_index].ring->td_list)) {
3231 xhci_warn(xhci, "Cannot setup streams for SuperSpeed bulk "
3239 static void xhci_calculate_streams_entries(struct xhci_hcd *xhci,
3252 max_streams = HCC_MAX_PSA(xhci->hcc_params);
3254 xhci_dbg(xhci, "xHCI HW only supports %u stream ctx entries.\n",
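xhci_calculate_streams_entries() sizes the stream context array: it must be a power of two at least as large as the requested number of stream IDs, and it is capped at what the controller advertises in HCC_MAX_PSA. A sketch with the cap passed in directly (round_pow2() stands in for the kernel's roundup_pow_of_two()):

#include <stdint.h>

/* Round up to the next power of two. */
static unsigned int round_pow2(unsigned int n)
{
        unsigned int p = 1;

        while (p < n)
                p <<= 1;
        return p;
}

/* Model of xhci_calculate_streams_entries(): power-of-two array size,
 * capped at the controller's advertised maximum primary stream array
 * size (HCC_MAX_PSA in hcc_params), here passed in as max_psa. */
static void calc_stream_entries(unsigned int *num_streams,
                                unsigned int *num_stream_ctxs,
                                unsigned int max_psa)
{
        *num_stream_ctxs = round_pow2(*num_streams);
        if (*num_stream_ctxs > max_psa) {
                *num_stream_ctxs = max_psa;
                *num_streams = max_psa;
        }
}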
3265 static int xhci_calculate_streams_and_bitmask(struct xhci_hcd *xhci,
3276 ret = xhci_check_streams_endpoint(xhci, udev,
3283 xhci_dbg(xhci, "Ep 0x%x only supports %u stream IDs.\n",
3297 static u32 xhci_calculate_no_streams_bitmask(struct xhci_hcd *xhci,
3308 if (!xhci->devs[slot_id])
3313 ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
3316 xhci_warn(xhci, "WARN Can't disable streams for "
3325 xhci_warn(xhci, "WARN Can't disable streams for "
3329 xhci_warn(xhci, "WARN xhci_free_streams() called "
3359 struct xhci_hcd *xhci;
3376 xhci = hcd_to_xhci(hcd);
3377 xhci_dbg(xhci, "Driver wants %u stream IDs (including stream 0).\n",
3381 if ((xhci->quirks & XHCI_BROKEN_STREAMS) ||
3382 HCC_MAX_PSA(xhci->hcc_params) < 4) {
3383 xhci_dbg(xhci, "xHCI controller does not support streams.\n");
3387 config_cmd = xhci_alloc_command_with_ctx(xhci, true, mem_flags);
3393 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
3395 xhci_free_command(xhci, config_cmd);
3403 spin_lock_irqsave(&xhci->lock, flags);
3404 ret = xhci_calculate_streams_and_bitmask(xhci, udev, eps,
3407 xhci_free_command(xhci, config_cmd);
3408 spin_unlock_irqrestore(&xhci->lock, flags);
3412 xhci_warn(xhci, "WARN: endpoints can't handle "
3414 xhci_free_command(xhci, config_cmd);
3415 spin_unlock_irqrestore(&xhci->lock, flags);
3418 vdev = xhci->devs[udev->slot_id];
3426 spin_unlock_irqrestore(&xhci->lock, flags);
3432 xhci_calculate_streams_entries(xhci, &num_streams, &num_stream_ctxs);
3433 xhci_dbg(xhci, "Need %u stream ctx entries for %u stream IDs.\n",
3439 vdev->eps[ep_index].stream_info = xhci_alloc_stream_info(xhci,
3455 ep_ctx = xhci_get_ep_ctx(xhci, config_cmd->in_ctx, ep_index);
3457 xhci_endpoint_copy(xhci, config_cmd->in_ctx,
3459 xhci_setup_streams_ep_input_ctx(xhci, ep_ctx,
3465 xhci_setup_input_ctx_for_config_ep(xhci, config_cmd->in_ctx,
3470 ret = xhci_configure_endpoint(xhci, udev, config_cmd,
3480 spin_lock_irqsave(&xhci->lock, flags);
3484 xhci_dbg(xhci, "Slot %u ep ctx %u now has streams.\n",
3488 xhci_free_command(xhci, config_cmd);
3489 spin_unlock_irqrestore(&xhci->lock, flags);
3493 xhci_debugfs_create_stream_files(xhci, vdev, ep_index);
3502 xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
3509 xhci_endpoint_zero(xhci, vdev, eps[i]);
3511 xhci_free_command(xhci, config_cmd);
3526 struct xhci_hcd *xhci;
3534 xhci = hcd_to_xhci(hcd);
3535 vdev = xhci->devs[udev->slot_id];
3538 spin_lock_irqsave(&xhci->lock, flags);
3539 changed_ep_bitmask = xhci_calculate_no_streams_bitmask(xhci,
3542 spin_unlock_irqrestore(&xhci->lock, flags);
3554 spin_unlock_irqrestore(&xhci->lock, flags);
3555 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
3564 ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index);
3565 xhci->devs[udev->slot_id]->eps[ep_index].ep_state |=
3568 xhci_endpoint_copy(xhci, command->in_ctx,
3573 xhci_setup_input_ctx_for_config_ep(xhci, command->in_ctx,
3576 spin_unlock_irqrestore(&xhci->lock, flags);
3581 ret = xhci_configure_endpoint(xhci, udev, command,
3590 spin_lock_irqsave(&xhci->lock, flags);
3593 xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
3601 spin_unlock_irqrestore(&xhci->lock, flags);
3611 * Must be called with xhci->lock held.
3613 void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci,
3626 xhci->num_active_eps -= num_dropped_eps;
3628 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
3632 xhci->num_active_eps);
3658 struct xhci_hcd *xhci;
3668 xhci = hcd_to_xhci(hcd);
3670 virt_dev = xhci->devs[slot_id];
3672 xhci_dbg(xhci, "The device to be reset with slot ID %u does "
3689 xhci_dbg(xhci, "The device to be reset with slot ID %u does "
3700 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
3707 xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id);
3714 reset_device_cmd = xhci_alloc_command(xhci, true, GFP_NOIO);
3716 xhci_dbg(xhci, "Couldn't allocate command structure.\n");
3721 spin_lock_irqsave(&xhci->lock, flags);
3723 ret = xhci_queue_reset_device(xhci, reset_device_cmd, slot_id);
3725 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
3726 spin_unlock_irqrestore(&xhci->lock, flags);
3729 xhci_ring_cmd_db(xhci);
3730 spin_unlock_irqrestore(&xhci->lock, flags);
3743 xhci_warn(xhci, "Timeout waiting for reset device command\n");
3748 xhci_dbg(xhci, "Can't reset device (slot ID %u) in %s state\n",
3750 xhci_get_slot_state(xhci, virt_dev->out_ctx));
3751 xhci_dbg(xhci, "Not freeing device rings.\n");
3756 xhci_dbg(xhci, "Successful reset device command.\n");
3759 if (xhci_is_vendor_info_code(xhci, ret))
3761 xhci_warn(xhci, "Unknown completion code %u for "
3768 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
3769 spin_lock_irqsave(&xhci->lock, flags);
3771 xhci_free_device_endpoint_resources(xhci, virt_dev, false);
3772 spin_unlock_irqrestore(&xhci->lock, flags);
3780 xhci_warn(xhci, "WARN: endpoint 0x%02x has streams on device reset, freeing streams.\n",
3782 xhci_free_stream_info(xhci, ep->stream_info);
3788 xhci_debugfs_remove_endpoint(xhci, virt_dev, i);
3789 xhci_free_endpoint_ring(xhci, virt_dev, i);
3792 xhci_drop_ep_from_interval_table(xhci,
3801 xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps);
3806 xhci_free_command(xhci, reset_device_cmd);
3817 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3828 if (xhci->quirks & XHCI_RESET_ON_RESUME)
3838 virt_dev = xhci->devs[udev->slot_id];
3839 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
3846 xhci_disable_slot(xhci, udev->slot_id);
3848 spin_lock_irqsave(&xhci->lock, flags);
3849 xhci_free_virt_device(xhci, udev->slot_id);
3850 spin_unlock_irqrestore(&xhci->lock, flags);
3854 int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id)
3861 command = xhci_alloc_command(xhci, true, GFP_KERNEL);
3865 xhci_debugfs_remove_slot(xhci, slot_id);
3867 spin_lock_irqsave(&xhci->lock, flags);
3869 state = readl(&xhci->op_regs->status);
3870 if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) ||
3871 (xhci->xhc_state & XHCI_STATE_HALTED)) {
3872 spin_unlock_irqrestore(&xhci->lock, flags);
3877 ret = xhci_queue_slot_control(xhci, command, TRB_DISABLE_SLOT,
3880 spin_unlock_irqrestore(&xhci->lock, flags);
3884 xhci_ring_cmd_db(xhci);
3885 spin_unlock_irqrestore(&xhci->lock, flags);
3890 xhci_warn(xhci, "Unsuccessful disable slot %u command, status %d\n",
3893 xhci_free_command(xhci, command);
3902 * Must be called with xhci->lock held.
3904 static int xhci_reserve_host_control_ep_resources(struct xhci_hcd *xhci)
3906 if (xhci->num_active_eps + 1 > xhci->limit_active_eps) {
3907 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
3910 xhci->num_active_eps, xhci->limit_active_eps);
3913 xhci->num_active_eps += 1;
3914 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
3916 xhci->num_active_eps);
3927 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3934 command = xhci_alloc_command(xhci, true, GFP_KERNEL);
3938 spin_lock_irqsave(&xhci->lock, flags);
3939 ret = xhci_queue_slot_control(xhci, command, TRB_ENABLE_SLOT, 0);
3941 spin_unlock_irqrestore(&xhci->lock, flags);
3942 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
3943 xhci_free_command(xhci, command);
3946 xhci_ring_cmd_db(xhci);
3947 spin_unlock_irqrestore(&xhci->lock, flags);
3953 xhci_err(xhci, "Error while assigning device slot ID: %s\n",
3955 xhci_err(xhci, "Max number of devices this xHCI host supports is %u.\n",
3957 readl(&xhci->cap_regs->hcs_params1)));
3958 xhci_free_command(xhci, command);
3962 xhci_free_command(xhci, command);
3964 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
3965 spin_lock_irqsave(&xhci->lock, flags);
3966 ret = xhci_reserve_host_control_ep_resources(xhci);
3968 spin_unlock_irqrestore(&xhci->lock, flags);
3969 xhci_warn(xhci, "Not enough host resources, "
3971 xhci->num_active_eps);
3974 spin_unlock_irqrestore(&xhci->lock, flags);
3980 if (!xhci_alloc_virt_device(xhci, slot_id, udev, GFP_NOIO)) {
3981 xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
3984 vdev = xhci->devs[slot_id];
3985 slot_ctx = xhci_get_slot_ctx(xhci, vdev->out_ctx);
3990 xhci_debugfs_create_slot(xhci, slot_id);
3996 if (xhci->quirks & XHCI_RESET_ON_RESUME)
4004 xhci_disable_slot(xhci, udev->slot_id);
4005 xhci_free_virt_device(xhci, udev->slot_id);
4021 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
4027 mutex_lock(&xhci->mutex);
4029 if (xhci->xhc_state) { /* dying, removing or halted */
4035 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4041 virt_dev = xhci->devs[udev->slot_id];
4049 xhci_warn(xhci, "Virt dev invalid for slot_id 0x%x!\n",
4054 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
4060 xhci_dbg(xhci, "Slot already in default state\n");
4065 command = xhci_alloc_command(xhci, true, GFP_KERNEL);
4073 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
4076 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
4087 xhci_setup_addressable_virt_dev(xhci, udev);
4090 xhci_copy_ep0_dequeue_into_input_ctx(xhci, udev);
4094 trace_xhci_address_ctx(xhci, virt_dev->in_ctx,
4098 spin_lock_irqsave(&xhci->lock, flags);
4100 ret = xhci_queue_address_device(xhci, command, virt_dev->in_ctx->dma,
4103 spin_unlock_irqrestore(&xhci->lock, flags);
4104 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4108 xhci_ring_cmd_db(xhci);
4109 spin_unlock_irqrestore(&xhci->lock, flags);
4121 xhci_warn(xhci, "Timeout while waiting for setup device command\n");
4126 xhci_err(xhci, "Setup ERROR: setup %s command for slot %d.\n",
4133 mutex_unlock(&xhci->mutex);
4134 ret = xhci_disable_slot(xhci, udev->slot_id);
4135 xhci_free_virt_device(xhci, udev->slot_id);
4147 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4151 xhci_err(xhci,
4154 trace_xhci_address_ctx(xhci, virt_dev->out_ctx, 1);
4160 temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
4161 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4163 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4166 &xhci->dcbaa->dev_context_ptrs[udev->slot_id],
4168 le64_to_cpu(xhci->dcbaa->dev_context_ptrs[udev->slot_id]));
4169 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4172 trace_xhci_address_ctx(xhci, virt_dev->in_ctx,
4178 trace_xhci_address_ctx(xhci, virt_dev->out_ctx,
4183 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
4186 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4190 mutex_unlock(&xhci->mutex);
4226 static int __maybe_unused xhci_change_max_exit_latency(struct xhci_hcd *xhci,
4236 command = xhci_alloc_command_with_ctx(xhci, true, GFP_KERNEL);
4240 spin_lock_irqsave(&xhci->lock, flags);
4242 virt_dev = xhci->devs[udev->slot_id];
4247 * hub_port_finish_reset() is done and xhci->devs[] are re-allocated
4251 spin_unlock_irqrestore(&xhci->lock, flags);
4252 xhci_free_command(xhci, command);
4259 spin_unlock_irqrestore(&xhci->lock, flags);
4260 xhci_free_command(xhci, command);
4261 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
4266 xhci_slot_copy(xhci, command->in_ctx, virt_dev->out_ctx);
4267 spin_unlock_irqrestore(&xhci->lock, flags);
4270 slot_ctx = xhci_get_slot_ctx(xhci, command->in_ctx);
4275 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
4279 ret = xhci_configure_endpoint(xhci, udev, command,
4283 spin_lock_irqsave(&xhci->lock, flags);
4285 spin_unlock_irqrestore(&xhci->lock, flags);
4288 xhci_free_command(xhci, command);
4300 static int xhci_calculate_hird_besl(struct xhci_hcd *xhci,
4307 u2del = HCS_U2_LATENCY(xhci->hcs_params3);
4359 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
4368 if (xhci->quirks & XHCI_HW_LPM_DISABLE)
4371 if (hcd->speed >= HCD_USB3 || !xhci->hw_lpm_support ||
4382 spin_lock_irqsave(&xhci->lock, flags);
4384 ports = xhci->usb2_rhub.ports;
4390 xhci_dbg(xhci, "%s port %d USB2 hardware LPM\n",
4398 * systems. See XHCI_DEFAULT_BESL definition in xhci.h
4408 spin_unlock_irqrestore(&xhci->lock, flags);
4410 ret = xhci_change_max_exit_latency(xhci, udev,
4414 spin_lock_irqsave(&xhci->lock, flags);
4421 hird = xhci_calculate_hird_besl(xhci, udev);
4438 spin_unlock_irqrestore(&xhci->lock, flags);
4439 xhci_change_max_exit_latency(xhci, udev, 0);
4447 spin_unlock_irqrestore(&xhci->lock, flags);
4455 static int xhci_check_usb2_port_capability(struct xhci_hcd *xhci, int port,
4461 for (i = 0; i < xhci->num_ext_caps; i++) {
4462 if (xhci->ext_caps[i] & capability) {
4464 port_offset = XHCI_EXT_PORT_OFF(xhci->ext_caps[i]) - 1;
4465 port_count = XHCI_EXT_PORT_COUNT(xhci->ext_caps[i]);
4476 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
4487 if (xhci->hw_lpm_support == 1 &&
4489 xhci, portnum, XHCI_HLC)) {
4493 if (xhci_check_usb2_port_capability(xhci, portnum,
4598 static u16 xhci_calculate_u1_timeout(struct xhci_hcd *xhci,
4612 if (xhci->quirks & (XHCI_INTEL_HOST | XHCI_ZHAOXIN_HOST))
4662 static u16 xhci_calculate_u2_timeout(struct xhci_hcd *xhci,
4676 if (xhci->quirks & (XHCI_INTEL_HOST | XHCI_ZHAOXIN_HOST))
4693 static u16 xhci_call_host_update_timeout_for_endpoint(struct xhci_hcd *xhci,
4700 return xhci_calculate_u1_timeout(xhci, udev, desc);
4702 return xhci_calculate_u2_timeout(xhci, udev, desc);
4707 static int xhci_update_timeout_for_endpoint(struct xhci_hcd *xhci,
4715 alt_timeout = xhci_call_host_update_timeout_for_endpoint(xhci, udev,
4732 static int xhci_update_timeout_for_interface(struct xhci_hcd *xhci,
4741 if (xhci_update_timeout_for_endpoint(xhci, udev,
4748 static int xhci_check_tier_policy(struct xhci_hcd *xhci,
4760 if (xhci->quirks & XHCI_INTEL_HOST && tier > 3)
4762 if (xhci->quirks & XHCI_ZHAOXIN_HOST && tier > 2)
4780 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
4799 if (xhci_update_timeout_for_endpoint(xhci, udev, &udev->ep0.desc,
4833 if (xhci_update_timeout_for_interface(xhci, udev,
4888 struct xhci_hcd *xhci;
4894 xhci = hcd_to_xhci(hcd);
4899 if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) ||
4900 !xhci->devs[udev->slot_id])
4903 if (xhci_check_tier_policy(xhci, udev, state) < 0)
4908 port = xhci->usb3_rhub.ports[udev->portnum - 1];
4921 ret = xhci_change_max_exit_latency(xhci, udev, mel);
4930 struct xhci_hcd *xhci;
4933 xhci = hcd_to_xhci(hcd);
4934 if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) ||
4935 !xhci->devs[udev->slot_id])
4939 return xhci_change_max_exit_latency(xhci, udev, mel);
4975 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
4988 vdev = xhci->devs[hdev->slot_id];
4990 xhci_warn(xhci, "Cannot update hub desc for unknown device.\n");
4994 config_cmd = xhci_alloc_command_with_ctx(xhci, true, mem_flags);
5000 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
5002 xhci_free_command(xhci, config_cmd);
5006 spin_lock_irqsave(&xhci->lock, flags);
5008 xhci_alloc_tt_info(xhci, vdev, hdev, tt, GFP_ATOMIC)) {
5009 xhci_dbg(xhci, "Could not allocate xHCI TT structure.\n");
5010 xhci_free_command(xhci, config_cmd);
5011 spin_unlock_irqrestore(&xhci->lock, flags);
5015 xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx);
5017 slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx);
5029 if (xhci->hci_version > 0x95) {
5030 xhci_dbg(xhci, "xHCI version %x needs hub "
5032 (unsigned int) xhci->hci_version);
5044 if (xhci->hci_version < 0x100 || hdev->speed == USB_SPEED_HIGH)
5048 xhci_dbg(xhci, "xHCI version %x doesn't need hub "
5050 (unsigned int) xhci->hci_version);
5053 spin_unlock_irqrestore(&xhci->lock, flags);
5055 xhci_dbg(xhci, "Set up %s for hub device.\n",
5056 (xhci->hci_version > 0x95) ?
5062 if (xhci->hci_version > 0x95)
5063 ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
5066 ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
5069 xhci_free_command(xhci, config_cmd);
5076 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
5078 return readl(&xhci->run_regs->microframe_index) >> 3;
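xhci_get_frame() simply shifts the running MFINDEX register right by three: the register counts 125 microsecond microframes, and eight of them make one 1 ms USB frame.

#include <stdint.h>

/* Eight 125 us microframes per 1 ms USB frame, hence the >> 3 in
 * xhci_get_frame(). */
static unsigned int mfindex_to_frame(uint32_t mfindex)
{
        return mfindex >> 3;
}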
5081 static void xhci_hcd_init_usb2_data(struct xhci_hcd *xhci, struct usb_hcd *hcd)
5083 xhci->usb2_rhub.hcd = hcd;
5094 static void xhci_hcd_init_usb3_data(struct xhci_hcd *xhci, struct usb_hcd *hcd)
5107 if (xhci->usb3_rhub.min_rev == 0x1)
5110 minor_rev = xhci->usb3_rhub.min_rev / 0x10;
5126 xhci_info(xhci, "Host supports USB 3.%x %sSuperSpeed\n",
5129 xhci->usb3_rhub.hcd = hcd;
5134 struct xhci_hcd *xhci;
5151 xhci = hcd_to_xhci(hcd);
5154 xhci_hcd_init_usb3_data(xhci, hcd);
5158 mutex_init(&xhci->mutex);
5159 xhci->main_hcd = hcd;
5160 xhci->cap_regs = hcd->regs;
5161 xhci->op_regs = hcd->regs +
5162 HC_LENGTH(readl(&xhci->cap_regs->hc_capbase));
5163 xhci->run_regs = hcd->regs +
5164 (readl(&xhci->cap_regs->run_regs_off) & RTSOFF_MASK);
5166 xhci->hcs_params1 = readl(&xhci->cap_regs->hcs_params1);
5167 xhci->hcs_params2 = readl(&xhci->cap_regs->hcs_params2);
5168 xhci->hcs_params3 = readl(&xhci->cap_regs->hcs_params3);
5169 xhci->hci_version = HC_VERSION(readl(&xhci->cap_regs->hc_capbase));
5170 xhci->hcc_params = readl(&xhci->cap_regs->hcc_params);
5171 if (xhci->hci_version > 0x100)
5172 xhci->hcc_params2 = readl(&xhci->cap_regs->hcc_params2);
5174 /* xhci-plat or xhci-pci might have set max_interrupters already */
5175 if ((!xhci->max_interrupters) ||
5176 xhci->max_interrupters > HCS_MAX_INTRS(xhci->hcs_params1))
5177 xhci->max_interrupters = HCS_MAX_INTRS(xhci->hcs_params1);
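The max_interrupters handling just above lets xhci-plat or xhci-pci preset a value, but it is clamped to what HCSPARAMS1 advertises, and an unset (zero) value falls back to that hardware maximum. A trivial model of the clamp (clamp_max_interrupters() is an invented name):

/* requested == 0 means "not preset"; otherwise cap at the HCSPARAMS1
 * maximum (HCS_MAX_INTRS in the driver). */
static unsigned int clamp_max_interrupters(unsigned int requested,
                                           unsigned int hw_max)
{
        if (!requested || requested > hw_max)
                return hw_max;
        return requested;
}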
5179 xhci->quirks |= quirks;
5182 get_quirks(dev, xhci);
5184 /* xhci controllers that follow the xhci 1.0 spec give a spurious
5188 if (xhci->hci_version > 0x96)
5189 xhci->quirks |= XHCI_SPURIOUS_SUCCESS;
5192 retval = xhci_halt(xhci);
5196 xhci_zero_64b_regs(xhci);
5198 xhci_dbg(xhci, "Resetting HCD\n");
5200 retval = xhci_reset(xhci, XHCI_RESET_LONG_USEC);
5203 xhci_dbg(xhci, "Reset complete\n");
5209 * bit of xhci->hcc_params to call dma_set_coherent_mask(dev,
5212 if (xhci->quirks & XHCI_NO_64BIT_SUPPORT)
5213 xhci->hcc_params &= ~BIT(0);
5217 if (HCC_64BIT_ADDR(xhci->hcc_params) &&
5219 xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n");
5229 xhci_dbg(xhci, "Enabling 32-bit DMA addresses.\n");
5233 xhci_dbg(xhci, "Calling HCD init\n");
5238 xhci_dbg(xhci, "Called HCD init\n");
5241 xhci_hcd_init_usb3_data(xhci, hcd);
5243 xhci_hcd_init_usb2_data(xhci, hcd);
5245 xhci_info(xhci, "hcc params 0x%08x hci version 0x%x quirks 0x%016llx\n",
5246 xhci->hcc_params, xhci->hci_version, xhci->quirks);
5255 struct xhci_hcd *xhci;
5261 xhci = hcd_to_xhci(hcd);
5263 spin_lock_irqsave(&xhci->lock, flags);
5268 xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_CLEARING_TT;
5269 xhci_ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
5270 spin_unlock_irqrestore(&xhci->lock, flags);
5274 .description = "xhci-hcd",