Lines Matching refs:xhci

22 #include "xhci.h"
23 #include "xhci-trace.h"
24 #include "xhci-mtk.h"
25 #include "xhci-debugfs.h"
26 #include "xhci-dbgcap.h"
88 void xhci_quiesce(struct xhci_hcd *xhci)
95 halted = readl(&xhci->op_regs->status) & STS_HALT;
99 cmd = readl(&xhci->op_regs->command);
101 writel(cmd, &xhci->op_regs->command);
112 int xhci_halt(struct xhci_hcd *xhci)
115 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Halt the HC");
116 xhci_quiesce(xhci);
118 ret = xhci_handshake(&xhci->op_regs->status,
121 xhci_warn(xhci, "Host halt failed, %d\n", ret);
124 xhci->xhc_state |= XHCI_STATE_HALTED;
125 xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
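Both xhci_halt() (line 118 above) and xhci_start() (line 147) wait on a status register through xhci_handshake(). A minimal sketch of that polling pattern, assuming readl_poll_timeout_atomic() from <linux/iopoll.h> and treating an all-ones read as a removed controller; this is a sketch, not the driver's verbatim code:

/* Sketch: spin on an MMIO register until (value & mask) == done or the
 * timeout expires.  An all-ones read is taken to mean the controller is
 * gone, so the caller gets -ENODEV instead of a timeout.
 */
static int xhci_handshake_sketch(void __iomem *ptr, u32 mask, u32 done,
				 u64 timeout_us)
{
	u32 result;
	int ret;

	ret = readl_poll_timeout_atomic(ptr, result,
					(result & mask) == done ||
					result == U32_MAX,
					1, timeout_us);
	if (result == U32_MAX)		/* controller removed */
		return -ENODEV;

	return ret;
}

The halt path above then simply passes STS_HALT as both the mask and the expected value.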
132 int xhci_start(struct xhci_hcd *xhci)
137 temp = readl(&xhci->op_regs->command);
139 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Turn on HC, cmd = 0x%x.",
141 writel(temp, &xhci->op_regs->command);
147 ret = xhci_handshake(&xhci->op_regs->status,
150 xhci_err(xhci, "Host took too long to start, "
155 xhci->xhc_state = 0;
156 xhci->run_graceperiod = jiffies + msecs_to_jiffies(500);
169 int xhci_reset(struct xhci_hcd *xhci, u64 timeout_us)
175 state = readl(&xhci->op_regs->status);
178 xhci_warn(xhci, "Host not accessible, reset failed.\n");
183 xhci_warn(xhci, "Host controller not halted, aborting reset.\n");
187 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Reset the HC");
188 command = readl(&xhci->op_regs->command);
190 writel(command, &xhci->op_regs->command);
199 if (xhci->quirks & XHCI_INTEL_HOST)
202 ret = xhci_handshake(&xhci->op_regs->command, CMD_RESET, 0, timeout_us);
206 if (xhci->quirks & XHCI_ASMEDIA_MODIFY_FLOWCONTROL)
207 usb_asmedia_modifyflowcontrol(to_pci_dev(xhci_to_hcd(xhci)->self.controller));
209 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
215 ret = xhci_handshake(&xhci->op_regs->status, STS_CNR, 0, timeout_us);
217 xhci->usb2_rhub.bus_state.port_c_suspend = 0;
218 xhci->usb2_rhub.bus_state.suspended_ports = 0;
219 xhci->usb2_rhub.bus_state.resuming_ports = 0;
220 xhci->usb3_rhub.bus_state.port_c_suspend = 0;
221 xhci->usb3_rhub.bus_state.suspended_ports = 0;
222 xhci->usb3_rhub.bus_state.resuming_ports = 0;
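The reset path (lines 169-222 above) checks that the controller is halted, sets CMD_RESET, then waits for both the CMD_RESET bit and the STS_CNR (Controller Not Ready) flag to clear before clearing the root-hub bus_state fields. A condensed sketch of that ordering, assuming the bit definitions from the driver's xhci.h; the Intel-specific delay at line 199 is skipped:

/* Sketch of the reset ordering: refuse to reset a running controller,
 * request the reset, then wait for CMD_RESET and STS_CNR to clear before
 * the registers may be written again.
 */
static int xhci_reset_sketch(struct xhci_hcd *xhci, u64 timeout_us)
{
	u32 state, command;
	int ret;

	state = readl(&xhci->op_regs->status);
	if (!(state & STS_HALT))
		return -EINVAL;		/* must be halted first */

	command = readl(&xhci->op_regs->command);
	command |= CMD_RESET;
	writel(command, &xhci->op_regs->command);

	ret = xhci_handshake(&xhci->op_regs->command, CMD_RESET, 0, timeout_us);
	if (ret)
		return ret;

	/* registers are not usable until STS_CNR clears */
	return xhci_handshake(&xhci->op_regs->status, STS_CNR, 0, timeout_us);
}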
227 static void xhci_zero_64b_regs(struct xhci_hcd *xhci)
229 struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
250 if (!(xhci->quirks & XHCI_ZERO_64B_REGS) || !domain ||
254 xhci_info(xhci, "Zeroing 64bit base registers, expecting fault\n");
257 val = readl(&xhci->op_regs->command);
259 writel(val, &xhci->op_regs->command);
262 val = readl(&xhci->op_regs->status);
264 writel(val, &xhci->op_regs->status);
267 val = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
269 xhci_write_64(xhci, 0, &xhci->op_regs->dcbaa_ptr);
270 val = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
272 xhci_write_64(xhci, 0, &xhci->op_regs->cmd_ring);
274 intrs = min_t(u32, HCS_MAX_INTRS(xhci->hcs_params1),
275 ARRAY_SIZE(xhci->run_regs->ir_set));
280 ir = &xhci->run_regs->ir_set[i];
281 val = xhci_read_64(xhci, &ir->erst_base);
283 xhci_write_64(xhci, 0, &ir->erst_base);
284 val = xhci_read_64(xhci, &ir->erst_dequeue);
286 xhci_write_64(xhci, 0, &ir->erst_dequeue);
290 err = xhci_handshake(&xhci->op_regs->status,
294 xhci_info(xhci, "Fault detected\n");
301 static int xhci_setup_msi(struct xhci_hcd *xhci)
307 struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
311 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
317 0, "xhci_hcd", xhci_to_hcd(xhci));
319 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
330 static int xhci_setup_msix(struct xhci_hcd *xhci)
333 struct usb_hcd *hcd = xhci_to_hcd(xhci);
339 * with max number of interrupters based on the xhci HCSPARAMS1.
343 xhci->msix_count = min(num_online_cpus() + 1,
344 HCS_MAX_INTRS(xhci->hcs_params1));
346 ret = pci_alloc_irq_vectors(pdev, xhci->msix_count, xhci->msix_count,
349 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
354 for (i = 0; i < xhci->msix_count; i++) {
356 "xhci_hcd", xhci_to_hcd(xhci));
365 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "disable MSI-X interrupt");
367 free_irq(pci_irq_vector(pdev, i), xhci_to_hcd(xhci));
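The MSI-X path (lines 330-367) sizes the vector count from the number of online CPUs and the HCSPARAMS1 interrupter limit, allocates the vectors, then requests one IRQ per vector. A trimmed sketch of that allocation loop; the handler name xhci_msi_irq is assumed here, and the unwinding of partially requested vectors visible at line 367 is omitted:

/* Sketch: allocate one MSI-X vector per interrupter, capped by HCSPARAMS1,
 * and wire each vector to the xHCI interrupt handler.  Error unwinding is
 * left out for brevity.
 */
static int xhci_setup_msix_sketch(struct xhci_hcd *xhci)
{
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
	int i, ret;

	xhci->msix_count = min(num_online_cpus() + 1,
			       HCS_MAX_INTRS(xhci->hcs_params1));

	ret = pci_alloc_irq_vectors(pdev, xhci->msix_count, xhci->msix_count,
				    PCI_IRQ_MSIX);
	if (ret < 0)
		return ret;

	for (i = 0; i < xhci->msix_count; i++) {
		ret = request_irq(pci_irq_vector(pdev, i), xhci_msi_irq,
				  0, "xhci_hcd", hcd);
		if (ret)
			return ret;	/* real code frees vectors 0..i-1 */
	}
	return 0;
}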
373 static void xhci_cleanup_msix(struct xhci_hcd *xhci)
375 struct usb_hcd *hcd = xhci_to_hcd(xhci);
378 if (xhci->quirks & XHCI_PLAT)
388 for (i = 0; i < xhci->msix_count; i++)
389 free_irq(pci_irq_vector(pdev, i), xhci_to_hcd(xhci));
391 free_irq(pci_irq_vector(pdev, 0), xhci_to_hcd(xhci));
398 static void __maybe_unused xhci_msix_sync_irqs(struct xhci_hcd *xhci)
400 struct usb_hcd *hcd = xhci_to_hcd(xhci);
406 for (i = 0; i < xhci->msix_count; i++)
413 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
417 /* The xhci platform device has set up IRQs through usb_add_hcd. */
418 if (xhci->quirks & XHCI_PLAT)
421 pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
426 if (xhci->quirks & XHCI_BROKEN_MSI)
434 ret = xhci_setup_msix(xhci);
437 ret = xhci_setup_msi(xhci);
445 xhci_err(xhci, "No msi-x/msi found and no IRQ in BIOS\n");
458 xhci_err(xhci, "request interrupt %d failed\n",
473 static inline void xhci_cleanup_msix(struct xhci_hcd *xhci)
477 static inline void xhci_msix_sync_irqs(struct xhci_hcd *xhci)
485 struct xhci_hcd *xhci;
491 xhci = from_timer(xhci, t, comp_mode_recovery_timer);
492 rhub = &xhci->usb3_rhub;
501 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
504 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
506 hcd = xhci->shared_hcd;
515 if (xhci->port_status_u0 != ((1 << rhub->num_ports) - 1))
516 mod_timer(&xhci->comp_mode_recovery_timer,
527 * status event is generated when entering compliance mode (per xhci spec),
530 static void compliance_mode_recovery_timer_init(struct xhci_hcd *xhci)
532 xhci->port_status_u0 = 0;
533 timer_setup(&xhci->comp_mode_recovery_timer, compliance_mode_recovery,
535 xhci->comp_mode_recovery_timer.expires = jiffies +
538 add_timer(&xhci->comp_mode_recovery_timer);
539 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
570 static int xhci_all_ports_seen_u0(struct xhci_hcd *xhci)
572 return (xhci->port_status_u0 == ((1 << xhci->usb3_rhub.num_ports) - 1));
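The compliance-mode quirk (lines 485-572) arms a recurring timer that keeps re-arming until every USB3 root-hub port has been observed in U0; port_status_u0 holds one bit per port. A minimal sketch of the re-arm pattern, with COMP_MODE_RCVRY_MSECS standing in for the driver's poll interval and the per-port compliance check reduced to a comment:

/* Sketch: timer callback that re-arms itself until every USB3 port has
 * reported U0 (all bits set in port_status_u0).
 */
static void compliance_timer_sketch(struct timer_list *t)
{
	struct xhci_hcd *xhci = from_timer(xhci, t, comp_mode_recovery_timer);
	struct xhci_hub *rhub = &xhci->usb3_rhub;

	/* ... detect ports stuck in compliance mode and warm-reset them ... */

	if (xhci->port_status_u0 != ((1 << rhub->num_ports) - 1))
		mod_timer(&xhci->comp_mode_recovery_timer,
			  jiffies + msecs_to_jiffies(COMP_MODE_RCVRY_MSECS));
}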
585 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
588 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_init");
589 spin_lock_init(&xhci->lock);
590 if (xhci->hci_version == 0x95 && link_quirk) {
591 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
593 xhci->quirks |= XHCI_LINK_TRB_QUIRK;
595 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
598 retval = xhci_mem_init(xhci, GFP_KERNEL);
599 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Finished xhci_init");
603 xhci->quirks |= XHCI_COMP_MODE_QUIRK;
604 compliance_mode_recovery_timer_init(xhci);
613 static int xhci_run_finished(struct xhci_hcd *xhci)
615 if (xhci_start(xhci)) {
616 xhci_halt(xhci);
619 xhci->shared_hcd->state = HC_STATE_RUNNING;
620 xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;
622 if (xhci->quirks & XHCI_NEC_HOST)
623 xhci_ring_cmd_db(xhci);
625 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
647 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
655 return xhci_run_finished(xhci);
657 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_run");
663 temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
665 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
668 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
670 temp = readl(&xhci->ir_set->irq_control);
672 temp |= (xhci->imod_interval / 250) & ER_IRQ_INTERVAL_MASK;
673 writel(temp, &xhci->ir_set->irq_control);
676 temp = readl(&xhci->op_regs->command);
678 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
680 writel(temp, &xhci->op_regs->command);
682 temp = readl(&xhci->ir_set->irq_pending);
683 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
685 xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp));
686 writel(ER_IRQ_ENABLE(temp), &xhci->ir_set->irq_pending);
688 if (xhci->quirks & XHCI_NEC_HOST) {
691 command = xhci_alloc_command(xhci, false, GFP_KERNEL);
695 ret = xhci_queue_vendor_command(xhci, command, 0, 0, 0,
698 xhci_free_command(xhci, command);
700 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
705 xhci_dbc_init(xhci);
707 xhci_debugfs_init(xhci);
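xhci_run() (lines 647-707) programs the interrupt moderation interval, in 250 ns units, into the primary interrupter's irq_control register, enables interrupts in the command register, and finally sets the per-interrupter enable bit with ER_IRQ_ENABLE(). A condensed sketch of those three register writes; the CMD_EIE bit is the USBCMD interrupter-enable bit and is an assumption not visible in the lines above:

/* Sketch of the interrupt-enable sequence in xhci_run(): program the
 * moderation interval, set CMD_EIE, then enable the primary interrupter.
 */
static void xhci_enable_irqs_sketch(struct xhci_hcd *xhci)
{
	u32 temp;

	temp = readl(&xhci->ir_set->irq_control);
	temp &= ~ER_IRQ_INTERVAL_MASK;
	temp |= (xhci->imod_interval / 250) & ER_IRQ_INTERVAL_MASK;
	writel(temp, &xhci->ir_set->irq_control);

	temp = readl(&xhci->op_regs->command);
	temp |= CMD_EIE;
	writel(temp, &xhci->op_regs->command);

	temp = readl(&xhci->ir_set->irq_pending);
	writel(ER_IRQ_ENABLE(temp), &xhci->ir_set->irq_pending);
}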
725 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
727 mutex_lock(&xhci->mutex);
731 mutex_unlock(&xhci->mutex);
735 xhci_dbc_exit(xhci);
737 spin_lock_irq(&xhci->lock);
738 xhci->xhc_state |= XHCI_STATE_HALTED;
739 xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
740 xhci_halt(xhci);
741 xhci_reset(xhci, XHCI_RESET_SHORT_USEC);
742 spin_unlock_irq(&xhci->lock);
744 xhci_cleanup_msix(xhci);
747 if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
748 (!(xhci_all_ports_seen_u0(xhci)))) {
749 del_timer_sync(&xhci->comp_mode_recovery_timer);
750 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
755 if (xhci->quirks & XHCI_AMD_PLL_FIX)
758 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
760 temp = readl(&xhci->op_regs->status);
761 writel((temp & ~0x1fff) | STS_EINT, &xhci->op_regs->status);
762 temp = readl(&xhci->ir_set->irq_pending);
763 writel(ER_IRQ_DISABLE(temp), &xhci->ir_set->irq_pending);
765 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "cleaning up memory");
766 xhci_mem_cleanup(xhci);
767 xhci_debugfs_exit(xhci);
768 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
770 readl(&xhci->op_regs->status));
771 mutex_unlock(&xhci->mutex);
785 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
787 if (xhci->quirks & XHCI_SPURIOUS_REBOOT)
791 xhci_dbg(xhci, "%s: stopping usb%d port polling.\n",
796 if (xhci->shared_hcd) {
797 clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
798 del_timer_sync(&xhci->shared_hcd->rh_timer);
801 spin_lock_irq(&xhci->lock);
802 xhci_halt(xhci);
808 if (xhci->quirks & XHCI_SPURIOUS_WAKEUP ||
809 xhci->quirks & XHCI_RESET_TO_DEFAULT)
810 xhci_reset(xhci, XHCI_RESET_SHORT_USEC);
812 spin_unlock_irq(&xhci->lock);
814 xhci_cleanup_msix(xhci);
816 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
818 readl(&xhci->op_regs->status));
823 static void xhci_save_registers(struct xhci_hcd *xhci)
825 xhci->s3.command = readl(&xhci->op_regs->command);
826 xhci->s3.dev_nt = readl(&xhci->op_regs->dev_notification);
827 xhci->s3.dcbaa_ptr = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
828 xhci->s3.config_reg = readl(&xhci->op_regs->config_reg);
829 xhci->s3.erst_size = readl(&xhci->ir_set->erst_size);
830 xhci->s3.erst_base = xhci_read_64(xhci, &xhci->ir_set->erst_base);
831 xhci->s3.erst_dequeue = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
832 xhci->s3.irq_pending = readl(&xhci->ir_set->irq_pending);
833 xhci->s3.irq_control = readl(&xhci->ir_set->irq_control);
836 static void xhci_restore_registers(struct xhci_hcd *xhci)
838 writel(xhci->s3.command, &xhci->op_regs->command);
839 writel(xhci->s3.dev_nt, &xhci->op_regs->dev_notification);
840 xhci_write_64(xhci, xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr);
841 writel(xhci->s3.config_reg, &xhci->op_regs->config_reg);
842 writel(xhci->s3.erst_size, &xhci->ir_set->erst_size);
843 xhci_write_64(xhci, xhci->s3.erst_base, &xhci->ir_set->erst_base);
844 xhci_write_64(xhci, xhci->s3.erst_dequeue, &xhci->ir_set->erst_dequeue);
845 writel(xhci->s3.irq_pending, &xhci->ir_set->irq_pending);
846 writel(xhci->s3.irq_control, &xhci->ir_set->irq_control);
849 static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci)
854 val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
856 (xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
857 xhci->cmd_ring->dequeue) &
859 xhci->cmd_ring->cycle_state;
860 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
863 xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
875 static void xhci_clear_command_ring(struct xhci_hcd *xhci)
880 ring = xhci->cmd_ring;
910 xhci_set_cmd_ring_deq(xhci);
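xhci_set_cmd_ring_deq() (lines 849-863) rebuilds the command ring control register (CRCR) from three pieces: the reserved low bits of the old value, the DMA address of the current dequeue TRB, and the ring's cycle state. A sketch of that composition, mirroring the lines above; CMD_RING_RSVD_BITS masks off the bits the controller owns:

/* Sketch: recompose CRCR from the preserved reserved bits, the dequeue
 * TRB's DMA address, and the ring cycle state, as done when restoring the
 * command ring after suspend.
 */
static void set_cmd_ring_deq_sketch(struct xhci_hcd *xhci)
{
	u64 val_64;

	val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
		 (xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
				       xhci->cmd_ring->dequeue) &
		  (u64) ~CMD_RING_RSVD_BITS) |
		 xhci->cmd_ring->cycle_state;
	xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
}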
922 static void xhci_disable_hub_port_wake(struct xhci_hcd *xhci,
930 spin_lock_irqsave(&xhci->lock, flags);
947 xhci_dbg(xhci, "config port %d-%d wake bits, portsc: 0x%x, write: 0x%x\n",
951 spin_unlock_irqrestore(&xhci->lock, flags);
954 static bool xhci_pending_portevent(struct xhci_hcd *xhci)
961 status = readl(&xhci->op_regs->status);
967 * being written to the Event Ring. See note in xhci 1.1 section 4.19.2.
970 port_index = xhci->usb2_rhub.num_ports;
971 ports = xhci->usb2_rhub.ports;
978 port_index = xhci->usb3_rhub.num_ports;
979 ports = xhci->usb3_rhub.ports;
995 int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup)
999 struct usb_hcd *hcd = xhci_to_hcd(xhci);
1007 xhci->shared_hcd->state != HC_STATE_SUSPENDED)
1011 xhci_disable_hub_port_wake(xhci, &xhci->usb3_rhub, do_wakeup);
1012 xhci_disable_hub_port_wake(xhci, &xhci->usb2_rhub, do_wakeup);
1017 xhci_dbc_suspend(xhci);
1020 xhci_dbg(xhci, "%s: stopping port polling.\n", __func__);
1023 clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
1024 del_timer_sync(&xhci->shared_hcd->rh_timer);
1026 if (xhci->quirks & XHCI_SUSPEND_DELAY)
1029 spin_lock_irq(&xhci->lock);
1031 clear_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
1036 command = readl(&xhci->op_regs->command);
1038 writel(command, &xhci->op_regs->command);
1041 delay *= (xhci->quirks & XHCI_SLOW_SUSPEND) ? 10 : 1;
1043 if (xhci_handshake(&xhci->op_regs->status,
1045 xhci_warn(xhci, "WARN: xHC CMD_RUN timeout\n");
1046 spin_unlock_irq(&xhci->lock);
1049 xhci_clear_command_ring(xhci);
1052 xhci_save_registers(xhci);
1055 command = readl(&xhci->op_regs->command);
1057 writel(command, &xhci->op_regs->command);
1058 xhci->broken_suspend = 0;
1059 if (xhci_handshake(&xhci->op_regs->status,
1067 * if SRE and HCE bits are not set (as per xhci
1070 res = readl(&xhci->op_regs->status);
1071 if ((xhci->quirks & XHCI_SNPS_BROKEN_SUSPEND) &&
1074 xhci->broken_suspend = 1;
1076 xhci_warn(xhci, "WARN: xHC save state timeout\n");
1077 spin_unlock_irq(&xhci->lock);
1081 spin_unlock_irq(&xhci->lock);
1087 if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
1088 (!(xhci_all_ports_seen_u0(xhci)))) {
1089 del_timer_sync(&xhci->comp_mode_recovery_timer);
1090 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
1097 xhci_msix_sync_irqs(xhci);
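xhci_suspend() (lines 995-1097) stops the controller and asks it to save its internal state: clear CMD_RUN and wait for STS_HALT, snapshot the operational and interrupter registers with xhci_save_registers(), then set CMD_CSS and wait for STS_SAVE to clear. A condensed sketch of that ordering; the lock handling, wake-bit setup, and the XHCI_SLOW_SUSPEND delay scaling are omitted, and the timeouts shown are illustrative:

/* Sketch of the core suspend ordering: halt the controller, save its
 * register state, then issue Controller Save State (CMD_CSS) and wait
 * for the save to complete (STS_SAVE clears).
 */
static int xhci_suspend_hw_sketch(struct xhci_hcd *xhci)
{
	u32 command;

	command = readl(&xhci->op_regs->command);
	command &= ~CMD_RUN;
	writel(command, &xhci->op_regs->command);

	if (xhci_handshake(&xhci->op_regs->status,
			   STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC))
		return -ETIMEDOUT;

	xhci_clear_command_ring(xhci);
	xhci_save_registers(xhci);

	command = readl(&xhci->op_regs->command);
	command |= CMD_CSS;
	writel(command, &xhci->op_regs->command);

	return xhci_handshake(&xhci->op_regs->status,
			      STS_SAVE, 0, 20 * 1000);
}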
1109 int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
1112 struct usb_hcd *hcd = xhci_to_hcd(xhci);
1126 if (time_before(jiffies, xhci->usb2_rhub.bus_state.next_statechange) ||
1127 time_before(jiffies, xhci->usb3_rhub.bus_state.next_statechange))
1131 set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
1133 spin_lock_irq(&xhci->lock);
1135 if (hibernated || xhci->quirks & XHCI_RESET_ON_RESUME || xhci->broken_suspend)
1143 retval = xhci_handshake(&xhci->op_regs->status,
1146 xhci_warn(xhci, "Controller not ready at resume %d\n",
1148 spin_unlock_irq(&xhci->lock);
1152 xhci_restore_registers(xhci);
1154 xhci_set_cmd_ring_deq(xhci);
1157 command = readl(&xhci->op_regs->command);
1159 writel(command, &xhci->op_regs->command);
1165 if (xhci_handshake(&xhci->op_regs->status,
1167 xhci_warn(xhci, "WARN: xHC restore state timeout\n");
1168 spin_unlock_irq(&xhci->lock);
1173 temp = readl(&xhci->op_regs->status);
1178 if (!xhci->broken_suspend)
1179 xhci_warn(xhci, "xHC error in resume, USBSTS 0x%x, Reinit\n", temp);
1183 if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
1184 !(xhci_all_ports_seen_u0(xhci))) {
1185 del_timer_sync(&xhci->comp_mode_recovery_timer);
1186 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
1191 usb_root_hub_lost_power(xhci->main_hcd->self.root_hub);
1192 usb_root_hub_lost_power(xhci->shared_hcd->self.root_hub);
1194 xhci_dbg(xhci, "Stop HCD\n");
1195 xhci_halt(xhci);
1196 xhci_zero_64b_regs(xhci);
1197 retval = xhci_reset(xhci, XHCI_RESET_LONG_USEC);
1198 spin_unlock_irq(&xhci->lock);
1201 xhci_cleanup_msix(xhci);
1203 xhci_dbg(xhci, "// Disabling event ring interrupts\n");
1204 temp = readl(&xhci->op_regs->status);
1205 writel((temp & ~0x1fff) | STS_EINT, &xhci->op_regs->status);
1206 temp = readl(&xhci->ir_set->irq_pending);
1207 writel(ER_IRQ_DISABLE(temp), &xhci->ir_set->irq_pending);
1209 xhci_dbg(xhci, "cleaning up memory\n");
1210 xhci_mem_cleanup(xhci);
1211 xhci_debugfs_exit(xhci);
1212 xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
1213 readl(&xhci->op_regs->status));
1222 secondary_hcd = xhci->shared_hcd;
1224 xhci_dbg(xhci, "Initialize the xhci_hcd\n");
1230 xhci_dbg(xhci, "Start the primary HCD\n");
1233 xhci_dbg(xhci, "Start the secondary HCD\n");
1237 xhci->shared_hcd->state = HC_STATE_SUSPENDED;
1242 command = readl(&xhci->op_regs->command);
1244 writel(command, &xhci->op_regs->command);
1245 xhci_handshake(&xhci->op_regs->status, STS_HALT,
1257 spin_unlock_irq(&xhci->lock);
1259 xhci_dbc_resume(xhci);
1268 pending_portevent = xhci_pending_portevent(xhci);
1271 pending_portevent = xhci_pending_portevent(xhci);
1275 usb_hcd_resume_root_hub(xhci->shared_hcd);
1285 if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && !comp_timer_running)
1286 compliance_mode_recovery_timer_init(xhci);
1288 if (xhci->quirks & XHCI_ASMEDIA_MODIFY_FLOWCONTROL)
1292 xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
1293 set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
1294 usb_hcd_poll_rh_status(xhci->shared_hcd);
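The non-reinit path of xhci_resume() (lines 1109-1294) mirrors the suspend sequence: wait for STS_CNR to clear, write the saved registers back, restore the command ring dequeue pointer, set CMD_CRS and wait for STS_RESTORE to clear, and finally set CMD_RUN again (lines 1242-1245). A condensed sketch of that restore ordering with illustrative timeouts:

/* Sketch of the restore-from-S3 path used when the controller kept its
 * saved state: wait until the controller is ready, restore registers and
 * the command ring pointer, kick off Controller Restore State (CMD_CRS),
 * then run the controller again.
 */
static int xhci_resume_hw_sketch(struct xhci_hcd *xhci)
{
	u32 command;
	int ret;

	ret = xhci_handshake(&xhci->op_regs->status,
			     STS_CNR, 0, 10 * 1000 * 1000);
	if (ret)
		return ret;

	xhci_restore_registers(xhci);
	xhci_set_cmd_ring_deq(xhci);

	command = readl(&xhci->op_regs->command);
	command |= CMD_CRS;
	writel(command, &xhci->op_regs->command);

	if (xhci_handshake(&xhci->op_regs->status,
			   STS_RESTORE, 0, 100 * 1000))
		return -ETIMEDOUT;

	command = readl(&xhci->op_regs->command);
	command |= CMD_RUN;
	writel(command, &xhci->op_regs->command);
	return 0;
}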
1386 struct xhci_hcd *xhci;
1398 xhci = hcd_to_xhci(hcd);
1400 if (!udev->slot_id || !xhci->devs[udev->slot_id]) {
1401 xhci_dbg(xhci, "xHCI %s called with unaddressed device\n",
1406 virt_dev = xhci->devs[udev->slot_id];
1408 xhci_dbg(xhci, "xHCI %s called with udev and "
1414 if (xhci->xhc_state & XHCI_STATE_HALTED)
1420 static int xhci_configure_endpoint(struct xhci_hcd *xhci,
1430 static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
1441 out_ctx = xhci->devs[slot_id]->out_ctx;
1442 ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
1446 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
1448 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
1451 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
1454 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
1462 command = xhci_alloc_command(xhci, true, mem_flags);
1466 command->in_ctx = xhci->devs[slot_id]->in_ctx;
1469 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
1475 xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
1476 xhci->devs[slot_id]->out_ctx, ep_index);
1478 ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index);
1486 ret = xhci_configure_endpoint(xhci, urb->dev, command,
1506 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
1523 ep_state = &xhci->devs[slot_id]->eps[ep_index].ep_state;
1527 xhci_dbg(xhci, "urb submitted during PCI suspend\n");
1530 if (xhci->devs[slot_id]->flags & VDEV_PORT_ERROR) {
1531 xhci_dbg(xhci, "Can't queue urb, port error, link inactive\n");
1560 ret = xhci_check_maxpacket(xhci, slot_id,
1570 spin_lock_irqsave(&xhci->lock, flags);
1572 if (xhci->xhc_state & XHCI_STATE_DYING) {
1573 xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for non-responsive xHCI host.\n",
1579 xhci_warn(xhci, "WARN: Can't enqueue URB, ep in streams transition state %x\n",
1585 xhci_warn(xhci, "Can't enqueue URB while manually clearing toggle\n");
1593 ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb,
1597 ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
1601 ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb,
1605 ret = xhci_queue_isoc_tx_prepare(xhci, GFP_ATOMIC, urb,
1614 spin_unlock_irqrestore(&xhci->lock, flags);
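xhci_urb_enqueue() (lines 1506-1614) takes xhci->lock, rejects submissions while the host is dying or the endpoint is mid-transition, and then dispatches on the endpoint type to one of the per-type queueing helpers visible above. A stripped-down sketch of that dispatch; slot_id and ep_index are assumed to have been derived from the URB as in the listing, and all locking and state checks are omitted:

/* Sketch: pick the TRB queueing helper for an URB based on its endpoint
 * type.  All helpers are the ones referenced in the listing above.
 */
static int queue_urb_sketch(struct xhci_hcd *xhci, struct urb *urb,
			    unsigned int slot_id, unsigned int ep_index)
{
	switch (usb_endpoint_type(&urb->ep->desc)) {
	case USB_ENDPOINT_XFER_CONTROL:
		return xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb,
					  slot_id, ep_index);
	case USB_ENDPOINT_XFER_BULK:
		return xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
					  slot_id, ep_index);
	case USB_ENDPOINT_XFER_INT:
		return xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb,
					  slot_id, ep_index);
	case USB_ENDPOINT_XFER_ISOC:
		return xhci_queue_isoc_tx_prepare(xhci, GFP_ATOMIC, urb,
						  slot_id, ep_index);
	default:
		return -EINVAL;
	}
}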
1654 struct xhci_hcd *xhci;
1663 xhci = hcd_to_xhci(hcd);
1664 spin_lock_irqsave(&xhci->lock, flags);
1674 vdev = xhci->devs[urb->dev->slot_id];
1681 ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
1686 temp = readl(&xhci->op_regs->status);
1687 if (temp == ~(u32)0 || xhci->xhc_state & XHCI_STATE_DYING) {
1688 xhci_hc_died(xhci);
1698 xhci_err(xhci, "Canceled URB td not found on endpoint ring");
1707 if (xhci->xhc_state & XHCI_STATE_HALTED) {
1708 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
1724 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
1742 command = xhci_alloc_command(xhci, false, GFP_ATOMIC);
1751 xhci_queue_stop_endpoint(xhci, command, urb->dev->slot_id,
1753 xhci_ring_cmd_db(xhci);
1756 spin_unlock_irqrestore(&xhci->lock, flags);
1763 spin_unlock_irqrestore(&xhci->lock, flags);
1779 * the xhci->devs[slot_id] structure.
1784 struct xhci_hcd *xhci;
1796 xhci = hcd_to_xhci(hcd);
1797 if (xhci->xhc_state & XHCI_STATE_DYING)
1800 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
1803 xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n",
1808 in_ctx = xhci->devs[udev->slot_id]->in_ctx;
1809 out_ctx = xhci->devs[udev->slot_id]->out_ctx;
1812 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
1818 ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
1826 if (xhci->devs[udev->slot_id]->eps[ep_index].ring != NULL)
1827 xhci_warn(xhci, "xHCI %s called with disabled ep %p\n",
1838 xhci_debugfs_remove_endpoint(xhci, xhci->devs[udev->slot_id], ep_index);
1840 xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep);
1842 if (xhci->quirks & XHCI_MTK_HOST)
1845 xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n",
1864 * for mutual exclusion to protect the xhci->devs[slot_id] structure.
1869 struct xhci_hcd *xhci;
1885 xhci = hcd_to_xhci(hcd);
1886 if (xhci->xhc_state & XHCI_STATE_DYING)
1895 xhci_dbg(xhci, "xHCI %s - can't add slot or ep 0 %#x\n",
1900 virt_dev = xhci->devs[udev->slot_id];
1904 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
1915 xhci_warn(xhci, "Trying to add endpoint 0x%x "
1925 xhci_warn(xhci, "xHCI %s called with enabled ep %p\n",
1935 if (xhci_endpoint_init(xhci, virt_dev, udev, ep, GFP_NOIO) < 0) {
1941 if (xhci->quirks & XHCI_MTK_HOST) {
1944 xhci_ring_free(xhci, virt_dev->eps[ep_index].new_ring);
1964 ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
1967 xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n",
1975 static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev)
1984 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
1996 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
2001 ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i);
2009 static int xhci_configure_endpoint_result(struct xhci_hcd *xhci,
2017 xhci_warn(xhci, "Timeout while waiting for configure endpoint command\n");
2046 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
2051 xhci_err(xhci, "ERROR: unexpected command completion code 0x%x.\n",
2059 static int xhci_evaluate_context_result(struct xhci_hcd *xhci,
2067 xhci_warn(xhci, "Timeout while waiting for evaluate context command\n");
2096 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
2101 xhci_err(xhci, "ERROR: unexpected command completion code 0x%x.\n",
2109 static u32 xhci_count_num_new_endpoints(struct xhci_hcd *xhci,
2130 static unsigned int xhci_count_num_dropped_endpoints(struct xhci_hcd *xhci,
2154 * Must be called with xhci->lock held.
2156 static int xhci_reserve_host_resources(struct xhci_hcd *xhci,
2161 added_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx);
2162 if (xhci->num_active_eps + added_eps > xhci->limit_active_eps) {
2163 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2166 xhci->num_active_eps, added_eps,
2167 xhci->limit_active_eps);
2170 xhci->num_active_eps += added_eps;
2171 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2173 xhci->num_active_eps);
2181 * Must be called with xhci->lock held.
2183 static void xhci_free_host_resources(struct xhci_hcd *xhci,
2188 num_failed_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx);
2189 xhci->num_active_eps -= num_failed_eps;
2190 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2193 xhci->num_active_eps);
2200 * Must be called with xhci->lock held.
2202 static void xhci_finish_resource_reservation(struct xhci_hcd *xhci,
2207 num_dropped_eps = xhci_count_num_dropped_endpoints(xhci, ctrl_ctx);
2208 xhci->num_active_eps -= num_dropped_eps;
2210 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2213 xhci->num_active_eps);
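The XHCI_EP_LIMIT_QUIRK bookkeeping (lines 2156-2213) reserves endpoint slots before queueing a Configure Endpoint command and releases them again if the command is never queued or fails, always under xhci->lock. A small sketch of that reserve/rollback pair, assuming the num_active_eps and limit_active_eps fields seen above:

/* Sketch of the endpoint-resource accounting used by the EP limit quirk.
 * Caller holds xhci->lock.
 */
static int reserve_eps_sketch(struct xhci_hcd *xhci,
			      struct xhci_input_control_ctx *ctrl_ctx)
{
	u32 added_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx);

	if (xhci->num_active_eps + added_eps > xhci->limit_active_eps)
		return -ENOMEM;		/* over the host's endpoint budget */

	xhci->num_active_eps += added_eps;
	return 0;
}

static void unreserve_eps_sketch(struct xhci_hcd *xhci,
				 struct xhci_input_control_ctx *ctrl_ctx)
{
	xhci->num_active_eps -= xhci_count_num_new_endpoints(xhci, ctrl_ctx);
}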
2249 static int xhci_check_tt_bw_table(struct xhci_hcd *xhci,
2257 bw_table = &xhci->rh_bw[virt_dev->real_port - 1].bw_table;
2279 static int xhci_check_ss_bw(struct xhci_hcd *xhci,
2336 static int xhci_check_bw_table(struct xhci_hcd *xhci,
2352 return xhci_check_ss_bw(xhci, virt_dev);
2373 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2376 if (xhci_check_tt_bw_table(xhci, virt_dev, old_active_eps)) {
2377 xhci_warn(xhci, "Not enough bandwidth on HS bus for "
2381 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2386 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2469 xhci_warn(xhci, "Not enough bandwidth. "
2492 xhci->rh_bw[port_index].num_active_tts;
2495 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2504 xhci_warn(xhci, "Not enough bandwidth. Proposed: %u, Max: %u\n",
2539 static void xhci_drop_ep_from_interval_table(struct xhci_hcd *xhci,
2554 xhci->devs[udev->slot_id]->bw_table->ss_bw_in -=
2557 xhci->devs[udev->slot_id]->bw_table->ss_bw_out -=
2603 static void xhci_add_ep_to_interval_table(struct xhci_hcd *xhci,
2619 xhci->devs[udev->slot_id]->bw_table->ss_bw_in +=
2622 xhci->devs[udev->slot_id]->bw_table->ss_bw_out +=
2677 void xhci_update_tt_active_eps(struct xhci_hcd *xhci,
2685 rh_bw_info = &xhci->rh_bw[virt_dev->real_port - 1];
2697 static int xhci_reserve_bandwidth(struct xhci_hcd *xhci,
2711 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
2727 xhci_drop_ep_from_interval_table(xhci,
2735 xhci_update_bw_info(xhci, virt_dev->in_ctx, ctrl_ctx, virt_dev);
2739 xhci_add_ep_to_interval_table(xhci,
2747 if (!xhci_check_bw_table(xhci, virt_dev, old_active_eps)) {
2751 xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps);
2764 xhci_drop_ep_from_interval_table(xhci,
2776 xhci_add_ep_to_interval_table(xhci,
2790 static int xhci_configure_endpoint(struct xhci_hcd *xhci,
2804 spin_lock_irqsave(&xhci->lock, flags);
2806 if (xhci->xhc_state & XHCI_STATE_DYING) {
2807 spin_unlock_irqrestore(&xhci->lock, flags);
2811 virt_dev = xhci->devs[udev->slot_id];
2815 spin_unlock_irqrestore(&xhci->lock, flags);
2816 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
2821 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK) &&
2822 xhci_reserve_host_resources(xhci, ctrl_ctx)) {
2823 spin_unlock_irqrestore(&xhci->lock, flags);
2824 xhci_warn(xhci, "Not enough host resources, "
2826 xhci->num_active_eps);
2829 if ((xhci->quirks & XHCI_SW_BW_CHECKING) && !ctx_change &&
2830 xhci_reserve_bandwidth(xhci, virt_dev, command->in_ctx)) {
2831 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
2832 xhci_free_host_resources(xhci, ctrl_ctx);
2833 spin_unlock_irqrestore(&xhci->lock, flags);
2834 xhci_warn(xhci, "Not enough bandwidth\n");
2838 slot_ctx = xhci_get_slot_ctx(xhci, command->in_ctx);
2844 ret = xhci_queue_configure_endpoint(xhci, command,
2848 ret = xhci_queue_evaluate_context(xhci, command,
2852 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
2853 xhci_free_host_resources(xhci, ctrl_ctx);
2854 spin_unlock_irqrestore(&xhci->lock, flags);
2855 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
2859 xhci_ring_cmd_db(xhci);
2860 spin_unlock_irqrestore(&xhci->lock, flags);
2866 ret = xhci_configure_endpoint_result(xhci, udev,
2869 ret = xhci_evaluate_context_result(xhci, udev,
2872 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
2873 spin_lock_irqsave(&xhci->lock, flags);
2878 xhci_free_host_resources(xhci, ctrl_ctx);
2880 xhci_finish_resource_reservation(xhci, ctrl_ctx);
2881 spin_unlock_irqrestore(&xhci->lock, flags);
2886 static void xhci_check_bw_drop_ep_streams(struct xhci_hcd *xhci,
2892 xhci_warn(xhci, "WARN: endpoint 0x%02x has streams on set_interface, freeing streams.\n",
2894 xhci_free_stream_info(xhci, ep->stream_info);
2907 * else should be touching the xhci->devs[slot_id] structure, so we
2908 * don't need to take the xhci->lock for manipulating that.
2914 struct xhci_hcd *xhci;
2923 xhci = hcd_to_xhci(hcd);
2924 if ((xhci->xhc_state & XHCI_STATE_DYING) ||
2925 (xhci->xhc_state & XHCI_STATE_REMOVING))
2928 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
2929 virt_dev = xhci->devs[udev->slot_id];
2931 command = xhci_alloc_command(xhci, true, GFP_KERNEL);
2940 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
2956 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
2968 ret = xhci_configure_endpoint(xhci, udev, command,
2978 xhci_free_endpoint_ring(xhci, virt_dev, i);
2979 xhci_check_bw_drop_ep_streams(xhci, virt_dev, i);
2982 xhci_zero_in_ctx(xhci, virt_dev);
2994 xhci_free_endpoint_ring(xhci, virt_dev, i);
2996 xhci_check_bw_drop_ep_streams(xhci, virt_dev, i);
2999 xhci_debugfs_create_endpoint(xhci, virt_dev, i);
3010 struct xhci_hcd *xhci;
3017 xhci = hcd_to_xhci(hcd);
3019 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
3020 virt_dev = xhci->devs[udev->slot_id];
3024 xhci_debugfs_remove_endpoint(xhci, virt_dev, i);
3025 xhci_ring_free(xhci, virt_dev->eps[i].new_ring);
3029 xhci_zero_in_ctx(xhci, virt_dev);
3032 static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci,
3040 xhci_slot_copy(xhci, in_ctx, out_ctx);
3044 static void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
3054 in_ctx = xhci->devs[slot_id]->in_ctx;
3057 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
3062 xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
3063 xhci->devs[slot_id]->out_ctx, ep_index);
3064 ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
3068 xhci_warn(xhci, "WARN Cannot submit config ep after "
3070 xhci_warn(xhci, "WARN deq seg = %p, deq ptr = %p\n",
3078 xhci_setup_input_ctx_for_config_ep(xhci, xhci->devs[slot_id]->in_ctx,
3079 xhci->devs[slot_id]->out_ctx, ctrl_ctx,
3083 void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, unsigned int slot_id,
3089 xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
3094 xhci_find_new_dequeue_state(xhci, slot_id, ep_index, stream_id, td,
3103 if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) {
3104 xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
3106 xhci_queue_new_dequeue_state(xhci, slot_id,
3114 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
3117 xhci_setup_input_ctx_for_quirk(xhci, slot_id,
3125 struct xhci_hcd *xhci;
3132 xhci = hcd_to_xhci(hcd);
3134 spin_lock_irqsave(&xhci->lock, flags);
3140 vdev = xhci->devs[udev->slot_id];
3151 spin_unlock_irqrestore(&xhci->lock, flags);
3157 xhci_dbg(xhci, "endpoint disable with ep_state 0x%x\n",
3161 spin_unlock_irqrestore(&xhci->lock, flags);
3173 * endpoint. Refer to the additional note in xhci specification section 4.6.8.
3179 struct xhci_hcd *xhci;
3190 xhci = hcd_to_xhci(hcd);
3194 vdev = xhci->devs[udev->slot_id];
3209 spin_lock_irqsave(&xhci->lock, flags);
3212 spin_unlock_irqrestore(&xhci->lock, flags);
3215 spin_unlock_irqrestore(&xhci->lock, flags);
3226 stop_cmd = xhci_alloc_command(xhci, true, GFP_NOWAIT);
3230 cfg_cmd = xhci_alloc_command_with_ctx(xhci, true, GFP_NOWAIT);
3234 spin_lock_irqsave(&xhci->lock, flags);
3247 spin_unlock_irqrestore(&xhci->lock, flags);
3248 xhci_free_command(xhci, cfg_cmd);
3252 err = xhci_queue_stop_endpoint(xhci, stop_cmd, udev->slot_id,
3255 spin_unlock_irqrestore(&xhci->lock, flags);
3256 xhci_free_command(xhci, cfg_cmd);
3257 xhci_dbg(xhci, "%s: Failed to queue stop ep command, %d ",
3262 xhci_ring_cmd_db(xhci);
3263 spin_unlock_irqrestore(&xhci->lock, flags);
3267 spin_lock_irqsave(&xhci->lock, flags);
3272 spin_unlock_irqrestore(&xhci->lock, flags);
3273 xhci_free_command(xhci, cfg_cmd);
3274 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
3279 xhci_setup_input_ctx_for_config_ep(xhci, cfg_cmd->in_ctx, vdev->out_ctx,
3281 xhci_endpoint_copy(xhci, cfg_cmd->in_ctx, vdev->out_ctx, ep_index);
3283 err = xhci_queue_configure_endpoint(xhci, cfg_cmd, cfg_cmd->in_ctx->dma,
3286 spin_unlock_irqrestore(&xhci->lock, flags);
3287 xhci_free_command(xhci, cfg_cmd);
3288 xhci_dbg(xhci, "%s: Failed to queue config ep command, %d ",
3293 xhci_ring_cmd_db(xhci);
3294 spin_unlock_irqrestore(&xhci->lock, flags);
3298 xhci_free_command(xhci, cfg_cmd);
3300 xhci_free_command(xhci, stop_cmd);
3301 spin_lock_irqsave(&xhci->lock, flags);
3304 spin_unlock_irqrestore(&xhci->lock, flags);
3307 static int xhci_check_streams_endpoint(struct xhci_hcd *xhci,
3317 ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, true, __func__);
3321 xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion"
3328 ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
3331 xhci_warn(xhci, "WARN: SuperSpeed bulk endpoint 0x%x "
3334 xhci_warn(xhci, "Send email to xHCI maintainer and ask for "
3338 if (!list_empty(&xhci->devs[slot_id]->eps[ep_index].ring->td_list)) {
3339 xhci_warn(xhci, "Cannot setup streams for SuperSpeed bulk "
3347 static void xhci_calculate_streams_entries(struct xhci_hcd *xhci,
3360 max_streams = HCC_MAX_PSA(xhci->hcc_params);
3362 xhci_dbg(xhci, "xHCI HW only supports %u stream ctx entries.\n",
3373 static int xhci_calculate_streams_and_bitmask(struct xhci_hcd *xhci,
3384 ret = xhci_check_streams_endpoint(xhci, udev,
3391 xhci_dbg(xhci, "Ep 0x%x only supports %u stream IDs.\n",
3405 static u32 xhci_calculate_no_streams_bitmask(struct xhci_hcd *xhci,
3416 if (!xhci->devs[slot_id])
3421 ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
3424 xhci_warn(xhci, "WARN Can't disable streams for "
3433 xhci_warn(xhci, "WARN Can't disable streams for "
3437 xhci_warn(xhci, "WARN xhci_free_streams() called "
3467 struct xhci_hcd *xhci;
3484 xhci = hcd_to_xhci(hcd);
3485 xhci_dbg(xhci, "Driver wants %u stream IDs (including stream 0).\n",
3489 if ((xhci->quirks & XHCI_BROKEN_STREAMS) ||
3490 HCC_MAX_PSA(xhci->hcc_params) < 4) {
3491 xhci_dbg(xhci, "xHCI controller does not support streams.\n");
3495 config_cmd = xhci_alloc_command_with_ctx(xhci, true, mem_flags);
3501 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
3503 xhci_free_command(xhci, config_cmd);
3511 spin_lock_irqsave(&xhci->lock, flags);
3512 ret = xhci_calculate_streams_and_bitmask(xhci, udev, eps,
3515 xhci_free_command(xhci, config_cmd);
3516 spin_unlock_irqrestore(&xhci->lock, flags);
3520 xhci_warn(xhci, "WARN: endpoints can't handle "
3522 xhci_free_command(xhci, config_cmd);
3523 spin_unlock_irqrestore(&xhci->lock, flags);
3526 vdev = xhci->devs[udev->slot_id];
3534 spin_unlock_irqrestore(&xhci->lock, flags);
3540 xhci_calculate_streams_entries(xhci, &num_streams, &num_stream_ctxs);
3541 xhci_dbg(xhci, "Need %u stream ctx entries for %u stream IDs.\n",
3547 vdev->eps[ep_index].stream_info = xhci_alloc_stream_info(xhci,
3563 ep_ctx = xhci_get_ep_ctx(xhci, config_cmd->in_ctx, ep_index);
3565 xhci_endpoint_copy(xhci, config_cmd->in_ctx,
3567 xhci_setup_streams_ep_input_ctx(xhci, ep_ctx,
3573 xhci_setup_input_ctx_for_config_ep(xhci, config_cmd->in_ctx,
3578 ret = xhci_configure_endpoint(xhci, udev, config_cmd,
3588 spin_lock_irqsave(&xhci->lock, flags);
3592 xhci_dbg(xhci, "Slot %u ep ctx %u now has streams.\n",
3596 xhci_free_command(xhci, config_cmd);
3597 spin_unlock_irqrestore(&xhci->lock, flags);
3601 xhci_debugfs_create_stream_files(xhci, vdev, ep_index);
3610 xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
3617 xhci_endpoint_zero(xhci, vdev, eps[i]);
3619 xhci_free_command(xhci, config_cmd);
3634 struct xhci_hcd *xhci;
3642 xhci = hcd_to_xhci(hcd);
3643 vdev = xhci->devs[udev->slot_id];
3646 spin_lock_irqsave(&xhci->lock, flags);
3647 changed_ep_bitmask = xhci_calculate_no_streams_bitmask(xhci,
3650 spin_unlock_irqrestore(&xhci->lock, flags);
3662 spin_unlock_irqrestore(&xhci->lock, flags);
3663 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
3672 ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index);
3673 xhci->devs[udev->slot_id]->eps[ep_index].ep_state |=
3676 xhci_endpoint_copy(xhci, command->in_ctx,
3681 xhci_setup_input_ctx_for_config_ep(xhci, command->in_ctx,
3684 spin_unlock_irqrestore(&xhci->lock, flags);
3689 ret = xhci_configure_endpoint(xhci, udev, command,
3698 spin_lock_irqsave(&xhci->lock, flags);
3701 xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
3709 spin_unlock_irqrestore(&xhci->lock, flags);
3719 * Must be called with xhci->lock held.
3721 void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci,
3734 xhci->num_active_eps -= num_dropped_eps;
3736 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
3740 xhci->num_active_eps);
3766 struct xhci_hcd *xhci;
3776 xhci = hcd_to_xhci(hcd);
3778 virt_dev = xhci->devs[slot_id];
3780 xhci_dbg(xhci, "The device to be reset with slot ID %u does "
3797 xhci_dbg(xhci, "The device to be reset with slot ID %u does "
3808 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
3815 xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id);
3822 reset_device_cmd = xhci_alloc_command(xhci, true, GFP_NOIO);
3824 xhci_dbg(xhci, "Couldn't allocate command structure.\n");
3829 spin_lock_irqsave(&xhci->lock, flags);
3831 ret = xhci_queue_reset_device(xhci, reset_device_cmd, slot_id);
3833 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
3834 spin_unlock_irqrestore(&xhci->lock, flags);
3837 xhci_ring_cmd_db(xhci);
3838 spin_unlock_irqrestore(&xhci->lock, flags);
3851 xhci_warn(xhci, "Timeout waiting for reset device command\n");
3856 xhci_dbg(xhci, "Can't reset device (slot ID %u) in %s state\n",
3858 xhci_get_slot_state(xhci, virt_dev->out_ctx));
3859 xhci_dbg(xhci, "Not freeing device rings.\n");
3864 xhci_dbg(xhci, "Successful reset device command.\n");
3867 if (xhci_is_vendor_info_code(xhci, ret))
3869 xhci_warn(xhci, "Unknown completion code %u for "
3876 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
3877 spin_lock_irqsave(&xhci->lock, flags);
3879 xhci_free_device_endpoint_resources(xhci, virt_dev, false);
3880 spin_unlock_irqrestore(&xhci->lock, flags);
3888 xhci_warn(xhci, "WARN: endpoint 0x%02x has streams on device reset, freeing streams.\n",
3890 xhci_free_stream_info(xhci, ep->stream_info);
3896 xhci_debugfs_remove_endpoint(xhci, virt_dev, i);
3897 xhci_free_endpoint_ring(xhci, virt_dev, i);
3900 xhci_drop_ep_from_interval_table(xhci,
3909 xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps);
3914 xhci_free_command(xhci, reset_device_cmd);
3925 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3936 if (xhci->quirks & XHCI_RESET_ON_RESUME)
3946 virt_dev = xhci->devs[udev->slot_id];
3947 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
3956 xhci_disable_slot(xhci, udev->slot_id);
3958 spin_lock_irqsave(&xhci->lock, flags);
3959 xhci_free_virt_device(xhci, udev->slot_id);
3960 spin_unlock_irqrestore(&xhci->lock, flags);
3964 int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id)
3971 command = xhci_alloc_command(xhci, true, GFP_KERNEL);
3975 xhci_debugfs_remove_slot(xhci, slot_id);
3977 spin_lock_irqsave(&xhci->lock, flags);
3979 state = readl(&xhci->op_regs->status);
3980 if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) ||
3981 (xhci->xhc_state & XHCI_STATE_HALTED)) {
3982 spin_unlock_irqrestore(&xhci->lock, flags);
3987 ret = xhci_queue_slot_control(xhci, command, TRB_DISABLE_SLOT,
3990 spin_unlock_irqrestore(&xhci->lock, flags);
3994 xhci_ring_cmd_db(xhci);
3995 spin_unlock_irqrestore(&xhci->lock, flags);
4000 xhci_warn(xhci, "Unsuccessful disable slot %u command, status %d\n",
4003 xhci_free_command(xhci, command);
4012 * Must be called with xhci->lock held.
4014 static int xhci_reserve_host_control_ep_resources(struct xhci_hcd *xhci)
4016 if (xhci->num_active_eps + 1 > xhci->limit_active_eps) {
4017 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
4020 xhci->num_active_eps, xhci->limit_active_eps);
4023 xhci->num_active_eps += 1;
4024 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
4026 xhci->num_active_eps);
4037 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
4044 command = xhci_alloc_command(xhci, true, GFP_KERNEL);
4048 spin_lock_irqsave(&xhci->lock, flags);
4049 ret = xhci_queue_slot_control(xhci, command, TRB_ENABLE_SLOT, 0);
4051 spin_unlock_irqrestore(&xhci->lock, flags);
4052 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
4053 xhci_free_command(xhci, command);
4056 xhci_ring_cmd_db(xhci);
4057 spin_unlock_irqrestore(&xhci->lock, flags);
4063 xhci_err(xhci, "Error while assigning device slot ID\n");
4064 xhci_err(xhci, "Max number of devices this xHCI host supports is %u.\n",
4066 readl(&xhci->cap_regs->hcs_params1)));
4067 xhci_free_command(xhci, command);
4071 xhci_free_command(xhci, command);
4073 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
4074 spin_lock_irqsave(&xhci->lock, flags);
4075 ret = xhci_reserve_host_control_ep_resources(xhci);
4077 spin_unlock_irqrestore(&xhci->lock, flags);
4078 xhci_warn(xhci, "Not enough host resources, "
4080 xhci->num_active_eps);
4083 spin_unlock_irqrestore(&xhci->lock, flags);
4089 if (!xhci_alloc_virt_device(xhci, slot_id, udev, GFP_NOIO)) {
4090 xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
4093 vdev = xhci->devs[slot_id];
4094 slot_ctx = xhci_get_slot_ctx(xhci, vdev->out_ctx);
4099 xhci_debugfs_create_slot(xhci, slot_id);
4105 if (xhci->quirks & XHCI_RESET_ON_RESUME)
4113 xhci_disable_slot(xhci, udev->slot_id);
4114 xhci_free_virt_device(xhci, udev->slot_id);
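xhci_alloc_dev() (lines 4037-4114) allocates a command, queues an Enable Slot TRB, rings the command doorbell, and waits for completion before allocating the per-slot data structures. A condensed sketch of the enable-slot round trip; waiting on command->completion and reading command->slot_id are assumptions about the completion plumbing, not visible in the lines above:

/* Sketch: issue an Enable Slot command and wait for the xHC to hand back
 * a slot ID via the command completion event.  Error paths are trimmed.
 */
static int enable_slot_sketch(struct xhci_hcd *xhci)
{
	struct xhci_command *command;
	unsigned long flags;
	int ret, slot_id;

	command = xhci_alloc_command(xhci, true, GFP_KERNEL);
	if (!command)
		return -ENOMEM;

	spin_lock_irqsave(&xhci->lock, flags);
	ret = xhci_queue_slot_control(xhci, command, TRB_ENABLE_SLOT, 0);
	if (ret) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_free_command(xhci, command);
		return ret;
	}
	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	wait_for_completion(command->completion);
	slot_id = command->slot_id;
	xhci_free_command(xhci, command);

	return slot_id ? slot_id : -EIO;
}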
4130 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
4136 mutex_lock(&xhci->mutex);
4138 if (xhci->xhc_state) { /* dying, removing or halted */
4144 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4150 virt_dev = xhci->devs[udev->slot_id];
4158 xhci_warn(xhci, "Virt dev invalid for slot_id 0x%x!\n",
4163 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
4169 xhci_dbg(xhci, "Slot already in default state\n");
4174 command = xhci_alloc_command(xhci, true, GFP_KERNEL);
4182 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
4185 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
4196 xhci_setup_addressable_virt_dev(xhci, udev);
4199 xhci_copy_ep0_dequeue_into_input_ctx(xhci, udev);
4203 trace_xhci_address_ctx(xhci, virt_dev->in_ctx,
4207 spin_lock_irqsave(&xhci->lock, flags);
4209 ret = xhci_queue_address_device(xhci, command, virt_dev->in_ctx->dma,
4212 spin_unlock_irqrestore(&xhci->lock, flags);
4213 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4217 xhci_ring_cmd_db(xhci);
4218 spin_unlock_irqrestore(&xhci->lock, flags);
4230 xhci_warn(xhci, "Timeout while waiting for setup device command\n");
4235 xhci_err(xhci, "Setup ERROR: setup %s command for slot %d.\n",
4242 mutex_unlock(&xhci->mutex);
4243 ret = xhci_disable_slot(xhci, udev->slot_id);
4244 xhci_free_virt_device(xhci, udev->slot_id);
4247 xhci_setup_addressable_virt_dev(xhci, udev);
4258 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4262 xhci_err(xhci,
4265 trace_xhci_address_ctx(xhci, virt_dev->out_ctx, 1);
4271 temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
4272 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4274 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4277 &xhci->dcbaa->dev_context_ptrs[udev->slot_id],
4279 le64_to_cpu(xhci->dcbaa->dev_context_ptrs[udev->slot_id]));
4280 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4283 trace_xhci_address_ctx(xhci, virt_dev->in_ctx,
4289 trace_xhci_address_ctx(xhci, virt_dev->out_ctx,
4294 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
4297 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4301 mutex_unlock(&xhci->mutex);
4337 static int __maybe_unused xhci_change_max_exit_latency(struct xhci_hcd *xhci,
4347 spin_lock_irqsave(&xhci->lock, flags);
4349 virt_dev = xhci->devs[udev->slot_id];
4354 * hub_port_finish_reset() is done and xhci->devs[] are re-allocated
4358 spin_unlock_irqrestore(&xhci->lock, flags);
4363 command = xhci->lpm_command;
4366 spin_unlock_irqrestore(&xhci->lock, flags);
4367 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
4372 xhci_slot_copy(xhci, command->in_ctx, virt_dev->out_ctx);
4373 spin_unlock_irqrestore(&xhci->lock, flags);
4376 slot_ctx = xhci_get_slot_ctx(xhci, command->in_ctx);
4381 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
4385 ret = xhci_configure_endpoint(xhci, udev, command,
4389 spin_lock_irqsave(&xhci->lock, flags);
4391 spin_unlock_irqrestore(&xhci->lock, flags);
4403 static int xhci_calculate_hird_besl(struct xhci_hcd *xhci,
4410 u2del = HCS_U2_LATENCY(xhci->hcs_params3);
4462 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
4471 if (xhci->quirks & XHCI_HW_LPM_DISABLE)
4474 if (hcd->speed >= HCD_USB3 || !xhci->hw_lpm_support ||
4485 spin_lock_irqsave(&xhci->lock, flags);
4487 ports = xhci->usb2_rhub.ports;
4493 xhci_dbg(xhci, "%s port %d USB2 hardware LPM\n",
4501 * systems. See XHCI_DEFAULT_BESL definition in xhci.h
4511 spin_unlock_irqrestore(&xhci->lock, flags);
4513 /* USB 3.0 code dedicates one xhci->lpm_command->in_ctx
4521 ret = xhci_change_max_exit_latency(xhci, udev,
4527 spin_lock_irqsave(&xhci->lock, flags);
4534 hird = xhci_calculate_hird_besl(xhci, udev);
4551 spin_unlock_irqrestore(&xhci->lock, flags);
4553 xhci_change_max_exit_latency(xhci, udev, 0);
4562 spin_unlock_irqrestore(&xhci->lock, flags);
4570 static int xhci_check_usb2_port_capability(struct xhci_hcd *xhci, int port,
4576 for (i = 0; i < xhci->num_ext_caps; i++) {
4577 if (xhci->ext_caps[i] & capability) {
4579 port_offset = XHCI_EXT_PORT_OFF(xhci->ext_caps[i]) - 1;
4580 port_count = XHCI_EXT_PORT_COUNT(xhci->ext_caps[i]);
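xhci_check_usb2_port_capability() (lines 4570-4580) walks the cached extended capability words and decides whether a given port number falls inside a capability's port range, using the XHCI_EXT_PORT_OFF/COUNT decoders. A sketch of that containment test built on the fields shown above:

/* Sketch: return true if @port (0-based) is covered by an extended
 * capability of type @capability, using the offset/count fields decoded
 * from the cached ext_caps[] words (port offsets are 1-based).
 */
static bool port_has_capability_sketch(struct xhci_hcd *xhci, int port,
				       unsigned int capability)
{
	int i;

	for (i = 0; i < xhci->num_ext_caps; i++) {
		if (xhci->ext_caps[i] & capability) {
			int port_offset = XHCI_EXT_PORT_OFF(xhci->ext_caps[i]) - 1;
			int port_count = XHCI_EXT_PORT_COUNT(xhci->ext_caps[i]);

			if (port >= port_offset &&
			    port < port_offset + port_count)
				return true;
		}
	}
	return false;
}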
4591 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
4602 if (xhci->hw_lpm_support == 1 &&
4604 xhci, portnum, XHCI_HLC)) {
4608 if (xhci_check_usb2_port_capability(xhci, portnum,
4713 static u16 xhci_calculate_u1_timeout(struct xhci_hcd *xhci,
4727 if (xhci->quirks & XHCI_INTEL_HOST)
4777 static u16 xhci_calculate_u2_timeout(struct xhci_hcd *xhci,
4791 if (xhci->quirks & XHCI_INTEL_HOST)
4808 static u16 xhci_call_host_update_timeout_for_endpoint(struct xhci_hcd *xhci,
4815 return xhci_calculate_u1_timeout(xhci, udev, desc);
4817 return xhci_calculate_u2_timeout(xhci, udev, desc);
4822 static int xhci_update_timeout_for_endpoint(struct xhci_hcd *xhci,
4830 alt_timeout = xhci_call_host_update_timeout_for_endpoint(xhci, udev,
4847 static int xhci_update_timeout_for_interface(struct xhci_hcd *xhci,
4856 if (xhci_update_timeout_for_endpoint(xhci, udev,
4888 static int xhci_check_tier_policy(struct xhci_hcd *xhci,
4892 if (xhci->quirks & XHCI_INTEL_HOST)
4906 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
4922 if (xhci_check_tier_policy(xhci, udev, state) < 0)
4928 if (xhci_update_timeout_for_endpoint(xhci, udev, &udev->ep0.desc,
4962 if (xhci_update_timeout_for_interface(xhci, udev,
5019 struct xhci_hcd *xhci;
5025 xhci = hcd_to_xhci(hcd);
5030 if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) ||
5031 !xhci->devs[udev->slot_id])
5036 port = xhci->usb3_rhub.ports[udev->portnum - 1];
5049 ret = xhci_change_max_exit_latency(xhci, udev, mel);
5058 struct xhci_hcd *xhci;
5061 xhci = hcd_to_xhci(hcd);
5062 if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) ||
5063 !xhci->devs[udev->slot_id])
5067 return xhci_change_max_exit_latency(xhci, udev, mel);
5103 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
5116 vdev = xhci->devs[hdev->slot_id];
5118 xhci_warn(xhci, "Cannot update hub desc for unknown device.\n");
5122 config_cmd = xhci_alloc_command_with_ctx(xhci, true, mem_flags);
5128 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
5130 xhci_free_command(xhci, config_cmd);
5134 spin_lock_irqsave(&xhci->lock, flags);
5136 xhci_alloc_tt_info(xhci, vdev, hdev, tt, GFP_ATOMIC)) {
5137 xhci_dbg(xhci, "Could not allocate xHCI TT structure.\n");
5138 xhci_free_command(xhci, config_cmd);
5139 spin_unlock_irqrestore(&xhci->lock, flags);
5143 xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx);
5145 slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx);
5157 if (xhci->hci_version > 0x95) {
5158 xhci_dbg(xhci, "xHCI version %x needs hub "
5160 (unsigned int) xhci->hci_version);
5172 if (xhci->hci_version < 0x100 || hdev->speed == USB_SPEED_HIGH)
5176 xhci_dbg(xhci, "xHCI version %x doesn't need hub "
5178 (unsigned int) xhci->hci_version);
5181 spin_unlock_irqrestore(&xhci->lock, flags);
5183 xhci_dbg(xhci, "Set up %s for hub device.\n",
5184 (xhci->hci_version > 0x95) ?
5190 if (xhci->hci_version > 0x95)
5191 ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
5194 ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
5197 xhci_free_command(xhci, config_cmd);
5204 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
5206 return readl(&xhci->run_regs->microframe_index) >> 3;
5211 struct xhci_hcd *xhci;
5229 xhci = hcd_to_xhci(hcd);
5232 xhci->main_hcd = hcd;
5233 xhci->usb2_rhub.hcd = hcd;
5255 if (xhci->usb3_rhub.min_rev == 0x1)
5258 minor_rev = xhci->usb3_rhub.min_rev / 0x10;
5272 xhci_info(xhci, "Host supports USB 3.%x %sSuperSpeed\n",
5276 xhci->usb3_rhub.hcd = hcd;
5283 mutex_init(&xhci->mutex);
5284 xhci->cap_regs = hcd->regs;
5285 xhci->op_regs = hcd->regs +
5286 HC_LENGTH(readl(&xhci->cap_regs->hc_capbase));
5287 xhci->run_regs = hcd->regs +
5288 (readl(&xhci->cap_regs->run_regs_off) & RTSOFF_MASK);
5290 xhci->hcs_params1 = readl(&xhci->cap_regs->hcs_params1);
5291 xhci->hcs_params2 = readl(&xhci->cap_regs->hcs_params2);
5292 xhci->hcs_params3 = readl(&xhci->cap_regs->hcs_params3);
5293 xhci->hcc_params = readl(&xhci->cap_regs->hc_capbase);
5294 xhci->hci_version = HC_VERSION(xhci->hcc_params);
5295 xhci->hcc_params = readl(&xhci->cap_regs->hcc_params);
5296 if (xhci->hci_version > 0x100)
5297 xhci->hcc_params2 = readl(&xhci->cap_regs->hcc_params2);
5299 xhci->quirks |= quirks;
5301 get_quirks(dev, xhci);
5303 /* xHCI controllers that follow the xHCI 1.0 spec give a spurious
5307 if (xhci->hci_version > 0x96)
5308 xhci->quirks |= XHCI_SPURIOUS_SUCCESS;
5311 retval = xhci_halt(xhci);
5315 xhci_zero_64b_regs(xhci);
5317 xhci_dbg(xhci, "Resetting HCD\n");
5319 retval = xhci_reset(xhci, XHCI_RESET_LONG_USEC);
5322 xhci_dbg(xhci, "Reset complete\n");
5328 * bit of xhci->hcc_params to call dma_set_coherent_mask(dev,
5331 if (xhci->quirks & XHCI_NO_64BIT_SUPPORT)
5332 xhci->hcc_params &= ~BIT(0);
5336 if (HCC_64BIT_ADDR(xhci->hcc_params) &&
5338 xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n");
5348 xhci_dbg(xhci, "Enabling 32-bit DMA addresses.\n");
5352 xhci_dbg(xhci, "Calling HCD init\n");
5357 xhci_dbg(xhci, "Called HCD init\n");
5359 xhci_info(xhci, "hcc params 0x%08x hci version 0x%x quirks 0x%016llx\n",
5360 xhci->hcc_params, xhci->hci_version, xhci->quirks);
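xhci_gen_setup() (lines 5283-5360) locates the operational and runtime register windows relative to the capability registers: op_regs start CAPLENGTH bytes in (HC_LENGTH of hc_capbase) and run_regs at the RTSOFF offset masked with RTSOFF_MASK, after which the HCSPARAMS/HCCPARAMS words are cached. A sketch of that mapping, folding the two reads of hc_capbase seen above into one place:

/* Sketch: derive the operational and runtime register windows from the
 * capability registers, then cache the parameter words the driver reads
 * throughout its lifetime.  hcd->regs is the ioremapped MMIO base.
 */
static void map_regs_sketch(struct xhci_hcd *xhci, struct usb_hcd *hcd)
{
	xhci->cap_regs = hcd->regs;
	xhci->op_regs = hcd->regs +
		HC_LENGTH(readl(&xhci->cap_regs->hc_capbase));
	xhci->run_regs = hcd->regs +
		(readl(&xhci->cap_regs->run_regs_off) & RTSOFF_MASK);

	xhci->hcs_params1 = readl(&xhci->cap_regs->hcs_params1);
	xhci->hcs_params2 = readl(&xhci->cap_regs->hcs_params2);
	xhci->hcs_params3 = readl(&xhci->cap_regs->hcs_params3);
	xhci->hci_version = HC_VERSION(readl(&xhci->cap_regs->hc_capbase));
	xhci->hcc_params = readl(&xhci->cap_regs->hcc_params);
}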
5369 struct xhci_hcd *xhci;
5375 xhci = hcd_to_xhci(hcd);
5377 spin_lock_irqsave(&xhci->lock, flags);
5382 xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_CLEARING_TT;
5383 xhci_ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
5384 spin_unlock_irqrestore(&xhci->lock, flags);
5388 .description = "xhci-hcd",