Lines matching refs:ehci
6 /* this file is part of ehci-hcd.c */
20 * an ongoing challenge. That's in "ehci-sched.c".
37 qtd_fill(struct ehci_hcd *ehci, struct ehci_qtd *qtd, dma_addr_t buf,
44 qtd->hw_buf[0] = cpu_to_hc32(ehci, (u32)addr);
45 qtd->hw_buf_hi[0] = cpu_to_hc32(ehci, (u32)(addr >> 32));
56 qtd->hw_buf[i] = cpu_to_hc32(ehci, (u32)addr);
57 qtd->hw_buf_hi[i] = cpu_to_hc32(ehci,
70 qtd->hw_token = cpu_to_hc32(ehci, (count << 16) | token);
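
The qtd_fill() hits above (source lines 37-70) show a transfer buffer being split across the qTD's five buffer pointers: the low 12 bits of the first pointer carry the starting page offset, later pointers are page-aligned, and the byte count is packed into hw_token as (count << 16) | token. A minimal standalone model of that packing, in plain C with stand-in types rather than the driver's (it also omits qtd_fill()'s final rounding of a short count down to a maxpacket boundary):

    #include <stdint.h>
    #include <stddef.h>

    /* Standalone model of qtd_fill()'s buffer packing: up to five 4 KiB
     * pages per qTD; the first pointer keeps the intra-page offset in
     * its low 12 bits, the rest are page-aligned. */
    static size_t pack_qtd_buffers(uint64_t buf, size_t len,
                                   uint32_t bp[5], uint32_t bp_hi[5])
    {
        size_t count;
        int i;

        bp[0] = (uint32_t)buf;             /* low 12 bits = start offset */
        bp_hi[0] = (uint32_t)(buf >> 32);  /* used on 64-bit controllers */
        count = 0x1000 - (buf & 0x0fff);   /* room left in the first page */
        if (len < count)
            count = len;

        buf = (buf + 0x1000) & ~0x0fffULL; /* later pointers: page-aligned */
        for (i = 1; count < len && i < 5; i++) {
            bp[i] = (uint32_t)buf;
            bp_hi[i] = (uint32_t)(buf >> 32);
            buf += 0x1000;
            count = (count + 0x1000 < len) ? count + 0x1000 : len;
        }
        return count;                      /* bytes this qTD can carry */
    }
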
79 qh_update (struct ehci_hcd *ehci, struct ehci_qh *qh, struct ehci_qtd *qtd)
86 hw->hw_qtd_next = QTD_NEXT(ehci, qtd->qtd_dma);
87 hw->hw_alt_next = EHCI_LIST_END(ehci);
94 if (!(hw->hw_info1 & cpu_to_hc32(ehci, QH_TOGGLE_CTL))) {
98 epnum = (hc32_to_cpup(ehci, &hw->hw_info1) >> 8) & 0x0f;
100 hw->hw_token &= ~cpu_to_hc32(ehci, QTD_TOGGLE);
105 hw->hw_token &= cpu_to_hc32(ehci, QTD_TOGGLE | QTD_STS_PING);
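
Source lines 94-105 are qh_update() refreshing the QH's transfer overlay: when the QH tracks the data toggle itself (QH_TOGGLE_CTL clear in hw_info1), the stale toggle bit is cleared, and in any case only the TOGGLE and PING bits of the old token survive. A simplified model using the EHCI bit positions (the real code additionally consults usb_gettoggle() before clearing):

    #include <stdint.h>

    #define QTD_TOGGLE    (1u << 31)   /* data toggle, token bit 31 */
    #define QTD_STS_PING  (1u << 0)    /* PING state, token bit 0 */
    #define QH_TOGGLE_CTL (1u << 14)   /* dtc: take toggle from each qTD */

    /* Simplified overlay refresh: drop a stale toggle when the QH owns
     * it, then let only TOGGLE and PING survive into the new overlay. */
    static uint32_t refresh_token(uint32_t info1, uint32_t token)
    {
        if (!(info1 & QH_TOGGLE_CTL))
            token &= ~QTD_TOGGLE;
        return token & (QTD_TOGGLE | QTD_STS_PING);
    }
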
113 qh_refresh (struct ehci_hcd *ehci, struct ehci_qh *qh)
126 if (qh->hw->hw_token & ACTIVE_BIT(ehci)) {
129 ehci_warn(ehci, "qh %p should be inactive!\n", qh);
131 qh_update(ehci, qh, qtd);
138 static void qh_link_async(struct ehci_hcd *ehci, struct ehci_qh *qh);
143 struct ehci_hcd *ehci = hcd_to_ehci(hcd);
147 spin_lock_irqsave(&ehci->lock, flags);
150 && ehci->rh_state == EHCI_RH_RUNNING)
151 qh_link_async(ehci, qh);
152 spin_unlock_irqrestore(&ehci->lock, flags);
155 static void ehci_clear_tt_buffer(struct ehci_hcd *ehci, struct ehci_qh *qh,
173 if (!ehci_is_TDI(ehci)
175 ehci_to_hcd(ehci)->self.root_hub) {
188 struct ehci_hcd *ehci,
237 ehci_dbg(ehci, "devpath %s ep%d%s 3strikes\n",
251 ehci_urb_done(struct ehci_hcd *ehci, struct urb *urb, int status)
255 ehci_to_hcd(ehci)->self.bandwidth_int_reqs--;
259 INCR(ehci->stats.unlink);
264 INCR(ehci->stats.complete);
268 ehci_dbg (ehci,
277 usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
278 usb_hcd_giveback_urb(ehci_to_hcd(ehci), urb, status);
281 static int qh_schedule (struct ehci_hcd *ehci, struct ehci_qh *qh);
289 qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
333 ehci_urb_done(ehci, last->urb, last_status);
336 ehci_qtd_free (ehci, last);
346 token = hc32_to_cpu(ehci, qtd->hw_token);
354 ehci_dbg(ehci,
375 ehci_dbg(ehci,
387 qtd->hw_token = cpu_to_hc32(ehci,
390 hw->hw_token = cpu_to_hc32(ehci,
408 & EHCI_LIST_END(ehci))) {
415 && ehci->rh_state >= EHCI_RH_RUNNING)) {
423 if (ehci->rh_state < EHCI_RH_RUNNING) {
444 (hw->hw_token & ACTIVE_BIT(ehci))) {
445 token = hc32_to_cpu(ehci, hw->hw_token);
446 hw->hw_token &= ~ACTIVE_BIT(ehci);
453 ehci_clear_tt_buffer(ehci, qh, urb, token);
465 last_status = qtd_copy_status(ehci, urb,
469 & EHCI_LIST_END(ehci)))
487 ehci_clear_tt_buffer(ehci, qh, urb,
511 ehci_urb_done(ehci, last->urb, last_status);
512 ehci_qtd_free (ehci, last);
542 if (stopped != 0 || hw->hw_qtd_next == EHCI_LIST_END(ehci))
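
Source lines 289-542 are qh_completions(), which walks qh->qtd_list and retires each qTD one step behind the scan: qTD N is freed, and its URB given back, only after qTD N+1 has been examined, so status is never read from memory that is about to be recycled. A standalone sketch of that one-behind pattern (give_back() and free_qtd() are illustrative callbacks, not kernel API):

    #include <stddef.h>
    #include <stdint.h>

    struct fake_qtd {
        uint32_t hw_token;
        struct fake_qtd *next;    /* stand-in for the driver's list_head */
        void *urb;
    };

    #define TOKEN_ACTIVE (1u << 7)    /* hardware still owns this qTD */

    static void scan_completions(struct fake_qtd *head,
                                 void (*give_back)(void *urb, int status),
                                 void (*free_qtd)(struct fake_qtd *))
    {
        struct fake_qtd *qtd, *last = NULL;
        int last_status = 0;

        for (qtd = head; qtd; qtd = qtd->next) {
            if (qtd->hw_token & TOKEN_ACTIVE)
                break;                         /* stop at live hardware work */
            if (last) {
                if (last->urb != qtd->urb)     /* URB boundary: give it back */
                    give_back(last->urb, last_status);
                free_qtd(last);
            }
            /* the real driver decodes status here (qtd_copy_status) */
            last = qtd;
        }
        if (last) {
            give_back(last->urb, last_status);
            free_qtd(last);
        }
    }
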
556 struct ehci_hcd *ehci,
567 ehci_qtd_free (ehci, qtd);
576 struct ehci_hcd *ehci,
592 qtd = ehci_qtd_alloc (ehci, flags);
606 qtd_fill(ehci, qtd, urb->setup_dma,
613 qtd = ehci_qtd_alloc (ehci, flags);
617 qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);
657 this_qtd_len = qtd_fill(ehci, qtd, buf, this_sg_len, token,
669 qtd->hw_alt_next = ehci->async->hw->hw_alt_next;
684 qtd = ehci_qtd_alloc (ehci, flags);
688 qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);
699 qtd->hw_alt_next = EHCI_LIST_END(ehci);
720 qtd = ehci_qtd_alloc (ehci, flags);
724 qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);
728 qtd_fill(ehci, qtd, 0, 0, token, 0);
734 qtd->hw_token |= cpu_to_hc32(ehci, QTD_IOC);
738 qtd_list_free (ehci, urb, head);
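
Source lines 576-738 are qtd_urb_transaction() building the qTD chain for an URB: each qTD's hw_next carries the DMA address of its successor, the last entry terminates the list, and QTD_IOC on the final qTD makes its completion raise an interrupt. A compact model of the chaining (sketch_qtd and build_chain() are illustrative, with DMA addresses modelled as plain fields):

    #include <stddef.h>
    #include <stdint.h>

    struct sketch_qtd {
        uint32_t hw_next;    /* bus address of the next qTD, or terminator */
        uint32_t hw_token;
        uint32_t dma;        /* this qTD's own bus address (modelled) */
        struct sketch_qtd *sw_next;
    };

    #define LIST_END 1u              /* EHCI "T" bit marks the list end */
    #define QTD_IOC  (1u << 15)      /* interrupt on completion */

    /* Chain n qTDs (n >= 1): each hw_next points at the successor's DMA
     * address; the last entry terminates the list and raises IOC. */
    static struct sketch_qtd *build_chain(struct sketch_qtd *pool, int n)
    {
        int i;

        for (i = 0; i < n; i++) {
            pool[i].hw_next = (i + 1 < n) ? pool[i + 1].dma : LIST_END;
            pool[i].sw_next = (i + 1 < n) ? &pool[i + 1] : NULL;
        }
        pool[n - 1].hw_token |= QTD_IOC;
        return &pool[0];
    }
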
761 struct ehci_hcd *ehci,
765 struct ehci_qh *qh = ehci_qh_alloc (ehci, flags);
793 ehci_dbg(ehci, "bogus qh maxpacket %d\n", maxp);
822 } else if (urb->interval > ehci->periodic_size << 3) {
823 urb->interval = ehci->periodic_size << 3;
854 if (urb->interval > ehci->periodic_size)
855 urb->interval = ehci->periodic_size;
894 if (ehci_has_fsl_portno_bug(ehci))
902 if (tt && tt->hub != ehci_to_hcd(ehci)->self.root_hub)
932 ehci_dbg(ehci, "bogus dev %p speed %d\n", urb->dev,
935 qh_destroy(ehci, qh);
944 hw->hw_info1 = cpu_to_hc32(ehci, info1);
945 hw->hw_info2 = cpu_to_hc32(ehci, info2);
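
Source lines 944-945 show qh_make() committing the endpoint characteristics it assembled into hw_info1/hw_info2. For reference, a sketch of the hw_info1 layout per EHCI 1.0 section 3.6, high-speed case only (make_info1_hs() is illustrative, not a driver function):

    #include <stdint.h>

    /* EHCI QH endpoint characteristics (hw_info1), EHCI 1.0 sec. 3.6:
     *   [6:0]   device address        [13:12] endpoint speed (2 = high)
     *   [11:8]  endpoint number       [14]    dtc (toggle from qTDs)
     *   [26:16] max packet length
     */
    static uint32_t make_info1_hs(unsigned devaddr, unsigned epnum,
                                  unsigned maxpacket, int dtc)
    {
        return (devaddr & 0x7f)
             | ((epnum & 0xf) << 8)
             | (2u << 12)
             | ((dtc ? 1u : 0u) << 14)
             | ((maxpacket & 0x7ff) << 16);
    }
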
953 static void enable_async(struct ehci_hcd *ehci)
955 if (ehci->async_count++)
959 ehci->enabled_hrtimer_events &= ~BIT(EHCI_HRTIMER_DISABLE_ASYNC);
962 ehci_poll_ASS(ehci);
963 turn_on_io_watchdog(ehci);
966 static void disable_async(struct ehci_hcd *ehci)
968 if (--ehci->async_count)
972 WARN_ON(ehci->async->qh_next.qh || !list_empty(&ehci->async_unlink) ||
973 !list_empty(&ehci->async_idle));
976 ehci_poll_ASS(ehci);
981 static void qh_link_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
983 __hc32 dma = QH_NEXT(ehci, qh->qh_dma);
993 qh_refresh(ehci, qh);
996 head = ehci->async;
1009 enable_async(ehci);
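
Source lines 981-1009 are qh_link_async() splicing a QH into the circular async schedule just after the dummy head. The ordering matters: the software links and the new QH's own hw_next are written first, then a write barrier, and only then the head's hardware link, so the controller never follows a pointer to a half-initialized QH. A standalone model (barrier() stands in for the kernel's wmb()):

    #include <stdint.h>

    struct sk_qh {
        uint32_t hw_next;         /* bus-visible horizontal link */
        uint32_t qh_dma;          /* this QH's bus address (modelled) */
        struct sk_qh *sw_next;    /* software shadow of the same link */
    };

    #define QH_NEXT(dma) ((uint32_t)(dma) | (1u << 1))   /* type = QH */

    static void link_after_head(struct sk_qh *head, struct sk_qh *qh,
                                void (*barrier)(void))
    {
        qh->hw_next = head->hw_next;     /* new QH points onward first */
        qh->sw_next = head->sw_next;
        barrier();                       /* order before publication */
        head->sw_next = qh;
        head->hw_next = QH_NEXT(qh->qh_dma);  /* now hardware can see it */
    }
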
1021 struct ehci_hcd *ehci,
1029 __hc32 qh_addr_mask = cpu_to_hc32(ehci, 0x7f);
1033 /* can't sleep here, we have ehci->lock... */
1034 qh = qh_make (ehci, urb, GFP_ATOMIC);
1068 qtd->hw_token = HALT_BIT(ehci);
1080 ehci_qtd_init(ehci, qtd, qtd->qtd_dma);
1087 qtd->hw_next = QTD_NEXT(ehci, dma);
1103 struct ehci_hcd *ehci,
1119 ehci_dbg(ehci,
1128 spin_lock_irqsave (&ehci->lock, flags);
1129 if (unlikely(!HCD_HW_ACCESSIBLE(ehci_to_hcd(ehci)))) {
1133 rc = usb_hcd_link_urb_to_ep(ehci_to_hcd(ehci), urb);
1137 qh = qh_append_tds(ehci, urb, qtd_list, epnum, &urb->ep->hcpriv);
1139 usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
1148 qh_link_async(ehci, qh);
1150 spin_unlock_irqrestore (&ehci->lock, flags);
1152 qtd_list_free (ehci, urb, qtd_list);
1173 struct ehci_hcd *ehci = hcd_to_ehci(hcd);
1186 qtd = ehci_qtd_alloc(ehci, GFP_KERNEL);
1203 qtd_fill(ehci, qtd, urb->setup_dma,
1207 submit_async(ehci, urb, &qtd_list, GFP_ATOMIC);
1222 qtd_fill(ehci, qtd, buf, len, token, maxpacket);
1228 qtd->hw_alt_next = EHCI_LIST_END(ehci);
1235 qtd = ehci_qtd_alloc(ehci, GFP_ATOMIC);
1239 qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);
1243 qtd_fill(ehci, qtd, 0, 0, token | QTD_IOC, 0);
1245 submit_async(ehci, urb, &qtd_list, GFP_KERNEL);
1250 qtd_list_free(ehci, urb, head);
1257 static void single_unlink_async(struct ehci_hcd *ehci, struct ehci_qh *qh)
1263 list_add_tail(&qh->unlink_node, &ehci->async_unlink);
1266 prev = ehci->async;
1272 if (ehci->qh_scan_next == qh)
1273 ehci->qh_scan_next = qh->qh_next.qh;
1276 static void start_iaa_cycle(struct ehci_hcd *ehci)
1279 if (unlikely(ehci->rh_state < EHCI_RH_RUNNING)) {
1280 end_unlink_async(ehci);
1283 } else if (ehci->rh_state == EHCI_RH_RUNNING &&
1284 !ehci->iaa_in_progress) {
1289 ehci_writel(ehci, ehci->command | CMD_IAAD,
1290 &ehci->regs->command);
1291 ehci_readl(ehci, &ehci->regs->command);
1292 ehci->iaa_in_progress = true;
1293 ehci_enable_event(ehci, EHCI_HRTIMER_IAA_WATCHDOG, true);
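
Source lines 1276-1293 are start_iaa_cycle() ringing the Interrupt-on-Async-Advance doorbell: even after a QH is removed from the schedule links, the controller may hold a cached copy, so its memory can only be recycled once the doorbell is acknowledged (the driver also arms an hrtimer watchdog for controllers that never answer). A polling-model sketch of the handshake shape (read_reg()/write_reg() are illustrative MMIO accessors; the real driver is interrupt-driven):

    #include <stdint.h>

    #define CMD_IAAD (1u << 6)    /* USBCMD: async-advance doorbell */
    #define STS_IAA  (1u << 5)    /* USBSTS: doorbell acknowledged */

    enum { REG_CMD, REG_STS };

    static void iaa_handshake(uint32_t (*read_reg)(int),
                              void (*write_reg)(int, uint32_t))
    {
        write_reg(REG_CMD, read_reg(REG_CMD) | CMD_IAAD); /* ring doorbell */
        (void)read_reg(REG_CMD);            /* flush the posted write */
        while (!(read_reg(REG_STS) & STS_IAA))
            ;                               /* controller still advancing */
        write_reg(REG_STS, STS_IAA);        /* ack: write-1-to-clear */
        /* only now is the unlinked QH's memory safe to recycle */
    }
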
1297 static void end_iaa_cycle(struct ehci_hcd *ehci)
1299 if (ehci->has_synopsys_hc_bug)
1300 ehci_writel(ehci, (u32) ehci->async->qh_dma,
1301 &ehci->regs->async_next);
1304 ehci->iaa_in_progress = false;
1306 end_unlink_async(ehci);
1311 static void end_unlink_async(struct ehci_hcd *ehci)
1316 if (list_empty(&ehci->async_unlink))
1318 qh = list_first_entry(&ehci->async_unlink, struct ehci_qh,
1325 early_exit = ehci->async_unlinking;
1328 if (ehci->rh_state < EHCI_RH_RUNNING)
1329 list_splice_tail_init(&ehci->async_unlink, &ehci->async_idle);
1341 list_move_tail(&qh->unlink_node, &ehci->async_idle);
1368 else if (qh->hw->hw_token & cpu_to_hc32(ehci, QTD_STS_HALT))
1377 if (qh_current != ehci->old_current ||
1378 qh_token != ehci->old_token) {
1379 ehci->old_current = qh_current;
1380 ehci->old_token = qh_token;
1381 ehci_enable_event(ehci,
1389 ehci->old_current = ~0; /* Prepare for next QH */
1392 if (!list_empty(&ehci->async_unlink))
1393 start_iaa_cycle(ehci);
1403 ehci->async_unlinking = true;
1404 while (!list_empty(&ehci->async_idle)) {
1405 qh = list_first_entry(&ehci->async_idle, struct ehci_qh,
1413 qh_completions(ehci, qh);
1415 ehci->rh_state == EHCI_RH_RUNNING)
1416 qh_link_async(ehci, qh);
1417 disable_async(ehci);
1419 ehci->async_unlinking = false;
1422 static void start_unlink_async(struct ehci_hcd *ehci, struct ehci_qh *qh);
1424 static void unlink_empty_async(struct ehci_hcd *ehci)
1431 for (qh = ehci->async->qh_next.qh; qh; qh = qh->qh_next.qh) {
1435 if (qh->unlink_cycle != ehci->async_unlink_cycle)
1441 if (list_empty(&ehci->async_unlink) && qh_to_unlink) {
1443 start_unlink_async(ehci, qh_to_unlink);
1449 ehci_enable_event(ehci, EHCI_HRTIMER_ASYNC_UNLINKS, true);
1450 ++ehci->async_unlink_cycle;
1457 static void unlink_empty_async_suspended(struct ehci_hcd *ehci)
1461 while (ehci->async->qh_next.qh) {
1462 qh = ehci->async->qh_next.qh;
1464 single_unlink_async(ehci, qh);
1471 /* caller must own ehci->lock */
1473 static void start_unlink_async(struct ehci_hcd *ehci, struct ehci_qh *qh)
1479 single_unlink_async(ehci, qh);
1480 start_iaa_cycle(ehci);
1485 static void scan_async (struct ehci_hcd *ehci)
1490 ehci->qh_scan_next = ehci->async->qh_next.qh;
1491 while (ehci->qh_scan_next) {
1492 qh = ehci->qh_scan_next;
1493 ehci->qh_scan_next = qh->qh_next.qh;
1501 * drops the lock. That's why ehci->qh_scan_next
1503 * gets unlinked then ehci->qh_scan_next is adjusted
1506 temp = qh_completions(ehci, qh);
1508 start_unlink_async(ehci, qh);
1511 qh->unlink_cycle = ehci->async_unlink_cycle;
1523 if (check_unlinks_later && ehci->rh_state == EHCI_RH_RUNNING &&
1524 !(ehci->enabled_hrtimer_events &
1526 ehci_enable_event(ehci, EHCI_HRTIMER_ASYNC_UNLINKS, true);
1527 ++ehci->async_unlink_cycle;
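
Source lines 1485-1527 are scan_async(); the cursor lives in ehci->qh_scan_next precisely because qh_completions() can drop ehci->lock, and the comment fragments at source lines 1501-1503 note that an unlink during that window adjusts the cursor. A standalone sketch of the cooperating scan and unlink paths (all names here are illustrative):

    struct scan_qh {
        struct scan_qh *next;
    };

    /* Shared cursor, modelling ehci->qh_scan_next. */
    static struct scan_qh *qh_scan_next;

    static void scan_all(struct scan_qh *head,
                         void (*process)(struct scan_qh *))
    {
        struct scan_qh *qh;

        qh_scan_next = head;
        while (qh_scan_next) {
            qh = qh_scan_next;
            qh_scan_next = qh->next;   /* advance before processing */
            process(qh);               /* may drop the lock internally */
        }
    }

    /* Unlink-side fixup, mirroring single_unlink_async(): if the QH
     * being removed is the one the scan would visit next, step the
     * cursor past it so the scan never touches a freed QH. */
    static void unlink_fixup(struct scan_qh *qh)
    {
        if (qh_scan_next == qh)
            qh_scan_next = qh->next;
    }
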