Lines Matching defs:ehci (drivers/usb/host/ehci-q.c)
6 /* this file is part of ehci-hcd.c */
20 * an ongoing challenge. That's in "ehci-sched.c".
37 qtd_fill(struct ehci_hcd *ehci, struct ehci_qtd *qtd, dma_addr_t buf,
45 qtd->hw_buf[0] = cpu_to_hc32(ehci, (u32)addr);
46 qtd->hw_buf_hi[0] = cpu_to_hc32(ehci, (u32)(addr >> 32));
57 qtd->hw_buf[i] = cpu_to_hc32(ehci, (u32)addr);
58 qtd->hw_buf_hi[i] = cpu_to_hc32(ehci,
71 qtd->hw_token = cpu_to_hc32(ehci, (count << 16) | token);
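
The qtd_fill() lines above implement the EHCI rule that one qTD addresses at most five 4 KiB pages: buffer slot 0 keeps the starting offset, slots 1-4 must be page-aligned, and the byte count lands in token bits 30:16 (EHCI spec 3.5). A standalone sketch of that arithmetic follows; the struct and function names are illustrative, not the driver's, it skips the cpu_to_hc32() byte-swapping shown above, and it omits the real function's final step of trimming a partial chunk to a multiple of maxpacket.

#include <stdint.h>
#include <stdio.h>

struct qtd_image {
	uint32_t hw_buf[5];	/* low 32 bits of each page pointer */
	uint32_t hw_buf_hi[5];	/* high 32 bits, for 64-bit controllers */
	uint32_t hw_token;	/* bits 30:16 carry the transfer length */
};

static size_t fill_qtd_buffers(struct qtd_image *q, uint64_t addr,
			       size_t len, uint32_t token)
{
	size_t count;
	int i;

	q->hw_buf[0] = (uint32_t)addr;		/* slot 0 keeps the offset */
	q->hw_buf_hi[0] = (uint32_t)(addr >> 32);

	count = 0x1000 - (addr & 0x0fff);	/* rest of the first page */
	if (count > len)
		count = len;

	for (i = 1; count < len && i < 5; i++) {
		addr = (addr + 0x1000) & ~0x0fffULL;	/* next page start */
		q->hw_buf[i] = (uint32_t)addr;
		q->hw_buf_hi[i] = (uint32_t)(addr >> 32);
		count += 0x1000;
		if (count > len)
			count = len;
	}

	q->hw_token = ((uint32_t)count << 16) | token;
	return count;		/* bytes this one qTD will move */
}

int main(void)
{
	struct qtd_image q = { { 0 } };

	/* a buffer starting 0xf00 bytes into a page: 0x100 bytes fit in
	 * slot 0, four more full pages cover 16384 bytes, and the rest
	 * needs another qTD */
	printf("mapped %zu of 20480 bytes\n",
	       fill_qtd_buffers(&q, 0x12345f00ULL, 20480, 0x80));
	return 0;
}
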
80 qh_update (struct ehci_hcd *ehci, struct ehci_qh *qh, struct ehci_qtd *qtd)
87 hw->hw_qtd_next = QTD_NEXT(ehci, qtd->qtd_dma);
88 hw->hw_alt_next = EHCI_LIST_END(ehci);
95 if (!(hw->hw_info1 & cpu_to_hc32(ehci, QH_TOGGLE_CTL))) {
99 epnum = (hc32_to_cpup(ehci, &hw->hw_info1) >> 8) & 0x0f;
101 hw->hw_token &= ~cpu_to_hc32(ehci, QTD_TOGGLE);
106 hw->hw_token &= cpu_to_hc32(ehci, QTD_TOGGLE | QTD_STS_PING);
114 qh_refresh (struct ehci_hcd *ehci, struct ehci_qh *qh)
127 if (qh->hw->hw_token & ACTIVE_BIT(ehci)) {
130 ehci_warn(ehci, "qh %p should be inactive!\n", qh);
132 qh_update(ehci, qh, qtd);
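
qh_update() reinitializes the QH's overlay area from a fresh qTD; the masking on the lines above (file lines 95-106) is the interesting part. The endpoint number is pulled out of hw_info1 bits 11:8, and the final AND leaves only the data-toggle and PING bits in the overlay token, wiping ACTIVE, HALT, and the rest of the status field. A small self-checking sketch of those two operations, using the qTD token bit positions from the EHCI spec (the info1 value is hypothetical):

#include <assert.h>
#include <stdint.h>

#define QTD_TOGGLE	(1u << 31)
#define QTD_STS_ACTIVE	(1u << 7)
#define QTD_STS_HALT	(1u << 6)
#define QTD_STS_PING	(1u << 0)

int main(void)
{
	uint32_t info1 = 0x00400201;	/* hypothetical QH word: dev 1, ep 2 */
	uint32_t token = QTD_TOGGLE | QTD_STS_ACTIVE | QTD_STS_HALT |
			 QTD_STS_PING | (0x1ffu << 16);	/* stale length */

	assert(((info1 >> 8) & 0x0f) == 2);	/* endpoint extraction */

	token &= QTD_TOGGLE | QTD_STS_PING;	/* the qh_update() mask */
	assert(token == (QTD_TOGGLE | QTD_STS_PING));
	return 0;
}
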
139 static void qh_link_async(struct ehci_hcd *ehci, struct ehci_qh *qh);
144 struct ehci_hcd *ehci = hcd_to_ehci(hcd);
148 spin_lock_irqsave(&ehci->lock, flags);
151 && ehci->rh_state == EHCI_RH_RUNNING)
152 qh_link_async(ehci, qh);
153 spin_unlock_irqrestore(&ehci->lock, flags);
156 static void ehci_clear_tt_buffer(struct ehci_hcd *ehci, struct ehci_qh *qh,
174 if (!ehci_is_TDI(ehci)
176 ehci_to_hcd(ehci)->self.root_hub) {
189 struct ehci_hcd *ehci,
238 ehci_dbg(ehci, "devpath %s ep%d%s 3strikes\n",
252 ehci_urb_done(struct ehci_hcd *ehci, struct urb *urb, int status)
256 ehci_to_hcd(ehci)->self.bandwidth_int_reqs--;
260 INCR(ehci->stats.unlink);
265 INCR(ehci->stats.complete);
269 ehci_dbg (ehci,
278 usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
279 usb_hcd_giveback_urb(ehci_to_hcd(ehci), urb, status);
282 static int qh_schedule (struct ehci_hcd *ehci, struct ehci_qh *qh);
290 qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
334 ehci_urb_done(ehci, last->urb, last_status);
337 ehci_qtd_free (ehci, last);
347 token = hc32_to_cpu(ehci, qtd->hw_token);
355 ehci_dbg(ehci,
376 ehci_dbg(ehci,
388 qtd->hw_token = cpu_to_hc32(ehci,
391 hw->hw_token = cpu_to_hc32(ehci,
409 & EHCI_LIST_END(ehci))) {
416 && ehci->rh_state >= EHCI_RH_RUNNING)) {
424 if (ehci->rh_state < EHCI_RH_RUNNING) {
445 (hw->hw_token & ACTIVE_BIT(ehci))) {
446 token = hc32_to_cpu(ehci, hw->hw_token);
447 hw->hw_token &= ~ACTIVE_BIT(ehci);
454 ehci_clear_tt_buffer(ehci, qh, urb, token);
466 last_status = qtd_copy_status(ehci, urb,
470 & EHCI_LIST_END(ehci)))
488 ehci_clear_tt_buffer(ehci, qh, urb,
512 ehci_urb_done(ehci, last->urb, last_status);
513 ehci_qtd_free (ehci, last);
543 if (stopped != 0 || hw->hw_qtd_next == EHCI_LIST_END(ehci))
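
The completion scan above turns each retired qTD's token into a URB status via qtd_copy_status() (called at file line 466). A hedged sketch of that decode, simplified: the real function also detects short reads and logs diagnostics. The bit positions and the CERR/PID field extractors follow the EHCI qTD token layout.

#include <errno.h>
#include <stdint.h>

#define QTD_STS_HALT	(1u << 6)
#define QTD_STS_DBE	(1u << 5)	/* data buffer error */
#define QTD_STS_BABBLE	(1u << 4)
#define QTD_STS_XACT	(1u << 3)	/* CRC, timeout, bad PID ... */
#define QTD_STS_MMF	(1u << 2)	/* missed complete-split */
#define QTD_CERR(tok)	(((tok) >> 10) & 0x3)
#define QTD_PID(tok)	(((tok) >> 8) & 0x3)

static int token_to_status(uint32_t token)
{
	if (!(token & QTD_STS_HALT))
		return 0;		/* transaction completed */
	if (token & QTD_STS_BABBLE)
		return -EOVERFLOW;	/* device kept talking */
	if (QTD_CERR(token))
		return -EPIPE;		/* halted with retries left: stall */
	if (token & QTD_STS_MMF)
		return -EPROTO;
	if (token & QTD_STS_DBE)	/* host memory couldn't keep up */
		return QTD_PID(token) == 1 ? -ENOSR : -ECOMM;
	return -EPROTO;			/* XactErr or unrecognized */
}

int main(void)
{
	/* halted after the error counter hit zero on transaction errors */
	return token_to_status(QTD_STS_HALT | QTD_STS_XACT) == -EPROTO
		? 0 : 1;
}
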
557 struct ehci_hcd *ehci,
568 ehci_qtd_free (ehci, qtd);
577 struct ehci_hcd *ehci,
593 qtd = ehci_qtd_alloc (ehci, flags);
607 qtd_fill(ehci, qtd, urb->setup_dma,
614 qtd = ehci_qtd_alloc (ehci, flags);
618 qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);
658 this_qtd_len = qtd_fill(ehci, qtd, buf, this_sg_len, token,
670 qtd->hw_alt_next = ehci->async->hw->hw_alt_next;
685 qtd = ehci_qtd_alloc (ehci, flags);
689 qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);
700 qtd->hw_alt_next = EHCI_LIST_END(ehci);
721 qtd = ehci_qtd_alloc (ehci, flags);
725 qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);
729 qtd_fill(ehci, qtd, 0, 0, token, 0);
735 qtd->hw_token |= cpu_to_hc32(ehci, QTD_IOC);
739 qtd_list_free (ehci, urb, head);
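
qh_urb_transaction() (file lines 577-739) strings qTDs together for one URB; for a control transfer it emits a SETUP stage, an optional data stage, and an opposite-direction zero-length status stage, with QTD_IOC set on the final qTD. The PID/toggle sequence it encodes is easiest to see in isolation. In this sketch the chain() helper and struct qtd are illustrative stand-ins, and the data stage is collapsed to a single qTD; the real loop may need several, flipping the toggle as it goes.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define QTD_TOGGLE	(1u << 31)	/* DATA1 when set */
#define QTD_IOC		(1u << 15)	/* interrupt on complete */
#define QTD_PID_OUT	(0u << 8)
#define QTD_PID_IN	(1u << 8)
#define QTD_PID_SETUP	(2u << 8)

struct qtd {
	uint32_t token;
	struct qtd *next;
};

static struct qtd *chain(struct qtd *prev, uint32_t token)
{
	struct qtd *q = calloc(1, sizeof(*q));

	if (!q)
		exit(1);
	q->token = token;
	if (prev)
		prev->next = q;
	return q;
}

int main(void)
{
	struct qtd *head, *q;

	/* control-IN: SETUP is always DATA0, the data stage starts at
	 * DATA1, and the status stage is a zero-length OUT, forced to
	 * DATA1, carrying the completion interrupt */
	head = chain(NULL, QTD_PID_SETUP);
	q = chain(head, QTD_PID_IN | QTD_TOGGLE);
	chain(q, QTD_PID_OUT | QTD_TOGGLE | QTD_IOC);

	for (q = head; q; q = q->next)
		printf("token %08x\n", (unsigned)q->token);

	while (head) {			/* tear the sketch list down */
		q = head->next;
		free(head);
		head = q;
	}
	return 0;
}
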
762 struct ehci_hcd *ehci,
766 struct ehci_qh *qh = ehci_qh_alloc (ehci, flags);
794 ehci_dbg(ehci, "bogus qh maxpacket %d\n", maxp);
823 } else if (urb->interval > ehci->periodic_size << 3) {
824 urb->interval = ehci->periodic_size << 3;
855 if (urb->interval > ehci->periodic_size)
856 urb->interval = ehci->periodic_size;
895 if (ehci_has_fsl_portno_bug(ehci))
903 if (tt && tt->hub != ehci_to_hcd(ehci)->self.root_hub)
933 ehci_dbg(ehci, "bogus dev %p speed %d\n", urb->dev,
936 qh_destroy(ehci, qh);
945 hw->hw_info1 = cpu_to_hc32(ehci, info1);
946 hw->hw_info2 = cpu_to_hc32(ehci, info2);
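
The two stores at file lines 945-946 commit everything qh_make() computed into the QH's endpoint-characteristics words. The hw_info1 layout (EHCI spec 3.6) packs device address, endpoint number, endpoint speed, toggle control, and max packet size into one dword; a sketch of that encoding, with an illustrative helper name:

#include <assert.h>
#include <stdint.h>

static uint32_t make_info1(unsigned devaddr, unsigned epnum, unsigned eps,
			   unsigned maxp, unsigned dtc, unsigned nak_rl)
{
	return (devaddr & 0x7f)			/* bits 6:0   device addr */
		| (epnum & 0xf) << 8		/* bits 11:8  endpoint */
		| (eps & 0x3) << 12		/* bits 13:12 0=FS 1=LS 2=HS */
		| (dtc & 0x1) << 14		/* bit 14: toggle from qTD */
		| (maxp & 0x7ff) << 16		/* bits 26:16 max packet */
		| (nak_rl & 0xf) << 28;		/* bits 31:28 NAK reload */
}

int main(void)
{
	/* high-speed bulk endpoint 1 on device 3, 512-byte packets,
	 * hardware-managed toggle, NAK reload of 4 */
	uint32_t info1 = make_info1(3, 1, 2, 512, 0, 4);

	assert(((info1 >> 16) & 0x7ff) == 512);
	assert((info1 & 0x7f) == 3);
	return 0;
}
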
954 static void enable_async(struct ehci_hcd *ehci)
956 if (ehci->async_count++)
960 ehci->enabled_hrtimer_events &= ~BIT(EHCI_HRTIMER_DISABLE_ASYNC);
963 ehci_poll_ASS(ehci);
964 turn_on_io_watchdog(ehci);
967 static void disable_async(struct ehci_hcd *ehci)
969 if (--ehci->async_count)
973 WARN_ON(ehci->async->qh_next.qh || !list_empty(&ehci->async_unlink) ||
974 !list_empty(&ehci->async_idle));
977 ehci_poll_ASS(ehci);
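
enable_async()/disable_async() are a refcount pair: only the 0-to-1 and 1-to-0 transitions touch the controller, which the post-increment and pre-decrement tests above express very compactly. A self-contained illustration of the idiom, with stub names and no hardware:

#include <assert.h>

static int async_count;
static int schedule_running;

static void enable_async_stub(void)
{
	if (async_count++)	/* old value nonzero: already enabled */
		return;
	schedule_running = 1;	/* only the first user does real work */
}

static void disable_async_stub(void)
{
	if (--async_count)	/* other users remain */
		return;
	schedule_running = 0;	/* only the last user shuts it down */
}

int main(void)
{
	enable_async_stub();
	enable_async_stub();
	disable_async_stub();
	assert(schedule_running);	/* one user left */
	disable_async_stub();
	assert(!schedule_running);
	return 0;
}
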
982 static void qh_link_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
984 __hc32 dma = QH_NEXT(ehci, qh->qh_dma);
994 qh_refresh(ehci, qh);
997 head = ehci->async;
1010 enable_async(ehci);
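
qh_link_async() inserts the new QH right behind the dummy head of the circular async schedule. The ordering matters because the controller traverses the hardware links concurrently: the new QH's forward pointers are written first, then a write barrier, and only then is the head repointed, so the hardware never follows a half-initialized link. A sketch of that publish order with illustrative types; the 0x2 in the low bits is the EHCI "this link is a QH" type tag, as in the QH_NEXT() macro.

#include <stdatomic.h>
#include <stddef.h>
#include <stdint.h>

struct shadow_qh {
	struct shadow_qh *next;	/* driver's software copy of the link */
	uint32_t hw_next;	/* the link the controller follows */
	uint32_t dma;		/* this QH's own bus address */
};

static void link_after_head(struct shadow_qh *head, struct shadow_qh *qh)
{
	qh->next = head->next;		/* 1: new QH points onward first */
	qh->hw_next = head->hw_next;

	atomic_thread_fence(memory_order_release);	/* plays wmb() */

	head->next = qh;		/* 2: only now publish it */
	head->hw_next = qh->dma | 0x2;	/* type tag: horizontal QH link */
}

int main(void)
{
	struct shadow_qh head = { NULL, 0x1002, 0x1000 };	/* empty ring */
	struct shadow_qh qh = { NULL, 0, 0x2000 };

	link_after_head(&head, &qh);
	return (head.next == &qh && qh.hw_next == 0x1002) ? 0 : 1;
}
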
1022 struct ehci_hcd *ehci,
1030 __hc32 qh_addr_mask = cpu_to_hc32(ehci, 0x7f);
1034 /* can't sleep here, we have ehci->lock... */
1035 qh = qh_make (ehci, urb, GFP_ATOMIC);
1069 qtd->hw_token = HALT_BIT(ehci);
1081 ehci_qtd_init(ehci, qtd, qtd->qtd_dma);
1088 qtd->hw_next = QTD_NEXT(ehci, dma);
1104 struct ehci_hcd *ehci,
1120 ehci_dbg(ehci,
1129 spin_lock_irqsave (&ehci->lock, flags);
1130 if (unlikely(!HCD_HW_ACCESSIBLE(ehci_to_hcd(ehci)))) {
1134 rc = usb_hcd_link_urb_to_ep(ehci_to_hcd(ehci), urb);
1138 qh = qh_append_tds(ehci, urb, qtd_list, epnum, &urb->ep->hcpriv);
1140 usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
1149 qh_link_async(ehci, qh);
1151 spin_unlock_irqrestore (&ehci->lock, flags);
1153 qtd_list_free (ehci, urb, qtd_list);
1174 struct ehci_hcd *ehci = hcd_to_ehci(hcd);
1187 qtd = ehci_qtd_alloc(ehci, GFP_KERNEL);
1204 qtd_fill(ehci, qtd, urb->setup_dma,
1208 submit_async(ehci, urb, &qtd_list, GFP_ATOMIC);
1223 qtd_fill(ehci, qtd, buf, len, token, maxpacket);
1229 qtd->hw_alt_next = EHCI_LIST_END(ehci);
1236 qtd = ehci_qtd_alloc(ehci, GFP_ATOMIC);
1240 qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);
1244 qtd_fill(ehci, qtd, 0, 0, token | QTD_IOC, 0);
1246 submit_async(ehci, urb, &qtd_list, GFP_KERNEL);
1251 qtd_list_free(ehci, urb, head);
1258 static void single_unlink_async(struct ehci_hcd *ehci, struct ehci_qh *qh)
1264 list_add_tail(&qh->unlink_node, &ehci->async_unlink);
1267 prev = ehci->async;
1273 if (ehci->qh_scan_next == qh)
1274 ehci->qh_scan_next = qh->qh_next.qh;
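
single_unlink_async() (file lines 1258-1274) removes a QH from that same ring. With only forward links, it walks from the head to the predecessor, splices the QH out of both the hardware chain and the shadow chain, and repairs ehci->qh_scan_next so an in-flight scan_async() pass doesn't follow a stale pointer. The same steps in an isolated sketch, with stand-in types and hypothetical link values:

#include <stddef.h>
#include <stdint.h>

struct qh_stub {
	struct qh_stub *next;	/* software (shadow) link */
	uint32_t hw_next;	/* link the controller follows */
};

static struct qh_stub *qh_scan_next;	/* cursor shared with the scanner */

static void unlink_from_ring(struct qh_stub *head, struct qh_stub *qh)
{
	struct qh_stub *prev = head;

	while (prev->next != qh)	/* find the predecessor */
		prev = prev->next;

	prev->hw_next = qh->hw_next;	/* hardware now skips qh */
	prev->next = qh->next;
	if (qh_scan_next == qh)		/* keep an in-progress scan valid */
		qh_scan_next = qh->next;
}

int main(void)
{
	struct qh_stub c = { NULL, 0x1002 };	/* links back to the head */
	struct qh_stub b = { &c, 0x3002 };
	struct qh_stub head = { &b, 0x2002 };

	qh_scan_next = &b;
	unlink_from_ring(&head, &b);
	return (head.next == &c && qh_scan_next == &c) ? 0 : 1;
}
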
1277 static void start_iaa_cycle(struct ehci_hcd *ehci)
1280 if (unlikely(ehci->rh_state < EHCI_RH_RUNNING)) {
1281 end_unlink_async(ehci);
1284 } else if (ehci->rh_state == EHCI_RH_RUNNING &&
1285 !ehci->iaa_in_progress) {
1290 ehci_writel(ehci, ehci->command | CMD_IAAD,
1291 &ehci->regs->command);
1292 ehci_readl(ehci, &ehci->regs->command);
1293 ehci->iaa_in_progress = true;
1294 ehci_enable_event(ehci, EHCI_HRTIMER_IAA_WATCHDOG, true);
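
start_iaa_cycle() rings the controller's "interrupt on async advance" doorbell (CMD_IAAD, USBCMD bit 6) so the hardware will confirm it has stopped caching the unlinked QH. The read-back at file line 1292 forces the posted MMIO write out before the watchdog is armed; the watchdog exists because some controllers lose the IAA interrupt. The idiom in miniature, with an illustrative helper and a plain variable standing in for the register:

#include <stdint.h>

#define CMD_IAAD	(1u << 6)	/* async-advance doorbell bit */

static inline void ring_iaa_doorbell(volatile uint32_t *usbcmd,
				     uint32_t cached_command)
{
	*usbcmd = cached_command | CMD_IAAD;	/* posted write ... */
	(void)*usbcmd;				/* ... flushed by a readback */
}

int main(void)
{
	static uint32_t fake_usbcmd;	/* stand-in for the MMIO register */

	ring_iaa_doorbell(&fake_usbcmd, 0x00010001 /* arbitrary command */);
	return (fake_usbcmd & CMD_IAAD) ? 0 : 1;
}
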
1298 static void end_iaa_cycle(struct ehci_hcd *ehci)
1300 if (ehci->has_synopsys_hc_bug)
1301 ehci_writel(ehci, (u32) ehci->async->qh_dma,
1302 &ehci->regs->async_next);
1305 ehci->iaa_in_progress = false;
1307 end_unlink_async(ehci);
1312 static void end_unlink_async(struct ehci_hcd *ehci)
1317 if (list_empty(&ehci->async_unlink))
1319 qh = list_first_entry(&ehci->async_unlink, struct ehci_qh,
1326 early_exit = ehci->async_unlinking;
1329 if (ehci->rh_state < EHCI_RH_RUNNING)
1330 list_splice_tail_init(&ehci->async_unlink, &ehci->async_idle);
1342 list_move_tail(&qh->unlink_node, &ehci->async_idle);
1369 else if (qh->hw->hw_token & cpu_to_hc32(ehci, QTD_STS_HALT))
1378 if (qh_current != ehci->old_current ||
1379 qh_token != ehci->old_token) {
1380 ehci->old_current = qh_current;
1381 ehci->old_token = qh_token;
1382 ehci_enable_event(ehci,
1390 ehci->old_current = ~0; /* Prepare for next QH */
1393 if (!list_empty(&ehci->async_unlink))
1394 start_iaa_cycle(ehci);
1404 ehci->async_unlinking = true;
1405 while (!list_empty(&ehci->async_idle)) {
1406 qh = list_first_entry(&ehci->async_idle, struct ehci_qh,
1414 qh_completions(ehci, qh);
1416 ehci->rh_state == EHCI_RH_RUNNING)
1417 qh_link_async(ehci, qh);
1418 disable_async(ehci);
1420 ehci->async_unlinking = false;
1423 static void start_unlink_async(struct ehci_hcd *ehci, struct ehci_qh *qh);
1425 static void unlink_empty_async(struct ehci_hcd *ehci)
1432 for (qh = ehci->async->qh_next.qh; qh; qh = qh->qh_next.qh) {
1436 if (qh->unlink_cycle != ehci->async_unlink_cycle)
1442 if (list_empty(&ehci->async_unlink) && qh_to_unlink) {
1444 start_unlink_async(ehci, qh_to_unlink);
1450 ehci_enable_event(ehci, EHCI_HRTIMER_ASYNC_UNLINKS, true);
1451 ++ehci->async_unlink_cycle;
1458 static void unlink_empty_async_suspended(struct ehci_hcd *ehci)
1462 while (ehci->async->qh_next.qh) {
1463 qh = ehci->async->qh_next.qh;
1465 single_unlink_async(ehci, qh);
1472 /* caller must own ehci->lock */
1474 static void start_unlink_async(struct ehci_hcd *ehci, struct ehci_qh *qh)
1480 single_unlink_async(ehci, qh);
1481 start_iaa_cycle(ehci);
1486 static void scan_async (struct ehci_hcd *ehci)
1491 ehci->qh_scan_next = ehci->async->qh_next.qh;
1492 while (ehci->qh_scan_next) {
1493 qh = ehci->qh_scan_next;
1494 ehci->qh_scan_next = qh->qh_next.qh;
1502 * drops the lock. That's why ehci->qh_scan_next
1504 * gets unlinked then ehci->qh_scan_next is adjusted
1507 temp = qh_completions(ehci, qh);
1509 start_unlink_async(ehci, qh);
1512 qh->unlink_cycle = ehci->async_unlink_cycle;
1524 if (check_unlinks_later && ehci->rh_state == EHCI_RH_RUNNING &&
1525 !(ehci->enabled_hrtimer_events &
1527 ehci_enable_event(ehci, EHCI_HRTIMER_ASYNC_UNLINKS, true);
1528 ++ehci->async_unlink_cycle;
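
The unlink_cycle/async_unlink_cycle pair at file lines 1432-1451 and 1512-1528 implements a grace period: an empty QH is stamped with the current cycle when the scan finds it, the cycle counter is bumped when the ASYNC_UNLINKS timer is armed, and only QHs whose stamp no longer matches get unlinked, so a QH that just went idle survives one timer cycle in case another URB arrives for it. The scheme reduced to a few lines, with stub types:

#include <assert.h>
#include <stdbool.h>

static unsigned async_unlink_cycle = 1;

struct qh_stub {
	unsigned unlink_cycle;
	bool empty;
};

static void mark_empty(struct qh_stub *qh)	/* scan found it idle */
{
	qh->empty = true;
	qh->unlink_cycle = async_unlink_cycle;
}

static bool ripe_for_unlink(const struct qh_stub *qh)
{
	/* stamped in an earlier cycle: it stayed empty long enough */
	return qh->empty && qh->unlink_cycle != async_unlink_cycle;
}

int main(void)
{
	struct qh_stub qh = { 0, false };

	mark_empty(&qh);
	assert(!ripe_for_unlink(&qh));	/* too fresh this cycle */
	++async_unlink_cycle;		/* timer armed: new cycle */
	assert(ripe_for_unlink(&qh));	/* now it can be unlinked */
	return 0;
}
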