Lines matching "ehci" — one source line per match, each prefixed with its line number in the file (the EHCI periodic-schedule code, part of ehci-hcd):
7 /* this file is part of ehci-hcd.c */
32 periodic_next_shadow(struct ehci_hcd *ehci, union ehci_shadow *periodic,
35 switch (hc32_to_cpu(ehci, tag)) {
49 shadow_next_periodic(struct ehci_hcd *ehci, union ehci_shadow *periodic,
52 switch (hc32_to_cpu(ehci, tag)) {
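periodic_next_shadow() and shadow_next_periodic() pick the right "next" pointer out of a tagged union: the low bits of each hardware link word say whether the next schedule element is an iTD, QH, siTD, or FSTN, and each of those types keeps its next link in a different place. A minimal sketch of the pattern, using simplified stand-in types rather than the kernel's:

struct s_itd;
struct s_qh;

enum q_type { Q_ITD, Q_QH, Q_SITD, Q_FSTN };    /* link-word tag */

union shadow {                          /* stands in for union ehci_shadow */
        struct s_itd *itd;
        struct s_qh  *qh;
        void         *ptr;
};

struct s_itd { union shadow next; };
struct s_qh  { union shadow next; };

/* Return the address of the next-pointer inside whatever *here points at. */
static union shadow *next_shadow(union shadow *here, enum q_type tag)
{
        switch (tag) {
        case Q_ITD:
                return &here->itd->next;
        case Q_QH:
                return &here->qh->next;
        default:
                return 0;       /* siTD/FSTN cases omitted in this sketch */
        }
}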
62 /* caller must hold ehci->lock */
63 static void periodic_unlink(struct ehci_hcd *ehci, unsigned frame, void *ptr)
65 union ehci_shadow *prev_p = &ehci->pshadow[frame];
66 __hc32 *hw_p = &ehci->periodic[frame];
71 prev_p = periodic_next_shadow(ehci, prev_p,
72 Q_NEXT_TYPE(ehci, *hw_p));
73 hw_p = shadow_next_periodic(ehci, &here,
74 Q_NEXT_TYPE(ehci, *hw_p));
84 *prev_p = *periodic_next_shadow(ehci, &here,
85 Q_NEXT_TYPE(ehci, *hw_p));
87 if (!ehci->use_dummy_qh ||
88 *shadow_next_periodic(ehci, &here, Q_NEXT_TYPE(ehci, *hw_p))
89 != EHCI_LIST_END(ehci))
90 *hw_p = *shadow_next_periodic(ehci, &here,
91 Q_NEXT_TYPE(ehci, *hw_p));
93 *hw_p = cpu_to_hc32(ehci, ehci->dummy->qh_dma);
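periodic_unlink() advances through the software (shadow) list and the hardware list in lock step until it reaches the entry to remove, then splices it out of both; on controllers with the prefetch bug (ehci->use_dummy_qh) an emptied slot is pointed at a dummy QH instead of being marked end-of-list. The splice itself, reduced to simplified types:

struct node {
        struct node *next;      /* software (shadow) link */
        unsigned     hw_next;   /* the link word the controller reads */
};

static void unlink_from_frame(struct node **head, unsigned *hw_head,
                              struct node *victim)
{
        struct node **prev = head;
        unsigned *hw_p = hw_head;

        while (*prev && *prev != victim) {      /* walk to the victim */
                hw_p = &(*prev)->hw_next;
                prev = &(*prev)->next;
        }
        if (*prev) {
                *prev = victim->next;           /* software list */
                *hw_p = victim->hw_next;        /* hardware list */
        }
}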
137 struct ehci_hcd *ehci =
148 list_add_tail(&tt->tt_list, &ehci->tt_list);
194 static void bandwidth_dbg(struct ehci_hcd *ehci, int sign, char *type,
206 static void reserve_release_intr_bandwidth(struct ehci_hcd *ehci,
220 bandwidth_dbg(ehci, sign, "intr", &qh->ps);
231 ehci->bandwidth[i] += usecs;
240 ehci->bandwidth[i+j] += c_usecs;
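reserve_release_intr_bandwidth() charges (sign = +1) or refunds (sign = -1) the transfer's microseconds against a per-microframe bandwidth table, stepping by the transfer's uframe period; split transactions additionally charge their complete-split uframes (the c_usecs line above). The accounting, reduced to its core (the table size here is an assumption, not the driver's constant):

#define BW_UFRAMES 64

static void reserve_release(unsigned short bw[BW_UFRAMES],
                            unsigned phase_uf, unsigned uperiod,
                            int usecs, int sign)
{
        unsigned i;

        if (sign < 0)
                usecs = -usecs;         /* release rather than reserve */
        for (i = phase_uf; i < BW_UFRAMES; i += uperiod)
                bw[i] += usecs;         /* every uframe the transfer touches */
}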
348 struct ehci_hcd *ehci,
371 tt_usecs[i] = ehci->tt_budget[uf];
408 struct ehci_hcd *ehci,
422 for (; frame < ehci->periodic_size; frame += period) {
427 here = ehci->pshadow[frame];
428 type = Q_NEXT_TYPE(ehci, ehci->periodic[frame]);
430 switch (hc32_to_cpu(ehci, type)) {
432 type = Q_NEXT_TYPE(ehci, here.itd->hw_next);
440 mask = hc32_to_cpu(ehci,
447 type = Q_NEXT_TYPE(ehci, hw->hw_next);
454 mask = hc32_to_cpu(ehci, here.sitd
461 type = Q_NEXT_TYPE(ehci, here.sitd->hw_next);
466 ehci_dbg(ehci,
484 static void enable_periodic(struct ehci_hcd *ehci)
486 if (ehci->periodic_count++)
490 ehci->enabled_hrtimer_events &= ~BIT(EHCI_HRTIMER_DISABLE_PERIODIC);
493 ehci_poll_PSS(ehci);
494 turn_on_io_watchdog(ehci);
497 static void disable_periodic(struct ehci_hcd *ehci)
499 if (--ehci->periodic_count)
503 ehci_poll_PSS(ehci);
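enable_periodic()/disable_periodic() form a reference count around the periodic schedule: only the 0 -> 1 transition starts anything (after cancelling a pending delayed stop), and only the 1 -> 0 transition begins shutting it down; in both directions ehci_poll_PSS() waits for the PSS status bit to settle before touching the schedule. The shape of the pattern:

static int periodic_count;      /* users of the periodic schedule */

static void enable_periodic_sketch(void)
{
        if (periodic_count++)
                return;                 /* already running */
        /* cancel any pending delayed stop, then start the schedule
         * once the PSS status bit says it is safe to do so */
}

static void disable_periodic_sketch(void)
{
        if (--periodic_count)
                return;                 /* still has users */
        /* stop the schedule once the PSS status bit settles */
}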
512 * no FSTN support (yet; ehci 0.96+)
514 static void qh_link_periodic(struct ehci_hcd *ehci, struct ehci_qh *qh)
521 period, hc32_to_cpup(ehci, &qh->hw->hw_info2)
529 for (i = qh->ps.phase; i < ehci->periodic_size; i += period) {
530 union ehci_shadow *prev = &ehci->pshadow[i];
531 __hc32 *hw_p = &ehci->periodic[i];
537 type = Q_NEXT_TYPE(ehci, *hw_p);
538 if (type == cpu_to_hc32(ehci, Q_TYPE_QH))
540 prev = periodic_next_shadow(ehci, prev, type);
541 hw_p = shadow_next_periodic(ehci, &here, type);
562 *hw_p = QH_NEXT(ehci, qh->qh_dma);
570 ehci_to_hcd(ehci)->self.bandwidth_allocated += qh->ps.bw_period
574 list_add(&qh->intr_node, &ehci->intr_qh_list);
577 ++ehci->intr_count;
578 enable_periodic(ehci);
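qh_link_periodic() places the QH into every frame it covers (i += period), and within each frame's list keeps interrupt QHs sorted by decreasing polling period ("slow before fast"), which lets branches share their interior tail nodes. The sorted insert, reduced to one list:

struct iqh {
        struct iqh *next;
        unsigned    period;     /* polling period, in frames */
};

static void link_sorted(struct iqh **head, struct iqh *qh)
{
        struct iqh **prev = head;

        /* stop in front of the first QH with a shorter period */
        while (*prev && (*prev)->period >= qh->period)
                prev = &(*prev)->next;
        qh->next = *prev;
        *prev = qh;
}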
581 static void qh_unlink_periodic(struct ehci_hcd *ehci, struct ehci_qh *qh)
604 for (i = qh->ps.phase; i < ehci->periodic_size; i += period)
605 periodic_unlink(ehci, i, qh);
608 ehci_to_hcd(ehci)->self.bandwidth_allocated -= qh->ps.bw_period
615 hc32_to_cpup(ehci, &qh->hw->hw_info2) & (QH_CMASK | QH_SMASK),
622 if (ehci->qh_scan_next == qh)
623 ehci->qh_scan_next = list_entry(qh->intr_node.next,
628 static void cancel_unlink_wait_intr(struct ehci_hcd *ehci, struct ehci_qh *qh)
642 static void start_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh)
649 cancel_unlink_wait_intr(ehci, qh);
651 qh_unlink_periodic(ehci, qh);
661 qh->unlink_cycle = ehci->intr_unlink_cycle;
664 list_add_tail(&qh->unlink_node, &ehci->intr_unlink);
666 if (ehci->intr_unlinking)
668 else if (ehci->rh_state < EHCI_RH_RUNNING)
669 ehci_handle_intr_unlinks(ehci);
670 else if (ehci->intr_unlink.next == &qh->unlink_node) {
671 ehci_enable_event(ehci, EHCI_HRTIMER_UNLINK_INTR, true);
672 ++ehci->intr_unlink_cycle;
681 static void start_unlink_intr_wait(struct ehci_hcd *ehci,
684 qh->unlink_cycle = ehci->intr_unlink_wait_cycle;
687 list_add_tail(&qh->unlink_node, &ehci->intr_unlink_wait);
689 if (ehci->rh_state < EHCI_RH_RUNNING)
690 ehci_handle_start_intr_unlinks(ehci);
691 else if (ehci->intr_unlink_wait.next == &qh->unlink_node) {
692 ehci_enable_event(ehci, EHCI_HRTIMER_START_UNLINK_INTR, true);
693 ++ehci->intr_unlink_wait_cycle;
697 static void end_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh)
703 hw->hw_next = EHCI_LIST_END(ehci);
706 qh_completions(ehci, qh);
709 if (!list_empty(&qh->qtd_list) && ehci->rh_state == EHCI_RH_RUNNING) {
710 rc = qh_schedule(ehci, qh);
712 qh_refresh(ehci, qh);
713 qh_link_periodic(ehci, qh);
723 ehci_err(ehci, "can't reschedule qh %p, err %d\n",
729 --ehci->intr_count;
730 disable_periodic(ehci);
736 struct ehci_hcd *ehci,
749 usecs = ehci->uframe_periodic_max - usecs;
753 if (ehci->bandwidth[uframe] > usecs)
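check_period() asks whether `usecs` more can be spent in a given uframe slot, in every frame where the transfer would recur, without exceeding the per-uframe budget (ehci->uframe_periodic_max; by default 80% of the 125 us microframe). Note the test is rearranged as bandwidth > max - usecs. A compact version:

#define BW_UFRAMES 64   /* assumed table size, as in the earlier sketch */

/* Returns nonzero if every affected uframe still has room for `usecs`. */
static int period_ok(const unsigned short bw[BW_UFRAMES],
                     unsigned uframe, unsigned uperiod,
                     unsigned usecs, unsigned max_usecs)
{
        unsigned limit = max_usecs - usecs;

        for (; uframe < BW_UFRAMES; uframe += uperiod)
                if (bw[uframe] > limit)
                        return 0;       /* this uframe would overcommit */
        return 1;
}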
762 struct ehci_hcd *ehci,
776 if (!check_period(ehci, frame, uframe, qh->ps.bw_uperiod, qh->ps.usecs))
785 if (tt_available(ehci, &qh->ps, tt, frame, uframe)) {
790 if (!check_period(ehci, frame, i,
812 if (tt_no_collision(ehci, qh->ps.bw_period, qh->ps.udev, frame, mask)) {
813 if (!check_period(ehci, frame, uframe + qh->gap_uf + 1,
816 if (!check_period(ehci, frame, uframe + qh->gap_uf,
829 static int qh_schedule(struct ehci_hcd *ehci, struct ehci_qh *qh)
837 hw->hw_next = EHCI_LIST_END(ehci);
841 ehci_dbg(ehci, "reused qh %p schedule\n", qh);
852 compute_tt_budget(ehci->tt_budget, tt);
863 frame = ++ehci->random_frame & (qh->ps.bw_period - 1);
865 status = check_intr_schedule(ehci,
874 status = check_intr_schedule(ehci, 0, 0, qh, &c_mask, tt);
880 qh->ps.phase = (qh->ps.period ? ehci->random_frame &
889 hw->hw_info2 &= cpu_to_hc32(ehci, ~(QH_CMASK | QH_SMASK));
890 hw->hw_info2 |= cpu_to_hc32(ehci, qh->ps.cs_mask);
891 reserve_release_intr_bandwidth(ehci, qh, 1);
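When no previous slot can be reused, qh_schedule() probes candidate frames starting from a pseudo-random offset (ehci->random_frame) so that periodic load spreads across the schedule, trying all eight uframes in each candidate frame. A rough sketch of that search loop, with the real check_intr_schedule() replaced by a caller-supplied predicate:

static int find_slot(unsigned bw_period, unsigned *random_frame,
                     int (*slot_ok)(unsigned frame, unsigned uframe))
{
        int status = -1;        /* no slot found yet */
        unsigned frame, uframe, tries;

        for (tries = bw_period; status && tries > 0; --tries) {
                frame = ++*random_frame & (bw_period - 1);
                for (uframe = 0; uframe < 8; uframe++) {
                        status = slot_ok(frame, uframe);
                        if (status == 0)
                                break;  /* found a (frame, uframe) slot */
                }
        }
        return status;
}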
898 struct ehci_hcd *ehci,
912 spin_lock_irqsave(&ehci->lock, flags);
914 if (unlikely(!HCD_HW_ACCESSIBLE(ehci_to_hcd(ehci)))) {
918 status = usb_hcd_link_urb_to_ep(ehci_to_hcd(ehci), urb);
924 qh = qh_append_tds(ehci, urb, &empty, epnum, &urb->ep->hcpriv);
930 status = qh_schedule(ehci, qh);
936 qh = qh_append_tds(ehci, urb, qtd_list, epnum, &urb->ep->hcpriv);
941 qh_refresh(ehci, qh);
942 qh_link_periodic(ehci, qh);
945 cancel_unlink_wait_intr(ehci, qh);
949 ehci_to_hcd(ehci)->self.bandwidth_int_reqs++;
953 usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
955 spin_unlock_irqrestore(&ehci->lock, flags);
957 qtd_list_free(ehci, urb, qtd_list);
962 static void scan_intr(struct ehci_hcd *ehci)
966 list_for_each_entry_safe(qh, ehci->qh_scan_next, &ehci->intr_qh_list,
975 * drops the lock. That's why ehci->qh_scan_next
977 * gets unlinked then ehci->qh_scan_next is adjusted
980 temp = qh_completions(ehci, qh);
982 start_unlink_intr(ehci, qh);
985 start_unlink_intr_wait(ehci, qh);
1011 struct ehci_hcd *ehci,
1042 stream->buf0 = cpu_to_hc32(ehci, (epnum << 8) | dev->devnum);
1043 stream->buf1 = cpu_to_hc32(ehci, buf1);
1044 stream->buf2 = cpu_to_hc32(ehci, multi);
1069 if (!ehci_is_TDI(ehci)
1071 ehci_to_hcd(ehci)->self.root_hub))
1108 stream->address = cpu_to_hc32(ehci, addr);
1119 iso_stream_find(struct ehci_hcd *ehci, struct urb *urb)
1132 spin_lock_irqsave(&ehci->lock, flags);
1139 iso_stream_init(ehci, stream, urb);
1144 ehci_dbg(ehci, "dev %s ep%d%s, not iso??\n",
1150 spin_unlock_irqrestore(&ehci->lock, flags);
1174 struct ehci_hcd *ehci,
1204 uframe->transaction = cpu_to_hc32(ehci, trans);
1222 /* caller must hold ehci->lock! */
1230 struct ehci_hcd *ehci,
1246 itd_sched_init(ehci, sched, stream, urb);
1254 spin_lock_irqsave(&ehci->lock, flags);
1264 if (itd->frame == ehci->now_frame)
1270 spin_unlock_irqrestore(&ehci->lock, flags);
1271 itd = dma_pool_alloc(ehci->itd_pool, mem_flags,
1273 spin_lock_irqsave(&ehci->lock, flags);
1276 spin_unlock_irqrestore(&ehci->lock, flags);
1286 spin_unlock_irqrestore(&ehci->lock, flags);
1296 static void reserve_release_iso_bandwidth(struct ehci_hcd *ehci,
1311 bandwidth_dbg(ehci, sign, "iso", &stream->ps);
1322 ehci->bandwidth[i] += usecs;
1334 ehci->bandwidth[i+j] += usecs;
1336 ehci->bandwidth[i+j] += c_usecs;
1354 struct ehci_hcd *ehci,
1362 usecs = ehci->uframe_periodic_max - stream->ps.usecs;
1366 if (ehci->bandwidth[uframe] > usecs)
1374 struct ehci_hcd *ehci,
1403 if (!tt_available(ehci, &stream->ps, tt, frame, uf))
1409 if (!tt_no_collision(ehci, stream->ps.bw_period,
1420 max_used = ehci->uframe_periodic_max - stream->ps.usecs;
1422 if (ehci->bandwidth[uf] > max_used)
1428 max_used = ehci->uframe_periodic_max -
1435 if (ehci->bandwidth[uf+i] > max_used)
1444 stream->splits = cpu_to_hc32(ehci, stream->ps.cs_mask);
1453 * Also avoid queue depths of less than ehci's worst irq latency (affected
1461 struct ehci_hcd *ehci,
1469 unsigned mod = ehci->periodic_size << 3;
1481 ehci_to_hcd(ehci), urb->ep))) {
1492 compute_tt_budget(ehci->tt_budget, tt);
1494 start = ((-(++ehci->random_frame)) << 3) & (period - 1);
1507 if (itd_slot_ok(ehci, stream, start))
1512 if (sitd_slot_ok(ehci, stream, start,
1520 ehci_dbg(ehci, "iso sched full %p", urb);
1529 reserve_release_iso_bandwidth(ehci, stream, 1);
1541 now = ehci_read_frame_index(ehci) & (mod - 1);
1544 if (ehci->i_thresh)
1545 next = now + ehci->i_thresh; /* uframe cache */
1550 if (ehci->isoc_count == 0)
1551 ehci->last_iso_frame = now >> 3;
1554 * Use ehci->last_iso_frame as the base. There can't be any
1557 base = ehci->last_iso_frame << 3;
1574 ehci_dbg(ehci, "request %p would overflow (%u-%u < %u mod %u)\n",
1603 ehci_dbg(ehci, "iso underrun %p (%u+%u < %u) [%u]\n",
1630 ehci_dbg(ehci, "request %p would overflow (%u+%u >= %u)\n",
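All of iso_stream_schedule()'s arithmetic is done in microframe units modulo the schedule length (ehci->periodic_size << 3), so "is this slot still ahead of us?" becomes a masked subtraction. A sketch of that idiom, assuming the common 1024-frame schedule (the real size is configurable):

#define PERIODIC_FRAMES 1024u
#define MOD_UF (PERIODIC_FRAMES << 3)   /* schedule length in uframes */

/* Distance from b forward to a, modulo the schedule length. */
static unsigned uf_distance(unsigned a, unsigned b)
{
        return (a - b) & (MOD_UF - 1);
}

/* e.g. `start` lies within the window [base, limit] exactly when: */
static int in_window(unsigned start, unsigned base, unsigned limit)
{
        return uf_distance(start, base) <= uf_distance(limit, base);
}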
1654 itd_init(struct ehci_hcd *ehci, struct ehci_iso_stream *stream,
1660 itd->hw_next = EHCI_LIST_END(ehci);
1673 struct ehci_hcd *ehci,
1689 itd->hw_transaction[uframe] |= cpu_to_hc32(ehci, pg << 12);
1690 itd->hw_bufp[pg] |= cpu_to_hc32(ehci, uf->bufp & ~(u32)0);
1691 itd->hw_bufp_hi[pg] |= cpu_to_hc32(ehci, (u32)(uf->bufp >> 32));
1698 itd->hw_bufp[pg] |= cpu_to_hc32(ehci, bufp & ~(u32)0);
1699 itd->hw_bufp_hi[pg] |= cpu_to_hc32(ehci, (u32)(bufp >> 32));
1704 itd_link(struct ehci_hcd *ehci, unsigned frame, struct ehci_itd *itd)
1706 union ehci_shadow *prev = &ehci->pshadow[frame];
1707 __hc32 *hw_p = &ehci->periodic[frame];
1713 type = Q_NEXT_TYPE(ehci, *hw_p);
1714 if (type == cpu_to_hc32(ehci, Q_TYPE_QH))
1716 prev = periodic_next_shadow(ehci, prev, type);
1717 hw_p = shadow_next_periodic(ehci, &here, type);
1726 *hw_p = cpu_to_hc32(ehci, itd->itd_dma | Q_TYPE_ITD);
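itd_link() preserves the type ordering the controller expects within a frame: the walk skips iso entries already queued there and stops at the first QH, so the new iTD lands after other iso descriptors but in front of every QH. Simplified to a flag instead of link-word tags:

struct elem {
        struct elem *next;
        int          is_qh;     /* 1 for a QH, 0 for an iTD/siTD */
};

static void link_itd(struct elem **frame_head, struct elem *itd)
{
        struct elem **prev = frame_head;

        while (*prev && !(*prev)->is_qh)        /* skip other iso entries */
                prev = &(*prev)->next;
        itd->next = *prev;                      /* insert before the QHs */
        *prev = itd;
}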
1731 struct ehci_hcd *ehci,
1745 ehci_to_hcd(ehci)->self.bandwidth_allocated
1748 if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
1749 if (ehci->amd_pll_fix == 1)
1753 ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs++;
1769 itd_init(ehci, stream, itd);
1775 itd_patch(ehci, itd, iso_sched, packet, uframe);
1784 itd_link(ehci, frame & (ehci->periodic_size - 1), itd);
1794 ++ehci->isoc_count;
1795 enable_periodic(ehci);
1810 static bool itd_complete(struct ehci_hcd *ehci, struct ehci_itd *itd)
1827 t = hc32_to_cpup(ehci, &itd->hw_transaction[uframe]);
1868 ehci_urb_done(ehci, urb, 0);
1872 --ehci->isoc_count;
1873 disable_periodic(ehci);
1875 ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--;
1876 if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
1877 if (ehci->amd_pll_fix == 1)
1882 ehci_to_hcd(ehci)->self.bandwidth_allocated
1894 &ehci->cached_itd_list);
1895 start_free_itds(ehci);
1903 static int itd_submit(struct ehci_hcd *ehci, struct urb *urb,
1911 stream = iso_stream_find(ehci, urb);
1913 ehci_dbg(ehci, "can't get iso stream\n");
1917 ehci_dbg(ehci, "can't change iso interval %d --> %d\n",
1923 ehci_dbg(ehci,
1934 status = itd_urb_transaction(stream, ehci, urb, mem_flags);
1936 ehci_dbg(ehci, "can't init itds\n");
1941 spin_lock_irqsave(&ehci->lock, flags);
1942 if (unlikely(!HCD_HW_ACCESSIBLE(ehci_to_hcd(ehci)))) {
1946 status = usb_hcd_link_urb_to_ep(ehci_to_hcd(ehci), urb);
1949 status = iso_stream_schedule(ehci, urb, stream);
1951 itd_link_urb(ehci, urb, ehci->periodic_size << 3, stream);
1954 ehci_urb_done(ehci, urb, 0);
1956 usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
1959 spin_unlock_irqrestore(&ehci->lock, flags);
1973 struct ehci_hcd *ehci,
2002 packet->transaction = cpu_to_hc32(ehci, trans);
2023 struct ehci_hcd *ehci,
2038 sitd_sched_init(ehci, iso_sched, stream, urb);
2041 spin_lock_irqsave(&ehci->lock, flags);
2056 if (sitd->frame == ehci->now_frame)
2062 spin_unlock_irqrestore(&ehci->lock, flags);
2063 sitd = dma_pool_alloc(ehci->sitd_pool, mem_flags,
2065 spin_lock_irqsave(&ehci->lock, flags);
2068 spin_unlock_irqrestore(&ehci->lock, flags);
2083 spin_unlock_irqrestore(&ehci->lock, flags);
2091 struct ehci_hcd *ehci,
2101 sitd->hw_next = EHCI_LIST_END(ehci);
2105 sitd->hw_backpointer = EHCI_LIST_END(ehci);
2108 sitd->hw_buf[0] = cpu_to_hc32(ehci, bufp);
2109 sitd->hw_buf_hi[0] = cpu_to_hc32(ehci, bufp >> 32);
2111 sitd->hw_buf[1] = cpu_to_hc32(ehci, uf->buf1);
2114 sitd->hw_buf_hi[1] = cpu_to_hc32(ehci, bufp >> 32);
2119 sitd_link(struct ehci_hcd *ehci, unsigned frame, struct ehci_sitd *sitd)
2122 sitd->sitd_next = ehci->pshadow[frame];
2123 sitd->hw_next = ehci->periodic[frame];
2124 ehci->pshadow[frame].sitd = sitd;
2127 ehci->periodic[frame] = cpu_to_hc32(ehci, sitd->sitd_dma | Q_TYPE_SITD);
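sitd_link(), by contrast, is a plain push at the head of the frame's list: the new siTD takes over the frame slot and points at whatever was first, with the hardware word written last so the controller never sees a half-linked entry. Sketch with simplified types:

struct selem {
        struct selem *next;
        unsigned      hw_next;
        unsigned      dma;      /* this descriptor's bus address */
};

static void link_sitd(struct selem **head, unsigned *hw_head,
                      struct selem *sitd)
{
        sitd->next = *head;             /* software link */
        sitd->hw_next = *hw_head;
        *head = sitd;
        *hw_head = sitd->dma | (2 << 1); /* Q_TYPE_SITD tag, written last */
}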
2132 struct ehci_hcd *ehci,
2147 ehci_to_hcd(ehci)->self.bandwidth_allocated
2150 if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
2151 if (ehci->amd_pll_fix == 1)
2155 ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs++;
2173 sitd_patch(ehci, stream, sitd, sched, packet);
2174 sitd_link(ehci, (next_uframe >> 3) & (ehci->periodic_size - 1),
2185 ++ehci->isoc_count;
2186 enable_periodic(ehci);
2204 static bool sitd_complete(struct ehci_hcd *ehci, struct ehci_sitd *sitd)
2215 t = hc32_to_cpup(ehci, &sitd->hw_results);
2248 ehci_urb_done(ehci, urb, 0);
2252 --ehci->isoc_count;
2253 disable_periodic(ehci);
2255 ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--;
2256 if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
2257 if (ehci->amd_pll_fix == 1)
2262 ehci_to_hcd(ehci)->self.bandwidth_allocated
2274 &ehci->cached_sitd_list);
2275 start_free_itds(ehci);
2282 static int sitd_submit(struct ehci_hcd *ehci, struct urb *urb,
2290 stream = iso_stream_find(ehci, urb);
2292 ehci_dbg(ehci, "can't get iso stream\n");
2296 ehci_dbg(ehci, "can't change iso interval %d --> %d\n",
2302 ehci_dbg(ehci,
2311 status = sitd_urb_transaction(stream, ehci, urb, mem_flags);
2313 ehci_dbg(ehci, "can't init sitds\n");
2318 spin_lock_irqsave(&ehci->lock, flags);
2319 if (unlikely(!HCD_HW_ACCESSIBLE(ehci_to_hcd(ehci)))) {
2323 status = usb_hcd_link_urb_to_ep(ehci_to_hcd(ehci), urb);
2326 status = iso_stream_schedule(ehci, urb, stream);
2328 sitd_link_urb(ehci, urb, ehci->periodic_size << 3, stream);
2331 ehci_urb_done(ehci, urb, 0);
2333 usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
2336 spin_unlock_irqrestore(&ehci->lock, flags);
2343 static void scan_isoc(struct ehci_hcd *ehci)
2346 unsigned fmask = ehci->periodic_size - 1;
2356 if (ehci->rh_state >= EHCI_RH_RUNNING) {
2357 uf = ehci_read_frame_index(ehci);
2361 now_frame = (ehci->last_iso_frame - 1) & fmask;
2364 ehci->now_frame = now_frame;
2366 frame = ehci->last_iso_frame;
2370 q_p = &ehci->pshadow[frame];
2371 hw_p = &ehci->periodic[frame];
2373 type = Q_NEXT_TYPE(ehci, *hw_p);
2377 switch (hc32_to_cpu(ehci, type)) {
2389 ITD_ACTIVE(ehci))
2395 type = Q_NEXT_TYPE(ehci,
2409 if (!ehci->use_dummy_qh ||
2410 q.itd->hw_next != EHCI_LIST_END(ehci))
2413 *hw_p = cpu_to_hc32(ehci, ehci->dummy->qh_dma);
2414 type = Q_NEXT_TYPE(ehci, q.itd->hw_next);
2416 modified = itd_complete(ehci, q.itd);
2429 && (q.sitd->hw_results & SITD_ACTIVE(ehci))) {
2433 type = Q_NEXT_TYPE(ehci, q.sitd->hw_next);
2444 if (!ehci->use_dummy_qh ||
2445 q.sitd->hw_next != EHCI_LIST_END(ehci))
2448 *hw_p = cpu_to_hc32(ehci, ehci->dummy->qh_dma);
2449 type = Q_NEXT_TYPE(ehci, q.sitd->hw_next);
2451 modified = sitd_complete(ehci, q.sitd);
2455 ehci_dbg(ehci, "corrupt type %d frame %d shadow %p\n",
2467 if (unlikely(modified && ehci->isoc_count > 0))
2476 ehci->last_iso_frame = frame;
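scan_isoc() walks frames from ehci->last_iso_frame up to the frame the controller is currently working on, completing every iTD/siTD whose time has passed; because a completion can drop ehci->lock and rearrange the list, a modified frame is rescanned from the top (the `modified` check above). The outer walk, schematically:

static void scan_frames(unsigned last_frame, unsigned now_frame,
                        unsigned fmask,
                        int (*scan_one)(unsigned frame)) /* 1 = rescan */
{
        unsigned frame = last_frame;

        for (;;) {
                while (scan_one(frame))
                        ;               /* rescan after modifications */
                if (frame == now_frame)
                        break;
                frame = (frame + 1) & fmask;    /* wrap at schedule end */
        }
}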