Lines Matching refs:qh

28 * @periodic: host pointer to qh/itd/sitd
37 return &periodic->qh->qh_next;
53 /* our ehci_shadow.qh is actually software part */
55 return &periodic->qh->hw->hw_next;
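
These hits are evidently from the periodic-schedule half of the Linux EHCI host driver, drivers/usb/host/ehci-sched.c; the leading number on each hit is the line within that file. Lines 28-55 come from the shadow-list helpers: every element on a periodic frame list carries two "next" pointers, the DMA-visible hw->hw_next that the host controller follows, and a software "shadow" (qh_next, a tagged union) that the driver walks. A minimal standalone sketch of that split, with the structs reduced to the two pointers (field names follow the driver, everything else is illustrative):

    #include <stdio.h>

    struct ehci_qh;
    union ehci_shadow {
        struct ehci_qh *qh;     /* Q_TYPE_QH arm; itd/sitd/fstn arms elided */
        void *ptr;              /* generic "end of list?" test */
    };

    struct ehci_qh {
        unsigned hw_next;               /* stand-in for the HC-visible link */
        union ehci_shadow qh_next;      /* software copy the HC never reads */
    };

    /* periodic_next_shadow(), reduced to its qh arm (line 37 above) */
    static union ehci_shadow *next_shadow(union ehci_shadow *periodic)
    {
        return &periodic->qh->qh_next;
    }

    int main(void)
    {
        struct ehci_qh a = {0}, b = {0};
        union ehci_shadow head = { .qh = &a };

        a.qh_next.qh = &b;                              /* software list: a -> b */
        printf("%d\n", next_shadow(&head)->qh == &b);   /* prints 1 */
        return 0;
    }
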
207 struct ehci_qh *qh, int sign)
211 int usecs = qh->ps.usecs;
212 int c_usecs = qh->ps.c_usecs;
213 int tt_usecs = qh->ps.tt_usecs;
216 if (qh->ps.phase == NO_FRAME) /* Bandwidth wasn't reserved */
218 start_uf = qh->ps.bw_phase << 3;
220 bandwidth_dbg(ehci, sign, "intr", &qh->ps);
229 for (i = start_uf + qh->ps.phase_uf; i < EHCI_BANDWIDTH_SIZE;
230 i += qh->ps.bw_uperiod)
234 if (qh->ps.c_usecs) {
237 i += qh->ps.bw_uperiod) {
239 if (qh->ps.cs_mask & m)
247 tt = find_tt(qh->ps.udev);
249 list_add_tail(&qh->ps.ps_list, &tt->ps_list);
251 list_del(&qh->ps.ps_list);
254 i += qh->ps.bw_period)
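
Lines 207-254 are reserve_release_intr_bandwidth(): with sign = +1 (reserve) or -1 (release), it adjusts a per-microframe load table, charging usecs every bw_uperiod microframes starting at the qh's reserved phase, c_usecs at the microframes flagged in cs_mask (234-239), and TT time once per bw_period frame (247-254). A standalone sketch of the first loop only; the table size and the <<3 frame-to-microframe shift mirror the driver, the rest is illustrative:

    #include <stdio.h>

    #define BW_SIZE 64      /* microframes tracked; EHCI_BANDWIDTH_SIZE in the driver */

    static unsigned char bandwidth[BW_SIZE];    /* allocated us per microframe */

    static void reserve_intr(int sign, unsigned bw_phase, unsigned phase_uf,
                             unsigned bw_uperiod, int usecs)
    {
        unsigned start_uf = bw_phase << 3;      /* frame phase -> microframe index */
        unsigned i;

        for (i = start_uf + phase_uf; i < BW_SIZE; i += bw_uperiod)
            bandwidth[i] += sign * usecs;
    }

    int main(void)
    {
        /* say, an 8-frame-period endpoint using 21 us in microframe 2 of
         * frame 1: bw_uperiod = 8 << 3 = 64 */
        reserve_intr(1, 1, 2, 64, 21);
        printf("uframe 10: %u us\n", bandwidth[10]);    /* (1 << 3) + 2 = 10 */
        reserve_intr(-1, 1, 2, 64, 21);                 /* release */
        printf("uframe 10: %u us\n", bandwidth[10]);    /* back to 0 */
        return 0;
    }
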
436 hw = here.qh->hw;
437 if (same_tt(dev, here.qh->ps.udev)) {
448 here = here.qh->qh_next;
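
Lines 436-448 walk one frame's shadow list during the TT-collision check, comparing each linked qh's device against the one being scheduled via same_tt(), whose body is not among the hits. Plausibly it only asks whether the two devices sit behind the same transaction translator, comparing the port as well when the hub has one TT per downstream port; a hedged standalone sketch (both structs are stand-ins, not the real USB core types):

    #include <stdbool.h>
    #include <stdio.h>

    struct usb_tt { bool multi; };                          /* stand-in */
    struct usb_device { struct usb_tt *tt; int ttport; };   /* stand-in */

    /* true iff dev1 and dev2 share transaction-translator bandwidth */
    static bool same_tt(const struct usb_device *d1, const struct usb_device *d2)
    {
        if (!d1->tt || d1->tt != d2->tt)
            return false;
        return d1->tt->multi ? d1->ttport == d2->ttport : true;
    }

    int main(void)
    {
        struct usb_tt hub_tt = { .multi = true };
        struct usb_device a = { &hub_tt, 1 }, b = { &hub_tt, 2 };

        printf("%d\n", same_tt(&a, &b));    /* 0: multi-TT hub, different ports */
        return 0;
    }
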
511 * this just links in a qh; caller guarantees uframe masks are set right.
514 static void qh_link_periodic(struct ehci_hcd *ehci, struct ehci_qh *qh)
517 unsigned period = qh->ps.period;
519 dev_dbg(&qh->ps.udev->dev,
520 "link qh%d-%04x/%p start %d [%d/%d us]\n",
521 period, hc32_to_cpup(ehci, &qh->hw->hw_info2)
523 qh, qh->ps.phase, qh->ps.usecs, qh->ps.c_usecs);
529 for (i = qh->ps.phase; i < ehci->periodic_size; i += period) {
548 while (here.ptr && qh != here.qh) {
549 if (qh->ps.period > here.qh->ps.period)
551 prev = &here.qh->qh_next;
552 hw_p = &here.qh->hw->hw_next;
555 /* link in this qh, unless some earlier pass did that */
556 if (qh != here.qh) {
557 qh->qh_next = here;
558 if (here.qh)
559 qh->hw->hw_next = *hw_p;
561 prev->qh = qh;
562 *hw_p = QH_NEXT(ehci, qh->qh_dma);
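
Lines 529-562 are the core of qh_link_periodic(): for every frame this qh occupies, the walk at 548-552 keeps the per-frame list sorted by descending period (longer-polling qhs stay in front, shorter ones hang off them), and the splice at 556-562 sets the new qh's own next pointers before it becomes reachable through *hw_p, so the controller can never follow a half-built link (the driver puts a write barrier between the two stores). The same publish order on a plain singly linked list, with illustrative types:

    #include <stdio.h>

    struct node { int period; struct node *next; };

    static void insert_sorted_desc(struct node **head, struct node *qh)
    {
        struct node **prev = head;

        /* longer periods stay in front, as in the walk at 548-552 */
        while (*prev && (*prev)->period >= qh->period)
            prev = &(*prev)->next;

        qh->next = *prev;   /* 1: new node points at its successor ... */
        *prev = qh;         /* 2: ... and only then becomes reachable */
    }

    int main(void)
    {
        struct node a = { 8, NULL }, b = { 2, NULL }, *head = &a;

        insert_sorted_desc(&head, &b);
        printf("%d -> %d\n", head->period, head->next->period);    /* 8 -> 2 */
        return 0;
    }
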
565 qh->qh_state = QH_STATE_LINKED;
566 qh->xacterrs = 0;
567 qh->unlink_reason = 0;
569 /* update per-qh bandwidth for debugfs */
570 ehci_to_hcd(ehci)->self.bandwidth_allocated += qh->ps.bw_period
571 ? ((qh->ps.usecs + qh->ps.c_usecs) / qh->ps.bw_period)
572 : (qh->ps.usecs * 8);
574 list_add(&qh->intr_node, &ehci->intr_qh_list);
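
The debugfs accounting at 570-572 keeps an average load per frame: a qh with bw_period = 4 frames, usecs = 30 and c_usecs = 9 contributes (30 + 9) / 4 = 9 us per frame (integer division), while bw_period = 0 means the qh runs in every microframe (see the comment at 872 below), hence usecs * 8 per frame.
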
581 static void qh_unlink_periodic(struct ehci_hcd *ehci, struct ehci_qh *qh)
587 * If qh is for a low/full-speed device, simply unlinking it
602 period = qh->ps.period ? : 1;
604 for (i = qh->ps.phase; i < ehci->periodic_size; i += period)
605 periodic_unlink(ehci, i, qh);
607 /* update per-qh bandwidth for debugfs */
608 ehci_to_hcd(ehci)->self.bandwidth_allocated -= qh->ps.bw_period
609 ? ((qh->ps.usecs + qh->ps.c_usecs) / qh->ps.bw_period)
610 : (qh->ps.usecs * 8);
612 dev_dbg(&qh->ps.udev->dev,
613 "unlink qh%d-%04x/%p start %d [%d/%d us]\n",
614 qh->ps.period,
615 hc32_to_cpup(ehci, &qh->hw->hw_info2) & (QH_CMASK | QH_SMASK),
616 qh, qh->ps.phase, qh->ps.usecs, qh->ps.c_usecs);
618 /* qh->qh_next still "live" to HC */
619 qh->qh_state = QH_STATE_UNLINK;
620 qh->qh_next.ptr = NULL;
622 if (ehci->qh_scan_next == qh)
623 ehci->qh_scan_next = list_entry(qh->intr_node.next,
625 list_del(&qh->intr_node);
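
qh_unlink_periodic() (581-625) undoes all of the above: the qh is pulled out of every frame it occupies, the bandwidth average is subtracted, and its state becomes QH_STATE_UNLINK, while qh->qh_next is deliberately left intact (618) because the hardware may still be traversing through the qh. The fixup at 622-623 keeps scan_intr()'s resume cursor valid when the element it would visit next is the one being removed; the same idiom on a plain list (illustrative types):

    #include <stdio.h>

    struct node { int id; struct node *next; };

    static struct node *scan_next;      /* resume point of an interrupted scan */

    static void unlink_node(struct node **head, struct node *n)
    {
        struct node **pp;

        if (scan_next == n)     /* advance the cursor first, as at 622-623 */
            scan_next = n->next;
        for (pp = head; *pp; pp = &(*pp)->next) {
            if (*pp == n) {
                *pp = n->next;
                break;
            }
        }
    }

    int main(void)
    {
        struct node b = { 2, NULL }, a = { 1, &b }, *head = &a;

        scan_next = &a;
        unlink_node(&head, &a);
        printf("cursor now at %d\n", scan_next->id);    /* 2 */
        return 0;
    }
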
628 static void cancel_unlink_wait_intr(struct ehci_hcd *ehci, struct ehci_qh *qh)
630 if (qh->qh_state != QH_STATE_LINKED ||
631 list_empty(&qh->unlink_node))
634 list_del_init(&qh->unlink_node);
642 static void start_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh)
645 if (qh->qh_state != QH_STATE_LINKED)
648 /* if the qh is waiting for unlink, cancel it now */
649 cancel_unlink_wait_intr(ehci, qh);
651 qh_unlink_periodic(ehci, qh);
661 qh->unlink_cycle = ehci->intr_unlink_cycle;
664 list_add_tail(&qh->unlink_node, &ehci->intr_unlink);
670 else if (ehci->intr_unlink.next == &qh->unlink_node) {
677 * It is common only one intr URB is scheduled on one qh, and
679 * delay to avoid unlink qh too early.
682 struct ehci_qh *qh)
684 qh->unlink_cycle = ehci->intr_unlink_wait_cycle;
687 list_add_tail(&qh->unlink_node, &ehci->intr_unlink_wait);
691 else if (ehci->intr_unlink_wait.next == &qh->unlink_node) {
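
start_unlink_intr_wait() (682-691) is the deferred path the comment at 677-679 describes: rather than unlinking the moment an interrupt qh goes empty, the qh is stamped with the current wait cycle and parked on ehci->intr_unlink_wait, so an interrupt URB resubmitted right after completion finds the qh still linked and reuses it; cancel_unlink_wait_intr() (628-634) is the matching escape hatch that pulls the qh back off the wait list when that happens.
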
697 static void end_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh)
699 struct ehci_qh_hw *hw = qh->hw;
702 qh->qh_state = QH_STATE_IDLE;
705 if (!list_empty(&qh->qtd_list))
706 qh_completions(ehci, qh);
709 if (!list_empty(&qh->qtd_list) && ehci->rh_state == EHCI_RH_RUNNING) {
710 rc = qh_schedule(ehci, qh);
712 qh_refresh(ehci, qh);
713 qh_link_periodic(ehci, qh);
723 ehci_err(ehci, "can't reschedule qh %p, err %d\n",
724 qh, rc);
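
end_unlink_intr() (697-724) runs once the hardware has demonstrably let go of the qh: the state drops to QH_STATE_IDLE, remaining qtds are completed (705-706), and if work is still queued the qh is rescheduled and relinked (709-713). The bandwidth reservation survives the unlink (that is the "reused qh" fast path at 840-841 below), so relinking normally lands back in the same slots; if qh_schedule() fails anyway, the ehci_err at 723-724 reports it.
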
765 struct ehci_qh *qh,
773 if (qh->ps.c_usecs && uframe >= 6) /* FSTN territory? */
776 if (!check_period(ehci, frame, uframe, qh->ps.bw_uperiod, qh->ps.usecs))
778 if (!qh->ps.c_usecs) {
785 if (tt_available(ehci, &qh->ps, tt, frame, uframe)) {
791 qh->ps.bw_uperiod, qh->ps.c_usecs))
808 mask = 0x03 << (uframe + qh->gap_uf);
812 if (tt_no_collision(ehci, qh->ps.bw_period, qh->ps.udev, frame, mask)) {
813 if (!check_period(ehci, frame, uframe + qh->gap_uf + 1,
814 qh->ps.bw_uperiod, qh->ps.c_usecs))
816 if (!check_period(ehci, frame, uframe + qh->gap_uf,
817 qh->ps.bw_uperiod, qh->ps.c_usecs))
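
check_intr_schedule() (765-817) tests whether a candidate (frame, uframe) slot fits. For a full/low-speed endpoint behind a TT, the start-split sits at uframe and the complete-splits follow qh->gap_uf later, so uframes 6 and 7 are refused up front (773), since the complete-splits would spill into the next frame (the "FSTN territory" remark). The c-mask at 808 flags two candidate microframes at once: with uframe = 1 and gap_uf = 2, mask = 0x03 << 3 = 0x18, i.e. microframes 3 and 4, exactly the two slots the check_period() calls at 813-817 then verify can absorb c_usecs.
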
829 static int qh_schedule(struct ehci_hcd *ehci, struct ehci_qh *qh)
834 struct ehci_qh_hw *hw = qh->hw;
840 if (qh->ps.phase != NO_FRAME) {
841 ehci_dbg(ehci, "reused qh %p schedule\n", qh);
847 tt = find_tt(qh->ps.udev);
858 if (qh->ps.bw_period) {
862 for (i = qh->ps.bw_period; i > 0; --i) {
863 frame = ++ehci->random_frame & (qh->ps.bw_period - 1);
866 frame, uframe, qh, &c_mask, tt);
872 /* qh->ps.bw_period == 0 means every uframe */
874 status = check_intr_schedule(ehci, 0, 0, qh, &c_mask, tt);
880 qh->ps.phase = (qh->ps.period ? ehci->random_frame &
881 (qh->ps.period - 1) : 0);
882 qh->ps.bw_phase = qh->ps.phase & (qh->ps.bw_period - 1);
883 qh->ps.phase_uf = uframe;
884 qh->ps.cs_mask = qh->ps.period ?
890 hw->hw_info2 |= cpu_to_hc32(ehci, qh->ps.cs_mask);
891 reserve_release_intr_bandwidth(ehci, qh, 1);
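
qh_schedule() (829-891) picks the phase. A previously scheduled qh keeps its reservation and exits early (840-841). Otherwise, bw_period is a power of two (both maskings below rely on that), so frame = ++ehci->random_frame & (qh->ps.bw_period - 1) at 863 is a cheap modulo: with bw_period = 8 the mask is 7, and random_frame = 29 lands the qh in frame 29 & 7 = 5. At most bw_period candidate frames are probed (862); bw_period = 0 means an every-microframe qh, checked once at (0, 0) (872-874). On success the phase, bw_phase, phase_uf and the S/C masks are committed (880-890) and the table reservation is taken with sign +1 (891).
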
905 struct ehci_qh *qh;
922 /* get qh and force any scheduling errors */
924 qh = qh_append_tds(ehci, urb, &empty, epnum, &urb->ep->hcpriv);
925 if (qh == NULL) {
929 if (qh->qh_state == QH_STATE_IDLE) {
930 status = qh_schedule(ehci, qh);
935 /* then queue the urb's tds to the qh */
936 qh = qh_append_tds(ehci, urb, qtd_list, epnum, &urb->ep->hcpriv);
937 BUG_ON(qh == NULL);
940 if (qh->qh_state == QH_STATE_IDLE) {
941 qh_refresh(ehci, qh);
942 qh_link_periodic(ehci, qh);
944 /* cancel unlink wait for the qh */
945 cancel_unlink_wait_intr(ehci, qh);
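
intr_submit() (905-945) calls qh_append_tds() twice on purpose: first with an empty list (924), just to materialize the qh and force any scheduling error before the URB's qtds are committed, then with the real qtd_list (936), by which point the qh must already exist, hence the BUG_ON at 937. An idle qh gets refreshed and linked (940-942); resubmission onto a still-linked qh instead cancels any pending deferred unlink (945).
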
964 struct ehci_qh *qh;
966 list_for_each_entry_safe(qh, ehci->qh_scan_next, &ehci->intr_qh_list,
969 /* clean any finished work for this qh */
970 if (!list_empty(&qh->qtd_list)) {
976 * always holds the next qh to scan; if the next qh
980 temp = qh_completions(ehci, qh);
982 start_unlink_intr(ehci, qh);
983 else if (unlikely(list_empty(&qh->qtd_list) &&
984 qh->qh_state == QH_STATE_LINKED))
985 start_unlink_intr_wait(ehci, qh);
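
scan_intr() (964-985) ties the lifecycle together: it iterates with ehci->qh_scan_next as a spare cursor (the pointer qh_unlink_periodic() patches at 622-623), completes finished qtds (980), and then either unlinks the qh immediately if qh_completions() reported trouble (982) or, for a linked qh that merely went empty, parks it on the deferred-unlink wait list (983-985).
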