Lines matching refs: qh — cross-reference hits for interrupt-QH handling in the Linux EHCI host driver's periodic scheduler (drivers/usb/host/ehci-sched.c). The leading number on each line is the source line in that file.
28 * @periodic: host pointer to qh/itd/sitd
37 return &periodic->qh->qh_next;
53 /* our ehci_shadow.qh is actually the software part */
55 return &periodic->qh->hw->hw_next;
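
The hits above are from periodic_next_shadow(): a periodic frame list can hold QHs, iTDs, or siTDs, so the walk goes through a tagged union of software "shadow" pointers, while the hardware link (hw_next) lives in the qh's separately allocated hw block. Below is a minimal, self-contained sketch of that tagged-union traversal; all names here (model_qh, next_shadow, and so on) are invented stand-ins, not the driver's types.

    #include <stdio.h>

    struct model_qh;
    struct model_itd;

    union shadow {                    /* models union ehci_shadow */
        struct model_qh  *qh;
        struct model_itd *itd;
        void             *ptr;        /* generic view used for NULL tests */
    };

    struct model_qh  { union shadow qh_next;  };   /* software next links */
    struct model_itd { union shadow itd_next; };

    enum tag { TAG_QH, TAG_ITD };     /* models Q_NEXT_TYPE() results */

    /* Like periodic_next_shadow(): given an element and its type tag,
     * return the address of its software next pointer. */
    static union shadow *next_shadow(union shadow *here, enum tag tag)
    {
        switch (tag) {
        case TAG_QH:
            return &here->qh->qh_next;
        default:
            return &here->itd->itd_next;
        }
    }

    int main(void)
    {
        struct model_qh  a = { { .ptr = NULL } };
        struct model_itd b = { { .qh  = &a   } };
        union shadow head = { .itd = &b };

        union shadow *p = next_shadow(&head, TAG_ITD);  /* iTD -> QH */
        printf("after iTD: %p (&a is %p)\n", p->ptr, (void *)&a);
        p = next_shadow(p, TAG_QH);                     /* QH -> end */
        printf("after QH: %p (end of frame list)\n", p->ptr);
        return 0;
    }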
207 struct ehci_qh *qh, int sign)
211 int usecs = qh->ps.usecs;
212 int c_usecs = qh->ps.c_usecs;
213 int tt_usecs = qh->ps.tt_usecs;
216 if (qh->ps.phase == NO_FRAME) /* Bandwidth wasn't reserved */
218 start_uf = qh->ps.bw_phase << 3;
220 bandwidth_dbg(ehci, sign, "intr", &qh->ps);
229 for (i = start_uf + qh->ps.phase_uf; i < EHCI_BANDWIDTH_SIZE;
230 i += qh->ps.bw_uperiod)
234 if (qh->ps.c_usecs) {
237 i += qh->ps.bw_uperiod) {
239 if (qh->ps.cs_mask & m)
253 tt = find_tt(qh->ps.udev);
255 list_add_tail(&qh->ps.ps_list, &tt->ps_list);
257 list_del(&qh->ps.ps_list);
260 i += qh->ps.bw_period)
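
Lines 207-260 are reserve_release_intr_bandwidth(): with sign = +1 it charges the endpoint's microframe cost into the driver's bandwidth table, with sign = -1 it releases it; start-split time lands at phase_uf every bw_uperiod microframes, complete-split time wherever cs_mask has a bit set, and the qh's ps entry is added to or removed from its TT's list. A rough userspace model of the first (high-speed) loop, with an invented table size and names:

    #include <stdio.h>

    #define BW_UFRAMES (64 * 8)     /* hypothetical: 64 frames x 8 uframes */

    static unsigned bw[BW_UFRAMES]; /* usecs reserved in each microframe */

    /* Models the loop at lines 229-230: charge 'usecs' into every
     * microframe the endpoint occupies.  sign is +1 to reserve, -1 to
     * release, as in the driver.  (The c_usecs/cs_mask loop at lines
     * 234-239 follows the same pattern, testing one mask bit per uframe.) */
    static void reserve_intr(unsigned bw_phase, unsigned phase_uf,
                             unsigned bw_uperiod, unsigned usecs, int sign)
    {
        unsigned start_uf = bw_phase << 3;          /* frame -> uframe */
        unsigned i;

        for (i = start_uf + phase_uf; i < BW_UFRAMES; i += bw_uperiod)
            bw[i] += sign * usecs;
    }

    int main(void)
    {
        reserve_intr(2, 1, 32, 25, +1);  /* 25 us every 32 uframes */
        printf("uframe 17 carries %u us\n", bw[17]);   /* -> 25 */
        reserve_intr(2, 1, 32, 25, -1);  /* release the reservation */
        printf("uframe 17 carries %u us\n", bw[17]);   /* -> 0 */
        return 0;
    }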
442 hw = here.qh->hw;
443 if (same_tt(dev, here.qh->ps.udev)) {
454 here = here.qh->qh_next;
518 * this just links in a qh; caller guarantees uframe masks are set right.
521 static void qh_link_periodic(struct ehci_hcd *ehci, struct ehci_qh *qh)
524 unsigned period = qh->ps.period;
526 dev_dbg(&qh->ps.udev->dev,
527 "link qh%d-%04x/%p start %d [%d/%d us]\n",
528 period, hc32_to_cpup(ehci, &qh->hw->hw_info2)
530 qh, qh->ps.phase, qh->ps.usecs, qh->ps.c_usecs);
536 for (i = qh->ps.phase; i < ehci->periodic_size; i += period) {
555 while (here.ptr && qh != here.qh) {
556 if (qh->ps.period > here.qh->ps.period)
558 prev = &here.qh->qh_next;
559 hw_p = &here.qh->hw->hw_next;
562 /* link in this qh, unless some earlier pass did that */
563 if (qh != here.qh) {
564 qh->qh_next = here;
565 if (here.qh)
566 qh->hw->hw_next = *hw_p;
568 prev->qh = qh;
569 *hw_p = QH_NEXT(ehci, qh->qh_dma);
572 qh->qh_state = QH_STATE_LINKED;
573 qh->xacterrs = 0;
574 qh->unlink_reason = 0;
576 /* update per-qh bandwidth for debugfs */
577 ehci_to_hcd(ehci)->self.bandwidth_allocated += qh->ps.bw_period
578 ? ((qh->ps.usecs + qh->ps.c_usecs) / qh->ps.bw_period)
579 : (qh->ps.usecs * 8);
581 list_add(&qh->intr_node, &ehci->intr_qh_list);
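
Lines 518-581 are qh_link_periodic(): the qh is spliced into every frame of its period, each frame's list is kept sorted so longer-period qhs come first, and the controller sees exactly one visible update because the new qh's hw_next is copied from *hw_p before *hw_p itself is overwritten (lines 563-569). That publish-last insertion idiom, as a standalone sketch with invented types:

    #include <stdio.h>

    struct node {                     /* hypothetical stand-in for a qh */
        unsigned     period;          /* polling interval in frames */
        struct node *next;
    };

    /* Models qh_link_periodic()'s insertion: walk until the first entry
     * whose period is smaller than ours, point the new node at it FIRST,
     * then store into the predecessor slot.  In the driver that second
     * store (*hw_p = QH_NEXT(...)) is the one the controller can see. */
    static void link_sorted(struct node **head, struct node *qh)
    {
        struct node **prev = head;

        while (*prev && (*prev)->period >= qh->period)
            prev = &(*prev)->next;

        qh->next = *prev;             /* link to successor before publishing */
        *prev = qh;                   /* single visible update */
    }

    int main(void)
    {
        struct node a = { 8, NULL }, b = { 4, NULL }, c = { 16, NULL };
        struct node *head = NULL;

        link_sorted(&head, &a);
        link_sorted(&head, &b);
        link_sorted(&head, &c);

        for (struct node *n = head; n; n = n->next)
            printf("period %u\n", n->period);   /* 16, then 8, then 4 */
        return 0;
    }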
588 static void qh_unlink_periodic(struct ehci_hcd *ehci, struct ehci_qh *qh)
594 * If qh is for a low/full-speed device, simply unlinking it
609 period = qh->ps.period ? : 1;
611 for (i = qh->ps.phase; i < ehci->periodic_size; i += period)
612 periodic_unlink(ehci, i, qh);
614 /* update per-qh bandwidth for debugfs */
615 ehci_to_hcd(ehci)->self.bandwidth_allocated -= qh->ps.bw_period
616 ? ((qh->ps.usecs + qh->ps.c_usecs) / qh->ps.bw_period)
617 : (qh->ps.usecs * 8);
619 dev_dbg(&qh->ps.udev->dev,
620 "unlink qh%d-%04x/%p start %d [%d/%d us]\n",
621 qh->ps.period,
622 hc32_to_cpup(ehci, &qh->hw->hw_info2) & (QH_CMASK | QH_SMASK),
623 qh, qh->ps.phase, qh->ps.usecs, qh->ps.c_usecs);
625 /* qh->qh_next still "live" to HC */
626 qh->qh_state = QH_STATE_UNLINK;
627 qh->qh_next.ptr = NULL;
629 if (ehci->qh_scan_next == qh)
630 ehci->qh_scan_next = list_entry(qh->intr_node.next,
632 list_del(&qh->intr_node);
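
qh_unlink_periodic() (lines 588-632) is the inverse: visit every frame the qh occupies (a ps.period of 0 means every frame, hence the "?: 1" fallback at line 609), splice it out of each frame list, adjust the debugfs bandwidth counter, and nudge ehci->qh_scan_next off the qh if the scanner is parked there. The per-frame splice, modeled standalone (types and frame count invented):

    #include <stdio.h>

    struct node {                    /* hypothetical stand-in for a qh */
        struct node *next;
        int          id;
    };

    #define NFRAMES 8
    static struct node *frames[NFRAMES];  /* stands in for ehci->pshadow */

    /* Models the loop at lines 609-612: remove 'qh' from each frame list
     * it was linked into. */
    static void unlink_periodic(struct node *qh, unsigned phase, unsigned period)
    {
        unsigned i;

        if (!period)          /* period 0 means "appears in every frame" */
            period = 1;

        for (i = phase; i < NFRAMES; i += period) {
            struct node **prev = &frames[i];

            while (*prev && *prev != qh)
                prev = &(*prev)->next;
            if (*prev)
                *prev = qh->next;
        }
    }

    int main(void)
    {
        struct node fast = { NULL, 1 };   /* period 2: frames 0, 2, 4, 6 */
        struct node slow = { &fast, 2 };  /* frame 0 only, ahead of fast */
        unsigned i;

        for (i = 0; i < NFRAMES; i += 2)
            frames[i] = &fast;
        frames[0] = &slow;                /* slow links to fast in frame 0 */

        unlink_periodic(&fast, 0, 2);
        for (i = 0; i < NFRAMES; i++)
            printf("frame %u: %s\n", i, frames[i] ? "slow" : "empty");
        return 0;
    }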
635 static void cancel_unlink_wait_intr(struct ehci_hcd *ehci, struct ehci_qh *qh)
637 if (qh->qh_state != QH_STATE_LINKED ||
638 list_empty(&qh->unlink_node))
641 list_del_init(&qh->unlink_node);
649 static void start_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh)
652 if (qh->qh_state != QH_STATE_LINKED)
655 /* if the qh is waiting for unlink, cancel it now */
656 cancel_unlink_wait_intr(ehci, qh);
658 qh_unlink_periodic(ehci, qh);
668 qh->unlink_cycle = ehci->intr_unlink_cycle;
671 list_add_tail(&qh->unlink_node, &ehci->intr_unlink);
677 else if (ehci->intr_unlink.next == &qh->unlink_node) {
684 * It is common that only one intr URB is scheduled on one qh, and
686 * delay to avoid unlinking the qh too early.
689 struct ehci_qh *qh)
691 qh->unlink_cycle = ehci->intr_unlink_wait_cycle;
694 list_add_tail(&qh->unlink_node, &ehci->intr_unlink_wait);
698 else if (ehci->intr_unlink_wait.next == &qh->unlink_node) {
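
Lines 684-698 are start_unlink_intr_wait(): since an interrupt endpoint usually has a single URB in flight, an empty qh is not unlinked at once; it is stamped with the current unlink-wait cycle and parked on ehci->intr_unlink_wait, and cancel_unlink_wait_intr() (lines 635-641) simply takes it back off that list if a new URB arrives first. A toy model of that handshake (struct and field names are made up):

    #include <stdio.h>
    #include <stdbool.h>

    struct wqh {                        /* hypothetical qh stand-in */
        unsigned unlink_cycle;
        bool     on_wait_list;
    };

    static unsigned intr_unlink_wait_cycle;

    static void start_unlink_wait(struct wqh *qh)
    {
        qh->unlink_cycle = intr_unlink_wait_cycle;
        qh->on_wait_list = true;        /* list_add_tail in the driver */
    }

    static void cancel_unlink_wait(struct wqh *qh)
    {
        qh->on_wait_list = false;       /* list_del_init in the driver */
    }

    /* Timer tick: anything stamped in an earlier cycle really unlinks. */
    static bool should_unlink(const struct wqh *qh)
    {
        return qh->on_wait_list &&
               qh->unlink_cycle != intr_unlink_wait_cycle;
    }

    int main(void)
    {
        struct wqh qh = { 0, false };

        start_unlink_wait(&qh);         /* qtd_list just went empty   */
        cancel_unlink_wait(&qh);        /* ...but a new URB showed up */
        intr_unlink_wait_cycle++;
        printf("unlink now? %s\n", should_unlink(&qh) ? "yes" : "no");
        return 0;
    }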
704 static void end_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh)
706 struct ehci_qh_hw *hw = qh->hw;
709 qh->qh_state = QH_STATE_IDLE;
712 if (!list_empty(&qh->qtd_list))
713 qh_completions(ehci, qh);
716 if (!list_empty(&qh->qtd_list) && ehci->rh_state == EHCI_RH_RUNNING) {
717 rc = qh_schedule(ehci, qh);
719 qh_refresh(ehci, qh);
720 qh_link_periodic(ehci, qh);
730 ehci_err(ehci, "can't reschedule qh %p, err %d\n",
731 qh, rc);
772 struct ehci_qh *qh,
780 if (qh->ps.c_usecs && uframe >= 6) /* FSTN territory? */
783 if (!check_period(ehci, frame, uframe, qh->ps.bw_uperiod, qh->ps.usecs))
785 if (!qh->ps.c_usecs) {
792 if (tt_available(ehci, &qh->ps, tt, frame, uframe)) {
798 qh->ps.bw_uperiod, qh->ps.c_usecs))
815 mask = 0x03 << (uframe + qh->gap_uf);
819 if (tt_no_collision(ehci, qh->ps.bw_period, qh->ps.udev, frame, mask)) {
820 if (!check_period(ehci, frame, uframe + qh->gap_uf + 1,
821 qh->ps.bw_uperiod, qh->ps.c_usecs))
823 if (!check_period(ehci, frame, uframe + qh->gap_uf,
824 qh->ps.bw_uperiod, qh->ps.c_usecs))
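
Lines 772-824 are check_intr_schedule(): a candidate (frame, uframe) position is accepted only if adding the qh's usecs keeps every microframe it touches within budget (check_period), and for split transactions the complete-split microframes, offset by gap_uf, must fit as well. A standalone sketch of the budget test; the 100 us figure mirrors the driver's default uframe_periodic_max, everything else is invented:

    #include <stdio.h>
    #include <stdbool.h>

    #define BW_UFRAMES (64 * 8)
    #define UFRAME_BUDGET 100   /* default: 100 us of periodic traffic
                                   allowed per 125 us microframe */

    static unsigned bw[BW_UFRAMES]; /* usecs already reserved per uframe */

    /* Models check_period(): would adding 'usecs' at this phase overflow
     * any microframe the endpoint would touch? */
    static bool fits(unsigned frame, unsigned uframe,
                     unsigned bw_uperiod, unsigned usecs)
    {
        unsigned i;

        for (i = frame * 8 + uframe; i < BW_UFRAMES; i += bw_uperiod) {
            if (bw[i] + usecs > UFRAME_BUDGET)
                return false;
        }
        return true;
    }

    int main(void)
    {
        bw[9] = 90;             /* pretend uframe 9 is nearly full */
        printf("25 us at (1,1) every 64: %s\n",
               fits(1, 1, 64, 25) ? "fits" : "no");   /* 90+25 > 100 */
        printf("25 us at (1,2) every 64: %s\n",
               fits(1, 2, 64, 25) ? "fits" : "no");   /* fits */
        return 0;
    }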
836 static int qh_schedule(struct ehci_hcd *ehci, struct ehci_qh *qh)
841 struct ehci_qh_hw *hw = qh->hw;
847 if (qh->ps.phase != NO_FRAME) {
848 ehci_dbg(ehci, "reused qh %p schedule\n", qh);
854 tt = find_tt(qh->ps.udev);
865 if (qh->ps.bw_period) {
869 for (i = qh->ps.bw_period; i > 0; --i) {
870 frame = ++ehci->random_frame & (qh->ps.bw_period - 1);
873 frame, uframe, qh, &c_mask, tt);
879 /* qh->ps.bw_period == 0 means every uframe */
881 status = check_intr_schedule(ehci, 0, 0, qh, &c_mask, tt);
887 qh->ps.phase = (qh->ps.period ? ehci->random_frame &
888 (qh->ps.period - 1) : 0);
889 qh->ps.bw_phase = qh->ps.phase & (qh->ps.bw_period - 1);
890 qh->ps.phase_uf = uframe;
891 qh->ps.cs_mask = qh->ps.period ?
897 hw->hw_info2 |= cpu_to_hc32(ehci, qh->ps.cs_mask);
898 reserve_release_intr_bandwidth(ehci, qh, 1);
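
qh_schedule() (lines 836-898) picks the phase: a qh whose ps.phase is already set just reuses its reservation (lines 847-848); otherwise the driver tries bw_period candidate frames starting from a pseudo-random offset, keeps the first one check_intr_schedule() accepts, then records phase/phase_uf/cs_mask and reserves the bandwidth. The search loop, sketched with a stubbed feasibility test (all names invented):

    #include <stdio.h>

    #define NO_FRAME 9999           /* stands in for the driver's NO_FRAME */

    static unsigned random_frame;   /* models ehci->random_frame */

    /* Stub standing in for check_intr_schedule(); here, only frame 5
     * is claimed to have room. */
    static int frame_has_room(unsigned frame)
    {
        return frame == 5;
    }

    /* Models the search at lines 865-875: try bw_period candidates
     * starting from a pseudo-random frame (bw_period is a power of
     * two, so the mask works), return the first feasible phase. */
    static unsigned pick_phase(unsigned bw_period)
    {
        unsigned i, frame;

        for (i = bw_period; i > 0; --i) {
            frame = ++random_frame & (bw_period - 1);
            if (frame_has_room(frame))
                return frame;
        }
        return NO_FRAME;            /* no slot: -ENOSPC in the driver */
    }

    int main(void)
    {
        unsigned phase = pick_phase(8);   /* period of 8 frames */

        if (phase != NO_FRAME)
            printf("scheduled at phase %u\n", phase);  /* prints 5 */
        return 0;
    }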
912 struct ehci_qh *qh;
929 /* get qh and force any scheduling errors */
931 qh = qh_append_tds(ehci, urb, &empty, epnum, &urb->ep->hcpriv);
932 if (qh == NULL) {
936 if (qh->qh_state == QH_STATE_IDLE) {
937 status = qh_schedule(ehci, qh);
942 /* then queue the urb's tds to the qh */
943 qh = qh_append_tds(ehci, urb, qtd_list, epnum, &urb->ep->hcpriv);
944 BUG_ON(qh == NULL);
947 if (qh->qh_state == QH_STATE_IDLE) {
948 qh_refresh(ehci, qh);
949 qh_link_periodic(ehci, qh);
951 /* cancel unlink wait for the qh */
952 cancel_unlink_wait_intr(ehci, qh);
971 struct ehci_qh *qh;
973 list_for_each_entry_safe(qh, ehci->qh_scan_next, &ehci->intr_qh_list,
976 /* clean any finished work for this qh */
977 if (!list_empty(&qh->qtd_list)) {
983 * always holds the next qh to scan; if the next qh
987 temp = qh_completions(ehci, qh);
989 start_unlink_intr(ehci, qh);
990 else if (unlikely(list_empty(&qh->qtd_list) &&
991 qh->qh_state == QH_STATE_LINKED))
992 start_unlink_intr_wait(ehci, qh);
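
scan_intr() (lines 971-992) walks intr_qh_list, completing finished qtds and starting unlinks for empty qhs. The subtlety behind the cursor lines above (629-631, 973) is that completion handling can release the lock, so a qh may be unlinked mid-scan; the list therefore carries a cursor, qh_scan_next, which the unlink path advances past any qh it removes. That cursor-fixup idiom on a plain singly linked list (names invented):

    #include <stdio.h>

    struct node {                    /* hypothetical qh stand-in */
        struct node *next;
        int          id;
    };

    static struct node *scan_next;   /* models ehci->qh_scan_next */

    /* Models the fixup at lines 629-631: if the scan cursor points at
     * the node being removed, move it to the successor first. */
    static void remove_node(struct node **head, struct node *victim)
    {
        struct node **prev;

        if (scan_next == victim)
            scan_next = victim->next;

        for (prev = head; *prev; prev = &(*prev)->next) {
            if (*prev == victim) {
                *prev = victim->next;
                break;
            }
        }
    }

    int main(void)
    {
        struct node c = { NULL, 3 }, b = { &c, 2 }, a = { &b, 1 };
        struct node *head = &a;

        scan_next = &b;              /* scanner parked on b... */
        remove_node(&head, &b);      /* ...while b gets unlinked */
        printf("cursor now at id %d\n", scan_next->id);   /* -> 3 */
        return 0;
    }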