Lines matching "qh" (refs:qh) in drivers/usb/host/ehci-q.c, the Linux EHCI host controller driver's queue-head handling; the leading number on each line is its line number in that file.

13 * Control, bulk, and interrupt traffic all use "qh" lists.  They list "qtd"
24 * (b) special fields in qh entries or (c) split iso entries. TTs will
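
The two comment fragments above describe the driver's core data model: each "qh" (queue head, one per non-iso endpoint) carries a list of "qtd" (queue element transfer descriptor) entries, one per transaction. A minimal userspace sketch of that relationship, using invented stand-in structs rather than the real ehci_qh/ehci_qtd layouts from ehci.h:

#include <stdio.h>

/* simplified stand-ins; the real structs also carry hardware-visible
 * fields (hw_next, hw_token, ...) and DMA addresses */
struct qtd {
        struct qtd *next;       /* models the qtd_list linkage */
        int length;             /* bytes this transaction moves */
};

struct qh {
        struct qtd *qtd_list;   /* transactions queued on this endpoint */
        struct qh *qh_next;     /* next qh on the async/periodic list */
};

int main(void)
{
        struct qtd t2 = { NULL, 64 }, t1 = { &t2, 512 };
        struct qh q = { &t1, NULL };

        for (struct qtd *t = q.qtd_list; t; t = t->next)
                printf("qtd of %d bytes\n", t->length);
        return 0;
}
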
79 qh_update (struct ehci_hcd *ehci, struct ehci_qh *qh, struct ehci_qtd *qtd)
81 struct ehci_qh_hw *hw = qh->hw;
84 WARN_ON(qh->qh_state != QH_STATE_IDLE);
97 is_out = qh->is_out;
99 if (unlikely(!usb_gettoggle(qh->ps.udev, epnum, is_out))) {
101 usb_settoggle(qh->ps.udev, epnum, is_out, 1);
108 /* if it weren't for a common silicon quirk (writing the dummy into the qh
109 * overlay, so qh->hw_token wrongly becomes inactive/halted), only fault
113 qh_refresh (struct ehci_hcd *ehci, struct ehci_qh *qh)
117 qtd = list_entry(qh->qtd_list.next, struct ehci_qtd, qtd_list);
126 if (qh->hw->hw_token & ACTIVE_BIT(ehci)) {
127 qh->hw->hw_qtd_next = qtd->hw_next;
128 if (qh->should_be_inactive)
129 ehci_warn(ehci, "qh %p should be inactive!\n", qh);
131 qh_update(ehci, qh, qtd);
133 qh->should_be_inactive = 0;
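
Lines 126-133 above are the heart of qh_refresh(): if the hardware overlay's token still shows the ACTIVE bit (the silicon quirk described at lines 108-109), only the overlay's next-qtd pointer is patched; otherwise the whole overlay can be safely rebuilt via qh_update(). A hedged sketch of that branch, with invented stand-in types and a made-up ACTIVE_BIT value:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ACTIVE_BIT 0x80 /* stand-in for the real ACTIVE_BIT(ehci) */

struct qh_hw { uint32_t hw_token; uint32_t hw_qtd_next; };
struct qh { struct qh_hw hw; bool should_be_inactive; };

/* mirrors the decision in qh_refresh(): never rewrite an overlay the
 * hardware may still consider active */
static void refresh(struct qh *qh, uint32_t first_qtd_dma)
{
        if (qh->hw.hw_token & ACTIVE_BIT) {
                qh->hw.hw_qtd_next = first_qtd_dma; /* patch pointer only */
                if (qh->should_be_inactive)
                        printf("qh should be inactive!\n");
        } else {
                /* safe to rebuild the whole overlay (qh_update) */
                qh->hw.hw_qtd_next = first_qtd_dma;
                qh->hw.hw_token = 0;
        }
        qh->should_be_inactive = false;
}

int main(void)
{
        struct qh q = { { ACTIVE_BIT, 0 }, true };
        refresh(&q, 0x1000);
        return 0;
}
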
138 static void qh_link_async(struct ehci_hcd *ehci, struct ehci_qh *qh);
144 struct ehci_qh *qh = ep->hcpriv;
148 qh->clearing_tt = 0;
149 if (qh->qh_state == QH_STATE_IDLE && !list_empty(&qh->qtd_list)
151 qh_link_async(ehci, qh);
155 static void ehci_clear_tt_buffer(struct ehci_hcd *ehci, struct ehci_qh *qh,
165 if (urb->dev->tt && !usb_pipeint(urb->pipe) && !qh->clearing_tt) {
177 qh->clearing_tt = 1;
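
Lines 144-177 sketch the Clear-TT-Buffer handshake for full/low-speed devices behind a high-speed hub: ehci_clear_tt_buffer() sets qh->clearing_tt while the hub's transaction translator buffer is being cleared, and the completion callback clears the flag and relinks an idle qh that still has queued qtds. A simplified model of that flag protocol (all names besides clearing_tt and the states are invented):

#include <stdbool.h>
#include <stdio.h>

enum qh_state { QH_STATE_IDLE, QH_STATE_LINKED };

struct qh {
        enum qh_state state;
        bool clearing_tt;       /* Clear-TT-Buffer request outstanding */
        int queued_qtds;
};

static void link_async(struct qh *qh) { qh->state = QH_STATE_LINKED; }

/* models ehci_clear_tt_buffer_complete(): the request finished, so an
 * idle qh with pending work may be relinked */
static void clear_tt_complete(struct qh *qh)
{
        qh->clearing_tt = false;
        if (qh->state == QH_STATE_IDLE && qh->queued_qtds > 0)
                link_async(qh);
}

int main(void)
{
        struct qh q = { QH_STATE_IDLE, true, 2 };

        clear_tt_complete(&q);
        printf("state=%d\n", q.state);  /* LINKED: pending work resumed */
        return 0;
}
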
281 static int qh_schedule (struct ehci_hcd *ehci, struct ehci_qh *qh);
284 * Process and free completed qtds for a qh, returning URBs to drivers.
285 * Chases up to qh->hw_current. Returns nonzero if the caller should
286 * unlink qh.
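
The comment at lines 284-286 gives the qh_completions() contract: retire finished qtds, give their URBs back, and return nonzero if the caller should unlink the qh. Because entries are freed mid-walk, line 322 uses list_for_each_safe(), which caches the successor before visiting each node. A minimal userspace rendering of that safe-iteration idiom, with plain next pointers instead of the kernel's struct list_head:

#include <stdio.h>
#include <stdlib.h>

struct qtd { struct qtd *next; int done; };

/* models list_for_each_safe(): 'tmp' caches the successor so the
 * current entry can be freed during the walk */
static struct qtd *retire_done(struct qtd *head)
{
        struct qtd **pp = &head, *cur, *tmp;

        for (cur = head; cur; cur = tmp) {
                tmp = cur->next;
                if (cur->done) {
                        *pp = tmp;      /* unlink, then give back/free */
                        free(cur);
                } else {
                        pp = &cur->next;
                }
        }
        return head;
}

int main(void)
{
        struct qtd *a = malloc(sizeof *a), *b = malloc(sizeof *b);

        a->next = b; a->done = 1;
        b->next = NULL; b->done = 0;
        a = retire_done(a);
        printf("%d qtd(s) left\n", a && !a->next ? 1 : 0);
        free(a);
        return 0;
}
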
289 qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
291 struct ehci_qtd *last, *end = qh->dummy;
296 struct ehci_qh_hw *hw = qh->hw;
300 * they add urbs to this qh's queue or mark them for unlinking.
304 * It's a bug for qh->qh_state to be anything other than
308 state = qh->qh_state;
309 qh->qh_state = QH_STATE_COMPLETING;
315 qh->dequeue_during_giveback = 0;
322 list_for_each_safe (entry, tmp, &qh->qtd_list) {
344 /* hardware copies qtd out of qh overlay */
355 "detected DataBufferErr for urb %p ep%d%s len %d, qtd %p [qh %p]\n",
361 qh);
373 ++qh->xacterrs < QH_XACTERR_MAX &&
377 qtd->length - QTD_LENGTH(token), qtd->length, qh->xacterrs);
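
Lines 373-377 show the software retry policy for transaction errors: a halted qtd whose token has XactErr set and a zero hardware retry counter (CERR) is retried until qh->xacterrs reaches QH_XACTERR_MAX, and the counter is reset on clean progress (line 506). A sketch of that bounded-retry accounting; QH_XACTERR_MAX is 32 in ehci.h, but treat the constant here as an assumption:

#include <stdbool.h>
#include <stdio.h>

#define QH_XACTERR_MAX 32       /* software retry limit (assumed) */

struct qh { int xacterrs; };

/* returns true if the failed transaction should be retried */
static bool should_retry_xacterr(struct qh *qh, bool xact_err,
                                 int cerr, bool unlinked)
{
        return xact_err && cerr == 0 &&
                ++qh->xacterrs < QH_XACTERR_MAX && !unlinked;
}

int main(void)
{
        struct qh q = { 0 };
        int tries = 0;

        while (should_retry_xacterr(&q, true, 0, false))
                tries++;
        printf("gave up after %d software retries\n", tries);

        q.xacterrs = 0; /* clean progress resets the counter (line 506) */
        return 0;
}
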
380 * qh overlay (which still contains
395 qh->unlink_reason |= QH_UNLINK_HALTED;
397 /* magic dummy for some short reads; qh won't advance.
410 qh->unlink_reason |= QH_UNLINK_SHORT_READ;
425 qh->unlink_reason |= QH_UNLINK_SHUTDOWN;
435 * If this was the active qtd when the qh was unlinked
443 qh->qtd_list.next == &qtd->qtd_list &&
447 qh->should_be_inactive = 1;
453 ehci_clear_tt_buffer(ehci, qh, urb, token);
487 ehci_clear_tt_buffer(ehci, qh, urb,
495 if (stopped && qtd->qtd_list.prev != &qh->qtd_list) {
506 qh->xacterrs = 0;
516 if (unlikely(qh->dequeue_during_giveback)) {
525 qh->qh_state = state;
527 /* be sure the hardware's done with the qh before refreshing
543 qh->unlink_reason |= QH_UNLINK_DUMMY_OVERLAY;
546 return qh->unlink_reason;
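
Lines 395, 410, 425 and 543 each OR a different QH_UNLINK_* flag into qh->unlink_reason, and line 546 returns the accumulated mask so the caller learns both whether and why to unlink. A sketch of that reason-bitmask pattern (flag values invented; only the names follow the driver):

#include <stdio.h>

/* values are illustrative, not the kernel's */
#define QH_UNLINK_HALTED        0x01
#define QH_UNLINK_SHORT_READ    0x02
#define QH_UNLINK_SHUTDOWN      0x04
#define QH_UNLINK_DUMMY_OVERLAY 0x08

struct qh { unsigned unlink_reason; };

static unsigned completions(struct qh *qh, int halted, int short_read)
{
        if (halted)
                qh->unlink_reason |= QH_UNLINK_HALTED;
        if (short_read)
                qh->unlink_reason |= QH_UNLINK_SHORT_READ;
        return qh->unlink_reason;       /* nonzero: caller should unlink */
}

int main(void)
{
        struct qh q = { 0 };

        if (completions(&q, 1, 1))
                printf("unlink needed, reasons 0x%x\n", q.unlink_reason);
        return 0;
}
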
572 * create a list of filled qtds for this URB; won't link into qh.
671 /* qh makes control packets use qtd toggle; maybe switch it */
744 // Would be best to create all qh's from config descriptors,
746 // any previous qh and cancel its urbs first; endpoints are
765 struct ehci_qh *qh = ehci_qh_alloc (ehci, flags);
774 if (!qh)
775 return qh;
793 ehci_dbg(ehci, "bogus qh maxpacket %d\n", maxp);
801 * - qh has a polling interval
808 qh->ps.usecs = NS_TO_US(usb_calc_bus_time(USB_SPEED_HIGH,
810 qh->ps.phase = NO_FRAME;
813 qh->ps.c_usecs = 0;
814 qh->gap_uf = 0;
825 qh->ps.period = urb->interval >> 3;
832 qh->ps.bw_uperiod = min_t(unsigned, tmp, urb->interval);
833 qh->ps.bw_period = qh->ps.bw_uperiod >> 3;
838 qh->gap_uf = 1 + usb_calc_bus_time (urb->dev->speed,
843 qh->ps.c_usecs = qh->ps.usecs + HS_USECS(0);
844 qh->ps.usecs = HS_USECS(1);
846 qh->ps.usecs += HS_USECS(1);
847 qh->ps.c_usecs = HS_USECS(0);
851 qh->ps.tt_usecs = NS_TO_US(think_time +
856 qh->ps.period = urb->interval;
864 qh->ps.bw_period = min_t(unsigned, tmp, urb->interval);
865 qh->ps.bw_uperiod = qh->ps.bw_period << 3;
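
Lines 808-865 fill in the periodic-scheduling bookkeeping in qh_make(). EHCI schedules in microframes (8 per 1 ms frame): a high-speed urb->interval is already in microframes, so the frame period is interval >> 3, while a full/low-speed interval is in frames and the microframe budget is period << 3. A worked sketch of just that unit conversion; the real code also clamps against the schedule size (the min_t() at lines 832/864) and computes bus times via usb_calc_bus_time(), both omitted here:

#include <stdio.h>

struct ep_sched { unsigned period, bw_period, bw_uperiod; };

/* high speed: interval counts microframes (125 us each) */
static void hs_periods(struct ep_sched *ps, unsigned interval)
{
        ps->bw_uperiod = interval;              /* really min(cap, interval) */
        ps->bw_period = ps->bw_uperiod >> 3;    /* microframes -> frames */
        ps->period = interval >> 3;
}

/* full/low speed: interval counts 1 ms frames */
static void fs_periods(struct ep_sched *ps, unsigned interval)
{
        ps->bw_period = interval;               /* really min(cap, interval) */
        ps->bw_uperiod = ps->bw_period << 3;    /* frames -> microframes */
        ps->period = interval;
}

int main(void)
{
        struct ep_sched hs, fs;

        hs_periods(&hs, 16);    /* every 16 uframes = every 2 frames */
        fs_periods(&fs, 8);     /* every 8 frames = every 64 uframes */
        printf("hs: period %u frames; fs: %u uframes\n",
               hs.period, fs.bw_uperiod);
        return 0;
}
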
870 qh->ps.udev = urb->dev;
871 qh->ps.ep = urb->ep;
935 qh_destroy(ehci, qh);
942 qh->qh_state = QH_STATE_IDLE;
943 hw = qh->hw;
946 qh->is_out = !is_input;
948 return qh;
972 WARN_ON(ehci->async->qh_next.qh || !list_empty(&ehci->async_unlink) ||
979 /* move qh (and its qtds) onto async queue; maybe enable queue. */
981 static void qh_link_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
983 __hc32 dma = QH_NEXT(ehci, qh->qh_dma);
987 if (unlikely(qh->clearing_tt))
990 WARN_ON(qh->qh_state != QH_STATE_IDLE);
993 qh_refresh(ehci, qh);
997 qh->qh_next = head->qh_next;
998 qh->hw->hw_next = head->hw->hw_next;
1001 head->qh_next.qh = qh;
1004 qh->qh_state = QH_STATE_LINKED;
1005 qh->xacterrs = 0;
1006 qh->unlink_reason = 0;
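
Lines 983-1006 link a qh at the head of the async schedule. The ordering matters: the new qh's software and hardware next pointers are made to point at the head's old successors first, and only then is the head updated, so the controller can never follow a half-built link. A sketch of that publish-last ordering (the kernel issues a wmb() between the two steps; plain stores stand in for it here):

#include <stdint.h>
#include <stdio.h>

struct hw { uint32_t hw_next; };
struct qh {
        struct qh *qh_next;
        struct hw hw;
        uint32_t dma;   /* address the controller uses for this qh */
};

/* insert 'qh' right after the list head; publish to hardware last */
static void link_after_head(struct qh *head, struct qh *qh)
{
        qh->qh_next = head->qh_next;    /* 1: new node sees old chain */
        qh->hw.hw_next = head->hw.hw_next;
        /* kernel: wmb() here, so step 1 is visible before step 2 */
        head->qh_next = qh;             /* 2: head now reaches new node */
        head->hw.hw_next = qh->dma;
}

int main(void)
{
        struct qh head = { NULL, { 0 }, 0x100 };
        struct qh q = { NULL, { 0 }, 0x200 };

        link_after_head(&head, &q);
        printf("head hw_next -> %#x\n", (unsigned) head.hw.hw_next);
        return 0;
}
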
1028 struct ehci_qh *qh = NULL;
1031 qh = (struct ehci_qh *) *ptr;
1032 if (unlikely (qh == NULL)) {
1034 qh = qh_make (ehci, urb, GFP_ATOMIC);
1035 *ptr = qh;
1037 if (likely (qh != NULL)) {
1046 /* control qh may need patching ... */
1051 qh->hw->hw_info1 &= ~qh_addr_mask;
1070 dummy = qh->dummy;
1078 list_splice_tail(qtd_list, &qh->qtd_list);
1081 qh->dummy = qtd;
1085 qtd = list_entry (qh->qtd_list.prev,
1093 urb->hcpriv = qh;
1096 return qh;
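
Lines 1070-1085 show the dummy-qtd trick in qh_append_tds(): the qh's list always ends with an inactive dummy qtd the controller can park on; appending copies the first new qtd's work into the old dummy, recycles that qtd as the new tail dummy, and activates the old dummy's token only as the final step. A rough single-qtd model of that ordering (the real code also swaps DMA addresses and splices whole lists, omitted here):

#include <stdio.h>

struct qtd { struct qtd *next; unsigned token; };       /* 0 = inactive */

/* The activation write comes last (after a wmb() in the kernel), so
 * the hardware sees either the old queue or the fully formed new one,
 * never a half-built head. */
static struct qtd *append_one(struct qtd *dummy, struct qtd *qtd)
{
        unsigned token = qtd->token;

        qtd->token = 0;         /* qtd becomes the new inactive dummy */
        qtd->next = NULL;
        dummy->next = qtd;      /* old dummy now points at the new tail */
        dummy->token = token;   /* activation last: work becomes visible */
        return qtd;             /* the qh's new dummy */
}

int main(void)
{
        struct qtd dummy = { NULL, 0 }, work = { NULL, 0x80 };
        struct qtd *new_dummy = append_one(&dummy, &work);

        printf("active token 0x%x, tail token 0x%x\n",
               dummy.token, new_dummy->token);
        return 0;
}
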
1110 struct ehci_qh *qh = NULL;
1120 "%s %s urb %p ep%d%s len %d, qtd %p [qh %p]\n",
1137 qh = qh_append_tds(ehci, urb, qtd_list, epnum, &urb->ep->hcpriv);
1138 if (unlikely(qh == NULL)) {
1147 if (likely (qh->qh_state == QH_STATE_IDLE))
1148 qh_link_async(ehci, qh);
1151 if (unlikely (qh == NULL))
1257 static void single_unlink_async(struct ehci_hcd *ehci, struct ehci_qh *qh)
1262 qh->qh_state = QH_STATE_UNLINK_WAIT;
1263 list_add_tail(&qh->unlink_node, &ehci->async_unlink);
1267 while (prev->qh_next.qh != qh)
1268 prev = prev->qh_next.qh;
1270 prev->hw->hw_next = qh->hw->hw_next;
1271 prev->qh_next = qh->qh_next;
1272 if (ehci->qh_scan_next == qh)
1273 ehci->qh_scan_next = qh->qh_next.qh;
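
Lines 1262-1273 unlink one qh from the async ring: walk to the predecessor in the singly linked list, bridge both the hardware and software next pointers over the victim, and repair ehci->qh_scan_next if the scanner's cursor was parked on the node being removed. A compact model of the predecessor walk and cursor fix-up:

#include <stdio.h>

struct qh { struct qh *next; int id; };

/* scan_next models ehci->qh_scan_next: the scanner's saved cursor */
static void unlink_one(struct qh *head, struct qh *victim,
                       struct qh **scan_next)
{
        struct qh *prev = head;

        while (prev->next != victim)    /* find the predecessor */
                prev = prev->next;
        prev->next = victim->next;      /* bridge over the victim */
        if (*scan_next == victim)       /* keep a live scan cursor valid */
                *scan_next = victim->next;
}

int main(void)
{
        struct qh c = { NULL, 3 }, b = { &c, 2 }, a = { &b, 1 };
        struct qh *cursor = &b;

        unlink_one(&a, &b, &cursor);
        printf("after unlink: a->next id %d, cursor id %d\n",
               a.next->id, cursor ? cursor->id : 0);
        return 0;
}
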
1309 /* See if the async qh for the qtds being unlinked are now gone from the HC */
1313 struct ehci_qh *qh;
1318 qh = list_first_entry(&ehci->async_unlink, struct ehci_qh,
1336 else if (qh->qh_state == QH_STATE_UNLINK) {
1341 list_move_tail(&qh->unlink_node, &ehci->async_idle);
1358 else if (qh->unlink_reason & (QH_UNLINK_HALTED |
1363 else if ((qh->unlink_reason & QH_UNLINK_QUEUE_EMPTY) &&
1364 list_empty(&qh->qtd_list))
1368 else if (qh->hw->hw_token & cpu_to_hc32(ehci, QTD_STS_HALT))
1375 qh_current = qh->hw->hw_current;
1376 qh_token = qh->hw->hw_token;
1386 qh->qh_state = QH_STATE_UNLINK;
1405 qh = list_first_entry(&ehci->async_idle, struct ehci_qh,
1407 list_del(&qh->unlink_node);
1409 qh->qh_state = QH_STATE_IDLE;
1410 qh->qh_next.qh = NULL;
1412 if (!list_empty(&qh->qtd_list))
1413 qh_completions(ehci, qh);
1414 if (!list_empty(&qh->qtd_list) &&
1416 qh_link_async(ehci, qh);
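
Lines 1405-1416 drain the async_idle list once the hardware is provably done with an unlinked qh: mark it QH_STATE_IDLE, run qh_completions() for anything still queued, and relink it if qtds remain while the root hub is running. A sketch of that retire-then-requeue step (state names follow the driver; the rest is stand-in):

#include <stdbool.h>
#include <stdio.h>

enum { QH_STATE_IDLE, QH_STATE_LINKED };

struct qh { int state; int queued; };

static void completions(struct qh *qh) { qh->queued--; }   /* stub: retire one */
static void link_async(struct qh *qh) { qh->state = QH_STATE_LINKED; }

static void end_unlink(struct qh *qh, bool rh_running)
{
        qh->state = QH_STATE_IDLE;
        if (qh->queued)
                completions(qh);        /* give back finished urbs */
        if (qh->queued && rh_running)
                link_async(qh);         /* work left over: relink */
}

int main(void)
{
        struct qh q = { QH_STATE_LINKED, 2 };

        end_unlink(&q, true);
        printf("state %d, queued %d\n", q.state, q.queued);
        return 0;
}
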
1422 static void start_unlink_async(struct ehci_hcd *ehci, struct ehci_qh *qh);
1426 struct ehci_qh *qh;
1431 for (qh = ehci->async->qh_next.qh; qh; qh = qh->qh_next.qh) {
1432 if (list_empty(&qh->qtd_list) &&
1433 qh->qh_state == QH_STATE_LINKED) {
1435 if (qh->unlink_cycle != ehci->async_unlink_cycle)
1436 qh_to_unlink = qh;
1459 struct ehci_qh *qh;
1461 while (ehci->async->qh_next.qh) {
1462 qh = ehci->async->qh_next.qh;
1463 WARN_ON(!list_empty(&qh->qtd_list));
1464 single_unlink_async(ehci, qh);
1470 /* makes sure the async qh will become idle */
1473 static void start_unlink_async(struct ehci_hcd *ehci, struct ehci_qh *qh)
1476 if (qh->qh_state != QH_STATE_LINKED)
1479 single_unlink_async(ehci, qh);
1487 struct ehci_qh *qh;
1490 ehci->qh_scan_next = ehci->async->qh_next.qh;
1492 qh = ehci->qh_scan_next;
1493 ehci->qh_scan_next = qh->qh_next.qh;
1495 /* clean any finished work for this qh */
1496 if (!list_empty(&qh->qtd_list)) {
1502 * always holds the next qh to scan; if the next qh
1506 temp = qh_completions(ehci, qh);
1508 start_unlink_async(ehci, qh);
1509 } else if (list_empty(&qh->qtd_list)
1510 && qh->qh_state == QH_STATE_LINKED) {
1511 qh->unlink_cycle = ehci->async_unlink_cycle;
1519 * as HCD schedule-scanning costs. Delay for any qh
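
Lines 1490-1510 explain why the qh_scan_next cursor exists: qh_completions() can drop the lock while giving URBs back, so the next qh may be unlinked mid-scan; saving the successor before processing each qh, and patching the cursor in single_unlink_async() (line 1273, modeled above), keeps the walk safe. A minimal model of that removal-tolerant iteration:

#include <stdio.h>

struct qh { struct qh *next; int id; };

static struct qh *scan_next;    /* models ehci->qh_scan_next */

/* a concurrent unlink calls this and keeps the cursor valid */
static void unlink_during_scan(struct qh *head, struct qh *victim)
{
        struct qh *prev = head;

        while (prev->next != victim)
                prev = prev->next;
        prev->next = victim->next;
        if (scan_next == victim)
                scan_next = victim->next;
}

int main(void)
{
        struct qh c = { NULL, 3 }, b = { &c, 2 }, a = { &b, 1 };
        struct qh head = { &a, 0 };

        for (scan_next = head.next; scan_next; ) {
                struct qh *qh = scan_next;

                scan_next = qh->next;   /* save successor before processing */
                if (qh->id == 1)        /* processing may unlink the next qh */
                        unlink_during_scan(&head, &b);
                printf("scanned qh %d\n", qh->id);
        }
        return 0;       /* prints 1 then 3: the unlinked qh is skipped safely */
}
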