Lines matching refs: qh

114 #define HCC_CANPARK(p)		((p)&(1 << 2))  /* true: can park on async qh */
126 #define CMD_PARK (1<<11) /* enable "park" on async qh */
268 /* mask NakCnt+T in qh->hw_alt_next */
273 /* Type tag from {qh, itd, sitd, fstn}->hw_next */
291 * For entries in the async schedule, the type tag always says "qh".
294 struct ehci_qh *qh; /* Q_TYPE_QH */
328 dma_addr_t qh_dma; /* address of qh */
329 union ehci_shadow qh_next; /* ptr to qh; or periodic */
469 #define EHCI_SHRINK_JIFFIES (HZ/200) /* async qh unlink delay */
1030 static void oxu_qh_free(struct oxu_hcd *oxu, struct ehci_qh *qh)
1036 index = qh - &oxu->mem->qh_pool[0];
1044 struct ehci_qh *qh = container_of(kref, struct ehci_qh, kref);
1045 struct oxu_hcd *oxu = qh->oxu;
1048 if (!list_empty(&qh->qtd_list) || qh->qh_next.ptr) {
1049 oxu_dbg(oxu, "unused qh not empty!\n");
1052 if (qh->dummy)
1053 oxu_qtd_free(oxu, qh->dummy);
1054 oxu_qh_free(oxu, qh);
1060 struct ehci_qh *qh = NULL;
1069 qh = (struct ehci_qh *) &oxu->mem->qh_pool[i];
1070 memset(qh, 0, sizeof *qh);
1072 kref_init(&qh->kref);
1073 qh->oxu = oxu;
1074 qh->qh_dma = virt_to_phys(qh);
1075 INIT_LIST_HEAD(&qh->qtd_list);
1078 qh->dummy = ehci_qtd_alloc(oxu);
1079 if (qh->dummy == NULL) {
1082 qh = NULL;
1091 return qh;
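
oxu_qh_alloc() and oxu_qh_free() (lines 1030-1091 above) hand QHs out of a fixed qh_pool in local memory; the free path recovers the slot index by plain pointer subtraction (line 1036), and every fresh QH gets its own dummy qTD. A minimal user-space sketch of that pool pattern, with an invented in-use flag array standing in for whatever bookkeeping the driver actually keeps:

	#include <stdio.h>
	#include <string.h>

	#define QH_POOL_SIZE 8

	struct pool_qh {
		int dummy_id;		/* stands in for the per-QH dummy qTD */
	};

	static struct pool_qh qh_pool[QH_POOL_SIZE];
	static int qh_used[QH_POOL_SIZE];	/* hypothetical in-use flags */

	static struct pool_qh *pool_qh_alloc(void)
	{
		for (int i = 0; i < QH_POOL_SIZE; i++) {
			if (!qh_used[i]) {
				qh_used[i] = 1;
				memset(&qh_pool[i], 0, sizeof(qh_pool[i]));
				return &qh_pool[i];
			}
		}
		return NULL;		/* pool exhausted */
	}

	static void pool_qh_free(struct pool_qh *qh)
	{
		/* Recover the slot index by pointer arithmetic, as in oxu_qh_free(). */
		long index = qh - &qh_pool[0];

		qh_used[index] = 0;
	}

	int main(void)
	{
		struct pool_qh *qh = pool_qh_alloc();

		printf("allocated slot %ld\n", (long)(qh - &qh_pool[0]));
		pool_qh_free(qh);
		return 0;
	}
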
1094 /* to share a qh (cpu threads, or hc) */
1095 static inline struct ehci_qh *qh_get(struct ehci_qh *qh)
1097 kref_get(&qh->kref);
1098 return qh;
1101 static inline void qh_put(struct ehci_qh *qh)
1103 kref_put(&qh->kref, qh_destroy);
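
qh_get() and qh_put() (lines 1095-1103) wrap a kref so a QH stays allocated while either the CPU or the host controller still references it; qh_destroy() runs only when the last reference drops. A minimal plain-C sketch of that lifetime rule, assuming a hypothetical sketch_qh type and a non-atomic counter in place of the real kref API:

	#include <stdio.h>
	#include <stdlib.h>

	/* Hypothetical stand-in for struct ehci_qh: only the refcount matters here. */
	struct sketch_qh {
		int refcount;
	};

	static struct sketch_qh *sketch_qh_alloc(void)
	{
		struct sketch_qh *qh = calloc(1, sizeof(*qh));

		if (qh)
			qh->refcount = 1;	/* like kref_init(): starts at one */
		return qh;
	}

	static struct sketch_qh *sketch_qh_get(struct sketch_qh *qh)
	{
		qh->refcount++;		/* kref_get() analogue (not atomic here) */
		return qh;
	}

	static void sketch_qh_put(struct sketch_qh *qh)
	{
		if (--qh->refcount == 0) {	/* kref_put() runs the release hook */
			printf("last reference dropped, freeing qh\n");
			free(qh);
		}
	}

	int main(void)
	{
		struct sketch_qh *qh = sketch_qh_alloc();

		sketch_qh_get(qh);	/* e.g. an URB holding it via urb->hcpriv */
		sketch_qh_put(qh);	/* URB completion drops that reference */
		sketch_qh_put(qh);	/* list unlink drops the final reference */
		return 0;
	}
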
1245 struct ehci_qh *qh, struct ehci_qtd *qtd)
1248 BUG_ON(qh->qh_state != QH_STATE_IDLE);
1250 qh->hw_qtd_next = QTD_NEXT(qtd->qtd_dma);
1251 qh->hw_alt_next = EHCI_LIST_END;
1258 if (!(qh->hw_info1 & cpu_to_le32(1 << 14))) {
1262 epnum = (le32_to_cpup(&qh->hw_info1) >> 8) & 0x0f;
1263 if (unlikely(!usb_gettoggle(qh->dev, epnum, is_out))) {
1264 qh->hw_token &= ~cpu_to_le32(QTD_TOGGLE);
1265 usb_settoggle(qh->dev, epnum, is_out, 1);
1269 /* HC must see latest qtd and qh data before we clear ACTIVE+HALT */
1271 qh->hw_token &= cpu_to_le32(QTD_TOGGLE | QTD_STS_PING);
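
qh_update() (lines 1245-1271) points the overlay at the next qTD and then masks hw_token so that only the data toggle and the PING state survive, clearing ACTIVE and HALT before the controller re-fetches the QH. A small sketch of that masking step; the bit positions follow the usual EHCI qTD token layout and are shown only for illustration, the driver's own QTD_* macros are authoritative:

	#include <stdint.h>
	#include <stdio.h>

	/* qTD token bits as assumed by this sketch (EHCI-style layout). */
	#define QTD_TOGGLE	(1u << 31)	/* data toggle */
	#define QTD_STS_ACTIVE	(1u << 7)	/* HC owns this transfer */
	#define QTD_STS_HALT	(1u << 6)	/* error, queue halted */
	#define QTD_STS_PING	(1u << 0)	/* PING state for high-speed OUT */

	int main(void)
	{
		/* Imagine the overlay was left active+halted by the silicon quirk. */
		uint32_t hw_token = QTD_TOGGLE | QTD_STS_ACTIVE | QTD_STS_HALT;

		/* qh_update(): keep only toggle and ping, drop ACTIVE/HALT/errors
		 * so the HC re-fetches a clean, idle overlay. */
		hw_token &= (QTD_TOGGLE | QTD_STS_PING);

		printf("token after refresh: 0x%08x\n", hw_token);
		return 0;
	}
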
1274 /* If it weren't for a common silicon quirk (writing the dummy into the qh
1275 * overlay, so qh->hw_token wrongly becomes inactive/halted), only fault
1278 static void qh_refresh(struct oxu_hcd *oxu, struct ehci_qh *qh)
1282 if (list_empty(&qh->qtd_list))
1283 qtd = qh->dummy;
1285 qtd = list_entry(qh->qtd_list.next,
1288 if (cpu_to_le32(qtd->qtd_dma) == qh->hw_current)
1293 qh_update(oxu, qh, qtd);
1353 struct ehci_qh *qh = (struct ehci_qh *) urb->hcpriv;
1356 if ((qh->hw_info2 & cpu_to_le32(QH_SMASK)) != 0) {
1361 qh_put(qh);
1394 static void start_unlink_async(struct oxu_hcd *oxu, struct ehci_qh *qh);
1395 static void unlink_async(struct oxu_hcd *oxu, struct ehci_qh *qh);
1397 static void intr_deschedule(struct oxu_hcd *oxu, struct ehci_qh *qh);
1398 static int qh_schedule(struct oxu_hcd *oxu, struct ehci_qh *qh);
1402 /* Process and free completed qtds for a qh, returning URBs to drivers.
1403 * Chases up to qh->hw_current. Returns number of completions called,
1406 static unsigned qh_completions(struct oxu_hcd *oxu, struct ehci_qh *qh)
1408 struct ehci_qtd *last = NULL, *end = qh->dummy;
1416 if (unlikely(list_empty(&qh->qtd_list)))
1421 * they add urbs to this qh's queue or mark them for unlinking.
1425 state = qh->qh_state;
1426 qh->qh_state = QH_STATE_COMPLETING;
1434 list_for_each_entry_safe(qtd, tmp, &qh->qtd_list, qtd_list) {
1464 /* hardware copies qtd out of qh overlay */
1474 /* magic dummy for some short reads; qh won't advance.
1496 * its urb was canceled. we may patch qh or qtds.
1511 == qh->hw_current)
1512 token = le32_to_cpu(qh->hw_token);
1514 /* force halt for unlinked or blocked qh, so we'll
1515 * patch the qh later and so that completions can't
1518 if ((HALT_BIT & qh->hw_token) == 0) {
1520 qh->hw_token |= HALT_BIT;
1535 if (stopped && qtd->qtd_list.prev != &qh->qtd_list) {
1562 qh->qh_state = state;
1564 /* be sure the hardware's done with the qh before refreshing
1568 if (stopped != 0 || qh->hw_qtd_next == EHCI_LIST_END) {
1571 qh_refresh(oxu, qh);
1578 & qh->hw_info2) != 0) {
1579 intr_deschedule(oxu, qh);
1580 (void) qh_schedule(oxu, qh);
1582 unlink_async(oxu, qh);
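
qh_completions() (lines 1402 onward) walks the QH's qtd_list in order, retires every qTD the controller has finished, hands the URBs back, and stops at the first transfer still owned by hardware; entries are unlinked while iterating, hence list_for_each_entry_safe() in the real code. A stripped-down sketch of that walk-and-retire loop on an invented singly linked queue, just to show the control flow:

	#include <stdio.h>
	#include <stdlib.h>

	/* Hypothetical mini-qTD: 'done' stands in for the cleared ACTIVE bit. */
	struct sketch_qtd {
		int id;
		int done;
		struct sketch_qtd *next;
	};

	/* Walk the queue in order, retire everything that finished, and stop at
	 * the first transfer the controller still owns (like stopping at
	 * hw_current). Returns the number of completions reported. */
	static unsigned retire_done(struct sketch_qtd **head)
	{
		unsigned count = 0;
		struct sketch_qtd *qtd;

		while ((qtd = *head) != NULL && qtd->done) {
			*head = qtd->next;		/* unlink while iterating */
			printf("retired qtd %d\n", qtd->id);
			free(qtd);
			count++;
		}
		return count;
	}

	int main(void)
	{
		struct sketch_qtd *head = NULL, **tail = &head;

		for (int i = 0; i < 3; i++) {
			struct sketch_qtd *q = calloc(1, sizeof(*q));
			q->id = i;
			q->done = (i < 2);	/* HC finished the first two */
			*tail = q;
			tail = &q->next;
		}
		printf("completions: %u\n", retire_done(&head));
		while (head) { struct sketch_qtd *n = head->next; free(head); head = n; }
		return 0;
	}
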
1610 /* Create a list of filled qtds for this URB; won't link into qh.
1705 /* qh makes control packets use qtd toggle; maybe switch it */
1727 * up after short reads, hc should advance qh past this urb
1782 struct ehci_qh *qh = oxu_qh_alloc(oxu);
1787 if (!qh)
1788 return qh;
1804 * - qh has a polling interval
1809 qh->usecs = NS_TO_US(usb_calc_bus_time(USB_SPEED_HIGH,
1812 qh->start = NO_FRAME;
1815 qh->c_usecs = 0;
1816 qh->gap_uf = 0;
1818 qh->period = urb->interval >> 3;
1819 if (qh->period == 0 && urb->interval != 1) {
1833 qh->gap_uf = 1 + usb_calc_bus_time(urb->dev->speed,
1838 qh->c_usecs = qh->usecs + HS_USECS(0);
1839 qh->usecs = HS_USECS(1);
1841 qh->usecs += HS_USECS(1);
1842 qh->c_usecs = HS_USECS(0);
1846 qh->tt_usecs = NS_TO_US(think_time +
1849 qh->period = urb->interval;
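
qh_make() stores the polling interval in frames: for high-speed endpoints the URB interval is given in microframes, so it is shifted down by three (lines 1818-1819), while a full/low-speed endpoint behind a TT keeps its interval as-is (line 1849). A tiny sketch of that conversion under the same unit assumptions:

	#include <stdio.h>
	#include <stdbool.h>

	/* Convert a URB polling interval to the QH period in frames.
	 * High speed: interval is in microframes (8 per frame), so shift by 3.
	 * Full/low speed: interval is already in frames. */
	static unsigned qh_period_frames(unsigned interval, bool high_speed)
	{
		if (high_speed)
			return interval >> 3;	/* may be 0 for sub-frame polling */
		return interval;
	}

	int main(void)
	{
		printf("HS interval 32 uframes -> period %u frames\n",
		       qh_period_frames(32, true));
		printf("FS interval 8 frames   -> period %u frames\n",
		       qh_period_frames(8, false));
		return 0;
	}
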
1854 qh->dev = urb->dev;
1898 qh_put(qh);
1905 qh->qh_state = QH_STATE_IDLE;
1906 qh->hw_info1 = cpu_to_le32(info1);
1907 qh->hw_info2 = cpu_to_le32(info2);
1909 qh_refresh(oxu, qh);
1910 return qh;
1913 /* Move qh (and its qtds) onto async queue; maybe enable queue.
1915 static void qh_link_async(struct oxu_hcd *oxu, struct ehci_qh *qh)
1917 __le32 dma = QH_NEXT(qh->qh_dma);
1923 if (!head->qh_next.qh) {
1938 if (qh->qh_state == QH_STATE_IDLE)
1939 qh_refresh(oxu, qh);
1942 qh->qh_next = head->qh_next;
1943 qh->hw_next = head->hw_next;
1946 head->qh_next.qh = qh;
1949 qh->qh_state = QH_STATE_LINKED;
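
qh_link_async() (lines 1915-1949) splices a QH in directly behind the dedicated async head: the new QH first inherits the head's successor (both the software shadow and hw_next), and only then is the head pointed at it, so the circular ring is never broken as seen by the controller. A user-space sketch of that insert-after-head step on a circular list, with one invented next field standing in for both pointers:

	#include <stdio.h>

	struct ring_qh {
		int id;
		struct ring_qh *next;	/* stands in for qh_next and hw_next */
	};

	/* Insert 'qh' immediately after the ring head; the head always stays
	 * in the ring, so traffic already in flight keeps a valid chain. */
	static void link_after_head(struct ring_qh *head, struct ring_qh *qh)
	{
		qh->next = head->next;	/* new QH first inherits the old successor */
		head->next = qh;	/* only then is it made visible after head */
	}

	int main(void)
	{
		struct ring_qh head = { .id = 0 };
		struct ring_qh a = { .id = 1 }, b = { .id = 2 };

		head.next = &head;	/* empty async ring: head points at itself */
		link_after_head(&head, &a);
		link_after_head(&head, &b);

		for (struct ring_qh *p = head.next; p != &head; p = p->next)
			printf("qh %d\n", p->id);	/* prints 2, then 1 */
		return 0;
	}
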
1965 struct ehci_qh *qh = NULL;
1967 qh = (struct ehci_qh *) *ptr;
1968 if (unlikely(qh == NULL)) {
1970 qh = qh_make(oxu, urb, GFP_ATOMIC);
1971 *ptr = qh;
1973 if (likely(qh != NULL)) {
1982 /* control qh may need patching ... */
1987 qh->hw_info1 &= ~QH_ADDR_MASK;
2006 dummy = qh->dummy;
2014 list_splice(qtd_list, qh->qtd_list.prev);
2017 qh->dummy = qtd;
2021 qtd = list_entry(qh->qtd_list.prev,
2030 urb->hcpriv = qh_get(qh);
2033 return qh;
2041 struct ehci_qh *qh = NULL;
2048 oxu_dbg(oxu, "%s %s urb %p ep%d%s len %d, qtd %p [qh %p]\n",
2061 qh = qh_append_tds(oxu, urb, qtd_list, epnum, &urb->ep->hcpriv);
2062 if (unlikely(qh == NULL)) {
2070 if (likely(qh->qh_state == QH_STATE_IDLE))
2071 qh_link_async(oxu, qh_get(qh));
2074 if (unlikely(qh == NULL))
2079 /* The async qh for the qtds being reclaimed are now unlinked from the HC */
2083 struct ehci_qh *qh = oxu->reclaim;
2088 qh->qh_state = QH_STATE_IDLE;
2089 qh->qh_next.qh = NULL;
2090 qh_put(qh); /* refcount from reclaim */
2093 next = qh->reclaim;
2096 qh->reclaim = NULL;
2098 qh_completions(oxu, qh);
2100 if (!list_empty(&qh->qtd_list)
2102 qh_link_async(oxu, qh);
2104 qh_put(qh); /* refcount from async list */
2110 && oxu->async->qh_next.qh == NULL)
2120 /* makes sure the async qh will become idle */
2123 static void start_unlink_async(struct oxu_hcd *oxu, struct ehci_qh *qh)
2130 BUG_ON(oxu->reclaim || (qh->qh_state != QH_STATE_LINKED
2131 && qh->qh_state != QH_STATE_UNLINK_WAIT));
2135 if (unlikely(qh == oxu->async)) {
2148 qh->qh_state = QH_STATE_UNLINK;
2149 oxu->reclaim = qh = qh_get(qh);
2152 while (prev->qh_next.qh != qh)
2153 prev = prev->qh_next.qh;
2155 prev->hw_next = qh->hw_next;
2156 prev->qh_next = qh->qh_next;
2160 /* if (unlikely(qh->reclaim != 0))
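
start_unlink_async() (lines 2123-2156) finds the QH's predecessor by walking the ring from the async head, then routes the predecessor around the QH in both the shadow list and the hardware chain; the QH itself is only recycled once the doorbell handshake completes. A sketch of the find-prev-and-splice step on the same kind of invented ring:

	#include <stdio.h>

	struct ring_qh {
		int id;
		struct ring_qh *next;	/* models qh_next and hw_next together */
	};

	/* Walk the circular ring from the head to find qh's predecessor, then
	 * route the predecessor around qh. qh->next is left untouched so the
	 * HC can still follow it until the doorbell is acknowledged. */
	static void unlink_from_ring(struct ring_qh *head, struct ring_qh *qh)
	{
		struct ring_qh *prev = head;

		while (prev->next != qh)
			prev = prev->next;
		prev->next = qh->next;
	}

	int main(void)
	{
		struct ring_qh head = { 0 }, a = { 1 }, b = { 2 };

		head.next = &b; b.next = &a; a.next = &head;	/* head -> b -> a -> head */
		unlink_from_ring(&head, &b);

		for (struct ring_qh *p = head.next; p != &head; p = p->next)
			printf("qh %d\n", p->id);		/* only qh 1 remains */
		return 0;
	}
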
2176 struct ehci_qh *qh;
2183 qh = oxu->async->qh_next.qh;
2184 if (likely(qh != NULL)) {
2186 /* clean any finished work for this qh */
2187 if (!list_empty(&qh->qtd_list)
2188 && qh->stamp != oxu->stamp) {
2196 qh = qh_get(qh);
2197 qh->stamp = oxu->stamp;
2198 temp = qh_completions(oxu, qh);
2199 qh_put(qh);
2205 * as HCD schedule-scanning costs. delay for any qh
2210 if (list_empty(&qh->qtd_list)) {
2211 if (qh->stamp == oxu->stamp)
2214 && qh->qh_state == QH_STATE_LINKED)
2215 start_unlink_async(oxu, qh);
2218 qh = qh->qh_next.qh;
2219 } while (qh);
2227 * @periodic: host pointer to qh/itd/sitd
2236 return &periodic->qh->qh_next;
2277 if (q->qh->hw_info2 & cpu_to_le32(1 << uframe))
2278 usecs += q->qh->usecs;
2280 if (q->qh->hw_info2 & cpu_to_le32(1 << (8 + uframe)))
2281 usecs += q->qh->c_usecs;
2282 hw_p = &q->qh->hw_next;
2283 q = &q->qh->qh_next;
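
periodic_usecs() (lines 2277-2283) charges qh->usecs for a microframe whose S-mask bit is set in hw_info2, and qh->c_usecs when the matching C-mask bit (offset by 8) is set. A sketch of that per-microframe accounting, with invented field names and the same bit layout assumed:

	#include <stdint.h>
	#include <stdio.h>

	struct sched_qh {
		uint32_t hw_info2;	/* low byte: S-mask, next byte: C-mask */
		unsigned usecs;		/* budget for the start/IN phase */
		unsigned c_usecs;	/* budget for the complete-split phase */
	};

	/* Sum the microseconds already claimed in microframe 'uframe' by the
	 * QHs on one periodic frame list. */
	static unsigned periodic_usecs(const struct sched_qh *list, int n, unsigned uframe)
	{
		unsigned total = 0;

		for (int i = 0; i < n; i++) {
			if (list[i].hw_info2 & (1u << uframe))
				total += list[i].usecs;		/* S-mask hit */
			if (list[i].hw_info2 & (1u << (8 + uframe)))
				total += list[i].c_usecs;	/* C-mask hit */
		}
		return total;
	}

	int main(void)
	{
		struct sched_qh qhs[] = {
			{ .hw_info2 = 1u << 0, .usecs = 25 },
			{ .hw_info2 = (1u << 0) | (1u << 10), .usecs = 10, .c_usecs = 5 },
		};

		printf("uframe 0: %u us\n", periodic_usecs(qhs, 2, 0));	/* 35 */
		printf("uframe 2: %u us\n", periodic_usecs(qhs, 2, 2));	/* 5, via C-mask */
		return 0;
	}
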
2347 * this just links in a qh; caller guarantees uframe masks are set right.
2350 static int qh_link_periodic(struct oxu_hcd *oxu, struct ehci_qh *qh)
2353 unsigned period = qh->period;
2355 dev_dbg(&qh->dev->dev,
2356 "link qh%d-%04x/%p start %d [%d/%d us]\n",
2357 period, le32_to_cpup(&qh->hw_info2) & (QH_CMASK | QH_SMASK),
2358 qh, qh->start, qh->usecs, qh->c_usecs);
2364 for (i = qh->start; i < oxu->periodic_size; i += period) {
2376 hw_p = &here.qh->hw_next;
2383 while (here.ptr && qh != here.qh) {
2384 if (qh->period > here.qh->period)
2386 prev = &here.qh->qh_next;
2387 hw_p = &here.qh->hw_next;
2390 /* link in this qh, unless some earlier pass did that */
2391 if (qh != here.qh) {
2392 qh->qh_next = here;
2393 if (here.qh)
2394 qh->hw_next = *hw_p;
2396 prev->qh = qh;
2397 *hw_p = QH_NEXT(qh->qh_dma);
2400 qh->qh_state = QH_STATE_LINKED;
2401 qh_get(qh);
2403 /* update per-qh bandwidth for usbfs */
2404 oxu_to_hcd(oxu)->self.bandwidth_allocated += qh->period
2405 ? ((qh->usecs + qh->c_usecs) / qh->period)
2406 : (qh->usecs * 8);
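
When a periodic QH is linked (lines 2404-2406), usbfs bandwidth is credited as (usecs + c_usecs) / period for a QH with a frame period, or usecs * 8 for a per-microframe QH with period 0; qh_unlink_periodic() subtracts the same amount (lines 2437-2439). The arithmetic in isolation:

	#include <stdio.h>

	/* Microseconds of bus time a periodic QH contributes per frame, matching
	 * the usbfs accounting: averaged over its period in frames, or times 8
	 * when it runs in every microframe (period == 0). */
	static unsigned qh_bandwidth_us(unsigned usecs, unsigned c_usecs, unsigned period)
	{
		return period ? (usecs + c_usecs) / period : usecs * 8;
	}

	int main(void)
	{
		printf("period 8, 30+20 us -> %u us/frame\n", qh_bandwidth_us(30, 20, 8));
		printf("period 0, 10 us    -> %u us/frame\n", qh_bandwidth_us(10, 0, 0));
		return 0;
	}
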
2415 static void qh_unlink_periodic(struct oxu_hcd *oxu, struct ehci_qh *qh)
2422 * and this qh is active in the current uframe
2425 * qh->hw_info1 |= cpu_to_le32(1 << 7 "ignore");
2429 period = qh->period;
2433 for (i = qh->start; i < oxu->periodic_size; i += period)
2434 periodic_unlink(oxu, i, qh);
2436 /* update per-qh bandwidth for usbfs */
2437 oxu_to_hcd(oxu)->self.bandwidth_allocated -= qh->period
2438 ? ((qh->usecs + qh->c_usecs) / qh->period)
2439 : (qh->usecs * 8);
2441 dev_dbg(&qh->dev->dev,
2442 "unlink qh%d-%04x/%p start %d [%d/%d us]\n",
2443 qh->period,
2444 le32_to_cpup(&qh->hw_info2) & (QH_CMASK | QH_SMASK),
2445 qh, qh->start, qh->usecs, qh->c_usecs);
2447 /* qh->qh_next still "live" to HC */
2448 qh->qh_state = QH_STATE_UNLINK;
2449 qh->qh_next.ptr = NULL;
2450 qh_put(qh);
2458 static void intr_deschedule(struct oxu_hcd *oxu, struct ehci_qh *qh)
2462 qh_unlink_periodic(oxu, qh);
2465 * qh->hw_next or finish a writeback after SPLIT/CSPLIT ... and
2469 if (list_empty(&qh->qtd_list)
2470 || (cpu_to_le32(QH_CMASK) & qh->hw_info2) != 0)
2476 qh->qh_state = QH_STATE_IDLE;
2477 qh->hw_next = EHCI_LIST_END;
2525 const struct ehci_qh *qh, __le32 *c_maskp)
2529 if (qh->c_usecs && uframe >= 6) /* FSTN territory? */
2532 if (!check_period(oxu, frame, uframe, qh->period, qh->usecs))
2534 if (!qh->c_usecs) {
2547 static int qh_schedule(struct oxu_hcd *oxu, struct ehci_qh *qh)
2552 unsigned frame; /* 0..(qh->period - 1), or NO_FRAME */
2554 qh_refresh(oxu, qh);
2555 qh->hw_next = EHCI_LIST_END;
2556 frame = qh->start;
2559 if (frame < qh->period) {
2560 uframe = ffs(le32_to_cpup(&qh->hw_info2) & QH_SMASK);
2562 qh, &c_mask);
2574 if (qh->period) {
2575 frame = qh->period - 1;
2579 frame, uframe, qh,
2586 /* qh->period == 0 means every uframe */
2589 status = check_intr_schedule(oxu, 0, 0, qh, &c_mask);
2593 qh->start = frame;
2596 qh->hw_info2 &= cpu_to_le32(~(QH_CMASK | QH_SMASK));
2597 qh->hw_info2 |= qh->period
2600 qh->hw_info2 |= c_mask;
2602 oxu_dbg(oxu, "reused qh %p schedule\n", qh);
2605 status = qh_link_periodic(oxu, qh);
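
qh_schedule() finishes (lines 2596-2600) by clearing the old S-mask and C-mask in hw_info2 and writing the new ones: a single start-microframe bit when the QH has a frame period, all eight S-mask bits when it polls every microframe, plus whatever C-mask the schedule check produced. A sketch of that mask programming, assuming the usual EHCI layout of S-mask in bits 0-7 and C-mask in bits 8-15:

	#include <stdint.h>
	#include <stdio.h>

	#define QH_SMASK 0x000000ffu	/* microframe start-split mask */
	#define QH_CMASK 0x0000ff00u	/* microframe complete-split mask */

	/* Rewrite the schedule masks in hw_info2 for a freshly scheduled QH. */
	static uint32_t program_masks(uint32_t hw_info2, unsigned period,
				      unsigned uframe, uint32_t c_mask)
	{
		hw_info2 &= ~(QH_SMASK | QH_CMASK);		/* drop stale masks */
		hw_info2 |= period ? (1u << uframe)		/* one start microframe */
				   : QH_SMASK;			/* poll every microframe */
		hw_info2 |= c_mask;				/* complete-split slots */
		return hw_info2;
	}

	int main(void)
	{
		printf("info2 = 0x%08x\n", program_masks(0, 4, 2, 0x7u << (8 + 4)));
		printf("info2 = 0x%08x\n", program_masks(0, 0, 0, 0));
		return 0;
	}
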
2615 struct ehci_qh *qh;
2629 /* get qh and force any scheduling errors */
2631 qh = qh_append_tds(oxu, urb, &empty, epnum, &urb->ep->hcpriv);
2632 if (qh == NULL) {
2636 if (qh->qh_state == QH_STATE_IDLE) {
2637 status = qh_schedule(oxu, qh);
2642 /* then queue the urb's tds to the qh */
2643 qh = qh_append_tds(oxu, urb, qtd_list, epnum, &urb->ep->hcpriv);
2644 BUG_ON(qh == NULL);
2715 temp.qh = qh_get(q.qh);
2716 type = Q_NEXT_TYPE(q.qh->hw_next);
2717 q = q.qh->qh_next;
2718 modified = qh_completions(oxu, temp.qh);
2719 if (unlikely(list_empty(&temp.qh->qtd_list)))
2720 intr_deschedule(oxu, temp.qh);
2721 qh_put(temp.qh);
2827 static void unlink_async(struct oxu_hcd *oxu, struct ehci_qh *qh)
2830 if (qh->qh_state == QH_STATE_LINKED
2839 qh->qh_state = QH_STATE_UNLINK_WAIT;
2840 last->reclaim = qh;
2846 /* something else might have unlinked the qh by now */
2847 if (qh->qh_state == QH_STATE_LINKED)
2848 start_unlink_async(oxu, qh);
2894 /* complete the unlinking of some qh [4.15.2.3] */
3040 * dedicate a qh for the async ring head, since we couldn't unlink
3041 * a 'real' qh without stopping the async schedule [4.8]. use it
3043 * its dummy is used in hw_alt_next of many tds, to prevent the qh
3046 oxu->async->qh_next.qh = NULL;
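
The comment around lines 3040-3046 explains why a permanent dummy QH serves as the async ring head: a real QH could not be unlinked without stopping the async schedule, and the head's dummy qTD is what hw_alt_next of many qTDs points at. A minimal sketch of such a self-referencing head; the H-bit value follows the EHCI register layout, and parking the overlay with a halted token is shown as an assumption about how such a head is typically set up:

	#include <stdint.h>
	#include <stdio.h>

	#define QH_HEAD		0x00008000u	/* H bit: head of the reclamation list */
	#define QTD_STS_HALT	(1u << 6)	/* halted: HC never advances into it */

	struct mini_qh {
		struct mini_qh *next;	/* software shadow of hw_next */
		uint32_t hw_info1;
		uint32_t hw_token;
	};

	int main(void)
	{
		static struct mini_qh async_head;

		async_head.next = &async_head;		/* empty ring points at itself */
		async_head.hw_info1 = QH_HEAD;		/* only the dedicated head sets H */
		async_head.hw_token = QTD_STS_HALT;	/* overlay parked, never advanced */

		printf("ring empty: %s\n", async_head.next == &async_head ? "yes" : "no");
		return 0;
	}
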
3358 struct ehci_qh *qh;
3366 qh = (struct ehci_qh *) urb->hcpriv;
3367 if (!qh)
3369 unlink_async(oxu, qh);
3373 qh = (struct ehci_qh *) urb->hcpriv;
3374 if (!qh)
3376 switch (qh->qh_state) {
3378 intr_deschedule(oxu, qh);
3381 qh_completions(oxu, qh);
3384 oxu_dbg(oxu, "bogus qh %p state %d\n",
3385 qh, qh->qh_state);
3390 if (!list_empty(&qh->qtd_list)
3394 status = qh_schedule(oxu, qh);
3402 "can't reschedule qh %p, err %d\n", qh,
3414 /* Bulk qh holds the data toggle */
3420 struct ehci_qh *qh, *tmp;
3427 qh = ep->hcpriv;
3428 if (!qh)
3434 if (qh->hw_info1 == 0) {
3440 qh->qh_state = QH_STATE_IDLE;
3441 switch (qh->qh_state) {
3443 for (tmp = oxu->async->qh_next.qh;
3444 tmp && tmp != qh;
3445 tmp = tmp->qh_next.qh)
3447 /* periodic qh self-unlinks on empty */
3450 unlink_async(oxu, qh);
3458 if (list_empty(&qh->qtd_list)) {
3459 qh_put(qh);
3468 oxu_err(oxu, "qh %p (#%02x) state %d%s\n",
3469 qh, ep->desc.bEndpointAddress, qh->qh_state,
3470 list_empty(&qh->qtd_list) ? "" : "(has tds)");
3993 if (oxu->async->qh_next.qh)