Lines matching refs: qh (each entry is prefixed with its line number in the driver source)

114 #define HCC_CANPARK(p)		((p)&(1 << 2))  /* true: can park on async qh */
126 #define CMD_PARK (1<<11) /* enable "park" on async qh */
268 /* mask NakCnt+T in qh->hw_alt_next */
273 /* Type tag from {qh, itd, sitd, fstn}->hw_next */
291 * For entries in the async schedule, the type tag always says "qh".
294 struct ehci_qh *qh; /* Q_TYPE_QH */
328 dma_addr_t qh_dma; /* address of qh */
329 union ehci_shadow qh_next; /* ptr to qh; or periodic */
469 #define EHCI_SHRINK_JIFFIES (HZ/200) /* async qh unlink delay */
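The definitions above come from the driver's hardware-descriptor declarations: every on-schedule link is a little-endian DMA address whose low bits carry a type tag, and the driver keeps a parallel "shadow" union of CPU pointers for the same entry. A minimal sketch of that tagged-pointer layout, assuming the stock ehci-hcd member names and macro values for anything not quoted above (the itd/sitd/fstn members, the tag masks):

union ehci_shadow {                       /* software view of one schedule entry */
	struct ehci_qh   *qh;             /* Q_TYPE_QH   */
	struct ehci_itd  *itd;            /* Q_TYPE_ITD  */
	struct ehci_sitd *sitd;           /* Q_TYPE_SITD */
	struct ehci_fstn *fstn;           /* Q_TYPE_FSTN */
	__le32           *hw_next;        /* (all types) */
	void             *ptr;
};

/* type tag lives in bits 2:1 of every hw_next link (stock ehci-hcd values) */
#define Q_NEXT_TYPE(dma)	((dma) & cpu_to_le32(3 << 1))
#define Q_TYPE_QH		cpu_to_le32(1 << 1)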
1030 static void oxu_qh_free(struct oxu_hcd *oxu, struct ehci_qh *qh)
1036 index = qh - &oxu->mem->qh_pool[0];
1044 struct ehci_qh *qh = container_of(kref, struct ehci_qh, kref);
1045 struct oxu_hcd *oxu = qh->oxu;
1048 if (!list_empty(&qh->qtd_list) || qh->qh_next.ptr) {
1049 oxu_dbg(oxu, "unused qh not empty!\n");
1052 if (qh->dummy)
1053 oxu_qtd_free(oxu, qh->dummy);
1054 oxu_qh_free(oxu, qh);
1060 struct ehci_qh *qh = NULL;
1069 qh = (struct ehci_qh *) &oxu->mem->qh_pool[i];
1070 memset(qh, 0, sizeof *qh);
1072 kref_init(&qh->kref);
1073 qh->oxu = oxu;
1074 qh->qh_dma = virt_to_phys(qh);
1075 INIT_LIST_HEAD(&qh->qtd_list);
1078 qh->dummy = ehci_qtd_alloc(oxu);
1079 if (qh->dummy == NULL) {
1082 qh = NULL;
1091 return qh;
1094 /* to share a qh (cpu threads, or hc) */
1095 static inline struct ehci_qh *qh_get(struct ehci_qh *qh)
1097 kref_get(&qh->kref);
1098 return qh;
1101 static inline void qh_put(struct ehci_qh *qh)
1103 kref_put(&qh->kref, qh_destroy);
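The block above is the qh lifetime machinery: oxu_qh_alloc() takes an entry from the driver's fixed qh_pool, kref_init()s it and attaches a dummy qtd; qh_get()/qh_put() wrap kref_get()/kref_put(); and qh_destroy() is the kref release hook that returns the dummy qtd and the qh to the pool. Reassembled from those fragments (lines that do not contain "qh" are filled in as assumptions), the pattern is:

static void qh_destroy(struct kref *kref)
{
	struct ehci_qh *qh = container_of(kref, struct ehci_qh, kref);
	struct oxu_hcd *oxu = qh->oxu;

	/* a qh must be idle and off every schedule before its last ref drops */
	if (!list_empty(&qh->qtd_list) || qh->qh_next.ptr) {
		oxu_dbg(oxu, "unused qh not empty!\n");
		BUG();				/* assumed bail-out; not in the listing */
	}
	if (qh->dummy)
		oxu_qtd_free(oxu, qh->dummy);	/* dummy qtd allocated in oxu_qh_alloc() */
	oxu_qh_free(oxu, qh);			/* back to oxu->mem->qh_pool[] */
}

/* one reference per user: the URB, the async/periodic list, oxu->reclaim ... */
static inline struct ehci_qh *qh_get(struct ehci_qh *qh)
{
	kref_get(&qh->kref);
	return qh;
}

static inline void qh_put(struct ehci_qh *qh)
{
	kref_put(&qh->kref, qh_destroy);	/* last put runs qh_destroy() */
}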
1245 struct ehci_qh *qh, struct ehci_qtd *qtd)
1248 BUG_ON(qh->qh_state != QH_STATE_IDLE);
1250 qh->hw_qtd_next = QTD_NEXT(qtd->qtd_dma);
1251 qh->hw_alt_next = EHCI_LIST_END;
1258 if (!(qh->hw_info1 & cpu_to_le32(1 << 14))) {
1262 epnum = (le32_to_cpup(&qh->hw_info1) >> 8) & 0x0f;
1263 if (unlikely(!usb_gettoggle(qh->dev, epnum, is_out))) {
1264 qh->hw_token &= ~cpu_to_le32(QTD_TOGGLE);
1265 usb_settoggle(qh->dev, epnum, is_out, 1);
1269 /* HC must see latest qtd and qh data before we clear ACTIVE+HALT */
1271 qh->hw_token &= cpu_to_le32(QTD_TOGGLE | QTD_STS_PING);
1274 /* If it weren't for a common silicon quirk (writing the dummy into the qh
1275 * overlay, so qh->hw_token wrongly becomes inactive/halted), only fault
1278 static void qh_refresh(struct oxu_hcd *oxu, struct ehci_qh *qh)
1282 if (list_empty(&qh->qtd_list))
1283 qtd = qh->dummy;
1285 qtd = list_entry(qh->qtd_list.next,
1288 if (cpu_to_le32(qtd->qtd_dma) == qh->hw_current)
1293 qh_update(oxu, qh, qtd);
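Those hits are qh_update(), which writes a qtd's DMA address into the qh's hardware overlay and reconciles the data toggle, and qh_refresh(), which decides what to expose there: the dummy qtd when the queue is empty, otherwise the first queued qtd, unless the controller is already working on it. A reassembly of qh_refresh() from the fragments above; the else/brace structure is an assumption, since only lines containing "qh" appear in the listing:

static void qh_refresh(struct oxu_hcd *oxu, struct ehci_qh *qh)
{
	struct ehci_qtd *qtd;

	if (list_empty(&qh->qtd_list))
		qtd = qh->dummy;		/* nothing queued: expose the dummy */
	else {
		qtd = list_entry(qh->qtd_list.next,
				 struct ehci_qtd, qtd_list);
		/* the HC may already be on this qtd; don't rewrite the overlay */
		if (cpu_to_le32(qtd->qtd_dma) == qh->hw_current)
			qtd = NULL;
	}

	if (qtd)
		qh_update(oxu, qh, qtd);	/* sets hw_qtd_next, toggle, token */
}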
1353 struct ehci_qh *qh = (struct ehci_qh *) urb->hcpriv;
1356 if ((qh->hw_info2 & cpu_to_le32(QH_SMASK)) != 0) {
1361 qh_put(qh);
1395 static void start_unlink_async(struct oxu_hcd *oxu, struct ehci_qh *qh);
1396 static void unlink_async(struct oxu_hcd *oxu, struct ehci_qh *qh);
1398 static void intr_deschedule(struct oxu_hcd *oxu, struct ehci_qh *qh);
1399 static int qh_schedule(struct oxu_hcd *oxu, struct ehci_qh *qh);
1403 /* Process and free completed qtds for a qh, returning URBs to drivers.
1404 * Chases up to qh->hw_current. Returns number of completions called,
1407 static unsigned qh_completions(struct oxu_hcd *oxu, struct ehci_qh *qh)
1409 struct ehci_qtd *last = NULL, *end = qh->dummy;
1417 if (unlikely(list_empty(&qh->qtd_list)))
1422 * they add urbs to this qh's queue or mark them for unlinking.
1426 state = qh->qh_state;
1427 qh->qh_state = QH_STATE_COMPLETING;
1435 list_for_each_entry_safe(qtd, tmp, &qh->qtd_list, qtd_list) {
1465 /* hardware copies qtd out of qh overlay */
1475 /* magic dummy for some short reads; qh won't advance.
1497 * its urb was canceled. we may patch qh or qtds.
1512 == qh->hw_current)
1513 token = le32_to_cpu(qh->hw_token);
1515 /* force halt for unlinked or blocked qh, so we'll
1516 * patch the qh later and so that completions can't
1519 if ((HALT_BIT & qh->hw_token) == 0) {
1521 qh->hw_token |= HALT_BIT;
1536 if (stopped && qtd->qtd_list.prev != &qh->qtd_list) {
1563 qh->qh_state = state;
1565 /* be sure the hardware's done with the qh before refreshing
1569 if (stopped != 0 || qh->hw_qtd_next == EHCI_LIST_END) {
1572 qh_refresh(oxu, qh);
1579 & qh->hw_info2) != 0) {
1580 intr_deschedule(oxu, qh);
1581 (void) qh_schedule(oxu, qh);
1583 unlink_async(oxu, qh);
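The last few hits are the tail of qh_completions(): once the scan loop stops, the saved state is restored and, if the queue halted or ran dry, the qh is either refreshed in place (idle), pulled off and put back on the periodic schedule (interrupt qh, SMASK set in hw_info2), or handed to the async unlink path. A sketch of how those fragments connect; the switch on the saved state is an assumption, since those lines contain no "qh":

	qh->qh_state = state;			/* restore the state we entered with */

	/* be sure the hardware is done with the qh before touching it */
	if (stopped != 0 || qh->hw_qtd_next == EHCI_LIST_END) {
		switch (state) {
		case QH_STATE_IDLE:		/* not on a schedule: just refresh */
			qh_refresh(oxu, qh);
			break;
		case QH_STATE_LINKED:
			/* interrupt qh (SMASK set): reschedule it;
			 * async qh: start an unlink so the fault can be
			 * cleaned up off-schedule
			 */
			if ((cpu_to_le32(QH_SMASK) & qh->hw_info2) != 0) {
				intr_deschedule(oxu, qh);
				(void) qh_schedule(oxu, qh);
			} else
				unlink_async(oxu, qh);
			break;
		/* otherwise an unlink is already in progress */
		}
	}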
1611 /* Create a list of filled qtds for this URB; won't link into qh.
1706 /* qh makes control packets use qtd toggle; maybe switch it */
1728 * up after short reads, hc should advance qh past this urb
1783 struct ehci_qh *qh = oxu_qh_alloc(oxu);
1788 if (!qh)
1789 return qh;
1805 * - qh has a polling interval
1810 qh->usecs = NS_TO_US(usb_calc_bus_time(USB_SPEED_HIGH,
1813 qh->start = NO_FRAME;
1816 qh->c_usecs = 0;
1817 qh->gap_uf = 0;
1819 qh->period = urb->interval >> 3;
1820 if (qh->period == 0 && urb->interval != 1) {
1834 qh->gap_uf = 1 + usb_calc_bus_time(urb->dev->speed,
1839 qh->c_usecs = qh->usecs + HS_USECS(0);
1840 qh->usecs = HS_USECS(1);
1842 qh->usecs += HS_USECS(1);
1843 qh->c_usecs = HS_USECS(0);
1847 qh->tt_usecs = NS_TO_US(think_time +
1850 qh->period = urb->interval;
1855 qh->dev = urb->dev;
1899 qh_put(qh);
1906 qh->qh_state = QH_STATE_IDLE;
1907 qh->hw_info1 = cpu_to_le32(info1);
1908 qh->hw_info2 = cpu_to_le32(info2);
1910 qh_refresh(oxu, qh);
1911 return qh;
1914 /* Move qh (and its qtds) onto async queue; maybe enable queue.
1916 static void qh_link_async(struct oxu_hcd *oxu, struct ehci_qh *qh)
1918 __le32 dma = QH_NEXT(qh->qh_dma);
1924 if (!head->qh_next.qh) {
1939 if (qh->qh_state == QH_STATE_IDLE)
1940 qh_refresh(oxu, qh);
1943 qh->qh_next = head->qh_next;
1944 qh->hw_next = head->hw_next;
1947 head->qh_next.qh = qh;
1950 qh->qh_state = QH_STATE_LINKED;
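qh_link_async() splices a qh into the circular async schedule directly behind the dedicated ring head: the new qh first copies the head's shadow and hardware next-links, and only then is the head repointed at the new qh, so the controller never sees a half-built link. A sketch of that ordering from the fragments above; the write barrier and the "restart the async schedule when the ring was empty" branch (the !head->qh_next.qh test) do not match "qh" in full and are assumptions here:

static void qh_link_async(struct oxu_hcd *oxu, struct ehci_qh *qh)
{
	__le32 dma = QH_NEXT(qh->qh_dma);	/* hardware link, tagged Q_TYPE_QH */
	struct ehci_qh *head = oxu->async;	/* dedicated ring head, never unlinked */

	/* (re)enable the async schedule here if the ring was empty -- elided */

	if (qh->qh_state == QH_STATE_IDLE)
		qh_refresh(oxu, qh);		/* expose the first qtd (or the dummy) */

	/* step 1: the new qh points at whatever currently follows the head */
	qh->qh_next = head->qh_next;
	qh->hw_next = head->hw_next;
	wmb();					/* assumed barrier; its line has no "qh" */

	/* step 2: only now does the head point at the new qh */
	head->qh_next.qh = qh;
	head->hw_next = dma;

	qh->qh_state = QH_STATE_LINKED;
}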
1966 struct ehci_qh *qh = NULL;
1968 qh = (struct ehci_qh *) *ptr;
1969 if (unlikely(qh == NULL)) {
1971 qh = qh_make(oxu, urb, GFP_ATOMIC);
1972 *ptr = qh;
1974 if (likely(qh != NULL)) {
1983 /* control qh may need patching ... */
1988 qh->hw_info1 &= ~QH_ADDR_MASK;
2007 dummy = qh->dummy;
2015 list_splice(qtd_list, qh->qtd_list.prev);
2018 qh->dummy = qtd;
2022 qtd = list_entry(qh->qtd_list.prev,
2031 urb->hcpriv = qh_get(qh);
2034 return qh;
2042 struct ehci_qh *qh = NULL;
2049 oxu_dbg(oxu, "%s %s urb %p ep%d%s len %d, qtd %p [qh %p]\n",
2062 qh = qh_append_tds(oxu, urb, qtd_list, epnum, &urb->ep->hcpriv);
2063 if (unlikely(qh == NULL)) {
2071 if (likely(qh->qh_state == QH_STATE_IDLE))
2072 qh_link_async(oxu, qh_get(qh));
2075 if (unlikely(qh == NULL))
2080 /* The async qh for the qtds being reclaimed are now unlinked from the HC */
2084 struct ehci_qh *qh = oxu->reclaim;
2089 qh->qh_state = QH_STATE_IDLE;
2090 qh->qh_next.qh = NULL;
2091 qh_put(qh); /* refcount from reclaim */
2094 next = qh->reclaim;
2097 qh->reclaim = NULL;
2099 qh_completions(oxu, qh);
2101 if (!list_empty(&qh->qtd_list)
2103 qh_link_async(oxu, qh);
2105 qh_put(qh); /* refcount from async list */
2111 && oxu->async->qh_next.qh == NULL)
2121 /* makes sure the async qh will become idle */
2124 static void start_unlink_async(struct oxu_hcd *oxu, struct ehci_qh *qh)
2131 BUG_ON(oxu->reclaim || (qh->qh_state != QH_STATE_LINKED
2132 && qh->qh_state != QH_STATE_UNLINK_WAIT));
2136 if (unlikely(qh == oxu->async)) {
2149 qh->qh_state = QH_STATE_UNLINK;
2150 oxu->reclaim = qh = qh_get(qh);
2153 while (prev->qh_next.qh != qh)
2154 prev = prev->qh_next.qh;
2156 prev->hw_next = qh->hw_next;
2157 prev->qh_next = qh->qh_next;
2161 /* if (unlikely(qh->reclaim != 0))
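start_unlink_async() is the other half: it marks the qh QH_STATE_UNLINK, parks a reference in oxu->reclaim, walks the singly linked ring from the head to find the predecessor, and splices the qh out of both the hardware and shadow chains. The IAA doorbell handshake that follows (so the controller drops its cached copy before end_unlink_async() runs) matches no "qh" lines and is omitted. A sketch under those assumptions:

static void start_unlink_async(struct oxu_hcd *oxu, struct ehci_qh *qh)
{
	struct ehci_qh *prev;

	/* async-head and unlink-wait special cases elided */

	qh->qh_state = QH_STATE_UNLINK;
	oxu->reclaim = qh = qh_get(qh);		/* hold a ref until end_unlink_async() */

	/* the ring is singly linked, so find this qh's predecessor ... */
	prev = oxu->async;			/* assumed start; that line has no "qh" */
	while (prev->qh_next.qh != qh)
		prev = prev->qh_next.qh;

	/* ... and splice the qh out of the hardware and shadow chains */
	prev->hw_next = qh->hw_next;
	prev->qh_next = qh->qh_next;

	/* ring the IAA doorbell here; end_unlink_async() finishes the job */
}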
2177 struct ehci_qh *qh;
2184 qh = oxu->async->qh_next.qh;
2185 if (likely(qh != NULL)) {
2187 /* clean any finished work for this qh */
2188 if (!list_empty(&qh->qtd_list)
2189 && qh->stamp != oxu->stamp) {
2197 qh = qh_get(qh);
2198 qh->stamp = oxu->stamp;
2199 temp = qh_completions(oxu, qh);
2200 qh_put(qh);
2206 * as HCD schedule-scanning costs. delay for any qh
2211 if (list_empty(&qh->qtd_list)) {
2212 if (qh->stamp == oxu->stamp)
2215 && qh->qh_state == QH_STATE_LINKED)
2216 start_unlink_async(oxu, qh);
2219 qh = qh->qh_next.qh;
2220 } while (qh);
2228 * @periodic: host pointer to qh/itd/sitd
2237 return &periodic->qh->qh_next;
2278 if (q->qh->hw_info2 & cpu_to_le32(1 << uframe))
2279 usecs += q->qh->usecs;
2281 if (q->qh->hw_info2 & cpu_to_le32(1 << (8 + uframe)))
2282 usecs += q->qh->c_usecs;
2283 hw_p = &q->qh->hw_next;
2284 q = &q->qh->qh_next;
2348 * this just links in a qh; caller guarantees uframe masks are set right.
2351 static int qh_link_periodic(struct oxu_hcd *oxu, struct ehci_qh *qh)
2354 unsigned period = qh->period;
2356 dev_dbg(&qh->dev->dev,
2357 "link qh%d-%04x/%p start %d [%d/%d us]\n",
2358 period, le32_to_cpup(&qh->hw_info2) & (QH_CMASK | QH_SMASK),
2359 qh, qh->start, qh->usecs, qh->c_usecs);
2365 for (i = qh->start; i < oxu->periodic_size; i += period) {
2377 hw_p = &here.qh->hw_next;
2384 while (here.ptr && qh != here.qh) {
2385 if (qh->period > here.qh->period)
2387 prev = &here.qh->qh_next;
2388 hw_p = &here.qh->hw_next;
2391 /* link in this qh, unless some earlier pass did that */
2392 if (qh != here.qh) {
2393 qh->qh_next = here;
2394 if (here.qh)
2395 qh->hw_next = *hw_p;
2397 prev->qh = qh;
2398 *hw_p = QH_NEXT(qh->qh_dma);
2401 qh->qh_state = QH_STATE_LINKED;
2402 qh_get(qh);
2404 /* update per-qh bandwidth for usbfs */
2405 oxu_to_hcd(oxu)->self.bandwidth_allocated += qh->period
2406 ? ((qh->usecs + qh->c_usecs) / qh->period)
2407 : (qh->usecs * 8);
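The bandwidth bookkeeping above averages a periodic qh's budget over its period in frames: a qh with period 8 and 25 µs per pass charges (25 + 0) / 8 = 3 µs per frame, while a period-0 qh (one pass every microframe) charges usecs × 8 per frame. As a hypothetical helper, not part of the driver, the charge could be written as:

/* hypothetical helper: per-frame bandwidth charged for one periodic qh,
 * mirroring the usbfs accounting in the fragments above
 */
static unsigned qh_bw_usecs_per_frame(const struct ehci_qh *qh)
{
	if (qh->period)				/* period counted in frames */
		return (qh->usecs + qh->c_usecs) / qh->period;	/* e.g. (25+0)/8 = 3 */
	return qh->usecs * 8;			/* period 0: every uframe, 8 uframes/frame */
}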
2416 static void qh_unlink_periodic(struct oxu_hcd *oxu, struct ehci_qh *qh)
2423 * and this qh is active in the current uframe
2426 * qh->hw_info1 |= cpu_to_le32(1 << 7 "ignore");
2430 period = qh->period;
2434 for (i = qh->start; i < oxu->periodic_size; i += period)
2435 periodic_unlink(oxu, i, qh);
2437 /* update per-qh bandwidth for usbfs */
2438 oxu_to_hcd(oxu)->self.bandwidth_allocated -= qh->period
2439 ? ((qh->usecs + qh->c_usecs) / qh->period)
2440 : (qh->usecs * 8);
2442 dev_dbg(&qh->dev->dev,
2443 "unlink qh%d-%04x/%p start %d [%d/%d us]\n",
2444 qh->period,
2445 le32_to_cpup(&qh->hw_info2) & (QH_CMASK | QH_SMASK),
2446 qh, qh->start, qh->usecs, qh->c_usecs);
2448 /* qh->qh_next still "live" to HC */
2449 qh->qh_state = QH_STATE_UNLINK;
2450 qh->qh_next.ptr = NULL;
2451 qh_put(qh);
2459 static void intr_deschedule(struct oxu_hcd *oxu, struct ehci_qh *qh)
2463 qh_unlink_periodic(oxu, qh);
2466 * qh->hw_next or finish a writeback after SPLIT/CSPLIT ... and
2470 if (list_empty(&qh->qtd_list)
2471 || (cpu_to_le32(QH_CMASK) & qh->hw_info2) != 0)
2477 qh->qh_state = QH_STATE_IDLE;
2478 qh->hw_next = EHCI_LIST_END;
2526 const struct ehci_qh *qh, __le32 *c_maskp)
2530 if (qh->c_usecs && uframe >= 6) /* FSTN territory? */
2533 if (!check_period(oxu, frame, uframe, qh->period, qh->usecs))
2535 if (!qh->c_usecs) {
2548 static int qh_schedule(struct oxu_hcd *oxu, struct ehci_qh *qh)
2553 unsigned frame; /* 0..(qh->period - 1), or NO_FRAME */
2555 qh_refresh(oxu, qh);
2556 qh->hw_next = EHCI_LIST_END;
2557 frame = qh->start;
2560 if (frame < qh->period) {
2561 uframe = ffs(le32_to_cpup(&qh->hw_info2) & QH_SMASK);
2563 qh, &c_mask);
2575 if (qh->period) {
2576 frame = qh->period - 1;
2580 frame, uframe, qh,
2587 /* qh->period == 0 means every uframe */
2590 status = check_intr_schedule(oxu, 0, 0, qh, &c_mask);
2594 qh->start = frame;
2597 qh->hw_info2 &= cpu_to_le32(~(QH_CMASK | QH_SMASK));
2598 qh->hw_info2 |= qh->period
2601 qh->hw_info2 |= c_mask;
2603 oxu_dbg(oxu, "reused qh %p schedule\n", qh);
2606 status = qh_link_periodic(oxu, qh);
2616 struct ehci_qh *qh;
2630 /* get qh and force any scheduling errors */
2632 qh = qh_append_tds(oxu, urb, &empty, epnum, &urb->ep->hcpriv);
2633 if (qh == NULL) {
2637 if (qh->qh_state == QH_STATE_IDLE) {
2638 status = qh_schedule(oxu, qh);
2643 /* then queue the urb's tds to the qh */
2644 qh = qh_append_tds(oxu, urb, qtd_list, epnum, &urb->ep->hcpriv);
2645 BUG_ON(qh == NULL);
2716 temp.qh = qh_get(q.qh);
2717 type = Q_NEXT_TYPE(q.qh->hw_next);
2718 q = q.qh->qh_next;
2719 modified = qh_completions(oxu, temp.qh);
2720 if (unlikely(list_empty(&temp.qh->qtd_list)))
2721 intr_deschedule(oxu, temp.qh);
2722 qh_put(temp.qh);
2828 static void unlink_async(struct oxu_hcd *oxu, struct ehci_qh *qh)
2831 if (qh->qh_state == QH_STATE_LINKED
2840 qh->qh_state = QH_STATE_UNLINK_WAIT;
2841 last->reclaim = qh;
2847 /* something else might have unlinked the qh by now */
2848 if (qh->qh_state == QH_STATE_LINKED)
2849 start_unlink_async(oxu, qh);
2895 /* complete the unlinking of some qh [4.15.2.3] */
3041 * dedicate a qh for the async ring head, since we couldn't unlink
3042 * a 'real' qh without stopping the async schedule [4.8]. use it
3044 * its dummy is used in hw_alt_next of many tds, to prevent the qh
3047 oxu->async->qh_next.qh = NULL;
3359 struct ehci_qh *qh;
3367 qh = (struct ehci_qh *) urb->hcpriv;
3368 if (!qh)
3370 unlink_async(oxu, qh);
3374 qh = (struct ehci_qh *) urb->hcpriv;
3375 if (!qh)
3377 switch (qh->qh_state) {
3379 intr_deschedule(oxu, qh);
3382 qh_completions(oxu, qh);
3385 oxu_dbg(oxu, "bogus qh %p state %d\n",
3386 qh, qh->qh_state);
3391 if (!list_empty(&qh->qtd_list)
3395 status = qh_schedule(oxu, qh);
3403 "can't reschedule qh %p, err %d\n", qh,
3415 /* Bulk qh holds the data toggle */
3421 struct ehci_qh *qh, *tmp;
3428 qh = ep->hcpriv;
3429 if (!qh)
3435 if (qh->hw_info1 == 0) {
3441 qh->qh_state = QH_STATE_IDLE;
3442 switch (qh->qh_state) {
3444 for (tmp = oxu->async->qh_next.qh;
3445 tmp && tmp != qh;
3446 tmp = tmp->qh_next.qh)
3448 /* periodic qh self-unlinks on empty */
3451 unlink_async(oxu, qh);
3459 if (list_empty(&qh->qtd_list)) {
3460 qh_put(qh);
3469 oxu_err(oxu, "qh %p (#%02x) state %d%s\n",
3470 qh, ep->desc.bEndpointAddress, qh->qh_state,
3471 list_empty(&qh->qtd_list) ? "" : "(has tds)");
3994 if (oxu->async->qh_next.qh)