Lines matching refs:qtd in fotg210-hcd.c

49 #define FOTG210_TUNE_CERR	3 /* 0-3 qtd retries; 0 == don't stop */
115 dbg_qtd(const char *label, struct fotg210_hcd *fotg210, struct fotg210_qtd *qtd)
117 fotg210_dbg(fotg210, "%s td %p n%08x %08x t%08x p0=%08x\n", label, qtd,
118 hc32_to_cpup(fotg210, &qtd->hw_next),
119 hc32_to_cpup(fotg210, &qtd->hw_alt_next),
120 hc32_to_cpup(fotg210, &qtd->hw_token),
121 hc32_to_cpup(fotg210, &qtd->hw_buf[0]));
122 if (qtd->hw_buf[1])
124 hc32_to_cpup(fotg210, &qtd->hw_buf[1]),
125 hc32_to_cpup(fotg210, &qtd->hw_buf[2]),
126 hc32_to_cpup(fotg210, &qtd->hw_buf[3]),
127 hc32_to_cpup(fotg210, &qtd->hw_buf[4]));
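
The fields printed by dbg_qtd() above outline the descriptor itself. A minimal sketch of the qTD structure, assuming the usual EHCI-style split between the hardware-visible words and driver bookkeeping; only fields referenced somewhere in this listing are shown, and __hc32 is the driver's possibly byte-swapped 32-bit type:

struct fotg210_qtd {
	/* hardware-visible part, fetched by the controller via DMA */
	__hc32			hw_next;	/* link to next qtd, or FOTG210_LIST_END */
	__hc32			hw_alt_next;	/* alternate link taken on short reads */
	__hc32			hw_token;	/* status bits, PID, error count, length */
	__hc32			hw_buf[5];	/* up to five 4 KiB buffer page pointers */
	__hc32			hw_buf_hi[5];	/* high 32 bits of each buffer address */

	/* software-only bookkeeping */
	dma_addr_t		qtd_dma;	/* DMA address of this qtd */
	struct list_head	qtd_list;	/* qtds queued on one QH */
	struct urb		*urb;		/* URB this qtd moves data for */
	size_t			length;		/* bytes covered by hw_buf[] */
};
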
135 fotg210_dbg(fotg210, "%s qh %p n%08x info %x %x qtd %x\n", label, qh,
376 /* else alt_next points to some other qtd */
487 struct fotg210_qtd *qtd;
492 list_for_each_entry(qtd, &qh->qtd_list, qtd_list) {
494 switch ((hc32_to_cpu(fotg210, qtd->hw_token) >> 8) & 0x03) {
1805 struct fotg210_qtd *qtd, dma_addr_t dma)
1807 memset(qtd, 0, sizeof(*qtd));
1808 qtd->qtd_dma = dma;
1809 qtd->hw_token = cpu_to_hc32(fotg210, QTD_STS_HALT);
1810 qtd->hw_next = FOTG210_LIST_END(fotg210);
1811 qtd->hw_alt_next = FOTG210_LIST_END(fotg210);
1812 INIT_LIST_HEAD(&qtd->qtd_list);
1818 struct fotg210_qtd *qtd;
1821 qtd = dma_pool_alloc(fotg210->qtd_pool, flags, &dma);
1822 if (qtd != NULL)
1823 fotg210_qtd_init(fotg210, qtd, dma);
1825 return qtd;
1829 struct fotg210_qtd *qtd)
1831 dma_pool_free(fotg210->qtd_pool, qtd, qtd->qtd_dma);
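
Pieced together from the fragments at lines 1805-1831, the qTD lifecycle helpers look roughly as follows; the function qualifiers and declarations between the quoted lines are assumptions:

static inline void fotg210_qtd_init(struct fotg210_hcd *fotg210,
		struct fotg210_qtd *qtd, dma_addr_t dma)
{
	memset(qtd, 0, sizeof(*qtd));
	qtd->qtd_dma = dma;
	/* start out halted and pointing nowhere, so the HC ignores it */
	qtd->hw_token = cpu_to_hc32(fotg210, QTD_STS_HALT);
	qtd->hw_next = FOTG210_LIST_END(fotg210);
	qtd->hw_alt_next = FOTG210_LIST_END(fotg210);
	INIT_LIST_HEAD(&qtd->qtd_list);
}

static struct fotg210_qtd *fotg210_qtd_alloc(struct fotg210_hcd *fotg210,
		gfp_t flags)
{
	struct fotg210_qtd *qtd;
	dma_addr_t dma;

	qtd = dma_pool_alloc(fotg210->qtd_pool, flags, &dma);
	if (qtd != NULL)
		fotg210_qtd_init(fotg210, qtd, dma);
	return qtd;
}

static inline void fotg210_qtd_free(struct fotg210_hcd *fotg210,
		struct fotg210_qtd *qtd)
{
	dma_pool_free(fotg210->qtd_pool, qtd, qtd->qtd_dma);
}
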
1975 * Control, bulk, and interrupt traffic all use "qh" lists. They list "qtd"
1990 /* fill a qtd, returning how much of the buffer we were able to queue up */
1991 static int qtd_fill(struct fotg210_hcd *fotg210, struct fotg210_qtd *qtd,
1998 qtd->hw_buf[0] = cpu_to_hc32(fotg210, (u32)addr);
1999 qtd->hw_buf_hi[0] = cpu_to_hc32(fotg210, (u32)(addr >> 32));
2007 /* per-qtd limit: from 16K to 20K (best alignment) */
2010 qtd->hw_buf[i] = cpu_to_hc32(fotg210, (u32)addr);
2011 qtd->hw_buf_hi[i] = cpu_to_hc32(fotg210,
2024 qtd->hw_token = cpu_to_hc32(fotg210, (count << 16) | token);
2025 qtd->length = count;
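
qtd_fill() packs as much of the buffer as fits into one qTD's five page pointers (hence the 16K-20K per-qtd limit, depending on how the buffer aligns to 4 KiB pages), trims the count to a multiple of maxpacket if the qtd cannot hold everything, and writes the byte count into the token. A sketch of the body, with the page arithmetic between the quoted lines filled in along standard EHCI lines and therefore an assumption:

static int qtd_fill(struct fotg210_hcd *fotg210, struct fotg210_qtd *qtd,
		dma_addr_t buf, size_t len, int token, int maxpacket)
{
	int i, count;
	u64 addr = buf;

	/* one buffer entry per 4K page; the first may be short or unaligned */
	qtd->hw_buf[0] = cpu_to_hc32(fotg210, (u32)addr);
	qtd->hw_buf_hi[0] = cpu_to_hc32(fotg210, (u32)(addr >> 32));
	count = 0x1000 - (buf & 0x0fff);	/* rest of that page */
	if (likely(len < count))
		count = len;
	else {
		buf += 0x1000;
		buf &= ~0x0fff;

		/* per-qtd limit: from 16K to 20K (best alignment) */
		for (i = 1; count < len && i < 5; i++) {
			addr = buf;
			qtd->hw_buf[i] = cpu_to_hc32(fotg210, (u32)addr);
			qtd->hw_buf_hi[i] = cpu_to_hc32(fotg210,
					(u32)(addr >> 32));
			buf += 0x1000;
			if ((count + 0x1000) < len)
				count += 0x1000;
			else
				count = len;
		}

		/* short packets may only terminate transfers */
		if (count != len)
			count -= (count % maxpacket);
	}
	qtd->hw_token = cpu_to_hc32(fotg210, (count << 16) | token);
	qtd->length = count;

	return count;
}
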
2031 struct fotg210_qh *qh, struct fotg210_qtd *qtd)
2038 hw->hw_qtd_next = QTD_NEXT(fotg210, qtd->qtd_dma);
2066 struct fotg210_qtd *qtd;
2069 qtd = qh->dummy;
2071 qtd = list_entry(qh->qtd_list.next,
2074 * first qtd may already be partially processed.
2076 * might have reference to the just unlinked qtd. The
2077 * qtd is updated in qh_completions(). Update the QH
2080 if (cpu_to_hc32(fotg210, qtd->qtd_dma) == qh->hw->hw_current) {
2081 qh->hw->hw_qtd_next = qtd->hw_next;
2082 qtd = NULL;
2086 if (qtd)
2087 qh_update(fotg210, qh, qtd);
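
The fragments at lines 2066-2087 belong to the routine that reloads the QH's transaction overlay (qh_refresh() in the EHCI-derived code). A sketch of how it picks which qtd to load, with the control flow between the quoted lines assumed:

static void qh_refresh(struct fotg210_hcd *fotg210, struct fotg210_qh *qh)
{
	struct fotg210_qtd *qtd;

	if (list_empty(&qh->qtd_list))
		qtd = qh->dummy;
	else {
		qtd = list_entry(qh->qtd_list.next,
				struct fotg210_qtd, qtd_list);
		/* the first qtd may already be partially processed; if the
		 * overlay still references a just-unlinked qtd, advance the
		 * hardware link instead of reloading the whole overlay */
		if (cpu_to_hc32(fotg210, qtd->qtd_dma) == qh->hw->hw_current) {
			qh->hw->hw_qtd_next = qtd->hw_next;
			qtd = NULL;
		}
	}

	if (qtd)
		qh_update(fotg210, qh, qtd);
}
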
2182 "dev%d ep%d%s qtd token %08x --> status %d\n",
2244 struct fotg210_qtd *qtd, *tmp;
2278 list_for_each_entry_safe(qtd, tmp, &qh->qtd_list, qtd_list) {
2282 urb = qtd->urb;
2297 if (qtd == end)
2300 /* hardware copies qtd out of qh overlay */
2302 token = hc32_to_cpu(fotg210, qtd->hw_token);
2311 "detected DataBufferErr for urb %p ep%d%s len %d, qtd %p [qh %p]\n",
2315 urb->transfer_buffer_length, qtd, qh);
2331 qtd->length - QTD_LENGTH(token),
2332 qtd->length,
2335 /* reset the token in the qtd and the
2337 * the qtd) so that we pick up from
2343 qtd->hw_token = cpu_to_hc32(fotg210,
2357 * most other single-qtd reads ... the queue stops if
2362 !(qtd->hw_alt_next &
2380 /* this qtd is active; skip it unless a previous qtd
2388 cpu_to_hc32(fotg210, qtd->qtd_dma)
2401 /* unless we already know the urb's status, collect qtd status
2403 * cases with only one data qtd (including control transfers),
2405 * example, with a 32 KB transfer), when the first qtd gets a
2410 qtd->length, token);
2412 (qtd->hw_alt_next &
2439 if (stopped && qtd->qtd_list.prev != &qh->qtd_list) {
2440 last = list_entry(qtd->qtd_list.prev,
2442 last->hw_next = qtd->hw_next;
2445 /* remove qtd; it's recycled after possible urb completion */
2446 list_del(&qtd->qtd_list);
2447 last = qtd;
2449 /* reinit the xacterr counter for the next qtd */
2480 * overlaying the dummy qtd (which reduces DMA chatter).
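
In the completion path (qtd_copy_status() and qh_completions(), lines 2182-2480), the byte count actually transferred is recovered from the token: the controller decrements the token's length field as it moves data, so the residue is subtracted from the qtd's original length. A simplified sketch of that accounting, assuming the usual EHCI QTD_LENGTH() field extraction and ignoring that SETUP tokens are excluded:

	token = hc32_to_cpu(fotg210, qtd->hw_token);
	/* bytes done = bytes mapped into this qtd minus what the token says is left */
	urb->actual_length += qtd->length - QTD_LENGTH(token);
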
2516 struct fotg210_qtd *qtd, *temp;
2518 list_for_each_entry_safe(qtd, temp, head, qtd_list) {
2519 list_del(&qtd->qtd_list);
2520 fotg210_qtd_free(fotg210, qtd);
2529 struct fotg210_qtd *qtd, *qtd_prev;
2540 qtd = fotg210_qtd_alloc(fotg210, flags);
2541 if (unlikely(!qtd))
2543 list_add_tail(&qtd->qtd_list, head);
2544 qtd->urb = urb;
2554 qtd_fill(fotg210, qtd, urb->setup_dma,
2560 qtd_prev = qtd;
2561 qtd = fotg210_qtd_alloc(fotg210, flags);
2562 if (unlikely(!qtd))
2564 qtd->urb = urb;
2565 qtd_prev->hw_next = QTD_NEXT(fotg210, qtd->qtd_dma);
2566 list_add_tail(&qtd->qtd_list, head);
2605 this_qtd_len = qtd_fill(fotg210, qtd, buf, this_sg_len, token,
2613 * qtd ... that forces the queue to stop, for manual cleanup.
2617 qtd->hw_alt_next = fotg210->async->hw->hw_alt_next;
2619 /* qh makes control packets use qtd toggle; maybe switch it */
2631 qtd_prev = qtd;
2632 qtd = fotg210_qtd_alloc(fotg210, flags);
2633 if (unlikely(!qtd))
2635 qtd->urb = urb;
2636 qtd_prev->hw_next = QTD_NEXT(fotg210, qtd->qtd_dma);
2637 list_add_tail(&qtd->qtd_list, head);
2643 * last data qtd (the only one, for control and most other cases).
2647 qtd->hw_alt_next = FOTG210_LIST_END(fotg210);
2667 qtd_prev = qtd;
2668 qtd = fotg210_qtd_alloc(fotg210, flags);
2669 if (unlikely(!qtd))
2671 qtd->urb = urb;
2672 qtd_prev->hw_next = QTD_NEXT(fotg210, qtd->qtd_dma);
2673 list_add_tail(&qtd->qtd_list, head);
2676 qtd_fill(fotg210, qtd, 0, 0, token, 0);
2682 qtd->hw_token |= cpu_to_hc32(fotg210, QTD_IOC);
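
The fragments from lines 2529-2682 (qh_urb_transaction) build one URB's qtd chain: a SETUP qtd for control transfers, one or more data qtds filled by qtd_fill(), an optional zero-length status/ZLP qtd, and QTD_IOC on the last one. The same three-step pattern recurs at 2560-2566, 2631-2637 and 2667-2673; the hypothetical helper below exists only to illustrate it (the driver open-codes the steps, and the cleanup path is left to the caller):

static struct fotg210_qtd *qtd_chain_next(struct fotg210_hcd *fotg210,
		struct fotg210_qtd *prev, struct urb *urb,
		struct list_head *head, gfp_t flags)
{
	struct fotg210_qtd *qtd = fotg210_qtd_alloc(fotg210, flags);

	if (unlikely(!qtd))
		return NULL;		/* caller unwinds via qtd_list_free() */
	qtd->urb = urb;
	/* hardware link from the previous qtd, then software list tail */
	prev->hw_next = QTD_NEXT(fotg210, qtd->qtd_dma);
	list_add_tail(&qtd->qtd_list, head);
	return qtd;
}
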
2698 /* Each QH holds a qtd list; a QH is used for everything except iso.
2811 info1 |= QH_TOGGLE_CTL; /* toggle from qtd */
2840 info1 |= QH_TOGGLE_CTL; /* toggle from qtd */
2930 /* qtd completions reported later by interrupt */
2954 struct fotg210_qtd *qtd;
2957 qtd = NULL;
2959 qtd = list_entry(qtd_list->next, struct fotg210_qtd,
2969 /* just one way to queue requests: swap with the dummy qtd.
2972 if (likely(qtd != NULL)) {
2982 token = qtd->hw_token;
2983 qtd->hw_token = HALT_BIT(fotg210);
2988 *dummy = *qtd;
2991 list_del(&qtd->qtd_list);
2995 fotg210_qtd_init(fotg210, qtd, qtd->qtd_dma);
2996 qh->dummy = qtd;
2999 dma = qtd->qtd_dma;
3000 qtd = list_entry(qh->qtd_list.prev,
3002 qtd->hw_next = QTD_NEXT(fotg210, dma);
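
Lines 2969-3002 show how a freshly built chain is grafted onto a possibly live queue without racing the controller: the QH's dummy qtd takes over the contents of the first new qtd (keeping its own DMA address, so the hardware's existing link stays valid), the first new qtd is re-initialized to become the next dummy at the end of the list, and only then is the copied token made active. A sketch of that sequence; the list splicing and the final write barrier follow the standard EHCI approach and are assumptions here:

	struct fotg210_qtd *dummy = qh->dummy;
	dma_addr_t dma;
	__hc32 token;

	/* keep the first new qtd inactive while pointers are shuffled */
	token = qtd->hw_token;
	qtd->hw_token = HALT_BIT(fotg210);

	/* old dummy takes over the first qtd's contents, keeps its DMA address */
	dma = dummy->qtd_dma;
	*dummy = *qtd;
	dummy->qtd_dma = dma;

	list_del(&qtd->qtd_list);
	list_add(&dummy->qtd_list, qtd_list);
	list_splice_tail(qtd_list, &qh->qtd_list);

	/* recycle the first new qtd as the next dummy at the list end */
	fotg210_qtd_init(fotg210, qtd, qtd->qtd_dma);
	qh->dummy = qtd;

	/* hardware must see the new dummy linked after the last real qtd */
	dma = qtd->qtd_dma;
	qtd = list_entry(qh->qtd_list.prev, struct fotg210_qtd, qtd_list);
	qtd->hw_next = QTD_NEXT(fotg210, dma);

	/* only now let the controller pick up the first new qtd */
	wmb();
	dummy->hw_token = token;
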
3026 struct fotg210_qtd *qtd;
3028 qtd = list_entry(qtd_list->next, struct fotg210_qtd, qtd_list);
3030 "%s %s urb %p ep%d%s len %d, qtd %p [qh %p]\n",
3035 qtd, urb->ep->hcpriv);
5638 pr_debug("%s: block sizes: qh %zd qtd %zd itd %zd\n",