Lines Matching defs:qtd (drivers/usb/host/ehci-q.c)
13 * Control, bulk, and interrupt traffic all use "qh" lists. They list "qtd"
34 /* fill a qtd, returning how much of the buffer we were able to queue up */
37 qtd_fill(struct ehci_hcd *ehci, struct ehci_qtd *qtd, dma_addr_t buf,
44 qtd->hw_buf[0] = cpu_to_hc32(ehci, (u32)addr);
45 qtd->hw_buf_hi[0] = cpu_to_hc32(ehci, (u32)(addr >> 32));
53 /* per-qtd limit: from 16K to 20K (best alignment) */
56 qtd->hw_buf[i] = cpu_to_hc32(ehci, (u32)addr);
57 qtd->hw_buf_hi[i] = cpu_to_hc32(ehci,
70 qtd->hw_token = cpu_to_hc32(ehci, (count << 16) | token);
71 qtd->length = count;
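
The qtd_fill() lines above (34-71) map a transfer buffer onto a qTD's five page-sized buffer pointer slots. The sketch below is a minimal userspace model of just that chunking, not the driver code: the hw_buf[] stores, cpu_to_hc32() byte-swapping, and the max-packet rounding are omitted, and model_qtd_fill, EHCI_PAGE_SIZE, and QTD_BUF_SLOTS are hypothetical names. It shows why the per-qtd limit runs from about 16K for a badly aligned buffer up to 20K when the buffer starts on a page boundary.

#include <stdint.h>
#include <stdio.h>

#define EHCI_PAGE_SIZE 4096u
#define QTD_BUF_SLOTS  5        /* a qTD carries five buffer pointers */

/* Simplified model of the chunking done by qtd_fill(): return how many
 * bytes of 'len', starting at bus address 'buf', fit into one qTD.
 * (The real function also fills hw_buf[]/hw_buf_hi[] and rounds a
 * partial fill down to a max-packet boundary.) */
static size_t model_qtd_fill(uint64_t buf, size_t len)
{
	size_t count;
	int i;

	/* slot 0 may start anywhere inside a 4 KB page */
	count = EHCI_PAGE_SIZE - (size_t)(buf & (EHCI_PAGE_SIZE - 1));
	if (count > len)
		return len;

	/* slots 1..4 each cover one full, page-aligned 4 KB chunk */
	for (i = 1; i < QTD_BUF_SLOTS && count < len; i++)
		count += EHCI_PAGE_SIZE;

	return count < len ? count : len;
}

int main(void)
{
	/* page-aligned buffer: all five slots usable, 20 KB per qTD */
	printf("%zu\n", model_qtd_fill(0x10000, 64 * 1024));
	/* worst-case alignment: 1 byte + four pages, just over 16 KB */
	printf("%zu\n", model_qtd_fill(0x10fff, 64 * 1024));
	return 0;
}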
79 qh_update (struct ehci_hcd *ehci, struct ehci_qh *qh, struct ehci_qtd *qtd)
86 hw->hw_qtd_next = QTD_NEXT(ehci, qtd->qtd_dma);
115 struct ehci_qtd *qtd;
117 qtd = list_entry(qh->qtd_list.next, struct ehci_qtd, qtd_list);
120 * first qtd may already be partially processed.
122 * might have reference to the just unlinked qtd. The
123 * qtd is updated in qh_completions(). Update the QH
127 qh->hw->hw_qtd_next = qtd->hw_next;
131 qh_update(ehci, qh, qtd);
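
The comments at lines 120-131 describe a subtlety when a queue head is refreshed: if its first qTD was already partially executed (for example, it was current when the QH was unlinked), the overlay must be pointed past it at qtd->hw_next rather than restarted at the qTD itself. Below is a minimal model of that decision only, with hypothetical model_qh/model_qtd types reduced to the two pointer fields involved.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical, heavily reduced descriptors: only the next-pointer
 * fields the lines above manipulate. */
struct model_qtd { uint32_t qtd_dma; uint32_t hw_next; };
struct model_qh  { uint32_t hw_qtd_next; };

/* Model of the choice made around lines 115-131: a first qTD that was
 * already partly executed must not be restarted from scratch, so the
 * QH overlay is pointed past it; otherwise qh_update() points the
 * overlay at the qTD itself. */
static void model_refresh(struct model_qh *qh, struct model_qtd *first,
			  bool first_partially_done)
{
	if (first_partially_done)
		qh->hw_qtd_next = first->hw_next;   /* skip finished work */
	else
		qh->hw_qtd_next = first->qtd_dma;   /* start at the qTD */
}

int main(void)
{
	struct model_qtd first = { .qtd_dma = 0x1000, .hw_next = 0x2000 };
	struct model_qh qh;

	model_refresh(&qh, &first, false);
	printf("fresh qTD:   overlay -> %#x\n", qh.hw_qtd_next);
	model_refresh(&qh, &first, true);
	printf("partly done: overlay -> %#x\n", qh.hw_qtd_next);
	return 0;
}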
323 struct ehci_qtd *qtd;
327 qtd = list_entry (entry, struct ehci_qtd, qtd_list);
328 urb = qtd->urb;
341 if (qtd == end)
344 /* hardware copies qtd out of qh overlay */
346 token = hc32_to_cpu(ehci, qtd->hw_token);
355 "detected DataBufferErr for urb %p ep%d%s len %d, qtd %p [qh %p]\n",
360 qtd,
377 qtd->length - QTD_LENGTH(token), qtd->length, qh->xacterrs);
379 /* reset the token in the qtd and the
381 * the qtd) so that we pick up from
387 qtd->hw_token = cpu_to_hc32(ehci,
402 * most other single-qtd reads ... the queue stops if
407 && !(qtd->hw_alt_next
428 /* this qtd is active; skip it unless a previous qtd
435 * If this was the active qtd when the qh was unlinked
437 * hasn't been written back to the qtd yet so use its
438 * token instead of the qtd's. After the qtd is
443 qh->qtd_list.next == &qtd->qtd_list &&
457 /* unless we already know the urb's status, collect qtd status
459 * cases with only one data qtd (including control transfers),
461 * example, with a 32 KB transfer), when the first qtd gets a
466 qtd->length, token);
468 && (qtd->hw_alt_next
495 if (stopped && qtd->qtd_list.prev != &qh->qtd_list) {
496 last = list_entry (qtd->qtd_list.prev,
498 last->hw_next = qtd->hw_next;
501 /* remove qtd; it's recycled after possible urb completion */
502 list_del (&qtd->qtd_list);
503 last = qtd;
505 /* reinit the xacterr counter for the next qtd */
529 * overlaying the dummy qtd (which reduces DMA chatter).
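
Lines 323-529 belong to qh_completions(), which walks a QH's qtd_list from the front, retires every qTD the controller has finished, and stops at the first one still active, handling halts, short reads, and xacterr retries along the way. The sketch below is a deliberately stripped-down model of just the retire-and-stop walk; m_qtd and model_completions() are hypothetical names, and the hw_next patching around mid-queue removals (line 498), error paths, and URB giveback ordering are only hinted at in comments.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical scaled-down qTD: just enough state to model the scan. */
struct m_qtd {
	bool active;             /* still owned by the controller? */
	bool urb_last;           /* last qTD of its URB? */
	struct m_qtd *next;
};

/* Walk the queue front-to-back, retiring descriptors the controller has
 * finished and stopping at the first one it still owns.  The real loop
 * also patches hw_next around mid-queue removals (line 498) and frees
 * each qTD only after its URB has been given back. */
static struct m_qtd *model_completions(struct m_qtd *head)
{
	while (head && !head->active) {
		struct m_qtd *done = head;

		head = head->next;
		if (done->urb_last)
			printf("URB complete, give it back\n");
		free(done);      /* stands in for ehci_qtd_free() */
	}
	return head;             /* first still-pending qTD, if any */
}

int main(void)
{
	struct m_qtd *c = calloc(1, sizeof(*c));
	struct m_qtd *b = calloc(1, sizeof(*b));
	struct m_qtd *a = calloc(1, sizeof(*a));

	a->next = b;
	b->next = c;
	b->urb_last = true;      /* a+b make up one finished URB */
	c->active = true;        /* controller still owns this one */
	printf("pending after scan: %p\n", (void *)model_completions(a));
	free(c);
	return 0;
}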
563 struct ehci_qtd *qtd;
565 qtd = list_entry (entry, struct ehci_qtd, qtd_list);
566 list_del (&qtd->qtd_list);
567 ehci_qtd_free (ehci, qtd);
581 struct ehci_qtd *qtd, *qtd_prev;
592 qtd = ehci_qtd_alloc (ehci, flags);
593 if (unlikely (!qtd))
595 list_add_tail (&qtd->qtd_list, head);
596 qtd->urb = urb;
606 qtd_fill(ehci, qtd, urb->setup_dma,
612 qtd_prev = qtd;
613 qtd = ehci_qtd_alloc (ehci, flags);
614 if (unlikely (!qtd))
616 qtd->urb = urb;
617 qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);
618 list_add_tail (&qtd->qtd_list, head);
657 this_qtd_len = qtd_fill(ehci, qtd, buf, this_sg_len, token,
665 * qtd ... that forces the queue to stop, for manual cleanup.
669 qtd->hw_alt_next = ehci->async->hw->hw_alt_next;
671 /* qh makes control packets use qtd toggle; maybe switch it */
683 qtd_prev = qtd;
684 qtd = ehci_qtd_alloc (ehci, flags);
685 if (unlikely (!qtd))
687 qtd->urb = urb;
688 qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);
689 list_add_tail (&qtd->qtd_list, head);
695 * last data qtd (the only one, for control and most other cases).
699 qtd->hw_alt_next = EHCI_LIST_END(ehci);
719 qtd_prev = qtd;
720 qtd = ehci_qtd_alloc (ehci, flags);
721 if (unlikely (!qtd))
723 qtd->urb = urb;
724 qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);
725 list_add_tail (&qtd->qtd_list, head);
728 qtd_fill(ehci, qtd, 0, 0, token, 0);
734 qtd->hw_token |= cpu_to_hc32(ehci, QTD_IOC);
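
Lines 581-734 are qh_urb_transaction(), which turns one URB into a chain of qTDs: each qTD is filled with as much of the buffer as it can carry, linked to its predecessor through hw_next, and only the final qTD gets the interrupt-on-complete bit. Below is a compact model of that chain-building loop, ignoring the SETUP/STATUS stages, scatter-gather, data toggles, and the hw_alt_next short-read handling; m_qtd, m_fill, and model_build_chain are hypothetical names.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define M_QTD_IOC 0x8000u   /* stand-in for QTD_IOC, "interrupt on complete" */

struct m_qtd {
	uint32_t token;
	size_t   length;
	struct m_qtd *hw_next;   /* the link the controller follows */
};

/* Hypothetical chunker standing in for qtd_fill(): at most 20 KB (five
 * 4 KB pages) per qTD, assuming a page-aligned buffer. */
static size_t m_fill(struct m_qtd *qtd, size_t remaining)
{
	qtd->length = remaining < 5 * 4096 ? remaining : 5 * 4096;
	return qtd->length;
}

/* Mirror the shape of the loop in qh_urb_transaction(): one qTD per
 * chunk, each linked to its predecessor via hw_next (as line 688 does
 * with QTD_NEXT()), and IOC requested only on the last qTD (line 734). */
static struct m_qtd *model_build_chain(size_t len)
{
	struct m_qtd *head = NULL, *prev = NULL, *qtd;

	while (len > 0 || !head) {
		qtd = calloc(1, sizeof(*qtd));
		if (!qtd)
			exit(1);   /* the real code unwinds the whole list */
		len -= m_fill(qtd, len);
		if (prev)
			prev->hw_next = qtd;
		else
			head = qtd;
		prev = qtd;
	}
	prev->token |= M_QTD_IOC;
	return head;
}

int main(void)
{
	struct m_qtd *q = model_build_chain(64 * 1024);
	size_t n = 0;

	for (; q; q = q->hw_next)
		printf("qTD %zu: %zu bytes%s\n", n++, q->length,
		       (q->token & M_QTD_IOC) ? " (IOC)" : "");
	return 0;
}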
752 * Each QH holds a qtd list; a QH is used for everything except iso.
885 info1 |= QH_TOGGLE_CTL; /* toggle from qtd */
914 info1 |= QH_TOGGLE_CTL; /* toggle from qtd */
1007 /* qtd completions reported later by interrupt */
1038 struct ehci_qtd *qtd;
1041 qtd = NULL;
1043 qtd = list_entry (qtd_list->next, struct ehci_qtd,
1054 /* just one way to queue requests: swap with the dummy qtd.
1057 if (likely (qtd != NULL)) {
1067 token = qtd->hw_token;
1068 qtd->hw_token = HALT_BIT(ehci);
1073 *dummy = *qtd;
1076 list_del (&qtd->qtd_list);
1080 ehci_qtd_init(ehci, qtd, qtd->qtd_dma);
1081 qh->dummy = qtd;
1084 dma = qtd->qtd_dma;
1085 qtd = list_entry (qh->qtd_list.prev,
1087 qtd->hw_next = QTD_NEXT(ehci, dma);
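
Lines 1038-1087 are the qh_append_tds() trick the comment at line 1054 names: a live QH always ends in an inactive dummy qTD, so new work is appended by copying the first new qTD into that dummy, recycling the displaced qTD as the next dummy at the tail, and only then writing the real token that lets the controller run the (former) dummy. The sketch below models just that swap with a hypothetical m_qtd type; the software-list splicing, DMA mapping, and the wmb() barrier the driver relies on are omitted.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define M_HALT 0x40u   /* stand-in for the halted/inactive token bit */

/* Hypothetical reduced qTD: just enough fields to model the swap. */
struct m_qtd {
	uint32_t hw_token;
	uint32_t qtd_dma;        /* address the controller uses for this qTD */
	struct m_qtd *hw_next;
};

static void model_append(struct m_qtd **qh_dummy, struct m_qtd *first_new,
			 struct m_qtd *last_new)
{
	struct m_qtd *dummy = *qh_dummy;
	uint32_t token = first_new->hw_token;
	uint32_t dummy_dma = dummy->qtd_dma;
	uint32_t new_dummy_dma = first_new->qtd_dma;
	struct m_qtd *tail;

	first_new->hw_token = M_HALT;   /* keep both copies inactive (EHCI 4.10.2) */

	*dummy = *first_new;            /* dummy takes over the first new qTD... */
	dummy->qtd_dma = dummy_dma;     /* ...but keeps its own DMA address */

	/* the displaced qTD becomes the new dummy at the end of the chain */
	memset(first_new, 0, sizeof(*first_new));
	first_new->qtd_dma = new_dummy_dma;
	first_new->hw_token = M_HALT;
	*qh_dummy = first_new;

	tail = (last_new == first_new) ? dummy : last_new;
	tail->hw_next = first_new;      /* hardware must see the new dummy last */

	dummy->hw_token = token;        /* activate: controller may now fetch it */
}

int main(void)
{
	struct m_qtd dummy = { .hw_token = M_HALT, .qtd_dma = 0xd0 };
	struct m_qtd new1  = { .hw_token = 0x80,   .qtd_dma = 0xa0 };
	struct m_qtd *qh_dummy = &dummy;

	model_append(&qh_dummy, &new1, &new1);
	printf("running qTD: dma=%#x token=%#x; new dummy: dma=%#x\n",
	       dummy.qtd_dma, dummy.hw_token, qh_dummy->qtd_dma);
	return 0;
}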
1117 struct ehci_qtd *qtd;
1118 qtd = list_entry(qtd_list->next, struct ehci_qtd, qtd_list);
1120 "%s %s urb %p ep%d%s len %d, qtd %p [qh %p]\n",
1124 qtd, urb->ep->hcpriv);
1177 struct ehci_qtd *qtd, *qtd_prev;
1186 qtd = ehci_qtd_alloc(ehci, GFP_KERNEL);
1187 if (unlikely(!qtd))
1189 list_add_tail(&qtd->qtd_list, head);
1190 qtd->urb = urb;
1203 qtd_fill(ehci, qtd, urb->setup_dma,
1222 qtd_fill(ehci, qtd, buf, len, token, maxpacket);
1226 * and let it advance to the next qtd which zero length OUT status
1228 qtd->hw_alt_next = EHCI_LIST_END(ehci);
1234 qtd_prev = qtd;
1235 qtd = ehci_qtd_alloc(ehci, GFP_ATOMIC);
1236 if (unlikely(!qtd))
1238 qtd->urb = urb;
1239 qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);
1240 list_add_tail(&qtd->qtd_list, head);
1243 qtd_fill(ehci, qtd, 0, 0, token | QTD_IOC, 0);
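
Lines 1177-1243 build the qTDs for the single-step SET_FEATURE test: a SETUP stage, an IN data stage on DATA1 whose hw_alt_next is EHCI_LIST_END so a short read stops the queue (line 1228), and a zero-length OUT status stage carrying QTD_IOC (line 1243). The sketch below only assembles the three token words using the qTD token bit layout from the EHCI specification; the TOK_* macro names are hypothetical stand-ins for the driver's QTD_* definitions in ehci.h.

#include <stdint.h>
#include <stdio.h>

/* qTD token bit layout from the EHCI specification (section 3.5.3) */
#define TOK_ACTIVE    (1u << 7)    /* controller owns the qTD */
#define TOK_PID_OUT   (0u << 8)
#define TOK_PID_IN    (1u << 8)
#define TOK_PID_SETUP (2u << 8)
#define TOK_CERR      (3u << 10)   /* allow three transaction errors */
#define TOK_IOC       (1u << 15)   /* interrupt on complete */
#define TOK_LEN(n)    ((uint32_t)(n) << 16)
#define TOK_TOGGLE    (1u << 31)   /* DATA1 */

int main(void)
{
	/* SETUP stage: 8-byte setup packet, DATA0 */
	uint32_t setup  = TOK_ACTIVE | TOK_CERR | TOK_PID_SETUP | TOK_LEN(8);
	/* IN data stage: DATA1, 64 bytes requested */
	uint32_t data   = TOK_ACTIVE | TOK_CERR | TOK_PID_IN | TOK_TOGGLE |
			  TOK_LEN(64);
	/* zero-length OUT status stage: DATA1, interrupt on completion */
	uint32_t status = TOK_ACTIVE | TOK_CERR | TOK_PID_OUT | TOK_TOGGLE |
			  TOK_IOC;

	printf("setup=%#x data=%#x status=%#x\n", setup, data, status);
	return 0;
}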