Lines Matching defs:qtd
13 * Control, bulk, and interrupt traffic all use "qh" lists. They list "qtd"
34 /* fill a qtd, returning how much of the buffer we were able to queue up */
37 qtd_fill(struct ehci_hcd *ehci, struct ehci_qtd *qtd, dma_addr_t buf,
45 qtd->hw_buf[0] = cpu_to_hc32(ehci, (u32)addr);
46 qtd->hw_buf_hi[0] = cpu_to_hc32(ehci, (u32)(addr >> 32));
54 /* per-qtd limit: from 16K to 20K (best alignment) */
57 qtd->hw_buf[i] = cpu_to_hc32(ehci, (u32)addr);
58 qtd->hw_buf_hi[i] = cpu_to_hc32(ehci,
71 qtd->hw_token = cpu_to_hc32(ehci, (count << 16) | token);
72 qtd->length = count;
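
The qtd_fill() hits above (source lines 34-72) pack a transfer buffer into one qTD's five page pointers, which is where the "16K to 20K" limit noted at line 54 comes from. Below is a minimal standalone sketch of that buffer math, not the driver code; EHCI_PAGE, QTD_BUF_PTRS, and qtd_capacity() are illustrative names invented for the sketch.

    #include <stdio.h>
    #include <stdint.h>

    #define EHCI_PAGE     4096u   /* each qTD buffer pointer covers one 4 KB page */
    #define QTD_BUF_PTRS  5u      /* a qTD carries five such pointers             */

    /* Bytes one qTD can cover, given the transfer buffer's start address:
     * the first pointer only reaches the end of its page, the other four
     * cover full pages, so capacity ranges from just over 16 KB (worst
     * alignment) to 20 KB (page-aligned buffer).  The real qtd_fill()
     * additionally trims the count to a multiple of maxpacket when the
     * whole buffer doesn't fit. */
    static size_t qtd_capacity(uint64_t buf)
    {
        size_t first = EHCI_PAGE - (size_t)(buf & (EHCI_PAGE - 1));
        return first + (QTD_BUF_PTRS - 1) * EHCI_PAGE;
    }

    int main(void)
    {
        printf("page-aligned buffer : %zu bytes\n", qtd_capacity(0x10000));  /* 20480 */
        printf("worst-case alignment: %zu bytes\n", qtd_capacity(0x10fff));  /* 16385 */
        return 0;
    }
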
80 qh_update (struct ehci_hcd *ehci, struct ehci_qh *qh, struct ehci_qtd *qtd)
87 hw->hw_qtd_next = QTD_NEXT(ehci, qtd->qtd_dma);
116 struct ehci_qtd *qtd;
118 qtd = list_entry(qh->qtd_list.next, struct ehci_qtd, qtd_list);
121 * first qtd may already be partially processed.
123 * might have reference to the just unlinked qtd. The
124 * qtd is updated in qh_completions(). Update the QH
128 qh->hw->hw_qtd_next = qtd->hw_next;
132 qh_update(ehci, qh, qtd);
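
A small model of what the qh_update()/qh_refresh() hits at lines 80-132 do: the QH's transfer overlay is pointed at the next qTD the controller should fetch, either the head of qtd_list, or that head's own hw_next when the overlay shows it was already partially processed (the unlink case described in the comment above). Field and function names here are simplified stand-ins, not the driver's structures.

    #include <stdio.h>
    #include <stdint.h>
    #include <stdbool.h>

    struct overlay  { uint32_t hw_qtd_next; };                /* QH transfer overlay   */
    struct mini_qtd { uint32_t qtd_dma; uint32_t hw_next; };  /* one queued descriptor */

    /* Point the overlay at the right place to (re)start the queue. */
    static void refresh(struct overlay *hw, const struct mini_qtd *head,
                        bool head_partly_done)
    {
        hw->hw_qtd_next = head_partly_done ? head->hw_next : head->qtd_dma;
    }

    int main(void)
    {
        struct mini_qtd head = { .qtd_dma = 0x1000, .hw_next = 0x1040 };
        struct overlay hw;

        refresh(&hw, &head, false);
        printf("fresh queue -> fetch %#x\n", (unsigned)hw.hw_qtd_next);  /* 0x1000 */
        refresh(&hw, &head, true);
        printf("partly done -> fetch %#x\n", (unsigned)hw.hw_qtd_next);  /* 0x1040 */
        return 0;
    }
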
324 struct ehci_qtd *qtd;
328 qtd = list_entry (entry, struct ehci_qtd, qtd_list);
329 urb = qtd->urb;
342 if (qtd == end)
345 /* hardware copies qtd out of qh overlay */
347 token = hc32_to_cpu(ehci, qtd->hw_token);
356 "detected DataBufferErr for urb %p ep%d%s len %d, qtd %p [qh %p]\n",
361 qtd,
378 qtd->length - QTD_LENGTH(token), qtd->length, qh->xacterrs);
380 /* reset the token in the qtd and the
382 * the qtd) so that we pick up from
388 qtd->hw_token = cpu_to_hc32(ehci,
403 * most other single-qtd reads ... the queue stops if
408 && !(qtd->hw_alt_next
429 /* this qtd is active; skip it unless a previous qtd
436 * If this was the active qtd when the qh was unlinked
438 * hasn't been written back to the qtd yet so use its
439 * token instead of the qtd's. After the qtd is
444 qh->qtd_list.next == &qtd->qtd_list &&
458 /* unless we already know the urb's status, collect qtd status
460 * cases with only one data qtd (including control transfers),
462 * example, with a 32 KB transfer), when the first qtd gets a
467 qtd->length, token);
469 && (qtd->hw_alt_next
496 if (stopped && qtd->qtd_list.prev != &qh->qtd_list) {
497 last = list_entry (qtd->qtd_list.prev,
499 last->hw_next = qtd->hw_next;
502 /* remove qtd; it's recycled after possible urb completion */
503 list_del (&qtd->qtd_list);
504 last = qtd;
506 /* reinit the xacterr counter for the next qtd */
530 * overlaying the dummy qtd (which reduces DMA chatter).
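
The qh_completions() hits above (lines 324-530) walk a QH's qtd_list and account for what the controller wrote back into each token. The following is a standalone sketch of just that accounting, assuming the token layout of the EHCI spec (active/halt status bits, and a 15-bit "bytes not yet transferred" field extracted here by a local QTD_LENGTH macro); it is not the driver's loop, which also handles error recovery, short reads, and unlinking.

    #include <stdio.h>
    #include <stdint.h>

    #define QTD_STS_ACTIVE   (1u << 7)                  /* HC still owns this qtd */
    #define QTD_STS_HALT     (1u << 6)                  /* queue halted on error  */
    #define QTD_LENGTH(tok)  (((tok) >> 16) & 0x7fffu)  /* bytes NOT transferred  */

    struct done_qtd {
        uint32_t hw_token;   /* token as written back by the controller */
        size_t   length;     /* bytes this qtd was asked to move        */
    };

    int main(void)
    {
        /* a 12 KB read split over three qtds: one complete, one short, one untouched */
        struct done_qtd q[] = {
            { .hw_token = 0,              .length = 4096 },
            { .hw_token = 1000u << 16,    .length = 4096 },   /* 1000 bytes left over */
            { .hw_token = QTD_STS_ACTIVE, .length = 4096 },
        };
        size_t actual = 0;

        for (size_t i = 0; i < sizeof(q) / sizeof(q[0]); i++) {
            uint32_t token = q[i].hw_token;

            if (token & QTD_STS_ACTIVE)   /* hardware hasn't finished it yet */
                break;
            actual += q[i].length - QTD_LENGTH(token);
            if (token & QTD_STS_HALT)     /* error: nothing after this ran   */
                break;
        }
        printf("bytes transferred so far: %zu\n", actual);   /* 4096 + 3096 = 7192 */
        return 0;
    }
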
564 struct ehci_qtd *qtd;
566 qtd = list_entry (entry, struct ehci_qtd, qtd_list);
567 list_del (&qtd->qtd_list);
568 ehci_qtd_free (ehci, qtd);
582 struct ehci_qtd *qtd, *qtd_prev;
593 qtd = ehci_qtd_alloc (ehci, flags);
594 if (unlikely (!qtd))
596 list_add_tail (&qtd->qtd_list, head);
597 qtd->urb = urb;
607 qtd_fill(ehci, qtd, urb->setup_dma,
613 qtd_prev = qtd;
614 qtd = ehci_qtd_alloc (ehci, flags);
615 if (unlikely (!qtd))
617 qtd->urb = urb;
618 qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);
619 list_add_tail (&qtd->qtd_list, head);
658 this_qtd_len = qtd_fill(ehci, qtd, buf, this_sg_len, token,
666 * qtd ... that forces the queue to stop, for manual cleanup.
670 qtd->hw_alt_next = ehci->async->hw->hw_alt_next;
672 /* qh makes control packets use qtd toggle; maybe switch it */
684 qtd_prev = qtd;
685 qtd = ehci_qtd_alloc (ehci, flags);
686 if (unlikely (!qtd))
688 qtd->urb = urb;
689 qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);
690 list_add_tail (&qtd->qtd_list, head);
696 * last data qtd (the only one, for control and most other cases).
700 qtd->hw_alt_next = EHCI_LIST_END(ehci);
720 qtd_prev = qtd;
721 qtd = ehci_qtd_alloc (ehci, flags);
722 if (unlikely (!qtd))
724 qtd->urb = urb;
725 qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);
726 list_add_tail (&qtd->qtd_list, head);
729 qtd_fill(ehci, qtd, 0, 0, token, 0);
735 qtd->hw_token |= cpu_to_hc32(ehci, QTD_IOC);
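
The qh_urb_transaction() hits above (lines 582-735) allocate qtds one after another and chain them through hw_next, which carries the DMA (bus) address of the successor rather than a CPU pointer, with the EHCI terminate bit marking the end. A sketch of just that linking step, with invented addresses and a local LIST_END constant standing in for EHCI_LIST_END():

    #include <stdio.h>
    #include <stdint.h>

    #define LIST_END 1u   /* EHCI "T" bit: no valid next pointer */

    struct chained_qtd {
        uint32_t qtd_dma;   /* this qtd's own bus address            */
        uint32_t hw_next;   /* bus address of the next qtd, or T bit */
    };

    int main(void)
    {
        struct chained_qtd q[3];

        /* made-up bus addresses; the driver gets real ones from a DMA pool */
        for (int i = 0; i < 3; i++)
            q[i].qtd_dma = 0x1000u + 0x40u * (uint32_t)i;

        /* link each qtd to its successor; terminate the last one */
        for (int i = 0; i < 3; i++)
            q[i].hw_next = (i < 2) ? q[i + 1].qtd_dma : LIST_END;

        for (int i = 0; i < 3; i++)
            printf("qtd %d @ %#x -> next %#x\n",
                   i, (unsigned)q[i].qtd_dma, (unsigned)q[i].hw_next);
        return 0;
    }

In the driver, the last hw_next ultimately ends up pointing at the QH's dummy qtd once qh_append_tds() splices the chain in (see the hits around line 1088).
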
753 * Each QH holds a qtd list; a QH is used for everything except iso.
886 info1 |= QH_TOGGLE_CTL; /* toggle from qtd */
915 info1 |= QH_TOGGLE_CTL; /* toggle from qtd */
1008 /* qtd completions reported later by interrupt */
1039 struct ehci_qtd *qtd;
1042 qtd = NULL;
1044 qtd = list_entry (qtd_list->next, struct ehci_qtd,
1055 /* just one way to queue requests: swap with the dummy qtd.
1058 if (likely (qtd != NULL)) {
1068 token = qtd->hw_token;
1069 qtd->hw_token = HALT_BIT(ehci);
1074 *dummy = *qtd;
1077 list_del (&qtd->qtd_list);
1081 ehci_qtd_init(ehci, qtd, qtd->qtd_dma);
1082 qh->dummy = qtd;
1085 dma = qtd->qtd_dma;
1086 qtd = list_entry (qh->qtd_list.prev,
1088 qtd->hw_next = QTD_NEXT(ehci, dma);
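
The hits from lines 1039-1088 are the dummy-qtd swap that the comment at line 1055 describes: the QH always ends in an inactive dummy qtd that the controller already points at, so appending copies the first new qtd into that dummy, recycles the old first qtd as the next dummy, links the end of the new chain to it, and only then re-arms the token. A simplified single-qtd model follows; field names, the HALT/ACTIVE bit values, and append_one() are stand-ins, and the driver also issues a write barrier before the final token store.

    #include <stdio.h>
    #include <stdint.h>

    #define HALT_BIT    (1u << 6)   /* keeps a qtd inactive */
    #define ACTIVE_BIT  (1u << 7)   /* HC may execute it    */

    struct sqtd {
        uint32_t hw_token;
        uint32_t hw_next;
        uint32_t qtd_dma;
    };

    struct sqh {
        struct sqtd *dummy;          /* inactive qtd at the end of the queue */
    };

    /* Append a one-qtd transaction without the HC ever seeing a half-built queue. */
    static void append_one(struct sqh *qh, struct sqtd *first)
    {
        struct sqtd *dummy = qh->dummy;
        uint32_t token = first->hw_token;   /* remember the "go" token          */
        uint32_t dma   = dummy->qtd_dma;

        first->hw_token = HALT_BIT;         /* the copy must start out inert    */
        *dummy = *first;                    /* fill in the qtd the HC points at */
        dummy->qtd_dma = dma;               /* ...but keep the dummy's address  */

        qh->dummy = first;                  /* recycle "first" as the new dummy */
        dummy->hw_next = first->qtd_dma;    /* queue still ends at a dummy      */

        /* the driver issues wmb() here, then re-arms the token: */
        dummy->hw_token = token;
    }

    int main(void)
    {
        struct sqtd d  = { .hw_token = HALT_BIT,   .qtd_dma = 0x2000 };
        struct sqtd n  = { .hw_token = ACTIVE_BIT, .qtd_dma = 0x2040 };
        struct sqh  qh = { .dummy = &d };

        append_one(&qh, &n);
        printf("old dummy now runs token %#x, next %#x; new dummy @ %#x\n",
               (unsigned)d.hw_token, (unsigned)d.hw_next, (unsigned)qh.dummy->qtd_dma);
        return 0;
    }
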
1118 struct ehci_qtd *qtd;
1119 qtd = list_entry(qtd_list->next, struct ehci_qtd, qtd_list);
1121 "%s %s urb %p ep%d%s len %d, qtd %p [qh %p]\n",
1125 qtd, urb->ep->hcpriv);
1178 struct ehci_qtd *qtd, *qtd_prev;
1187 qtd = ehci_qtd_alloc(ehci, GFP_KERNEL);
1188 if (unlikely(!qtd))
1190 list_add_tail(&qtd->qtd_list, head);
1191 qtd->urb = urb;
1204 qtd_fill(ehci, qtd, urb->setup_dma,
1223 qtd_fill(ehci, qtd, buf, len, token, maxpacket);
1227 * and let it advance to the next qtd, which is the zero-length OUT status
1229 qtd->hw_alt_next = EHCI_LIST_END(ehci);
1235 qtd_prev = qtd;
1236 qtd = ehci_qtd_alloc(ehci, GFP_ATOMIC);
1237 if (unlikely(!qtd))
1239 qtd->urb = urb;
1240 qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);
1241 list_add_tail(&qtd->qtd_list, head);
1244 qtd_fill(ehci, qtd, 0, 0, token | QTD_IOC, 0);
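
The final hits (lines 1178-1244) are ehci_submit_single_step_set_feature(), which hand-builds the three stages of a control transfer: a SETUP qtd, an IN data qtd whose hw_alt_next is EHCI_LIST_END so a short read still falls through to the status stage, and a zero-length OUT status qtd with IOC set. The sketch below shows the token each stage would carry under the EHCI qTD token layout (toggle in bit 31, IOC in bit 15, PID in bits 9:8 with 0 = OUT, 1 = IN, 2 = SETUP); the macro names and stage_token() are local stand-ins, and the byte-count and error-counter fields are omitted.

    #include <stdio.h>
    #include <stdint.h>

    #define QTD_TOGGLE      (1u << 31)
    #define QTD_IOC         (1u << 15)
    #define QTD_STS_ACTIVE  (1u << 7)
    #define TOKEN_PID(p)    ((uint32_t)(p) << 8)   /* 0 = OUT, 1 = IN, 2 = SETUP */

    static uint32_t stage_token(unsigned pid, int toggle, int ioc)
    {
        uint32_t tok = QTD_STS_ACTIVE | TOKEN_PID(pid);

        if (toggle)
            tok |= QTD_TOGGLE;   /* DATA1 */
        if (ioc)
            tok |= QTD_IOC;      /* interrupt on complete */
        return tok;
    }

    int main(void)
    {
        printf("SETUP  : %#010x (DATA0)\n",      (unsigned)stage_token(2, 0, 0));
        printf("IN data: %#010x (DATA1)\n",      (unsigned)stage_token(1, 1, 0));
        printf("OUT sts: %#010x (DATA1, IOC)\n", (unsigned)stage_token(0, 1, 1));
        return 0;
    }
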