Lines matching defs:qtd (each entry: source line number, then the matching source line)
82 #define EHCI_TUNE_CERR 3 /* 0-3 qtd retries; 0 == don't stop */
247 #define QTD_STS_BABBLE (1 << 4) /* device was babbling (qtd halted) */
256 dma_addr_t qtd_dma; /* qtd address */
257 struct list_head qtd_list; /* sw qtd list */
258 struct urb *urb; /* qtd's urb */
282 /* for periodic/async schedules and qtd lists, mark end of list */
318 __le32 hw_current; /* qtd list - see EHCI 3.6.4 */
320 /* qtd overlay (hardware parts of a struct ehci_qtd) */
330 struct list_head qtd_list; /* sw qtd list */
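
Lines 247-330 show the two halves of the data structures involved: the hardware-defined words (hw_next, hw_alt_next, hw_token, the buffer pointers, and the QH's hw_current overlay) and the software bookkeeping the driver hangs off them (qtd_dma, qtd_list, urb, plus the oxu-specific buffer fields that appear later at lines 941-968). A rough sketch of how those named fields fit together in one descriptor; the ordering, alignment and stand-in types here are assumptions, not the driver's exact layout:

#include <stdint.h>
#include <stddef.h>

/* Sketch only: stand-ins for kernel types so the layout reads standalone. */
typedef uint32_t __le32;        /* little-endian word as the controller sees it */
typedef uintptr_t dma_addr_t;   /* bus address handed to the controller */
struct list_head { struct list_head *next, *prev; };
struct urb;                     /* opaque here */

/*
 * Hardware-visible part first (EHCI spec 3.5): the controller follows
 * hw_next/hw_alt_next and reads the token and the five 4 KiB buffer
 * pointers.  Everything after that is software-only bookkeeping kept
 * alongside, using the names that appear in the listing.
 */
struct ehci_qtd_sketch {
	/* hardware fields (32-byte aligned in the real descriptor) */
	__le32 hw_next;             /* next qTD, or EHCI_LIST_END */
	__le32 hw_alt_next;         /* followed instead on a short read */
	__le32 hw_token;            /* status bits, PID, transfer length */
	__le32 hw_buf[5];           /* one pointer per 4 KiB page */
	__le32 hw_buf_hi[5];        /* high 32 bits on 64-bit capable HCs */

	/* software bookkeeping (never read by the controller) */
	dma_addr_t       qtd_dma;         /* bus address of this qTD */
	struct list_head qtd_list;        /* links qTDs queued on one QH */
	struct urb      *urb;             /* the URB this qTD belongs to */
	void            *buffer;          /* local db_pool block (line 941) */
	dma_addr_t       buffer_dma;      /* bus address of that block */
	void            *transfer_buffer; /* caller's buffer, for IN copy-back */
	size_t           length;          /* bytes described by hw_token */
	size_t           qtd_buffer_len;  /* bytes reserved in db_pool */
};
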
907 static int oxu_buf_alloc(struct oxu_hcd *oxu, struct ehci_qtd *qtd, int len)
941 qtd->buffer = (void *) &oxu->mem->db_pool[i];
942 qtd->buffer_dma = virt_to_phys(qtd->buffer);
944 qtd->qtd_buffer_len = BUFFER_SIZE * a_blocks;
959 static void oxu_buf_free(struct oxu_hcd *oxu, struct ehci_qtd *qtd)
965 index = (qtd->buffer - (void *) &oxu->mem->db_pool[0])
968 qtd->qtd_buffer_len = 0;
969 qtd->buffer_dma = 0;
970 qtd->buffer = NULL;
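
oxu_buf_alloc and oxu_buf_free (lines 907-970) carve per-transfer buffers out of a fixed pool of equal-sized blocks (oxu->mem->db_pool) and record the reservation in qtd->buffer, buffer_dma and qtd_buffer_len. A standalone model of that block accounting, with made-up BUFFER_SIZE/BUFFER_NUM values and a simple in-use map (the real driver keeps its pool state inside struct oxu_hcd):

#include <stdbool.h>
#include <stddef.h>

#define BUFFER_SIZE 512   /* assumed block size, not the driver's value */
#define BUFFER_NUM  32    /* assumed pool depth */

static unsigned char db_pool[BUFFER_NUM][BUFFER_SIZE];
static bool          db_used[BUFFER_NUM];

/* Reserve enough contiguous blocks to cover len bytes, as oxu_buf_alloc
 * does before pointing qtd->buffer at &db_pool[i]; returns NULL when no
 * run of free blocks is long enough.  *reserved plays the role of
 * qtd_buffer_len at line 944. */
static void *pool_alloc(size_t len, size_t *reserved)
{
	size_t a_blocks = (len + BUFFER_SIZE - 1) / BUFFER_SIZE;

	for (size_t i = 0; i + a_blocks <= BUFFER_NUM; i++) {
		size_t j;

		for (j = 0; j < a_blocks && !db_used[i + j]; j++)
			;
		if (j < a_blocks)
			continue;               /* run too short, try further on */
		for (j = 0; j < a_blocks; j++)
			db_used[i + j] = true;
		*reserved = a_blocks * BUFFER_SIZE;
		return &db_pool[i][0];
	}
	return NULL;
}

/* Free by recovering the block index from the pointer, mirroring the
 * pointer arithmetic oxu_buf_free starts at line 965. */
static void pool_free(void *buf, size_t reserved)
{
	size_t index = ((unsigned char *)buf - &db_pool[0][0]) / BUFFER_SIZE;

	for (size_t n = 0; n < reserved / BUFFER_SIZE; n++)
		db_used[index + n] = false;
}
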
975 static inline void ehci_qtd_init(struct ehci_qtd *qtd, dma_addr_t dma)
977 memset(qtd, 0, sizeof *qtd);
978 qtd->qtd_dma = dma;
979 qtd->hw_token = cpu_to_le32(QTD_STS_HALT);
980 qtd->hw_next = EHCI_LIST_END;
981 qtd->hw_alt_next = EHCI_LIST_END;
982 INIT_LIST_HEAD(&qtd->qtd_list);
985 static inline void oxu_qtd_free(struct oxu_hcd *oxu, struct ehci_qtd *qtd)
989 if (qtd->buffer)
990 oxu_buf_free(oxu, qtd);
994 index = qtd - &oxu->mem->qtd_pool[0];
1003 struct ehci_qtd *qtd = NULL;
1012 qtd = (struct ehci_qtd *) &oxu->mem->qtd_pool[i];
1013 memset(qtd, 0, sizeof *qtd);
1015 qtd->hw_token = cpu_to_le32(QTD_STS_HALT);
1016 qtd->hw_next = EHCI_LIST_END;
1017 qtd->hw_alt_next = EHCI_LIST_END;
1018 INIT_LIST_HEAD(&qtd->qtd_list);
1020 qtd->qtd_dma = virt_to_phys(qtd);
1027 return qtd;
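
Both ehci_qtd_init (line 975) and ehci_qtd_alloc (lines 1003-1027) leave a fresh descriptor halted with both link pointers terminated, so the hardware will never execute it until a real token is written later. A minimal host-side sketch of that invariant, with status-bit values taken from the EHCI token layout (QTD_STS_BABBLE at line 247 is bit 4 of the same field); qtd_activate is a hypothetical helper, not a function in the driver:

#include <stdint.h>
#include <string.h>

#define QTD_STS_ACTIVE  (1u << 7)   /* controller owns / will execute this qTD */
#define QTD_STS_HALT    (1u << 6)   /* halted; also doubles as "not ready yet" */
#define EHCI_LIST_END   1u          /* T bit set: link pointer is not valid */

struct qtd_hw {
	uint32_t hw_next, hw_alt_next, hw_token;
	uint32_t hw_buf[5], hw_buf_hi[5];
};

/* Mirrors ehci_qtd_init at line 975: zero everything, mark the token
 * halted, terminate both link pointers.  (Host-endian here; the driver
 * stores these words with cpu_to_le32.) */
static void qtd_init(struct qtd_hw *qtd)
{
	memset(qtd, 0, sizeof(*qtd));
	qtd->hw_token = QTD_STS_HALT;
	qtd->hw_next = EHCI_LIST_END;
	qtd->hw_alt_next = EHCI_LIST_END;
}

/* Activation is deferred: once the rest of the descriptor is filled in,
 * a single token write clears HALT and sets ACTIVE (plus the length and
 * PID bits prepared earlier), the one update the controller must be able
 * to observe atomically. */
static void qtd_activate(struct qtd_hw *qtd, uint32_t prepared_token)
{
	qtd->hw_token = prepared_token | QTD_STS_ACTIVE;
}
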
1204 /* Fill a qtd, returning how much of the buffer we were able to queue up.
1206 static int qtd_fill(struct ehci_qtd *qtd, dma_addr_t buf, size_t len,
1213 qtd->hw_buf[0] = cpu_to_le32((u32)addr);
1214 qtd->hw_buf_hi[0] = cpu_to_le32((u32)(addr >> 32));
1222 /* per-qtd limit: from 16K to 20K (best alignment) */
1225 qtd->hw_buf[i] = cpu_to_le32((u32)addr);
1226 qtd->hw_buf_hi[i] = cpu_to_le32((u32)(addr >> 32));
1238 qtd->hw_token = cpu_to_le32((count << 16) | token);
1239 qtd->length = count;
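
qtd_fill (lines 1204-1239) packs as much of the caller's buffer as one descriptor can describe: the first of the five buffer pointers may start anywhere in a page, the remaining four must be page-aligned, and when the qTD cannot cover everything the byte count is rounded down to a multiple of maxpacket so no packet straddles two qTDs, which is what the "from 16K to 20K (best alignment)" comment at line 1222 refers to. A standalone model of just that arithmetic (the capacity-only helper is mine; the real routine also writes hw_buf[]/hw_buf_hi[] and the token):

#include <stdint.h>
#include <stdio.h>

/* How many bytes one EHCI qTD can cover, given five 4 KiB buffer
 * pointers of which only the first may be unaligned. */
static size_t qtd_capacity(uint64_t buf, size_t len, size_t maxpacket)
{
	size_t count = 0x1000 - (buf & 0x0fff);  /* rest of the first page */

	if (len < count)
		return len;

	/* buffer pointers 1..4 each add one whole page */
	for (int i = 1; count < len && i < 5; i++) {
		if (count + 0x1000 < len)
			count += 0x1000;
		else
			count = len;
	}

	/* a short, non-final qTD must end on a packet boundary */
	if (count != len)
		count -= count % maxpacket;

	return count;
}

int main(void)
{
	/* Page-aligned buffers fit five full pages (20 KiB); unaligned
	 * buffers fit less, down to roughly 16 KiB in the worst case. */
	printf("%zu\n", qtd_capacity(0x0000, 32768, 512)); /* 20480 */
	printf("%zu\n", qtd_capacity(0x0200, 32768, 512)); /* 19968 */
	return 0;
}
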
1245 struct ehci_qh *qh, struct ehci_qtd *qtd)
1250 qh->hw_qtd_next = QTD_NEXT(qtd->qtd_dma);
1261 is_out = !(qtd->hw_token & cpu_to_le32(1 << 8));
1269 /* HC must see latest qtd and qh data before we clear ACTIVE+HALT */
1280 struct ehci_qtd *qtd;
1283 qtd = qh->dummy;
1285 qtd = list_entry(qh->qtd_list.next,
1287 /* first qtd may already be partially processed */
1288 if (cpu_to_le32(qtd->qtd_dma) == qh->hw_current)
1289 qtd = NULL;
1292 if (qtd)
1293 qh_update(oxu, qh, qtd);
1340 oxu_vdbg(oxu, "dev%d ep%d%s qtd token %08x --> status %d\n",
1409 struct ehci_qtd *qtd, *tmp;
1434 list_for_each_entry_safe(qtd, tmp, &qh->qtd_list, qtd_list) {
1438 urb = qtd->urb;
1461 if (qtd == end)
1464 /* hardware copies qtd out of qh overlay */
1466 token = le32_to_cpu(qtd->hw_token);
1478 !(qtd->hw_alt_next & EHCI_LIST_END)) {
1494 /* ignore active urbs unless some previous qtd
1510 && cpu_to_le32(qtd->qtd_dma)
1528 qtd->length, token);
1529 if ((usb_pipein(qtd->urb->pipe)) &&
1530 (NULL != qtd->transfer_buffer))
1531 memcpy(qtd->transfer_buffer, qtd->buffer, qtd->length);
1535 if (stopped && qtd->qtd_list.prev != &qh->qtd_list) {
1536 last = list_entry(qtd->qtd_list.prev,
1538 last->hw_next = qtd->hw_next;
1540 list_del(&qtd->qtd_list);
1541 last = qtd;
1566 * overlaying the dummy qtd (which reduces DMA chatter).
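
qh_completions (lines 1409 onward) walks qh->qtd_list, reads each descriptor's hw_token, turns it into a URB status, and for IN transfers copies the data from the local qtd->buffer back to the caller's transfer_buffer (lines 1529-1531). A rough standalone sketch of the token decoding behind the debug message at line 1340; the driver's own status mapping distinguishes more cases than this:

#include <stdint.h>
#include <stddef.h>

#define QTD_STS_ACTIVE  (1u << 7)
#define QTD_STS_HALT    (1u << 6)
#define QTD_STS_DBE     (1u << 5)   /* data buffer (over/underrun) error */
#define QTD_STS_BABBLE  (1u << 4)   /* matches the define at line 247 */
#define QTD_STS_XACT    (1u << 3)   /* CRC, timeout, bad PID on the bus */

/* The controller decrements the 15-bit "total bytes" field in the token,
 * so bytes actually moved = requested - remaining. */
static inline size_t qtd_bytes_done(size_t requested, uint32_t token)
{
	return requested - ((token >> 16) & 0x7fff);
}

/* Coarse status mapping; error numbers follow the usual Linux USB
 * conventions (-EOVERFLOW for babble, -EPROTO for protocol errors). */
static inline int qtd_status(uint32_t token)
{
	if (token & QTD_STS_ACTIVE)
		return 1;          /* still owned by the controller */
	if (token & QTD_STS_BABBLE)
		return -75;        /* -EOVERFLOW */
	if (token & (QTD_STS_HALT | QTD_STS_DBE | QTD_STS_XACT))
		return -71;        /* -EPROTO (simplified) */
	return 0;
}
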
1602 struct ehci_qtd *qtd, *temp;
1604 list_for_each_entry_safe(qtd, temp, head, qtd_list) {
1605 list_del(&qtd->qtd_list);
1606 oxu_qtd_free(oxu, qtd);
1617 struct ehci_qtd *qtd, *qtd_prev;
1628 qtd = ehci_qtd_alloc(oxu);
1629 if (unlikely(!qtd))
1631 list_add_tail(&qtd->qtd_list, head);
1632 qtd->urb = urb;
1645 ret = oxu_buf_alloc(oxu, qtd, sizeof(struct usb_ctrlrequest));
1649 qtd_fill(qtd, qtd->buffer_dma, sizeof(struct usb_ctrlrequest),
1651 memcpy(qtd->buffer, qtd->urb->setup_packet,
1656 qtd_prev = qtd;
1657 qtd = ehci_qtd_alloc(oxu);
1658 if (unlikely(!qtd))
1660 qtd->urb = urb;
1661 qtd_prev->hw_next = QTD_NEXT(qtd->qtd_dma);
1662 list_add_tail(&qtd->qtd_list, head);
1673 ret = oxu_buf_alloc(oxu, qtd, len);
1677 buf = qtd->buffer_dma;
1681 memcpy(qtd->buffer, qtd->urb->transfer_buffer, len);
1697 this_qtd_len = qtd_fill(qtd, buf, len, token, maxpacket);
1698 qtd->transfer_buffer = transfer_buf;
1703 qtd->hw_alt_next = oxu->async->hw_alt_next;
1705 /* qh makes control packets use qtd toggle; maybe switch it */
1712 qtd_prev = qtd;
1713 qtd = ehci_qtd_alloc(oxu);
1714 if (unlikely(!qtd))
1717 ret = oxu_buf_alloc(oxu, qtd, len);
1721 qtd->urb = urb;
1722 qtd_prev->hw_next = QTD_NEXT(qtd->qtd_dma);
1723 list_add_tail(&qtd->qtd_list, head);
1731 qtd->hw_alt_next = EHCI_LIST_END;
1750 qtd_prev = qtd;
1751 qtd = ehci_qtd_alloc(oxu);
1752 if (unlikely(!qtd))
1754 qtd->urb = urb;
1755 qtd_prev->hw_next = QTD_NEXT(qtd->qtd_dma);
1756 list_add_tail(&qtd->qtd_list, head);
1759 qtd_fill(qtd, 0, 0, token, 0);
1764 qtd->hw_token |= cpu_to_le32(QTD_IOC);
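
qh_urb_transaction (lines 1617-1764) turns one URB into a chain of qTDs: an optional SETUP descriptor, as many data descriptors as qtd_fill can pack, a possible zero-length status/ZLP descriptor, and QTD_IOC set only on the last one so a single interrupt covers the whole URB. A simplified standalone sketch of that chain-building shape (PID values follow the EHCI token encoding; the helper names, the plain pointers and the fixed per-qTD limit are assumptions):

#include <stdint.h>
#include <stdlib.h>

#define QTD_PID_OUT    (0u << 8)
#define QTD_PID_IN     (1u << 8)
#define QTD_PID_SETUP  (2u << 8)
#define QTD_STS_ACTIVE (1u << 7)
#define QTD_IOC        (1u << 15)

struct qtd {
	uint32_t token;
	size_t length;
	struct qtd *next;       /* stands in for hw_next plus qtd_list */
};

static struct qtd *qtd_new(uint32_t pid, size_t len, struct qtd **tail)
{
	struct qtd *q = calloc(1, sizeof(*q));

	if (!q)
		abort();
	q->token = pid | QTD_STS_ACTIVE | ((uint32_t)len << 16);
	q->length = len;
	if (*tail)
		(*tail)->next = q;  /* like qtd_prev->hw_next = QTD_NEXT(...) */
	*tail = q;
	return q;
}

/* Build the chain for a control-IN transfer of 'len' bytes. */
static struct qtd *control_in_chain(size_t len)
{
	struct qtd *head, *tail = NULL;
	size_t chunk, max_per_qtd = 16384;    /* assumed; real limit is 16K-20K */

	head = qtd_new(QTD_PID_SETUP, 8, &tail);        /* setup stage */
	while (len) {                                   /* data stage */
		chunk = len < max_per_qtd ? len : max_per_qtd;
		qtd_new(QTD_PID_IN, chunk, &tail);
		len -= chunk;
	}
	qtd_new(QTD_PID_OUT, 0, &tail);                 /* status stage */
	tail->token |= QTD_IOC;                         /* interrupt once, at the end */
	return head;
}
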
1772 /* Each QH holds a qtd list; a QH is used for everything except iso.
1868 info1 |= 1 << 14; /* toggle from qtd */
1884 info1 |= 1 << 14; /* toggle from qtd */
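
The two info1 lines at 1868 and 1884 set bit 14 of the QH's endpoint-characteristics word, the EHCI Data Toggle Control bit: with it set, DATA0/DATA1 comes from each qTD's token (bit 31) instead of being tracked in the QH overlay, which is what the comment at line 1705 ("qh makes control packets use qtd toggle") refers to. A tiny sketch of the two bits involved; the helper is illustrative only:

#include <stdint.h>

#define QH_DTC      (1u << 14)   /* QH info1 bit 14: toggle comes from the qTD */
#define QTD_TOGGLE  (1u << 31)   /* qTD token bit 31: this transaction is DATA1 */

/* When DTC is set the queue head does not maintain the toggle; each qTD
 * states explicitly whether it is DATA0 or DATA1. */
static inline uint32_t qtd_token_with_toggle(uint32_t token, int data1)
{
	return data1 ? (token | QTD_TOGGLE) : (token & ~QTD_TOGGLE);
}
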
1950 /* qtd completions reported later by interrupt */
1974 struct ehci_qtd *qtd;
1977 qtd = NULL;
1979 qtd = list_entry(qtd_list->next, struct ehci_qtd,
1990 /* just one way to queue requests: swap with the dummy qtd.
1993 if (likely(qtd != NULL)) {
2003 token = qtd->hw_token;
2004 qtd->hw_token = HALT_BIT;
2009 *dummy = *qtd;
2012 list_del(&qtd->qtd_list);
2016 ehci_qtd_init(qtd, qtd->qtd_dma);
2017 qh->dummy = qtd;
2020 dma = qtd->qtd_dma;
2021 qtd = list_entry(qh->qtd_list.prev,
2023 qtd->hw_next = QTD_NEXT(dma);
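
The append path at lines 1990-2023 never lets the controller observe a half-built chain: the first new qTD's token is saved and withheld, its contents are copied into the QH's current dummy (the descriptor the hardware already points at), the first qTD is re-initialised as the new dummy on the end of the chain, and only then is the saved token written back, flipping the old dummy from halted to active in a single word. A simplified, pointer-based model of that ordering (single-qTD case; plain pointers stand in for bus addresses, and the real code also fixes up qh->qtd_list and qtd_dma):

#include <stdint.h>
#include <stddef.h>

#define QTD_STS_HALT (1u << 6)

struct qtd {
	struct qtd *next;        /* stands in for hw_next (a bus address there) */
	uint32_t    hw_token;
};

/* Splice a freshly built one-qTD request onto a queue head whose hardware
 * overlay already points at 'dummy'. */
static struct qtd *append_via_dummy_swap(struct qtd *dummy, struct qtd *first)
{
	uint32_t token = first->hw_token;   /* save the real, active token */

	first->hw_token = QTD_STS_HALT;     /* the copy below is not atomic,
	                                       so never expose an active token */
	*dummy = *first;                    /* old dummy now describes the
	                                       transfer, still halted */

	first->hw_token = QTD_STS_HALT;     /* recycle 'first' as the new dummy */
	first->next = NULL;
	dummy->next = first;                /* chain again ends in a dummy */

	dummy->hw_token = token;            /* last step: one word write makes
	                                       the transfer visible and active */
	return first;                       /* caller stores it as qh->dummy */
}
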
2044 struct ehci_qtd *qtd;
2046 qtd = list_entry(qtd_list->next, struct ehci_qtd, qtd_list);
2048 oxu_dbg(oxu, "%s %s urb %p ep%d%s len %d, qtd %p [qh %p]\n",
2052 qtd, urb->ep->hcpriv);