Lines Matching defs:qtd

82 #define EHCI_TUNE_CERR		3	/* 0-3 qtd retries; 0 == don't stop */
247 #define QTD_STS_BABBLE (1 << 4) /* device was babbling (qtd halted) */
256 dma_addr_t qtd_dma; /* qtd address */
257 struct list_head qtd_list; /* sw qtd list */
258 struct urb *urb; /* qtd's urb */
282 /* for periodic/async schedules and qtd lists, mark end of list */
318 __le32 hw_current; /* qtd list - see EHCI 3.6.4 */
320 /* qtd overlay (hardware parts of a struct ehci_qtd) */
330 struct list_head qtd_list; /* sw qtd list */
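
Note: taken together, the struct fields matched above sketch the driver's per-qTD bookkeeping. The reconstruction below is for orientation only and uses stand-in C types; the hardware dwords follow EHCI 3.5, while the grouping and exact types of the software fields are assumptions inferred from the matches in this listing, not copied from the driver header.

#include <stdint.h>

struct sketch_qtd {			/* stand-in for struct ehci_qtd */
	/* hardware part, per EHCI 3.5: little-endian dwords, 32-byte aligned */
	uint32_t hw_next;		/* link to next qTD; bit 0 = terminate */
	uint32_t hw_alt_next;		/* alternate link taken on short reads */
	uint32_t hw_token;		/* status bits, PID, error count, byte count */
	uint32_t hw_buf[5];		/* up to five 4 KiB buffer pages */
	uint32_t hw_buf_hi[5];		/* high dwords for 64-bit addressing */

	/* software part, inferred from the lines above */
	uint64_t qtd_dma;		/* bus address of this qTD */
	void *urb;			/* qtd's urb (struct urb * in the driver) */
	void *buffer;			/* bounce buffer carved from db_pool */
	uint64_t buffer_dma;
	uint32_t qtd_buffer_len;
	void *transfer_buffer;		/* caller's buffer, for IN copy-back */
	uint32_t length;		/* bytes queued by qtd_fill() */
	/* plus a struct list_head qtd_list chaining qTDs under one QH */
};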
907 static int oxu_buf_alloc(struct oxu_hcd *oxu, struct ehci_qtd *qtd, int len)
941 qtd->buffer = (void *) &oxu->mem->db_pool[i];
942 qtd->buffer_dma = virt_to_phys(qtd->buffer);
944 qtd->qtd_buffer_len = BUFFER_SIZE * a_blocks;
959 static void oxu_buf_free(struct oxu_hcd *oxu, struct ehci_qtd *qtd)
965 index = (qtd->buffer - (void *) &oxu->mem->db_pool[0])
968 qtd->qtd_buffer_len = 0;
969 qtd->buffer_dma = 0;
970 qtd->buffer = NULL;
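
Note: oxu_buf_alloc()/oxu_buf_free() (lines 907-970) carve each qTD's transfer buffer out of a fixed array of BUFFER_SIZE blocks in the controller's local memory (db_pool), and on free recover the starting block from the pointer alone. A minimal standalone sketch of that index arithmetic follows; BLOCK_SIZE, POOL_BLOCKS and pool are hypothetical stand-ins for BUFFER_SIZE and oxu->mem->db_pool.

#include <stdio.h>

#define BLOCK_SIZE  512			/* hypothetical; stands in for BUFFER_SIZE */
#define POOL_BLOCKS 32

static unsigned char pool[POOL_BLOCKS][BLOCK_SIZE];	/* stands in for oxu->mem->db_pool */

int main(void)
{
	/* pretend blocks 5..7 were handed out for a 3-block buffer */
	void *buffer = &pool[5];
	unsigned long buffer_len = BLOCK_SIZE * 3;	/* cf. BUFFER_SIZE * a_blocks */

	/* on free, recover the starting block index from the pointer,
	 * mirroring: index = (qtd->buffer - &oxu->mem->db_pool[0]) / BUFFER_SIZE */
	unsigned long index = ((unsigned char *)buffer - &pool[0][0]) / BLOCK_SIZE;

	printf("buffer spans blocks %lu..%lu\n", index, index + buffer_len / BLOCK_SIZE - 1);
	return 0;
}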
975 static inline void ehci_qtd_init(struct ehci_qtd *qtd, dma_addr_t dma)
977 memset(qtd, 0, sizeof *qtd);
978 qtd->qtd_dma = dma;
979 qtd->hw_token = cpu_to_le32(QTD_STS_HALT);
980 qtd->hw_next = EHCI_LIST_END;
981 qtd->hw_alt_next = EHCI_LIST_END;
982 INIT_LIST_HEAD(&qtd->qtd_list);
985 static inline void oxu_qtd_free(struct oxu_hcd *oxu, struct ehci_qtd *qtd)
989 if (qtd->buffer)
990 oxu_buf_free(oxu, qtd);
994 index = qtd - &oxu->mem->qtd_pool[0];
1003 struct ehci_qtd *qtd = NULL;
1012 qtd = (struct ehci_qtd *) &oxu->mem->qtd_pool[i];
1013 memset(qtd, 0, sizeof *qtd);
1015 qtd->hw_token = cpu_to_le32(QTD_STS_HALT);
1016 qtd->hw_next = EHCI_LIST_END;
1017 qtd->hw_alt_next = EHCI_LIST_END;
1018 INIT_LIST_HEAD(&qtd->qtd_list);
1020 qtd->qtd_dma = virt_to_phys(qtd);
1027 return qtd;
1204 /* Fill a qtd, returning how much of the buffer we were able to queue up.
1206 static int qtd_fill(struct ehci_qtd *qtd, dma_addr_t buf, size_t len,
1213 qtd->hw_buf[0] = cpu_to_le32((u32)addr);
1214 qtd->hw_buf_hi[0] = cpu_to_le32((u32)(addr >> 32));
1222 /* per-qtd limit: from 16K to 20K (best alignment) */
1225 qtd->hw_buf[i] = cpu_to_le32((u32)addr);
1226 qtd->hw_buf_hi[i] = cpu_to_le32((u32)(addr >> 32));
1238 qtd->hw_token = cpu_to_le32((count << 16) | token);
1239 qtd->length = count;
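
Note: qtd_fill() (lines 1204-1239) packs as much of the buffer into one qTD as its five page pointers allow: hw_buf[0] may point anywhere inside a 4 KiB page, the remaining four pointers are page-aligned, and any partial tail is trimmed back to a multiple of maxpacket so a short packet can only end a transfer. A standalone sketch of the capacity arithmetic behind the "16K to 20K" comment at line 1222, ignoring the maxpacket trim:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* bytes one qTD can cover from DMA address buf: the rest of the
 * first 4 KiB page plus four more full pages */
static size_t qtd_capacity(uint64_t buf)
{
	return (0x1000 - (buf & 0x0fff)) + 4 * 0x1000;
}

int main(void)
{
	printf("%zu\n", qtd_capacity(0x12340000));	/* 20480: page-aligned start */
	printf("%zu\n", qtd_capacity(0x12340ffc));	/* 16388: worst-case alignment */
	return 0;
}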
1245 struct ehci_qh *qh, struct ehci_qtd *qtd)
1250 qh->hw_qtd_next = QTD_NEXT(qtd->qtd_dma);
1261 is_out = !(qtd->hw_token & cpu_to_le32(1 << 8));
1269 /* HC must see latest qtd and qh data before we clear ACTIVE+HALT */
1280 struct ehci_qtd *qtd;
1283 qtd = qh->dummy;
1285 qtd = list_entry(qh->qtd_list.next,
1287 /* first qtd may already be partially processed */
1288 if (cpu_to_le32(qtd->qtd_dma) == qh->hw_current)
1289 qtd = NULL;
1292 if (qtd)
1293 qh_update(oxu, qh, qtd);
1340 oxu_vdbg(oxu, "dev%d ep%d%s qtd token %08x --> status %d\n",
1410 struct ehci_qtd *qtd, *tmp;
1435 list_for_each_entry_safe(qtd, tmp, &qh->qtd_list, qtd_list) {
1439 urb = qtd->urb;
1462 if (qtd == end)
1465 /* hardware copies qtd out of qh overlay */
1467 token = le32_to_cpu(qtd->hw_token);
1479 !(qtd->hw_alt_next & EHCI_LIST_END)) {
1495 /* ignore active urbs unless some previous qtd
1511 && cpu_to_le32(qtd->qtd_dma)
1529 qtd->length, token);
1530 if ((usb_pipein(qtd->urb->pipe)) &&
1531 (NULL != qtd->transfer_buffer))
1532 memcpy(qtd->transfer_buffer, qtd->buffer, qtd->length);
1536 if (stopped && qtd->qtd_list.prev != &qh->qtd_list) {
1537 last = list_entry(qtd->qtd_list.prev,
1539 last->hw_next = qtd->hw_next;
1541 list_del(&qtd->qtd_list);
1542 last = qtd;
1567 * overlaying the dummy qtd (which reduces DMA chatter).
1603 struct ehci_qtd *qtd, *temp;
1605 list_for_each_entry_safe(qtd, temp, head, qtd_list) {
1606 list_del(&qtd->qtd_list);
1607 oxu_qtd_free(oxu, qtd);
1618 struct ehci_qtd *qtd, *qtd_prev;
1629 qtd = ehci_qtd_alloc(oxu);
1630 if (unlikely(!qtd))
1632 list_add_tail(&qtd->qtd_list, head);
1633 qtd->urb = urb;
1646 ret = oxu_buf_alloc(oxu, qtd, sizeof(struct usb_ctrlrequest));
1650 qtd_fill(qtd, qtd->buffer_dma, sizeof(struct usb_ctrlrequest),
1652 memcpy(qtd->buffer, qtd->urb->setup_packet,
1657 qtd_prev = qtd;
1658 qtd = ehci_qtd_alloc(oxu);
1659 if (unlikely(!qtd))
1661 qtd->urb = urb;
1662 qtd_prev->hw_next = QTD_NEXT(qtd->qtd_dma);
1663 list_add_tail(&qtd->qtd_list, head);
1674 ret = oxu_buf_alloc(oxu, qtd, len);
1678 buf = qtd->buffer_dma;
1682 memcpy(qtd->buffer, qtd->urb->transfer_buffer, len);
1698 this_qtd_len = qtd_fill(qtd, buf, len, token, maxpacket);
1699 qtd->transfer_buffer = transfer_buf;
1704 qtd->hw_alt_next = oxu->async->hw_alt_next;
1706 /* qh makes control packets use qtd toggle; maybe switch it */
1713 qtd_prev = qtd;
1714 qtd = ehci_qtd_alloc(oxu);
1715 if (unlikely(!qtd))
1718 ret = oxu_buf_alloc(oxu, qtd, len);
1722 qtd->urb = urb;
1723 qtd_prev->hw_next = QTD_NEXT(qtd->qtd_dma);
1724 list_add_tail(&qtd->qtd_list, head);
1732 qtd->hw_alt_next = EHCI_LIST_END;
1751 qtd_prev = qtd;
1752 qtd = ehci_qtd_alloc(oxu);
1753 if (unlikely(!qtd))
1755 qtd->urb = urb;
1756 qtd_prev->hw_next = QTD_NEXT(qtd->qtd_dma);
1757 list_add_tail(&qtd->qtd_list, head);
1760 qtd_fill(qtd, 0, 0, token, 0);
1765 qtd->hw_token |= cpu_to_le32(QTD_IOC);
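
Note: qh_urb_transaction() (lines 1618-1765) turns one URB into a chain of qTDs: for control transfers a SETUP qTD, then DATA qTDs split wherever a buffer block or the per-qTD limit runs out, then a zero-length STATUS qTD; each new qTD is appended to the software list and linked to its predecessor through hw_next, and only the final qTD gets QTD_IOC. The sketch below shows just that chaining pattern; fake_qtd, append() and FAKE_IOC are hypothetical stand-ins, not driver names.

#include <stdio.h>
#include <stdlib.h>

struct fake_qtd {			/* stand-in for struct ehci_qtd */
	const char *stage;
	unsigned int token;		/* hypothetical flag word */
	struct fake_qtd *next;		/* stands in for hw_next */
};

#define FAKE_IOC (1u << 15)		/* stands in for QTD_IOC */

static struct fake_qtd *append(struct fake_qtd *prev, const char *stage)
{
	struct fake_qtd *q = calloc(1, sizeof(*q));
	q->stage = stage;
	if (prev)
		prev->next = q;		/* cf. qtd_prev->hw_next = QTD_NEXT(qtd->qtd_dma) */
	return q;
}

int main(void)
{
	struct fake_qtd *head = append(NULL, "SETUP");	/* 8-byte usb_ctrlrequest */
	struct fake_qtd *q = append(head, "DATA");	/* repeated per 16K-20K chunk */
	q = append(q, "STATUS");			/* zero length, opposite PID */
	q->token |= FAKE_IOC;				/* only the last qTD interrupts */

	for (q = head; q != NULL; q = q->next)
		printf("%s%s\n", q->stage, (q->token & FAKE_IOC) ? " +IOC" : "");
	return 0;
}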
1773 /* Each QH holds a qtd list; a QH is used for everything except iso.
1869 info1 |= 1 << 14; /* toggle from qtd */
1885 info1 |= 1 << 14; /* toggle from qtd */
1951 /* qtd completions reported later by interrupt */
1975 struct ehci_qtd *qtd;
1978 qtd = NULL;
1980 qtd = list_entry(qtd_list->next, struct ehci_qtd,
1991 /* just one way to queue requests: swap with the dummy qtd.
1994 if (likely(qtd != NULL)) {
2004 token = qtd->hw_token;
2005 qtd->hw_token = HALT_BIT;
2010 *dummy = *qtd;
2013 list_del(&qtd->qtd_list);
2017 ehci_qtd_init(qtd, qtd->qtd_dma);
2018 qh->dummy = qtd;
2021 dma = qtd->qtd_dma;
2022 qtd = list_entry(qh->qtd_list.prev,
2024 qtd->hw_next = QTD_NEXT(dma);
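
Note: qh_append_tds() (lines 1975-2024) never lets the controller see a half-built queue. The token of the new list's first qTD is saved and replaced with HALT_BIT, the qTD's contents are copied over the QH's current dummy (which the hardware overlay already points at), the emptied first qTD is re-initialized as the new dummy, and the chain's tail is linked to it; the saved token is written back to the copied dummy last, which is what finally lets the controller fetch the new work (that final write is on a line not matched above, inferred from the token saved at line 2004). A standalone sketch of the swap; fake_qtd and its fields are hypothetical stand-ins, not the driver's structures.

#include <stdio.h>
#include <string.h>

struct fake_qtd {			/* stand-in for struct ehci_qtd */
	unsigned int token;		/* bit 7 = active, bit 6 = halted */
	unsigned int payload;		/* everything else the qTD carries */
};

#define HALT (1u << 6)			/* cf. QTD_STS_HALT / HALT_BIT */

int main(void)
{
	struct fake_qtd dummy = { .token = HALT };		/* QH overlay points here */
	struct fake_qtd first = { .token = 1u << 7, .payload = 42 };	/* newly built work */

	unsigned int live = first.token;	/* cf. token = qtd->hw_token */
	first.token = HALT;			/* cf. qtd->hw_token = HALT_BIT */

	dummy = first;				/* cf. *dummy = *qtd (dummy keeps its own dma) */
	memset(&first, 0, sizeof(first));	/* cf. ehci_qtd_init(qtd, qtd->qtd_dma) */
	first.token = HALT;			/* 'first' is now the new dummy */

	dummy.token = live;			/* hand the copied qTD to the HC last */
	printf("HC now sees payload %u, token %#x\n", dummy.payload, dummy.token);
	return 0;
}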
2045 struct ehci_qtd *qtd;
2047 qtd = list_entry(qtd_list->next, struct ehci_qtd, qtd_list);
2049 oxu_dbg(oxu, "%s %s urb %p ep%d%s len %d, qtd %p [qh %p]\n",
2053 qtd, urb->ep->hcpriv);