Lines matching defs:etd (occurrences of `etd` in the i.MX21 USB host controller driver, drivers/usb/host/imx21-hcd.c; the number on each line is its position in that file)

57 #define DEBUG_LOG_FRAME(imx21, etd, event) \
58 (etd)->event##_frame = readl((imx21)->regs + USBH_FRMNUB)
60 #define DEBUG_LOG_FRAME(imx21, etd, event) do { } while (0)
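
The two defines above are the debug and release variants of the same macro: with debugging enabled, DEBUG_LOG_FRAME stamps the named event##_frame field of an etd_priv with the current frame number read from USBH_FRMNUB; otherwise it expands to a statement-safe no-op. A minimal user-space sketch of the pattern, with a hypothetical read_frame_number() standing in for the register read:

#include <stdio.h>

/* Hypothetical stand-in for reading the USBH_FRMNUB register. */
static unsigned int read_frame_number(void) { return 42; }

struct etd_priv_sketch {
	unsigned int activated_frame;
	unsigned int disactivated_frame;
};

#ifdef DEBUG
/* Debug build: record the current frame number in etd->event_frame. */
#define DEBUG_LOG_FRAME(etd, event) \
	((etd)->event##_frame = read_frame_number())
#else
/* Release build: a no-op that still behaves like a statement. */
#define DEBUG_LOG_FRAME(etd, event) do { } while (0)
#endif

int main(void)
{
	struct etd_priv_sketch etd = { 0 };

	DEBUG_LOG_FRAME(&etd, activated);	/* no-op unless -DDEBUG */
	printf("activated_frame=%u\n", etd.activated_frame);
	return 0;
}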
140 struct imx21 *imx21, struct etd_priv *etd, int status);
142 static void free_dmem(struct imx21 *imx21, struct etd_priv *etd);
151 struct etd_priv *etd = imx21->etd;
153 for (i = 0; i < USB_NUM_ETD; i++, etd++) {
154 if (etd->alloc == 0) {
155 memset(etd, 0, sizeof(imx21->etd[0]));
156 etd->alloc = 1;
167 struct etd_priv *etd = &imx21->etd[num];
174 etd->active_count = 0;
176 DEBUG_LOG_FRAME(imx21, etd, disactivated);
181 struct etd_priv *etd = imx21->etd + num;
188 etd->urb = NULL;
189 etd->ep = NULL;
190 etd->td = NULL;
191 etd->bounce_buffer = NULL;
200 dev_err(imx21->dev, "BAD etd=%d!\n", num);
203 if (imx21->etd[num].alloc == 0) {
210 memset(&imx21->etd[num], 0, sizeof(imx21->etd[0]));
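
Taken together, the fragments above show that alloc_etd() is a first-fit scan over the fixed pool of USB_NUM_ETD slots, claiming the first one whose alloc flag is clear and zeroing it, while free_etd() guards against out-of-range and already-free slots before clearing the entry. A compact user-space sketch of that pattern (NUM_ETD, etd_slot, and the pool size are reduced illustrative names):

#include <stdio.h>
#include <string.h>

#define NUM_ETD 8			/* stands in for USB_NUM_ETD */

struct etd_slot {
	int alloc;			/* slot in use? */
	void *urb, *ep, *td;		/* per-transfer state, cleared on alloc */
};

static struct etd_slot etds[NUM_ETD];

/* Return the index of a free slot, or -1 if the pool is exhausted. */
static int alloc_etd(void)
{
	for (int i = 0; i < NUM_ETD; i++) {
		if (etds[i].alloc == 0) {
			memset(&etds[i], 0, sizeof(etds[0]));
			etds[i].alloc = 1;
			return i;
		}
	}
	return -1;
}

static void free_etd(int num)
{
	if (num < 0 || num >= NUM_ETD) {
		fprintf(stderr, "BAD etd=%d!\n", num);
		return;
	}
	if (etds[num].alloc == 0) {
		fprintf(stderr, "ETD %d already free!\n", num);
		return;
	}
	memset(&etds[num], 0, sizeof(etds[0]));
}

int main(void)
{
	int a = alloc_etd(), b = alloc_etd();

	printf("got etds %d and %d\n", a, b);
	free_etd(a);
	free_etd(a);			/* triggers the double-free warning */
	return 0;
}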
257 struct etd_priv *etd = &imx21->etd[etd_num];
259 if (etd->dma_handle && unsuitable_for_dma(etd->dma_handle)) {
261 if (etd->len <= etd->dmem_size) {
265 etd->dmem_offset,
266 etd->cpu_buffer, etd->len);
268 etd->dma_handle = 0;
276 etd->bounce_buffer = kmalloc(etd->len,
280 etd->bounce_buffer = kmemdup(etd->cpu_buffer,
281 etd->len,
284 if (!etd->bounce_buffer) {
289 etd->dma_handle =
291 etd->bounce_buffer,
292 etd->len,
294 if (dma_mapping_error(imx21->dev, etd->dma_handle)) {
306 if (etd->dma_handle) {
310 writel(etd->dma_handle, imx21->regs + USB_ETDSMSA(etd_num));
320 DEBUG_LOG_FRAME(imx21, etd, activated);
323 if (!etd->active_count) {
325 etd->activated_frame = readl(imx21->regs + USBH_FRMNUB);
326 etd->disactivated_frame = -1;
327 etd->last_int_frame = -1;
328 etd->last_req_frame = -1;
331 etd->submitted_dwords[i] = etd_readl(imx21, etd_num, i);
335 etd->active_count = 1;
340 kfree(etd->bounce_buffer);
343 free_dmem(imx21, etd);
344 nonisoc_urb_completed_for_etd(imx21, etd, -ENOMEM);
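
activate_etd() copes with buffers the hardware cannot reach by DMA: a transfer that fits the ETD's DMEM chunk is demoted to PIO (memcpy_toio into DMEM, dma_handle cleared to 0), a larger one gets a bounce buffer (kmemdup for OUT so the payload travels with it, bare kmalloc for IN) that is then mapped with dma_map_single(), and any failure frees the bounce buffer and DMEM and completes the URB with -ENOMEM. A hedged user-space sketch of just the bounce decision, where unsuitable_for_dma() is reduced to a made-up alignment test:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Made-up check: pretend the controller cannot DMA from odd addresses. */
static int unsuitable_for_dma(const void *buf)
{
	return (uintptr_t)buf & 1;
}

/*
 * Prepare buf (len bytes; is_out nonzero for host-to-device) for DMA.
 * Returns the pointer to hand to the DMA engine; *bounce receives any
 * bounce allocation the caller must free after completion, and NULL is
 * returned on allocation failure (the caller completes with -ENOMEM).
 */
static void *prepare_dma(void *buf, size_t len, int is_out, void **bounce)
{
	*bounce = NULL;
	if (!unsuitable_for_dma(buf))
		return buf;		/* direct DMA is fine */

	void *b = malloc(len);
	if (!b)
		return NULL;
	if (is_out)
		memcpy(b, buf, len);	/* kmemdup-style copy for OUT */
	*bounce = b;
	return b;
}

int main(void)
{
	char data[8] = "payload";
	void *bounce;
	void *dma = prepare_dma(data + 1, 7, 1, &bounce);

	printf("bounced: %s\n", bounce ? "yes" : "no");
	free(bounce);
	(void)dma;
	return 0;
}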
393 struct etd_priv *etd, u32 dmem_offset)
395 struct urb_priv *urb_priv = etd->urb->hcpriv;
396 int etd_num = etd - &imx21->etd[0];
405 etd->dmem_offset = dmem_offset;
410 static void free_dmem(struct imx21 *imx21, struct etd_priv *etd)
417 if (!etd->dmem_size)
419 etd->dmem_size = 0;
421 offset = etd->dmem_offset;
439 list_for_each_entry_safe(etd, tmp, &imx21->queue_for_dmem, queue) {
440 offset = alloc_dmem(imx21, etd->dmem_size, etd->ep);
442 list_del(&etd->queue);
443 activate_queued_etd(imx21, etd, (u32)offset);
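
free_dmem() is also the retry point for ETDs parked on queue_for_dmem: after returning its own chunk it walks the queue with list_for_each_entry_safe(), and every waiter whose alloc_dmem() now succeeds is unlinked and handed to activate_queued_etd(). Roughly this shape, with a simple byte budget standing in for the real DMEM allocator:

#include <stdio.h>
#include <stdlib.h>

/* Trivial stand-in for the on-chip DMEM allocator: a byte budget. */
static int dmem_free = 64;

struct waiter {
	int size;
	struct waiter *next;
};

static struct waiter *queue;		/* requests that didn't fit yet */

static void activate_queued(struct waiter *w)
{
	printf("activating queued request of %d bytes\n", w->size);
	free(w);
}

/* Releasing memory retries every queued request that now fits,
 * mirroring the list_for_each_entry_safe() walk in free_dmem(). */
static void free_dmem(int size)
{
	struct waiter **pp = &queue;

	dmem_free += size;
	while (*pp) {
		struct waiter *w = *pp;

		if (w->size <= dmem_free) {
			dmem_free -= w->size;
			*pp = w->next;	/* list_del() equivalent */
			activate_queued(w);
		} else {
			pp = &w->next;
		}
	}
}

int main(void)
{
	struct waiter *w = malloc(sizeof(*w));

	w->size = 80;			/* doesn't fit in 64 bytes... */
	w->next = NULL;
	queue = w;
	free_dmem(32);			/* ...until someone frees 32 more */
	return 0;
}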
474 int etd_num = ep_priv->etd[i];
475 struct etd_priv *etd;
479 etd = &imx21->etd[etd_num];
480 ep_priv->etd[i] = -1;
482 free_dmem(imx21, etd); /* for isoc */
490 "assigning idle etd %d for queued request\n", etd_num);
496 ep_priv->etd[i] = etd_num;
530 struct imx21 *imx21, struct etd_priv *etd, int status)
532 struct usb_host_endpoint *ep = etd->ep;
534 urb_done(imx21->hcd, etd->urb, status);
535 etd->urb = NULL;
556 struct etd_priv *etd;
569 etd_num = ep_priv->etd[i];
573 etd = &imx21->etd[etd_num];
574 if (etd->urb)
595 etd->td = td;
596 etd->ep = td->ep;
597 etd->urb = td->urb;
598 etd->len = td->len;
599 etd->dma_handle = td->dma_handle;
600 etd->cpu_buffer = td->cpu_buffer;
605 setup_etd_dword0(imx21, etd_num, td->urb, dir, etd->dmem_size);
606 etd_writel(imx21, etd_num, 1, etd->dmem_offset);
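
setup_etd_dword0(), referenced above, packs the static transfer description (device address, endpoint, direction, max packet size, and so on) into ETD dword 0 before dword 1 receives the DMEM offset. A sketch of that style of register assembly, with invented field shifts; the driver's real DW0_* positions live in its header:

#include <stdint.h>

/* Invented field positions, for illustration only. */
#define DW0_ADDRESS	0
#define DW0_ENDPNT	7
#define DW0_DIRECT	11
#define DW0_MAXPKTSIZ	16

/* Assemble ETD dword 0 from the transfer's static parameters. */
static uint32_t setup_dword0(uint32_t addr, uint32_t ep, uint32_t dir,
			     uint32_t maxpacket)
{
	return (addr << DW0_ADDRESS) | (ep << DW0_ENDPNT) |
	       (dir << DW0_DIRECT) | (maxpacket << DW0_MAXPKTSIZ);
}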
622 struct etd_priv *etd = imx21->etd + etd_num;
623 struct urb *urb = etd->urb;
625 struct td *td = etd->td;
626 struct usb_host_endpoint *ep = etd->ep;
653 "cnt=%d len=%d urb=%p etd=%d index=%d\n",
660 if (!etd->dma_handle)
661 memcpy_fromio(etd->cpu_buffer,
662 imx21->regs + USBOTG_DMEM + etd->dmem_offset,
670 etd->td = NULL;
671 etd->urb = NULL;
672 etd->ep = NULL;
691 ep_priv->etd[i] = -1;
706 if (ep_priv->etd[i] < 0) {
711 ep_priv->etd[i] = etd_num;
712 imx21->etd[etd_num].ep = ep_priv->ep;
718 dev_err(imx21->dev, "isoc: Couldn't allocate etd\n");
720 free_etd(imx21, ep_priv->etd[j]);
721 ep_priv->etd[j] = -1;
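
Isochronous endpoints reserve their ETDs up front, and the error path unwinds cleanly: if any slot in ep_priv->etd[] cannot be filled, the ones already taken are freed and reset to -1. A sketch of that all-or-nothing allocation (the stub pool is contrived with a single free slot so the rollback path actually runs):

#include <stdio.h>

#define NUM_ISO_ETDS 2			/* ETDs wanted per isoc endpoint */

/* Stub pool with one free ETD so the rollback triggers. */
static int free_slots = 1;

static int alloc_etd(void)
{
	return free_slots > 0 ? --free_slots : -1;
}

static void free_etd(int num)
{
	free_slots++;
}

/*
 * All-or-nothing allocation: if any ETD can't be had, release the ones
 * already taken and leave every slot marked -1, as the isoc setup does.
 */
static int alloc_isoc_etds(int etd[NUM_ISO_ETDS])
{
	int i, j;

	for (i = 0; i < NUM_ISO_ETDS; i++) {
		etd[i] = alloc_etd();
		if (etd[i] < 0) {
			fprintf(stderr, "isoc: couldn't allocate etd\n");
			for (j = 0; j < i; j++) {
				free_etd(etd[j]);
				etd[j] = -1;
			}
			return -1;
		}
	}
	return 0;
}

int main(void)
{
	int etd[NUM_ISO_ETDS];

	if (alloc_isoc_etds(etd) < 0)
		printf("rolled back, %d slot(s) free again\n", free_slots);
	return 0;
}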
780 struct etd_priv *etd = &imx21->etd[ep_priv->etd[i]];
782 if (etd->dmem_size > 0 && etd->dmem_size < maxpacket) {
785 etd->dmem_size, maxpacket);
790 if (etd->dmem_size == 0) {
791 etd->dmem_offset = alloc_dmem(imx21, maxpacket, ep);
792 if (etd->dmem_offset < 0) {
797 etd->dmem_size = maxpacket;
876 int etd_num = ep_priv->etd[i];
877 if (etd_num != -1 && imx21->etd[etd_num].urb == urb) {
878 struct etd_priv *etd = imx21->etd + etd_num;
881 free_dmem(imx21, etd);
904 int etd_num = ep_priv->etd[0];
905 struct etd_priv *etd;
922 etd = &imx21->etd[etd_num];
933 etd->dma_handle = urb->setup_dma;
934 etd->cpu_buffer = urb->setup_packet;
950 etd->dma_handle = urb->transfer_dma;
951 etd->cpu_buffer = urb->transfer_buffer;
970 etd->urb = urb;
971 etd->ep = urb_priv->ep;
972 etd->len = count;
1002 etd->dma_handle = 0;
1005 etd->dmem_size = (count > maxpacket) ? maxpacket * 2 : maxpacket;
1006 etd->dmem_offset = alloc_dmem(imx21, etd->dmem_size, urb_priv->ep);
1007 if (etd->dmem_offset < 0) {
1011 dev_dbg(imx21->dev, "Queuing etd %d for DMEM\n", etd_num);
1013 list_add_tail(&etd->queue, &imx21->queue_for_dmem);
1018 (((u32) etd->dmem_offset + (u32) maxpacket) << DW1_YBUFSRTAD) |
1019 (u32) etd->dmem_offset);
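
For control/bulk transfers the DMEM chunk is sized for double buffering (two maxpacket halves when the transfer exceeds one packet), and ETD dword 1 carries both buffer start addresses: the Y buffer in the high field, the X buffer in the low bits. A sketch of that packing, assuming DW1_YBUFSRTAD is the 16-bit shift of the Y-buffer start field on this controller:

#include <stdint.h>
#include <stdio.h>

#define DW1_YBUFSRTAD 16	/* assumed shift of the Y-buffer field */

/* Pack the X and Y DMEM buffer offsets into one ETD dword. */
static uint32_t pack_dword1(uint32_t dmem_offset, uint32_t maxpacket)
{
	uint32_t x = dmem_offset;		/* X buffer */
	uint32_t y = dmem_offset + maxpacket;	/* Y buffer right after X */

	return (y << DW1_YBUFSRTAD) | x;
}

int main(void)
{
	printf("dword1=0x%08x\n", pack_dword1(0x40, 64));
	return 0;
}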
1024 dev_vdbg(imx21->dev, "Activating etd %d for %d bytes %s\n",
1033 struct etd_priv *etd = &imx21->etd[etd_num];
1034 struct urb *urb = etd->urb;
1046 bytes_xfrd = etd->len - (etd_readl(imx21, etd_num, 3) & 0x1fffff);
1057 if (etd->bounce_buffer) {
1058 memcpy(etd->cpu_buffer, etd->bounce_buffer, bytes_xfrd);
1060 etd->dma_handle, etd->len, DMA_FROM_DEVICE);
1061 } else if (!etd->dma_handle && bytes_xfrd) {/* PIO */
1062 memcpy_fromio(etd->cpu_buffer,
1063 imx21->regs + USBOTG_DMEM + etd->dmem_offset,
1068 kfree(etd->bounce_buffer);
1069 etd->bounce_buffer = NULL;
1070 free_dmem(imx21, etd);
1128 nonisoc_urb_completed_for_etd(imx21, etd, cc_to_error[cc]);
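
On completion, nonisoc_etd_done() recovers the transferred byte count from the residual in ETD dword 3 (its low 21 bits hold the untransferred length) and copies data back to the caller: from the bounce buffer after unmapping if one was used, or straight out of DMEM via memcpy_fromio for PIO. The residual arithmetic above, in isolation:

#include <stdint.h>
#include <stdio.h>

/* Bytes actually transferred = requested length minus the residual
 * left in the low 21 bits of ETD dword 3. */
static uint32_t bytes_transferred(uint32_t len, uint32_t dword3)
{
	return len - (dword3 & 0x1fffff);
}

int main(void)
{
	/* e.g. a 512-byte request with 64 bytes left over => 448 done */
	printf("%u\n", bytes_transferred(512, 64));
	return 0;
}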
1146 ep_priv->etd[i] = -1;
1158 struct etd_priv *etd;
1210 if (ep_priv->etd[0] < 0) {
1218 ep_priv->etd[0] = alloc_etd(imx21);
1219 if (ep_priv->etd[0] < 0) {
1230 etd = &imx21->etd[ep_priv->etd[0]];
1231 if (etd->urb == NULL) {
1232 DEBUG_LOG_FRAME(imx21, etd, last_req);
1274 int etd_num = ep_priv->etd[0];
1276 struct etd_priv *etd = &imx21->etd[etd_num];
1279 free_dmem(imx21, etd);
1280 etd->urb = NULL;
1281 kfree(etd->bounce_buffer);
1282 etd->bounce_buffer = NULL;
1312 struct etd_priv *etd = &imx21->etd[etd_num];
1316 DEBUG_LOG_FRAME(imx21, etd, last_int);
1337 if (etd->active_count && !enabled) /* suspicious... */
1340 if (!sof || enabled || !etd->active_count)
1347 if (++etd->active_count < 10)
1361 etd->activated_frame,
1362 etd->disactivated_frame,
1363 etd->last_int_frame,
1364 etd->last_req_frame,
1368 etd->active_count = 0;
1372 if (etd->ep == NULL || etd->urb == NULL) {
1374 "Interrupt for unexpected etd %d"
1376 etd_num, etd->ep, etd->urb);
1381 if (usb_pipeisoc(etd->urb->pipe))
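
The interrupt-handler fragments above implement a workaround for ETDs that raise an interrupt without ever having been enabled: on each SOF the driver retries the activation, bounded by an active_count of 10, after which it dumps the recorded activated/disactivated/last_int/last_req frame numbers and gives up. A rough sketch of that bounded-retry shape (handle_suspicious_irq() and reactivate() are invented names):

#include <stdio.h>

#define MAX_RETRIES 10

struct etd_state {
	int active_count;	/* 0 = idle, >0 = activation attempts */
};

/* Hypothetical stand-in for re-writing the ETD enable bit. */
static void reactivate(struct etd_state *etd)
{
	(void)etd;
}

/*
 * The ETD raised an interrupt but was never actually enabled: retry
 * the activation a bounded number of times, then give up and treat
 * the transfer as done so the URB can complete.
 */
static int handle_suspicious_irq(struct etd_state *etd)
{
	if (++etd->active_count < MAX_RETRIES) {
		reactivate(etd);
		return 0;		/* not done yet */
	}
	fprintf(stderr, "etd still stuck after %d tries\n", MAX_RETRIES);
	etd->active_count = 0;
	return 1;			/* treat as done */
}

int main(void)
{
	struct etd_state etd = { 9 };	/* one try left */

	printf("done=%d\n", handle_suspicious_irq(&etd));
	return 0;
}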
1435 if (ep_priv->etd[i] > -1)
1436 dev_dbg(imx21->dev, "free etd %d for disable\n",
1437 ep_priv->etd[i]);
1439 free_etd(imx21, ep_priv->etd[i]);
1446 if (imx21->etd[i].alloc && imx21->etd[i].ep == ep) {
1448 "Active etd %d for disabled ep=%p!\n", i, ep);