Lines Matching refs: pep

75 			     struct cdns2_endpoint *pep)
79 dma_index = readl(&pdev->adma_regs->ep_traddr) - pep->ring.dma;
99 dma_addr_t cdns2_trb_virt_to_dma(struct cdns2_endpoint *pep,
102 u32 offset = (char *)trb - (char *)pep->ring.trbs;
104 return pep->ring.dma + offset;
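
The cdns2_trb_virt_to_dma() matches above (source lines 99-104) show a TRB's DMA address being derived from its byte offset inside the ring segment. Below is a minimal, self-contained C sketch of that pattern; the struct layouts, field names, and the trb_virt_to_dma() helper are simplified placeholders, not the driver's real cdns2 types.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct trb { uint32_t buffer, length, control; };   /* placeholder TRB layout */

struct ring {
	struct trb *trbs;   /* CPU virtual address of the TRB segment */
	uint64_t dma;       /* DMA (bus) address of the same segment  */
};

/* The segment is one contiguous DMA allocation, so a TRB's byte offset is
 * identical in the virtual and DMA address spaces. */
static uint64_t trb_virt_to_dma(const struct ring *ring, const struct trb *trb)
{
	size_t offset = (const char *)trb - (const char *)ring->trbs;

	return ring->dma + offset;
}

int main(void)
{
	struct trb segment[8];
	struct ring ring = { .trbs = segment, .dma = 0x10000000ULL };

	/* TRB index 3 lies 3 * sizeof(struct trb) bytes into the segment. */
	printf("0x%llx\n",
	       (unsigned long long)trb_virt_to_dma(&ring, &segment[3]));
	return 0;
}
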
107 static void cdns2_free_tr_segment(struct cdns2_endpoint *pep)
109 struct cdns2_device *pdev = pep->pdev;
110 struct cdns2_ring *ring = &pep->ring;
112 if (pep->ring.trbs) {
119 static int cdns2_alloc_tr_segment(struct cdns2_endpoint *pep)
121 struct cdns2_device *pdev = pep->pdev;
125 ring = &pep->ring;
137 if (!pep->num)
153 static void cdns2_ep_stall_flush(struct cdns2_endpoint *pep)
155 struct cdns2_device *pdev = pep->pdev;
158 trace_cdns2_ep_halt(pep, 1, 1);
165 pep->ep_state |= EP_STALLED;
166 pep->ep_state &= ~EP_STALL_PENDING;
227 static struct cdns2_trb *cdns2_next_trb(struct cdns2_endpoint *pep,
230 if (trb == (pep->ring.trbs + (TRBS_PER_SEGMENT - 1)))
231 return pep->ring.trbs;
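
cdns2_next_trb() (source lines 227-231) wraps back to the first TRB when the caller sits on the segment's last slot, which in the driver holds the link TRB. A self-contained sketch of that wrap-around follows; the TRBS_PER_SEGMENT value and the placeholder types are assumptions made only for this illustration.

#include <stdint.h>

#define TRBS_PER_SEGMENT 40   /* assumed segment size for this sketch */

struct trb { uint32_t buffer, length, control; };

struct ring { struct trb trbs[TRBS_PER_SEGMENT]; };

/* Step to the following TRB; the last slot (the link TRB in the driver)
 * wraps the walk back to the start of the segment. */
struct trb *next_trb(struct ring *ring, struct trb *trb)
{
	if (trb == &ring->trbs[TRBS_PER_SEGMENT - 1])
		return &ring->trbs[0];

	return trb + 1;
}
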
236 void cdns2_gadget_giveback(struct cdns2_endpoint *pep,
241 struct cdns2_device *pdev = pep->pdev;
248 usb_gadget_unmap_request_by_dev(pdev->dev, request, pep->dir);
257 usb_gadget_giveback_request(&pep->endpoint, request);
262 cdns2_gadget_ep_free_request(&pep->endpoint, request);
265 static void cdns2_wa1_restore_cycle_bit(struct cdns2_endpoint *pep)
268 if (pep->wa1_set) {
269 trace_cdns2_wa1(pep, "restore cycle bit");
271 pep->wa1_set = 0;
272 pep->wa1_trb_index = 0xFFFF;
273 if (pep->wa1_cycle_bit)
274 pep->wa1_trb->control |= cpu_to_le32(0x1);
276 pep->wa1_trb->control &= cpu_to_le32(~0x1);
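
The cdns2_wa1_restore_cycle_bit() matches (source lines 265-276) show the workaround handing a guarded TRB back to the DMA engine by rewriting bit 0, the cycle bit, of its little-endian control word. The sketch below isolates just that bit manipulation; TRB_CYCLE as bit 0 follows from lines 273-276, while the cpu_to_le32() stand-in and the restore_cycle_bit() name are assumptions for this example (the helper is an identity on a little-endian host).

#include <stdint.h>

#define TRB_CYCLE 0x1u   /* bit 0 of the TRB control word */

/* Stand-in for the kernel's cpu_to_le32(); identity on little-endian hosts. */
static inline uint32_t cpu_to_le32(uint32_t v) { return v; }

struct trb { uint32_t buffer, length, control; };

/* Write the saved cycle value back into the guard TRB so the controller may
 * consume it, mirroring the pattern in cdns2_wa1_restore_cycle_bit(). */
void restore_cycle_bit(struct trb *trb, int cycle_bit)
{
	if (cycle_bit)
		trb->control |= cpu_to_le32(TRB_CYCLE);
	else
		trb->control &= cpu_to_le32(~TRB_CYCLE);
}
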
280 static int cdns2_wa1_update_guard(struct cdns2_endpoint *pep,
283 struct cdns2_device *pdev = pep->pdev;
285 if (!pep->wa1_set) {
291 pep->wa1_cycle_bit = pep->ring.pcs ? TRB_CYCLE : 0;
292 pep->wa1_set = 1;
293 pep->wa1_trb = trb;
294 pep->wa1_trb_index = pep->ring.enqueue;
295 trace_cdns2_wa1(pep, "set guard");
303 struct cdns2_endpoint *pep)
309 dma_index = cdns2_get_dma_pos(pdev, pep);
311 if (!doorbell || dma_index != pep->wa1_trb_index)
312 cdns2_wa1_restore_cycle_bit(pep);
316 struct cdns2_endpoint *pep,
324 ring = &pep->ring;
327 pep->ep_state |= EP_RING_FULL;
334 dma_index = cdns2_get_dma_pos(pdev, pep);
338 pep->ep_state |= EP_DEFERRED_DRDY;
353 if (pep->type == USB_ENDPOINT_XFER_ISOC || TRBS_PER_SEGMENT > 2)
363 static void cdns2_dbg_request_trbs(struct cdns2_endpoint *pep,
366 struct cdns2_trb *link_trb = pep->ring.trbs + (TRBS_PER_SEGMENT - 1);
372 trace_cdns2_queue_trb(pep, trb + i);
374 trb = pep->ring.trbs;
383 static unsigned int cdns2_count_trbs(struct cdns2_endpoint *pep,
388 if (pep->type == USB_ENDPOINT_XFER_ISOC) {
398 if (pep->interval > 1)
399 num_trbs = pep->dir ? num_trbs * pep->interval : 1;
400 } else if (pep->dir) {
412 static unsigned int cdns2_count_sg_trbs(struct cdns2_endpoint *pep,
423 num_trbs += cdns2_count_trbs(pep, sg_dma_address(sg), len);
435 if (pep->type == USB_ENDPOINT_XFER_ISOC) {
498 static void cdns2_ep_tx_isoc(struct cdns2_endpoint *pep,
523 num_tds = pep->dir ? pep->interval : 1;
564 if (enqd_len + trb_buff_len >= full_len || !pep->dir)
574 if (pep->ring.pcs == 0)
577 control |= pep->ring.pcs;
584 TRB_BURST(pep->pdev->burst_opt[trb_buff_len]);
586 trb = pep->ring.trbs + pep->ring.enqueue;
607 preq->end_trb = pep->ring.enqueue;
609 cdns2_ep_inc_enq(&pep->ring);
614 static void cdns2_ep_tx_bulk(struct cdns2_endpoint *pep,
635 ring = &pep->ring;
639 trb = pep->ring.trbs + ring->enqueue;
641 if (pep->dir && sg_iter == trbs_per_td - 1) {
645 cdns2_ep_inc_enq(&pep->ring);
651 trb->buffer = cpu_to_le32(pep->ring.dma +
667 if (sg_iter == (trbs_per_td - (pep->dir ? 2 : 1)))
673 trb->length = cpu_to_le32(TRB_BURST(pep->trb_burst_size) |
684 cdns2_ep_inc_enq(&pep->ring);
689 struct cdns2_endpoint *pep)
691 trace_cdns2_ring(pep);
708 trace_cdns2_doorbell_epx(pep, readl(&pdev->adma_regs->ep_traddr));
712 struct cdns2_endpoint *pep)
721 if (!pep->dir) {
723 writel(pep->ring.dma + pep->ring.dequeue,
732 buffer = pep->ring.dma + pep->ring.dequeue * TRB_SIZE;
735 trb = &pep->ring.trbs[TRBS_PER_SEGMENT];
750 if (hw_ccs != pep->ring.ccs)
754 writel(pep->ring.dma + (TRBS_PER_SEGMENT * TRB_SIZE),
761 static int cdns2_ep_run_transfer(struct cdns2_endpoint *pep,
764 struct cdns2_device *pdev = pep->pdev;
770 cdns2_select_ep(pdev, pep->endpoint.address);
773 num_trbs = cdns2_count_sg_trbs(pep, &preq->request);
775 num_trbs = cdns2_count_trbs(pep, preq->request.dma,
778 ret = cdns2_prepare_ring(pdev, pep, num_trbs);
782 ring = &pep->ring;
786 if (usb_endpoint_xfer_isoc(pep->endpoint.desc)) {
787 cdns2_ep_tx_isoc(pep, preq, num_trbs);
789 togle_pcs = cdns2_wa1_update_guard(pep, ring->trbs + ring->enqueue);
790 cdns2_ep_tx_bulk(pep, preq, num_trbs);
804 cdns2_wa1_tray_restore_cycle_bit(pdev, pep);
805 cdns2_dbg_request_trbs(pep, preq);
807 if (!pep->wa1_set && !(pep->ep_state & EP_STALLED) && !pep->skip) {
808 if (pep->type == USB_ENDPOINT_XFER_ISOC) {
809 ret = cdns2_prepare_first_isoc_transfer(pdev, pep);
814 cdns2_set_drdy(pdev, pep);
822 struct cdns2_endpoint *pep)
827 while (!list_empty(&pep->deferred_list)) {
828 preq = cdns2_next_preq(&pep->deferred_list);
830 ret = cdns2_ep_run_transfer(pep, preq);
834 list_move_tail(&preq->list, &pep->pending_list);
837 pep->ep_state &= ~EP_RING_FULL;
858 * Then, we check if cycle bit for index pep->dequeue
878 static bool cdns2_trb_handled(struct cdns2_endpoint *pep,
881 struct cdns2_device *pdev = pep->pdev;
888 ring = &pep->ring;
889 current_index = cdns2_get_dma_pos(pdev, pep);
925 if (TRBS_PER_SEGMENT == 2 && pep->type != USB_ENDPOINT_XFER_ISOC) {
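
cdns2_trb_handled() (source lines 878-925) decides whether the controller has finished a TRB; the comment fragment near line 858 points at a cycle-bit check against the dequeue index, and line 889 shows the current DMA position being read as part of that decision. The sketch below shows only the generic cycle-bit ownership test such rings use, not the driver's full logic (which also weighs the hardware position and segment size); the trb_cycle_owned() name, fields, and le32_to_cpu() stand-in are placeholders.

#include <stdint.h>

#define TRB_CYCLE 0x1u

/* Stand-in for the kernel's le32_to_cpu(); identity on little-endian hosts. */
static inline uint32_t le32_to_cpu(uint32_t v) { return v; }

struct trb { uint32_t buffer, length, control; };

struct ring {
	struct trb *trbs;
	unsigned int dequeue;   /* software consumer index               */
	uint8_t ccs;            /* current consumer cycle state (0 or 1) */
};

/* The TRB at the dequeue index belongs to software again once its cycle bit
 * matches the ring's current consumer cycle state. */
int trb_cycle_owned(const struct ring *ring)
{
	const struct trb *trb = &ring->trbs[ring->dequeue];

	return (le32_to_cpu(trb->control) & TRB_CYCLE) == ring->ccs;
}
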
950 struct cdns2_endpoint *pep,
956 trb = pep->ring.trbs + pep->ring.dequeue;
960 trace_cdns2_complete_trb(pep, trb);
961 cdns2_ep_inc_deq(&pep->ring);
962 trb = cdns2_next_trb(pep, trb);
965 cdns2_gadget_giveback(pep, preq, 0);
966 cdns2_prepare_first_isoc_transfer(pdev, pep);
967 pep->skip = false;
968 cdns2_set_drdy(pdev, pep);
972 struct cdns2_endpoint *pep)
978 while (!list_empty(&pep->pending_list)) {
979 preq = cdns2_next_preq(&pep->pending_list);
980 trb = pep->ring.trbs + pep->ring.dequeue;
988 trace_cdns2_complete_trb(pep, trb);
989 cdns2_ep_inc_deq(&pep->ring);
990 trb = pep->ring.trbs + pep->ring.dequeue;
997 cdns2_select_ep(pdev, pep->endpoint.address);
999 while (cdns2_trb_handled(pep, preq)) {
1005 trb = pep->ring.trbs + pep->ring.dequeue;
1006 trace_cdns2_complete_trb(pep, trb);
1008 if (pep->dir && pep->type == USB_ENDPOINT_XFER_ISOC)
1018 cdns2_ep_inc_deq(&pep->ring);
1022 cdns2_gadget_giveback(pep, preq, 0);
1028 if (pep->type != USB_ENDPOINT_XFER_ISOC &&
1034 if (pep->skip && preq)
1035 cdns2_skip_isoc_td(pdev, pep, preq);
1037 if (!(pep->ep_state & EP_STALLED) &&
1038 !(pep->ep_state & EP_STALL_PENDING))
1039 cdns2_start_all_request(pdev, pep);
1051 static void cdns2_rearm_transfer(struct cdns2_endpoint *pep, u8 rearm)
1053 struct cdns2_device *pdev = pep->pdev;
1055 cdns2_wa1_restore_cycle_bit(pep);
1058 trace_cdns2_ring(pep);
1067 trace_cdns2_doorbell_epx(pep,
1072 static void cdns2_handle_epx_interrupt(struct cdns2_endpoint *pep)
1074 struct cdns2_device *pdev = pep->pdev;
1079 cdns2_select_ep(pdev, pep->endpoint.address);
1081 trace_cdns2_epx_irq(pdev, pep);
1086 if (pep->type == USB_ENDPOINT_XFER_ISOC) {
1090 mult = USB_EP_MAXP_MULT(pep->endpoint.desc->wMaxPacketSize);
1091 cs = pep->dir ? readb(&pdev->epx_regs->ep[pep->num - 1].txcs) :
1092 readb(&pdev->epx_regs->ep[pep->num - 1].rxcs);
1109 writel(DMA_EP_CMD_DFLUSH, &pep->pdev->adma_regs->ep_cmd);
1112 readl_poll_timeout_atomic(&pep->pdev->adma_regs->ep_cmd, val,
1115 pep->skip = true;
1118 if (ep_sts_reg & DMA_EP_STS_TRBERR || pep->skip) {
1119 if (pep->ep_state & EP_STALL_PENDING &&
1121 cdns2_ep_stall_flush(pep);
1130 if (pep->type == USB_ENDPOINT_XFER_ISOC && !pep->wa1_set) {
1131 if (!pep->dir)
1135 cdns2_transfer_completed(pdev, pep);
1136 if (pep->ep_state & EP_DEFERRED_DRDY) {
1137 pep->ep_state &= ~EP_DEFERRED_DRDY;
1138 cdns2_set_drdy(pdev, pep);
1144 cdns2_transfer_completed(pdev, pep);
1146 if (!(pep->ep_state & EP_STALLED) &&
1147 !(pep->ep_state & EP_STALL_PENDING)) {
1148 if (pep->ep_state & EP_DEFERRED_DRDY) {
1149 pep->ep_state &= ~EP_DEFERRED_DRDY;
1150 cdns2_start_all_request(pdev, pep);
1152 cdns2_rearm_transfer(pep, pep->wa1_set);
1160 cdns2_transfer_completed(pdev, pep);
1342 struct cdns2_endpoint *pep;
1351 pep = &pdev->eps[i];
1353 if (!(pep->ep_state & EP_CLAIMED))
1356 if (pep->dir)
1357 min_buf_tx += pep->buffering;
1359 min_buf_rx += pep->buffering;
1363 pep = &pdev->eps[i];
1365 if (!(pep->ep_state & EP_CLAIMED))
1368 if (pep->dir) {
1371 if (free + pep->buffering >= 4)
1374 free = free + pep->buffering;
1376 min_buf_tx = min_buf_tx - pep->buffering + free;
1378 pep->buffering = free;
1381 &pdev->epx_regs->txstaddr[pep->num - 1]);
1382 pdev->epx_regs->txstaddr[pep->num - 1] = tx_offset;
1385 pep->name, tx_offset, pep->buffering);
1387 tx_offset += pep->buffering * 1024;
1391 if (free + pep->buffering >= 4)
1394 free = free + pep->buffering;
1396 min_buf_rx = min_buf_rx - pep->buffering + free;
1398 pep->buffering = free;
1400 &pdev->epx_regs->rxstaddr[pep->num - 1]);
1403 pep->name, rx_offset, pep->buffering);
1405 rx_offset += pep->buffering * 1024;
1411 static int cdns2_ep_config(struct cdns2_endpoint *pep, bool enable)
1413 bool is_iso_ep = (pep->type == USB_ENDPOINT_XFER_ISOC);
1414 struct cdns2_device *pdev = pep->pdev;
1422 switch (pep->type) {
1430 mult = USB_EP_MAXP_MULT(pep->endpoint.desc->wMaxPacketSize);
1434 if (pep->dir) {
1435 set_reg_bit_8(&pdev->epx_regs->isoautoarm, BIT(pep->num));
1436 set_reg_bit_8(&pdev->epx_regs->isoautodump, BIT(pep->num));
1437 set_reg_bit_8(&pdev->epx_regs->isodctrl, BIT(pep->num));
1453 ep_cfg |= (EPX_CON_VAL | (pep->buffering - 1));
1455 if (pep->dir) {
1457 writew(max_packet_size, &pdev->epx_regs->txmaxpack[pep->num - 1]);
1458 writeb(ep_cfg, &pdev->epx_regs->ep[pep->num - 1].txcon);
1460 writew(max_packet_size, &pdev->epx_regs->rxmaxpack[pep->num - 1]);
1461 writeb(ep_cfg, &pdev->epx_regs->ep[pep->num - 1].rxcon);
1464 writeb(pep->num | dir | FIFOCTRL_FIFOAUTO,
1466 writeb(pep->num | dir, &pdev->epx_regs->endprst);
1467 writeb(pep->num | ENDPRST_FIFORST | ENDPRST_TOGRST | dir,
1471 pep->trb_burst_size = 128;
1473 pep->trb_burst_size = 64;
1475 pep->trb_burst_size = 16;
1477 cdns2_select_ep(pdev, pep->num | pep->dir);
1493 trace_cdns2_epx_hw_cfg(pdev, pep);
1496 pep->name, max_packet_size, ep_cfg);
1504 struct cdns2_endpoint *pep = ep_to_cdns2_ep(ep);
1511 preq->pep = pep;
1531 struct cdns2_endpoint *pep;
1542 pep = ep_to_cdns2_ep(ep);
1543 pdev = pep->pdev;
1545 if (dev_WARN_ONCE(pdev->dev, pep->ep_state & EP_ENABLED,
1546 "%s is already enabled\n", pep->name))
1551 pep->type = usb_endpoint_type(desc);
1552 pep->interval = desc->bInterval ? BIT(desc->bInterval - 1) : 0;
1555 if (pep->type == USB_ENDPOINT_XFER_INT)
1556 pep->interval = desc->bInterval;
1558 if (pep->interval > ISO_MAX_INTERVAL &&
1559 pep->type == USB_ENDPOINT_XFER_ISOC) {
1561 ISO_MAX_INTERVAL, pep->interval);
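
The ep_enable matches around source lines 1551-1561 derive the endpoint's polling interval from the descriptor: 2^(bInterval - 1) in general, the raw bInterval for interrupt endpoints (line 1556; the driver may gate this on conditions not visible in this listing), and a rejection of isochronous intervals above ISO_MAX_INTERVAL. A small sketch of that derivation; the ep_interval() helper and the ISO_MAX_INTERVAL value below are assumed purely for illustration.

#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1u << (n))
#define ISO_MAX_INTERVAL 8   /* assumed limit, for illustration only */

/* Mirrors the pattern near line 1552: interrupt endpoints keep bInterval
 * as-is, everything else expands it to 2^(bInterval - 1). */
unsigned int ep_interval(uint8_t bInterval, int is_interrupt)
{
	if (is_interrupt)
		return bInterval;

	return bInterval ? BIT(bInterval - 1) : 0;
}

int main(void)
{
	/* bInterval = 4 on an isochronous endpoint gives an interval of 8,
	 * which is within the assumed ISO_MAX_INTERVAL here. */
	printf("interval: %u\n", ep_interval(4, 0));
	printf("within limit: %d\n", ep_interval(4, 0) <= ISO_MAX_INTERVAL);
	return 0;
}
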
1575 if (pep->type == USB_ENDPOINT_XFER_ISOC && !pep->dir)
1578 ret = cdns2_alloc_tr_segment(pep);
1582 ret = cdns2_ep_config(pep, enable);
1584 cdns2_free_tr_segment(pep);
1589 trace_cdns2_gadget_ep_enable(pep);
1591 pep->ep_state &= ~(EP_STALLED | EP_STALL_PENDING);
1592 pep->ep_state |= EP_ENABLED;
1593 pep->wa1_set = 0;
1594 pep->ring.enqueue = 0;
1595 pep->ring.dequeue = 0;
1597 pep->ring.pcs = !!DMA_EP_STS_CCS(reg);
1598 pep->ring.ccs = !!DMA_EP_STS_CCS(reg);
1600 writel(pep->ring.dma, &pdev->adma_regs->ep_traddr);
1603 pep->ring.free_trbs = TRBS_PER_SEGMENT - 1;
1613 struct cdns2_endpoint *pep;
1622 pep = ep_to_cdns2_ep(ep);
1623 pdev = pep->pdev;
1625 if (dev_WARN_ONCE(pdev->dev, !(pep->ep_state & EP_ENABLED),
1626 "%s is already disabled\n", pep->name))
1631 trace_cdns2_gadget_ep_disable(pep);
1650 while (!list_empty(&pep->pending_list)) {
1651 preq = cdns2_next_preq(&pep->pending_list);
1652 cdns2_gadget_giveback(pep, preq, -ESHUTDOWN);
1655 while (!list_empty(&pep->deferred_list)) {
1656 preq = cdns2_next_preq(&pep->deferred_list);
1657 cdns2_gadget_giveback(pep, preq, -ESHUTDOWN);
1661 pep->ep_state &= ~EP_ENABLED;
1668 static int cdns2_ep_enqueue(struct cdns2_endpoint *pep,
1672 struct cdns2_device *pdev = pep->pdev;
1680 ret = usb_gadget_map_request_by_dev(pdev->dev, request, pep->dir);
1686 list_add_tail(&preq->list, &pep->deferred_list);
1689 if (!(pep->ep_state & EP_STALLED) && !(pep->ep_state & EP_STALL_PENDING))
1690 cdns2_start_all_request(pdev, pep);
1700 struct cdns2_endpoint *pep;
1708 pep = ep_to_cdns2_ep(ep);
1709 pdev = pep->pdev;
1711 if (!(pep->ep_state & EP_ENABLED)) {
1713 pep->name);
1720 ret = cdns2_ep_enqueue(pep, preq, gfp_flags);
1731 ret = cdns2_ep_enqueue(pep, preq, gfp_flags);
1742 struct cdns2_endpoint *pep;
1752 pep = ep_to_cdns2_ep(ep);
1753 if (!pep->endpoint.desc) {
1754 dev_err(pep->pdev->dev, "%s: can't dequeue to disabled endpoint\n",
1755 pep->name);
1760 if (!(pep->ep_state & EP_ENABLED))
1763 spin_lock_irqsave(&pep->pdev->lock, flags);
1768 list_for_each_entry_safe(preq, preq_temp, &pep->pending_list, list) {
1775 list_for_each_entry_safe(preq, preq_temp, &pep->deferred_list, list) {
1788 writel(DMA_EP_CMD_DFLUSH, &pep->pdev->adma_regs->ep_cmd);
1791 readl_poll_timeout_atomic(&pep->pdev->adma_regs->ep_cmd, val,
1794 buffer = cpu_to_le32(TRB_BUFFER(pep->ring.dma +
1803 trace_cdns2_queue_trb(pep, link_trb);
1804 link_trb = cdns2_next_trb(pep, link_trb);
1807 if (pep->wa1_trb == preq->trb)
1808 cdns2_wa1_restore_cycle_bit(pep);
1811 cdns2_gadget_giveback(pep, cur_preq, -ECONNRESET);
1813 preq = cdns2_next_preq(&pep->pending_list);
1815 cdns2_rearm_transfer(pep, 1);
1818 spin_unlock_irqrestore(&pep->pdev->lock, flags);
1823 struct cdns2_endpoint *pep,
1829 if (!(pep->ep_state & EP_ENABLED))
1832 if (pep->dir) {
1834 conf = &pdev->epx_regs->ep[pep->num - 1].txcon;
1836 conf = &pdev->epx_regs->ep[pep->num - 1].rxcon;
1844 preq = cdns2_next_preq(&pep->pending_list);
1853 trace_cdns2_ep_halt(pep, 0, 0);
1856 writeb(dir | pep->num, &pdev->epx_regs->endprst);
1857 writeb(dir | ENDPRST_TOGRST | pep->num,
1862 pep->ep_state &= ~(EP_STALLED | EP_STALL_PENDING);
1868 cdns2_rearm_transfer(pep, 1);
1871 cdns2_start_all_request(pdev, pep);
1873 trace_cdns2_ep_halt(pep, 1, 0);
1875 writeb(dir | pep->num, &pdev->epx_regs->endprst);
1876 writeb(dir | ENDPRST_FIFORST | pep->num,
1878 pep->ep_state |= EP_STALLED;
1887 struct cdns2_endpoint *pep = ep_to_cdns2_ep(ep);
1888 struct cdns2_device *pdev = pep->pdev;
1895 preq = cdns2_next_preq(&pep->pending_list);
1897 trace_cdns2_ep_busy_try_halt_again(pep);
1903 pep->ep_state &= ~EP_WEDGE;
1905 ret = cdns2_halt_endpoint(pdev, pep, value);
1914 struct cdns2_endpoint *pep = ep_to_cdns2_ep(ep);
1917 pep->ep_state |= EP_WEDGE;
1926 struct cdns2_endpoint *pep;
1939 pep = ep_to_cdns2_ep(ep);
1941 if (pep->num != num)
1944 ep_correct = (pep->endpoint.caps.dir_in &&
1946 (pep->endpoint.caps.dir_out &&
1949 if (ep_correct && !(pep->ep_state & EP_CLAIMED))
1950 return pep;
1966 struct cdns2_endpoint *pep;
1969 pep = cdns2_find_available_ep(pdev, desc);
1970 if (IS_ERR(pep)) {
1978 pep->buffering = 4;
1980 pep->buffering = 1;
1982 pep->ep_state |= EP_CLAIMED;
1985 return &pep->endpoint;
2130 struct cdns2_endpoint *pep;
2139 pep = ep_to_cdns2_ep(ep);
2140 bEndpointAddress = pep->num | pep->dir;
2178 struct cdns2_endpoint *pep;
2192 pep = &pdev->eps[i];
2193 pep->pdev = pdev;
2194 pep->num = epnum;
2196 pep->dir = direction ? USB_DIR_IN : USB_DIR_OUT;
2197 pep->idx = i;
2203 snprintf(pep->name, sizeof(pep->name), "ep%d%s",
2206 cdns2_init_ep0(pdev, pep);
2208 ret = cdns2_alloc_tr_segment(pep);
2214 snprintf(pep->name, sizeof(pep->name), "ep%d%s",
2216 pep->endpoint.name = pep->name;
2218 usb_ep_set_maxpacket_limit(&pep->endpoint, 1024);
2219 pep->endpoint.ops = &cdns2_gadget_ep_ops;
2220 list_add_tail(&pep->endpoint.ep_list, &pdev->gadget.ep_list);
2222 pep->endpoint.caps.dir_in = direction;
2223 pep->endpoint.caps.dir_out = !direction;
2225 pep->endpoint.caps.type_iso = 1;
2226 pep->endpoint.caps.type_bulk = 1;
2227 pep->endpoint.caps.type_int = 1;
2230 pep->endpoint.name = pep->name;
2231 pep->ep_state = 0;
2235 pep->name,
2236 (pep->endpoint.caps.type_control) ? "yes" : "no",
2237 (pep->endpoint.caps.type_int) ? "yes" : "no",
2238 (pep->endpoint.caps.type_bulk) ? "yes" : "no",
2239 (pep->endpoint.caps.type_iso) ? "yes" : "no",
2240 (pep->endpoint.caps.dir_in) ? "yes" : "no",
2241 (pep->endpoint.caps.dir_out) ? "yes" : "no");
2243 INIT_LIST_HEAD(&pep->pending_list);
2244 INIT_LIST_HEAD(&pep->deferred_list);