Lines matching defs:hw_ep
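(The matches below appear to come from the MUSB host-side driver, musb_host.c in the Linux kernel; exact line numbers vary across kernel versions. The leading number on each line is the line number in that file. hw_ep is a pointer to struct musb_hw_ep, the per-hardware-endpoint state: register base, FIFO sizes, DMA channels, and the queue heads (qh) bound to it.)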
203 struct musb_hw_ep *hw_ep = qh->hw_ep;
204 int epnum = hw_ep->epnum;
234 musb_ep_set_qh(hw_ep, is_in, qh);
267 hw_ep->tx_channel ? "dma" : "pio");
269 if (!hw_ep->tx_channel)
270 musb_h_tx_start(hw_ep);
272 musb_h_tx_dma_start(hw_ep);
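Lines 267-272 show how a TX URB is kicked off: by PIO via musb_h_tx_start() when no DMA channel was claimed, by DMA otherwise. For reference, musb_h_tx_start() is a small helper defined near the top of the same file; it looks roughly like this:

    static inline void musb_h_tx_start(struct musb_hw_ep *ep)
    {
            u16     txcsr;

            /* NOTE: no locks here; caller should lock and select EP */
            if (ep->epnum) {
                    txcsr = musb_readw(ep->regs, MUSB_TXCSR);
                    txcsr |= MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_H_WZC_BITS;
                    musb_writew(ep->regs, MUSB_TXCSR, txcsr);
            } else {
                    /* endpoint 0: a SETUP packet is started via CSR0 */
                    txcsr = MUSB_CSR0_H_SETUPPKT | MUSB_CSR0_TXPKTRDY;
                    musb_writew(ep->regs, MUSB_CSR0, txcsr);
            }
    }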
297 struct musb_hw_ep *hw_ep, int is_in)
299 struct musb_qh *qh = musb_ep_get_qh(hw_ep, is_in);
300 struct musb_hw_ep *ep = qh->hw_ep;
328 qh = musb_ep_get_qh(hw_ep, is_in);
385 hw_ep->epnum, is_in ? 'R' : 'T', next_urb(qh));
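musb_ep_set_qh() (line 234) and musb_ep_get_qh() (lines 299, 328, and later) are the accessors that bind a qh to a hardware endpoint. Roughly, as defined earlier in the same file; note the is_shared_fifo case, where one FIFO serves both directions and both slots are updated:

    static inline void musb_ep_set_qh(struct musb_hw_ep *ep, int is_in,
                                      struct musb_qh *qh)
    {
            if (is_in != 0 || ep->is_shared_fifo)
                    ep->in_qh  = qh;
            if (is_in == 0 || ep->is_shared_fifo)
                    ep->out_qh = qh;
    }

    static inline struct musb_qh *musb_ep_get_qh(struct musb_hw_ep *ep,
                                                 int is_in)
    {
            return is_in ? ep->in_qh : ep->out_qh;
    }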
390 static u16 musb_h_flush_rxfifo(struct musb_hw_ep *hw_ep, u16 csr)
402 musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
403 musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
406 return musb_readw(hw_ep->regs, MUSB_RXCSR);
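The back-to-back writes at 402/403 are deliberate: with a double-buffered RX FIFO, FLUSHFIFO empties only one packet per write, so the CSR is written twice, and the read-back at 406 flushes the posted write. The whole helper, roughly:

    static u16 musb_h_flush_rxfifo(struct musb_hw_ep *hw_ep, u16 csr)
    {
            /* don't let the FIFO refill; ignore DMA, leave toggle alone */
            csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_RXPKTRDY;
            csr &= ~(MUSB_RXCSR_H_REQPKT
                    | MUSB_RXCSR_H_AUTOREQ
                    | MUSB_RXCSR_AUTOCLEAR);

            /* write 2x to allow double buffering */
            musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
            musb_writew(hw_ep->regs, MUSB_RXCSR, csr);

            /* flush writebuffer */
            return musb_readw(hw_ep->regs, MUSB_RXCSR);
    }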
421 struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
422 void __iomem *epio = hw_ep->regs;
423 struct musb_qh *qh = hw_ep->in_qh;
488 musb_read_fifo(hw_ep, length, buf);
493 musb_h_flush_rxfifo(hw_ep, csr);
573 struct musb_hw_ep *hw_ep, struct musb_qh *qh,
577 struct dma_channel *channel = hw_ep->tx_channel;
578 void __iomem *epio = hw_ep->regs;
600 can_bulk_split(hw_ep->musb, qh->type)))
612 struct musb_hw_ep *hw_ep,
619 struct dma_channel *channel = hw_ep->tx_channel;
631 struct musb_hw_ep *hw_ep, struct musb_qh *qh,
634 struct dma_channel *channel = hw_ep->tx_channel;
638 if (musb_dma_inventra(hw_ep->musb) || musb_dma_ux500(hw_ep->musb))
639 musb_tx_dma_set_mode_mentor(dma, hw_ep, qh, urb, offset,
641 else if (is_cppi_enabled(hw_ep->musb) || tusb_dma_omap(hw_ep->musb))
642 musb_tx_dma_set_mode_cppi_tusb(dma, hw_ep, qh, urb, offset,
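Lines 631-642 are the controller dispatch inside musb_tx_dma_program(): Mentor/Inventra and ux500 DMA engines are programmed one way, CPPI and TUSB OMAP engines another. A condensed sketch, assuming the ~v4.x-era signatures implied by the matches above (the &length/&mode out-parameters are an assumption):

    /* inside musb_tx_dma_program(), condensed; error handling elided */
    int res;
    u8 mode;

    if (musb_dma_inventra(hw_ep->musb) || musb_dma_ux500(hw_ep->musb))
            res = musb_tx_dma_set_mode_mentor(dma, hw_ep, qh, urb,
                                              offset, &length, &mode);
    else if (is_cppi_enabled(hw_ep->musb) || tusb_dma_omap(hw_ep->musb))
            res = musb_tx_dma_set_mode_cppi_tusb(dma, hw_ep, qh, urb,
                                                 offset, &length, &mode);
    else
            return false;       /* no DMA engine we know how to program */
    if (res)
            return false;
    /* hw_ep->tx_channel is then programmed with the chosen mode/length */

If channel programming fails afterwards, lines 657-661 release the channel and clear hw_ep->tx_channel so the transfer falls back to PIO.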
657 void __iomem *epio = hw_ep->regs;
661 hw_ep->tx_channel = NULL;
683 struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
684 void __iomem *epio = hw_ep->regs;
685 struct musb_qh *qh = musb_ep_get_qh(hw_ep, !is_out);
705 hw_ep->tx_channel = NULL;
711 dma_channel = is_out ? hw_ep->tx_channel : hw_ep->rx_channel;
714 dma_controller, hw_ep, is_out);
716 hw_ep->tx_channel = dma_channel;
718 hw_ep->rx_channel = dma_channel;
745 if (!hw_ep->tx_double_buffered)
746 musb_h_tx_flush_fifo(hw_ep);
763 if (!hw_ep->tx_double_buffered)
773 musb_h_ep0_flush_fifo(hw_ep);
789 qh->hb_mult = hw_ep->max_packet_sz_tx
807 load_count = min((u32) hw_ep->max_packet_sz_tx,
813 hw_ep, qh, urb, offset, len))
834 musb_write_fifo(hw_ep, load_count, buf);
838 musb_write_fifo(hw_ep, load_count, buf);
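Lines 807-838 are the PIO path of musb_ep_program() for TX: when there is no usable DMA channel (or programming it failed, clearing hw_ep->tx_channel at 705), at most one max-size packet is copied into the FIFO by the CPU and the remainder is fed from the TX IRQ handler. The core of it, roughly (buf/len come from the URB):

    /* PIO: preload at most one packet; the TX IRQ handler sends the rest */
    load_count = min((u32) hw_ep->max_packet_sz_tx, len);

    if (load_count) {
            qh->segsize = load_count;       /* remember how much we queued */
            musb_write_fifo(hw_ep, load_count, buf);
    }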
848 if (hw_ep->rx_reinit) {
856 csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
862 hw_ep->epnum, csr);
876 musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
877 csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
890 hw_ep->rx_channel = dma_channel = NULL;
897 musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
898 csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
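Lines 876-898 program and then start host RX: H_REQPKT asks the core to issue the first IN token, and each musb_writew() is followed by a musb_readw() of the same register so the posted write is flushed before the code moves on. Roughly:

    /* kick things off: request the first IN packet */
    csr |= MUSB_RXCSR_H_REQPKT;
    musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
    csr = musb_readw(hw_ep->regs, MUSB_RXCSR);  /* flush posted write */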
989 struct musb_hw_ep *hw_ep = musb->control_ep;
990 struct musb_qh *qh = hw_ep->in_qh;
1001 musb_read_fifo(hw_ep, fifo_count, fifo_dest);
1040 musb_write_fifo(hw_ep, fifo_count, fifo_dest);
1066 struct musb_hw_ep *hw_ep = musb->control_ep;
1067 void __iomem *epio = hw_ep->regs;
1068 struct musb_qh *qh = hw_ep->in_qh;
1128 musb_h_ep0_flush_fifo(hw_ep);
1142 musb_h_ep0_flush_fifo(hw_ep);
1178 musb_advance_schedule(musb, urb, hw_ep, 1);
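Note that the control-endpoint code (989-1178) always takes its qh from hw_ep->in_qh, even for OUT stages: endpoint 0 has a shared FIFO, so musb_ep_set_qh() filled both slots and either one is valid. The stall (1128) and error-cleanup (1142) paths both flush the FIFO before the URB is retired via musb_advance_schedule() at 1178.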
1208 struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
1209 void __iomem *epio = hw_ep->regs;
1210 struct musb_qh *qh = hw_ep->out_qh;
1227 dma = is_dma_capable() ? hw_ep->tx_channel : NULL;
1250 musb_bulk_nak_timeout(musb, hw_ep, 0);
1279 musb_h_tx_flush_fifo(hw_ep);
1415 musb_advance_schedule(musb, urb, hw_ep, USB_DIR_OUT);
1418 if (musb_tx_dma_program(musb->dma_controller, hw_ep, qh, urb,
1421 musb_h_tx_dma_start(hw_ep);
1432 * REVISIT: some docs say that when hw_ep->tx_double_buffered,
1454 musb_write_fifo(hw_ep, length, qh->sg_miter.addr);
1458 musb_write_fifo(hw_ep, length, urb->transfer_buffer + offset);
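Lines 1454-1458 are the tail of the TX IRQ handler, musb_host_tx(): the next chunk is copied into the FIFO (from the sg_miter when the URB carries a scatterlist, else straight from transfer_buffer), then TXPKTRDY is set to send it. Roughly:

    musb_write_fifo(hw_ep, length, urb->transfer_buffer + offset);
    qh->segsize = length;

    /* tell the core the packet is ready to go */
    musb_ep_select(mbase, epnum);
    musb_writew(epio, MUSB_TXCSR,
                MUSB_TXCSR_H_WZC_BITS | MUSB_TXCSR_TXPKTRDY);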
1471 struct musb_hw_ep *hw_ep,
1476 struct dma_channel *channel = hw_ep->rx_channel;
1477 void __iomem *epio = hw_ep->regs;
1489 musb_writew(hw_ep->regs, MUSB_RXCSR, val);
1496 struct musb_hw_ep *hw_ep,
1542 struct musb_hw_ep *hw_ep,
1547 struct dma_channel *channel = hw_ep->rx_channel;
1548 void __iomem *epio = hw_ep->regs;
1571 if (musb_dma_cppi41(hw_ep->musb))
1572 done = musb_rx_dma_iso_cppi41(dma, hw_ep, qh,
1612 struct musb_hw_ep *hw_ep,
1618 struct musb *musb = hw_ep->musb;
1619 void __iomem *epio = hw_ep->regs;
1620 struct dma_channel *channel = hw_ep->rx_channel;
1665 if (rx_count < hw_ep->max_packet_sz_rx) {
1699 hw_ep->rx_channel = NULL;
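Line 1699 (like 661, 705, and 890 on the other paths) is the standard fallback when DMA channel programming fails: release the channel and clear the hw_ep pointer so the transfer continues by PIO. The pattern, roughly:

    /* DMA setup failed: drop back to PIO for this endpoint */
    dma->channel_release(channel);
    hw_ep->rx_channel = NULL;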
1712 struct musb_hw_ep *hw_ep,
1721 struct musb_hw_ep *hw_ep,
1738 struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
1740 void __iomem *epio = hw_ep->regs;
1741 struct musb_qh *qh = hw_ep->in_qh;
1754 dma = is_dma_capable() ? hw_ep->rx_channel : NULL;
1768 musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
1813 musb_bulk_nak_timeout(musb, hw_ep, 1);
1841 musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
1889 musb_writew(hw_ep->regs, MUSB_RXCSR, val);
1893 done = musb_rx_dma_inventra_cppi41(c, hw_ep, qh, urb, xfer_len);
1894 musb_dbg(hw_ep->musb,
1922 musb_dbg(hw_ep->musb,
1930 if (musb_rx_dma_in_inventra_cppi41(c, hw_ep, qh, urb,
1990 musb_advance_schedule(musb, urb, hw_ep, USB_DIR_IN);
2007 struct musb_hw_ep *hw_ep = NULL;
2016 hw_ep = musb->control_ep;
2032 for (epnum = 1, hw_ep = musb->endpoints + 1;
2034 epnum++, hw_ep++) {
2037 if (musb_ep_get_qh(hw_ep, is_in) != NULL)
2040 if (hw_ep == musb->bulk_ep)
2044 diff = hw_ep->max_packet_sz_rx;
2046 diff = hw_ep->max_packet_sz_tx;
2063 hw_ep = musb->endpoints + epnum;
2065 txtype = (musb_readb(hw_ep->regs, MUSB_TXTYPE)
2077 hw_ep = musb->bulk_ep;
2104 hw_ep = musb->endpoints + best_end;
2112 qh->hw_ep = hw_ep;
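Lines 2032-2112 are the core of musb_schedule(): scan the non-control endpoints, skip any that already have a qh bound (2037) and the endpoint reserved for the bulk ring (2040), and keep the candidate whose FIFO exceeds the qh's max packet size by the smallest margin. Condensed (the TXTYPE quirk check at 2063-2065 is elided):

    for (epnum = 1, hw_ep = musb->endpoints + 1;
                    epnum < musb->nr_endpoints;
                    epnum++, hw_ep++) {
            int diff;

            if (musb_ep_get_qh(hw_ep, is_in) != NULL)
                    continue;               /* endpoint already claimed */
            if (hw_ep == musb->bulk_ep)
                    continue;               /* reserved for the bulk ring */

            if (is_in)
                    diff = hw_ep->max_packet_sz_rx;
            else
                    diff = hw_ep->max_packet_sz_tx;
            diff -= (qh->maxpacket * qh->hb_mult);

            if (diff >= 0 && best_diff > diff) {
                    best_diff = diff;       /* tightest fit so far */
                    best_end = epnum;
            }
    }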
2158 * hw_ep gets reprogrammed, or with irqs blocked. Then schedule it.
2316 struct musb_hw_ep *ep = qh->hw_ep;
2402 || musb_ep_get_qh(qh->hw_ep, is_in) != qh) {
2413 musb_ep_set_qh(qh->hw_ep, is_in, NULL);
2445 if (musb_ep_get_qh(qh->hw_ep, is_in) == qh) {
2456 * queue on hw_ep (e.g. bulk ring) when we're done.
2461 musb_advance_schedule(musb, urb, qh->hw_ep, is_in);
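The final block (2316-2461) is teardown: an URB is only torn down on the hardware if its qh is the one currently bound to the endpoint (2402, 2445); once a qh runs dry it is unbound via musb_ep_set_qh(..., NULL) at 2413 so musb_schedule() can hand the endpoint to another qh, while musb_advance_schedule() at 2461 gives the URB back and starts the next qh waiting on the same hw_ep, e.g. on the bulk ring (2456).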