Lines matching refs: dma
18 #include <linux/dma-mapping.h>
267 hw_ep->tx_channel ? "dma" : "pio");
335 struct dma_controller *dma = musb->dma_controller;
340 dma->channel_release(ep->rx_channel);
346 dma->channel_release(ep->tx_channel);
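The hits at 335-346 show the endpoint teardown path handing each endpoint's rx_channel and tx_channel back to the shared dma_controller. Below is a minimal stand-alone sketch of that ownership pattern. The struct dma_channel / struct dma_controller definitions here are simplified stand-ins for the real ones in the driver's musb_dma.h (which carry more state and more status values), and endpoint_stub is a hypothetical holder for the hw_ep->rx_channel / hw_ep->tx_channel fields seen in the hits; the stubs exist only so the sketches compile and run outside the kernel.

#include <stddef.h>
#include <stdio.h>

/* Simplified stand-ins for the MUSB DMA abstractions (assumption:
 * the real definitions in musb_dma.h are richer than this). */
enum dma_channel_status {
	MUSB_DMA_STATUS_UNKNOWN,	/* unallocated / no channel */
	MUSB_DMA_STATUS_FREE,		/* allocated, idle */
	MUSB_DMA_STATUS_BUSY,		/* transfer in flight */
	MUSB_DMA_STATUS_CORE_ABORT,	/* core asked for an abort */
};

struct dma_channel {
	enum dma_channel_status status;
	size_t actual_len;		/* bytes moved so far */
};

struct dma_controller {
	void (*channel_release)(struct dma_channel *ch);
	int  (*channel_abort)(struct dma_channel *ch);
	int  (*channel_program)(struct dma_channel *ch,
				unsigned short maxpacket, unsigned char mode,
				unsigned long long dma_addr,	/* stand-in for dma_addr_t */
				unsigned int length);
};

/* Hypothetical holder for the two per-endpoint channels. */
struct endpoint_stub {
	struct dma_channel *rx_channel;
	struct dma_channel *tx_channel;
};

/* Teardown pattern from the hits at 335-346: hand each channel the
 * endpoint still owns back to the controller, then forget it. */
static void endpoint_release_channels(struct dma_controller *ctrl,
				      struct endpoint_stub *ep)
{
	if (ep->rx_channel) {
		ctrl->channel_release(ep->rx_channel);
		ep->rx_channel = NULL;
	}
	if (ep->tx_channel) {
		ctrl->channel_release(ep->tx_channel);
		ep->tx_channel = NULL;
	}
}

/* Toy backend so the sketch actually runs. */
static void toy_release(struct dma_channel *ch)
{
	ch->status = MUSB_DMA_STATUS_UNKNOWN;
	printf("released channel %p\n", (void *)ch);
}

int main(void)
{
	struct dma_channel rx = { MUSB_DMA_STATUS_FREE, 0 };
	struct dma_channel tx = { MUSB_DMA_STATUS_FREE, 0 };
	struct dma_controller ctrl = { .channel_release = toy_release };
	struct endpoint_stub ep = { &rx, &tx };

	endpoint_release_channels(&ctrl, &ep);
	return 0;
}

The later sketches in this listing reuse these stub types rather than redefining them.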
393 * ignore dma (various models),
572 static void musb_tx_dma_set_mode_mentor(struct dma_controller *dma,
611 static void musb_tx_dma_set_mode_cppi_tusb(struct dma_controller *dma,
630 static bool musb_tx_dma_program(struct dma_controller *dma,
639 musb_tx_dma_set_mode_mentor(dma, hw_ep, qh, urb, offset,
642 musb_tx_dma_set_mode_cppi_tusb(dma, hw_ep, qh, urb, offset,
655 if (!dma->channel_program(channel, pkt_size, mode,
660 dma->channel_release(channel);
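The musb_tx_dma_program() hits at 630-660 show the TX-side bargain: set up the mode for the controller flavour (Mentor vs. CPPI/TUSB), try channel_program(), and if that fails release the channel so the caller falls back to PIO. A sketch of that shape on the stub types above; the mode selection is reduced to a pre-computed argument here, and in the driver the failure branch additionally detaches the channel from hw_ep and clears the DMA bits in TXCSR before returning false.

/* TX programming pattern from the hits at 630-660.  Returns nonzero
 * when DMA took the transfer, zero when the caller should load the
 * FIFO by hand (PIO) instead. */
static int sketch_tx_dma_program(struct dma_controller *ctrl,
				 struct dma_channel *channel,
				 unsigned short pkt_size, unsigned char mode,
				 unsigned long long buf_dma, unsigned int length)
{
	if (!ctrl->channel_program(channel, pkt_size, mode, buf_dma, length)) {
		/* Programming failed: give the channel back so this
		 * transfer is completed with PIO. */
		ctrl->channel_release(channel);
		return 0;
	}
	return 1;
}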
908 struct dma_channel *dma;
918 dma = is_dma_capable() ? ep->rx_channel : NULL;
934 dma = is_dma_capable() ? ep->tx_channel : NULL;
946 if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
947 dma->status = MUSB_DMA_STATUS_CORE_ABORT;
948 musb->dma_controller->channel_abort(dma);
949 urb->actual_length += dma->actual_len;
950 dma->actual_len = 0L;
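The abort idiom in the hits at 946-950 repeats almost verbatim at 1271-1273, 1836-1839, 1866-1869 and 2330-2335: when the channel is still busy, flag it MUSB_DMA_STATUS_CORE_ABORT, ask the controller to abort, fold the partial byte count into urb->actual_length, and reset actual_len. A compact sketch of the idiom on the stubs from the first sketch; struct urb_stub and the status helper are hypothetical stand-ins (the helper mirrors the driver's dma_channel_status(), where a missing channel never reads as busy).

/* Only the URB field the idiom touches. */
struct urb_stub {
	unsigned int actual_length;
};

/* Stand-in for the driver's dma_channel_status() helper. */
static enum dma_channel_status stub_channel_status(struct dma_channel *ch)
{
	return ch ? ch->status : MUSB_DMA_STATUS_UNKNOWN;
}

/* Abort-and-collect, mirroring the hits at 946-950. */
static void abort_and_collect(struct dma_controller *ctrl,
			      struct dma_channel *ch, struct urb_stub *urb)
{
	if (stub_channel_status(ch) == MUSB_DMA_STATUS_BUSY) {
		ch->status = MUSB_DMA_STATUS_CORE_ABORT;  /* core-initiated stop */
		ctrl->channel_abort(ch);
		urb->actual_length += ch->actual_len;     /* keep partial progress */
		ch->actual_len = 0;
	}
}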
1200 /* Service a Tx-Available or dma completion irq for the endpoint */
1214 struct dma_channel *dma;
1227 dma = is_dma_capable() ? hw_ep->tx_channel : NULL;
1230 dma ? ", dma" : "");
1234 /* dma was disabled, fifo flushed */
1241 /* (NON-ISO) dma was disabled, fifo flushed */
1255 * that could use this fifo. (dma complicates it...)
1271 if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
1272 dma->status = MUSB_DMA_STATUS_CORE_ABORT;
1273 musb->dma_controller->channel_abort(dma);
1277 * usb core; the dma engine should already be stopped.
1297 if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
1302 if (is_dma_capable() && dma && !status) {
1364 if (!status || dma || usb_pipeisoc(pipe)) {
1365 if (dma)
1366 length = dma->actual_len;
1384 } else if (dma && urb->transfer_buffer_length == qh->offset) {
1417 } else if ((usb_pipeisoc(pipe) || transfer_pending) && dma) {
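On the completion side of the TX IRQ path (hits 1364-1384), the byte count for the finished packet comes from the channel's actual_len rather than from the FIFO, and a DMA'd non-ISO URB is treated as done once the queue-head offset has caught up with urb->transfer_buffer_length. A sketch of that accounting step; qh_stub is a hypothetical stand-in for the one queue-head field involved, and pio_length stands in for the PIO-side count the driver takes from the queue head.

/* Hypothetical stand-in for the queue-head progress counter. */
struct qh_stub {
	unsigned int offset;		/* bytes of the URB handed out so far */
};

/* TX completion accounting after the hits at 1364-1384. */
static int tx_complete_sketch(struct dma_channel *dma, struct qh_stub *qh,
			      unsigned int transfer_buffer_length,
			      unsigned int pio_length)
{
	/* hit 1366: the channel, not the FIFO count, knows the length */
	unsigned int length = dma ? dma->actual_len : pio_length;

	qh->offset += length;

	/* hit 1384: with DMA, the URB is done once the offset reaches
	 * the transfer buffer length */
	return dma && qh->offset == transfer_buffer_length;
}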
1470 static int musb_rx_dma_iso_cppi41(struct dma_controller *dma,
1491 return dma->channel_program(channel, qh->maxpacket, 0,
1495 static inline int musb_rx_dma_iso_cppi41(struct dma_controller *dma,
1534 * (and that endpoint's dma queue stops immediately)
1541 static int musb_rx_dma_inventra_cppi41(struct dma_controller *dma,
1561 /* even if there was an error, we did the dma
1572 done = musb_rx_dma_iso_cppi41(dma, hw_ep, qh,
1611 static int musb_rx_dma_in_inventra_cppi41(struct dma_controller *dma,
1693 done = dma->channel_program(channel, qh->maxpacket,
1698 dma->channel_release(channel);
1711 static inline int musb_rx_dma_inventra_cppi41(struct dma_controller *dma,
1720 static inline int musb_rx_dma_in_inventra_cppi41(struct dma_controller *dma,
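The RX helpers at 1541-1698 end with the same bargain as the TX path: program the channel and, on failure, release it and detach it from the endpoint so the interrupt handler finishes the packet with PIO (hits 1693-1698). A sketch of that tail on the earlier stubs; in the driver the failure path also clears the DMA-related bits in RXCSR, which the stub register-less version omits.

/* RX tail from the hits at 1693-1698: returns nonzero ("done", handed
 * to DMA) on success; on failure the channel is released and detached
 * so this packet is drained by PIO. */
static int sketch_rx_dma_program(struct dma_controller *ctrl,
				 struct endpoint_stub *ep,
				 unsigned short maxpacket, unsigned char mode,
				 unsigned long long buf_dma, unsigned int length)
{
	struct dma_channel *channel = ep->rx_channel;
	int done;

	done = ctrl->channel_program(channel, maxpacket, mode, buf_dma, length);
	if (!done) {
		ctrl->channel_release(channel);
		ep->rx_channel = NULL;	/* forget the channel; PIO from here */
	}
	return done;
}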
1748 struct dma_channel *dma;
1754 dma = is_dma_capable() ? hw_ep->rx_channel : NULL;
1835 /* clean up dma and collect transfer count */
1836 if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
1837 dma->status = MUSB_DMA_STATUS_CORE_ABORT;
1838 musb->dma_controller->channel_abort(dma);
1839 xfer_len = dma->actual_len;
1847 if (unlikely(dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY)) {
1849 ERR("RX%d dma busy, csr %04x\n", epnum, rx_csr);
1863 * and also duplicates dma cleanup code above ... plus,
1866 if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
1867 dma->status = MUSB_DMA_STATUS_CORE_ABORT;
1868 musb->dma_controller->channel_abort(dma);
1869 xfer_len = dma->actual_len;
1874 xfer_len, dma ? ", dma" : "");
1882 if (dma && (rx_csr & MUSB_RXCSR_DMAENAB)) {
1883 xfer_len = dma->actual_len;
1895 "ep %d dma %s, rxcsr %04x, rxcount %d",
1921 musb_dma_cppi41(musb)) && dma) {
1937 if (!dma) {
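Each IRQ service routine above first does dma = is_dma_capable() ? hw_ep->..._channel : NULL (hits 918, 934, 1227, 1754), so in a PIO-only configuration, where that macro evaluates to 0 at compile time, every DMA branch collapses into a NULL check and dead code. A sketch of that gating with a stand-in macro (the real one lives in musb_dma.h); it reuses abort_and_collect() and the other stubs from the sketches above.

/* Stand-in for is_dma_capable(): assume it is a compile-time constant
 * that a PIO-only build turns into 0. */
#ifdef SKETCH_PIO_ONLY
# define sketch_is_dma_capable()	0
#else
# define sketch_is_dma_capable()	1
#endif

/* RX IRQ entry shaped like the hits at 1748-1754. */
static void sketch_service_rx_irq(struct dma_controller *ctrl,
				  struct endpoint_stub *ep,
				  struct urb_stub *urb)
{
	struct dma_channel *dma =
		sketch_is_dma_capable() ? ep->rx_channel : NULL;

	/* "clean up dma and collect transfer count" (hits 1835-1839);
	 * a NULL channel is simply skipped inside the helper */
	abort_and_collect(ctrl, dma, urb);

	/* ... FIFO/PIO handling would follow here ... */
}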
2275 * until we get real dma queues (with an entry for each urb/buffer),
2324 struct dma_channel *dma = NULL;
2329 dma = is_in ? ep->rx_channel : ep->tx_channel;
2330 if (dma) {
2331 status = ep->musb->dma_controller->channel_abort(dma);
2335 urb->actual_length += dma->actual_len;
2345 if (is_dma_capable() && dma)
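Finally, the dequeue path at 2324-2345 applies the same abort-and-collect step when an URB is pulled while a transfer is in flight, picking the channel by direction (hit 2329) and accumulating dma->actual_len into urb->actual_length (hits 2331-2335). As a usage example of the helpers sketched above:

/* Dequeue-time cleanup shaped like the hits at 2324-2345. */
static void sketch_cleanup_urb(struct dma_controller *ctrl,
			       struct endpoint_stub *ep,
			       struct urb_stub *urb, int is_in)
{
	struct dma_channel *dma = NULL;

	if (sketch_is_dma_capable())
		dma = is_in ? ep->rx_channel : ep->tx_channel;	/* hit 2329 */

	/* hits 2331-2335: abort the in-flight transfer and keep the
	 * number of bytes it did manage to move */
	abort_and_collect(ctrl, dma, urb);
}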