Lines Matching defs:dma
25 #include <linux/dma-mapping.h>
155 struct s3c24xx_uart_dma *dma;
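The pointer at line 155 is the per-port DMA state. The fields dereferenced throughout this listing imply a structure roughly like the sketch below; the grouping, and any field not actually referenced here, are assumptions:

	struct s3c24xx_uart_dma {
		struct dma_slave_config		rx_conf;	/* lines 1057-1060 */
		struct dma_slave_config		tx_conf;	/* lines 1062-1065 */

		struct dma_chan			*rx_chan;
		struct dma_chan			*tx_chan;

		dma_addr_t			rx_addr;	/* mapped rx_buf */
		dma_addr_t			tx_addr;	/* mapped circ buffer */
		dma_addr_t			tx_transfer_addr; /* tx_addr + xmit->tail */

		dma_cookie_t			rx_cookie;
		dma_cookie_t			tx_cookie;

		char				*rx_buf;	/* PAGE_SIZE bounce buffer */

		size_t				rx_size;
		size_t				tx_size;

		struct dma_async_tx_descriptor	*rx_desc;
		struct dma_async_tx_descriptor	*tx_desc;

		int				rx_bytes_requested;
		int				tx_bytes_requested;
	};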
287 struct s3c24xx_uart_dma *dma = ourport->dma;
306 if (dma && dma->tx_chan && ourport->tx_in_progress == S3C24XX_TX_DMA) {
307 dmaengine_pause(dma->tx_chan);
308 dmaengine_tx_status(dma->tx_chan, dma->tx_cookie, &state);
309 dmaengine_terminate_all(dma->tx_chan);
310 dma_sync_single_for_cpu(dma->tx_chan->device->dev,
311 dma->tx_transfer_addr, dma->tx_size,
313 async_tx_ack(dma->tx_desc);
314 count = dma->tx_bytes_requested - state.residue;
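Lines 306-314 are the standard dmaengine recipe for cancelling an in-flight transfer without losing track of what already went out: pause the channel so the residue stops moving, snapshot it, terminate, hand the buffer back to the CPU, and ack the descriptor. A minimal sketch, assuming the usual circ-buf bookkeeping as the consumer of count:

	struct dma_tx_state state;
	int count;

	dmaengine_pause(dma->tx_chan);
	dmaengine_tx_status(dma->tx_chan, dma->tx_cookie, &state);
	dmaengine_terminate_all(dma->tx_chan);
	dma_sync_single_for_cpu(dma->tx_chan->device->dev,
				dma->tx_transfer_addr, dma->tx_size,
				DMA_TO_DEVICE);
	async_tx_ack(dma->tx_desc);
	count = dma->tx_bytes_requested - state.residue;

	/* Assumed consumer: credit the bytes the controller did send.
	 * xmit == &port->state->xmit (assumed context). */
	xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
	port->icount.tx += count;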
334 struct s3c24xx_uart_dma *dma = ourport->dma;
339 dmaengine_tx_status(dma->tx_chan, dma->tx_cookie, &state);
340 count = dma->tx_bytes_requested - state.residue;
341 async_tx_ack(dma->tx_desc);
343 dma_sync_single_for_cpu(dma->tx_chan->device->dev,
344 dma->tx_transfer_addr, dma->tx_size,
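Lines 334-344 sit in the TX completion callback (wired up at line 459), where the transfer finished on its own, so the residue is read without pausing or terminating first. A hedged reconstruction of the callback; the locking, the wakeup, and the next-chunk kick are assumptions:

	static void s3c24xx_serial_tx_dma_complete(void *args)
	{
		struct s3c24xx_uart_port *ourport = args;
		struct uart_port *port = &ourport->port;
		struct s3c24xx_uart_dma *dma = ourport->dma;
		struct circ_buf *xmit = &port->state->xmit;
		struct dma_tx_state state;
		unsigned long flags;
		int count;

		dmaengine_tx_status(dma->tx_chan, dma->tx_cookie, &state);
		count = dma->tx_bytes_requested - state.residue;
		async_tx_ack(dma->tx_desc);

		dma_sync_single_for_cpu(dma->tx_chan->device->dev,
					dma->tx_transfer_addr, dma->tx_size,
					DMA_TO_DEVICE);

		spin_lock_irqsave(&port->lock, flags);
		xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
		port->icount.tx += count;

		if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
			uart_write_wakeup(port);

		/* Assumed helper: queue the next chunk, if any. */
		s3c24xx_serial_start_next_tx(ourport);
		spin_unlock_irqrestore(&port->lock, flags);
	}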
377 /* Enable tx dma mode */
439 struct s3c24xx_uart_dma *dma = ourport->dma;
444 dma->tx_size = count & ~(dma_get_cache_alignment() - 1);
445 dma->tx_transfer_addr = dma->tx_addr + xmit->tail;
447 dma_sync_single_for_device(dma->tx_chan->device->dev,
448 dma->tx_transfer_addr, dma->tx_size,
451 dma->tx_desc = dmaengine_prep_slave_single(dma->tx_chan,
452 dma->tx_transfer_addr, dma->tx_size,
454 if (!dma->tx_desc) {
459 dma->tx_desc->callback = s3c24xx_serial_tx_dma_complete;
460 dma->tx_desc->callback_param = ourport;
461 dma->tx_bytes_requested = dma->tx_size;
464 dma->tx_cookie = dmaengine_submit(dma->tx_desc);
465 dma_async_issue_pending(dma->tx_chan);
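Lines 444-465 are the four-step slave-DMA submit: round the length down to a cache-line boundary (line 444) so the sync never covers a partial line, sync the buffer to the device, prep a descriptor with a completion callback, then submit and issue. The failure branch at line 454 is only partly listed; a sketch of it, with the PIO fallback helper name as an assumption:

	dma->tx_desc = dmaengine_prep_slave_single(dma->tx_chan,
			dma->tx_transfer_addr, dma->tx_size,
			DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
	if (!dma->tx_desc) {
		dev_err(ourport->port.dev, "Unable to get desc for Tx\n");
		/* Degrade to interrupt-driven PIO instead of dropping
		 * the data (assumed fallback helper). */
		s3c24xx_serial_start_tx_pio(ourport);
		return;
	}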
483 if (!ourport->dma || !ourport->dma->tx_chan ||
501 if (!ourport->dma || !ourport->dma->tx_chan)
505 if (ourport->dma && ourport->dma->tx_chan) {
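Lines 483-505 gate the choice between PIO and DMA on whether a TX channel exists at all; combined with the alignment mask at line 444, the dispatcher plausibly looks like the sketch below. min_dma_size, the helper names, and the exact threshold test are assumptions:

	static void s3c24xx_serial_start_next_tx(struct s3c24xx_uart_port *ourport)
	{
		struct uart_port *port = &ourport->port;
		struct circ_buf *xmit = &port->state->xmit;
		unsigned long count;

		/* Contiguous bytes up to the end of the circular buffer. */
		count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
		if (!count) {
			s3c24xx_serial_stop_tx(port);
			return;
		}

		/* DMA only pays off for large, cache-aligned chunks. */
		if (!ourport->dma || !ourport->dma->tx_chan ||
		    count < ourport->min_dma_size ||
		    xmit->tail & (dma_get_cache_alignment() - 1))
			s3c24xx_serial_start_tx_pio(ourport);
		else
			s3c24xx_serial_start_tx_dma(ourport, count);
	}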
514 struct s3c24xx_uart_dma *dma = ourport->dma;
520 dma_sync_single_for_cpu(dma->rx_chan->device->dev, dma->rx_addr,
521 dma->rx_size, DMA_FROM_DEVICE);
529 ((unsigned char *)(ourport->dma->rx_buf)), count);
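Lines 520-529 hand received bytes to the tty layer: sync the bounce buffer back for the CPU first, then push it into the flip buffer. A sketch of the surrounding helper; the name and the handling of a short copy are assumptions:

	static void s3c24xx_uart_copy_rx_to_tty(struct s3c24xx_uart_port *ourport,
						struct tty_port *tty, int count)
	{
		struct s3c24xx_uart_dma *dma = ourport->dma;
		int copied;

		if (!count)
			return;

		/* Make the controller's writes visible to the CPU. */
		dma_sync_single_for_cpu(dma->rx_chan->device->dev, dma->rx_addr,
					dma->rx_size, DMA_FROM_DEVICE);

		ourport->port.icount.rx += count;
		copied = tty_insert_flip_string(tty,
				(unsigned char *)dma->rx_buf, count);
		if (copied != count)
			dev_err(ourport->port.dev,
				"RX: tty buffer full, %d bytes dropped\n",
				count - copied);
	}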
539 struct s3c24xx_uart_dma *dma = ourport->dma;
562 if (dma && dma->rx_chan) {
563 dmaengine_pause(dma->rx_chan);
564 dma_status = dmaengine_tx_status(dma->rx_chan,
565 dma->rx_cookie, &state);
568 received = dma->rx_bytes_requested - state.residue;
569 dmaengine_terminate_all(dma->rx_chan);
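Lines 562-569 mirror the TX teardown on the receive side: pause the RX channel so the residue is stable, read how many of the requested bytes actually arrived, then terminate. A hedged sketch of the whole branch; the status check and the push of the already-received bytes are assumptions consistent with the copy helper above:

	/* 't' is the tty_port; dma_status, state and received are
	 * declared earlier in the function (assumed context). */
	if (dma && dma->rx_chan) {
		dmaengine_pause(dma->rx_chan);
		dma_status = dmaengine_tx_status(dma->rx_chan,
						 dma->rx_cookie, &state);
		if (dma_status == DMA_IN_PROGRESS ||
		    dma_status == DMA_PAUSED) {
			received = dma->rx_bytes_requested - state.residue;
			dmaengine_terminate_all(dma->rx_chan);
			s3c24xx_uart_copy_rx_to_tty(ourport, t, received);
		}
	}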
610 struct s3c24xx_uart_dma *dma = ourport->dma;
618 dmaengine_tx_status(dma->rx_chan, dma->rx_cookie, &state);
619 received = dma->rx_bytes_requested - state.residue;
620 async_tx_ack(dma->rx_desc);
639 struct s3c24xx_uart_dma *dma = ourport->dma;
641 dma_sync_single_for_device(dma->rx_chan->device->dev, dma->rx_addr,
642 dma->rx_size, DMA_FROM_DEVICE);
644 dma->rx_desc = dmaengine_prep_slave_single(dma->rx_chan,
645 dma->rx_addr, dma->rx_size, DMA_DEV_TO_MEM,
647 if (!dma->rx_desc) {
652 dma->rx_desc->callback = s3c24xx_serial_rx_dma_complete;
653 dma->rx_desc->callback_param = ourport;
654 dma->rx_bytes_requested = dma->rx_size;
656 dma->rx_cookie = dmaengine_submit(dma->rx_desc);
657 dma_async_issue_pending(dma->rx_chan);
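Lines 610-620 and 639-657 form a continuous receive loop: the completion callback (wired up at line 652) computes what arrived, pushes it to the tty, and immediately re-arms the next transfer into the same bounce buffer so reception never stalls. A sketch of the cycle; the locking and the tty_port lookup are assumptions:

	static void s3c24xx_serial_rx_dma_complete(void *args)
	{
		struct s3c24xx_uart_port *ourport = args;
		struct uart_port *port = &ourport->port;
		struct s3c24xx_uart_dma *dma = ourport->dma;
		struct tty_port *t = &port->state->port;
		struct dma_tx_state state;
		unsigned long flags;
		int received;

		dmaengine_tx_status(dma->rx_chan, dma->rx_cookie, &state);
		received = dma->rx_bytes_requested - state.residue;
		async_tx_ack(dma->rx_desc);

		spin_lock_irqsave(&port->lock, flags);
		if (received) {
			s3c24xx_uart_copy_rx_to_tty(ourport, t, received);
			tty_flip_buffer_push(t);
		}

		/* Re-arm: queue the next rx_size transfer right away
		 * (assumed name for the enqueue function at line 639). */
		s3c64xx_start_rx_dma(ourport);
		spin_unlock_irqrestore(&port->lock, flags);
	}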
717 struct s3c24xx_uart_dma *dma = ourport->dma;
735 dmaengine_pause(dma->rx_chan);
736 dmaengine_tx_status(dma->rx_chan, dma->rx_cookie, &state);
737 dmaengine_terminate_all(dma->rx_chan);
738 received = dma->rx_bytes_requested - state.residue;
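Lines 735-738 run from the RX interrupt when the controller signals a receive timeout: bytes are sitting in the FIFO but the DMA burst never completed, so the in-flight transfer is torn down with the same pause/status/terminate sequence, the residue yields the DMA byte count, and the FIFO remainder is drained by PIO. A hedged sketch of that branch; the drain helper is an assumed name:

	/* RX timeout with DMA active: claim what DMA already moved,
	 * then pick the stragglers out of the FIFO by hand. */
	dmaengine_pause(dma->rx_chan);
	dmaengine_tx_status(dma->rx_chan, dma->rx_cookie, &state);
	dmaengine_terminate_all(dma->rx_chan);
	received = dma->rx_bytes_requested - state.residue;
	if (received)
		s3c24xx_uart_copy_rx_to_tty(ourport, t, received);

	s3c24xx_serial_rx_drain_fifo(ourport);	/* assumed PIO drain helper */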
863 if (ourport->dma && ourport->dma->rx_chan)
876 if (ourport->dma && ourport->dma->tx_chan &&
1051 struct s3c24xx_uart_dma *dma = p->dma;
1057 dma->rx_conf.direction = DMA_DEV_TO_MEM;
1058 dma->rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
1059 dma->rx_conf.src_addr = p->port.mapbase + S3C2410_URXH;
1060 dma->rx_conf.src_maxburst = 1;
1062 dma->tx_conf.direction = DMA_MEM_TO_DEV;
1063 dma->tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
1064 dma->tx_conf.dst_addr = p->port.mapbase + S3C2410_UTXH;
1065 dma->tx_conf.dst_maxburst = 1;
1067 dma->rx_chan = dma_request_chan(p->port.dev, "rx");
1069 if (IS_ERR(dma->rx_chan)) {
1071 ret = PTR_ERR(dma->rx_chan);
1075 ret = dma_get_slave_caps(dma->rx_chan, &dma_caps);
1083 dmaengine_slave_config(dma->rx_chan, &dma->rx_conf);
1085 dma->tx_chan = dma_request_chan(p->port.dev, "tx");
1086 if (IS_ERR(dma->tx_chan)) {
1088 ret = PTR_ERR(dma->tx_chan);
1092 ret = dma_get_slave_caps(dma->tx_chan, &dma_caps);
1100 dmaengine_slave_config(dma->tx_chan, &dma->tx_conf);
1103 dma->rx_size = PAGE_SIZE;
1105 dma->rx_buf = kmalloc(dma->rx_size, GFP_KERNEL);
1106 if (!dma->rx_buf) {
1111 dma->rx_addr = dma_map_single(dma->rx_chan->device->dev, dma->rx_buf,
1112 dma->rx_size, DMA_FROM_DEVICE);
1113 if (dma_mapping_error(dma->rx_chan->device->dev, dma->rx_addr)) {
1120 dma->tx_addr = dma_map_single(dma->tx_chan->device->dev,
1123 if (dma_mapping_error(dma->tx_chan->device->dev, dma->tx_addr)) {
1132 dma_unmap_single(dma->rx_chan->device->dev, dma->rx_addr,
1133 dma->rx_size, DMA_FROM_DEVICE);
1135 kfree(dma->rx_buf);
1137 dma_release_channel(dma->tx_chan);
1139 dma_release_channel(dma->rx_chan);
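Lines 1051-1139 are the one-time setup: fill both dma_slave_config structs for single-byte, single-beat transfers against the URXH/UTXH FIFO registers, request and configure each channel, then map the buffers. Two details worth noting: RX gets a dedicated PAGE_SIZE bounce buffer (lines 1103-1112), while TX maps the UART's own circular buffer, which is why line 445 can compute tx_transfer_addr as tx_addr + xmit->tail. The error-path fragments at 1132-1139 reveal a goto-unwind chain releasing everything in reverse order of acquisition; a sketch with assumed label names:

	/* Failure mapping the TX buffer is the deepest error point. */
	if (dma_mapping_error(dma->tx_chan->device->dev, dma->tx_addr)) {
		ret = -EIO;
		goto err_unmap_rx;
	}

	return 0;

err_unmap_rx:
	dma_unmap_single(dma->rx_chan->device->dev, dma->rx_addr,
			 dma->rx_size, DMA_FROM_DEVICE);
err_free_rx_buf:
	kfree(dma->rx_buf);
err_release_tx:
	dma_release_channel(dma->tx_chan);
err_release_rx:
	dma_release_channel(dma->rx_chan);

	return ret;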
1148 struct s3c24xx_uart_dma *dma = p->dma;
1150 if (dma->rx_chan) {
1151 dmaengine_terminate_all(dma->rx_chan);
1152 dma_unmap_single(dma->rx_chan->device->dev, dma->rx_addr,
1153 dma->rx_size, DMA_FROM_DEVICE);
1154 kfree(dma->rx_buf);
1155 dma_release_channel(dma->rx_chan);
1156 dma->rx_chan = NULL;
1159 if (dma->tx_chan) {
1160 dmaengine_terminate_all(dma->tx_chan);
1161 dma_unmap_single(dma->tx_chan->device->dev, dma->tx_addr,
1163 dma_release_channel(dma->tx_chan);
1164 dma->tx_chan = NULL;
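Lines 1150-1164 tear down each side in the one safe order: terminate any in-flight transfer before touching the mapping, then unmap, free, release the channel, and finally NULL the pointer, so dma->rx_chan and dma->tx_chan double as the "DMA is usable" flags tested at lines 863, 876 and elsewhere. The TX side in full; the size and direction arguments on the unlisted line 1162 are assumed to mirror the mapping:

	if (dma->tx_chan) {
		dmaengine_terminate_all(dma->tx_chan);
		dma_unmap_single(dma->tx_chan->device->dev, dma->tx_addr,
				 UART_XMIT_SIZE, DMA_TO_DEVICE);
		dma_release_channel(dma->tx_chan);
		dma->tx_chan = NULL;
	}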
1185 if (ourport->dma)
1204 if (ourport->dma)
1230 if (ourport->dma)
1286 if (ourport->dma) {
1289 devm_kfree(port->dev, ourport->dma);
1290 ourport->dma = NULL;
1972 ourport->dma = devm_kzalloc(port->dev,
1973 sizeof(*ourport->dma),
1975 if (!ourport->dma) {
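Lines 1972-1975 allocate the DMA state at probe time with devm_kzalloc, so it is freed automatically with the device and starts zeroed, leaving both channel pointers NULL until the channels are requested. A sketch of the probe-time guard; the "dmas" device-tree property check and the unwind label are assumptions about when allocation is even attempted:

	if (of_find_property(pdev->dev.of_node, "dmas", NULL)) {
		ourport->dma = devm_kzalloc(port->dev,
					    sizeof(*ourport->dma),
					    GFP_KERNEL);
		if (!ourport->dma) {
			ret = -ENOMEM;
			goto err_disable_clk;	/* assumed label */
		}
	}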