Lines Matching defs:tdc

177 typedef void (*dma_isr_handler)(struct tegra_dma_channel *tdc,
241 static inline void tdc_write(struct tegra_dma_channel *tdc,
244 writel(val, tdc->chan_addr + reg);
247 static inline u32 tdc_read(struct tegra_dma_channel *tdc, u32 reg)
249 return readl(tdc->chan_addr + reg);
263 static inline struct device *tdc2dev(struct tegra_dma_channel *tdc)
265 return &tdc->dma_chan.dev->device;
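
The three helpers above give each channel its own MMIO window (tdc->chan_addr) and a struct device for logging. A hypothetical helper, not in the driver, shows how tdc_read()/tdc_write() compose for a read-modify-write:

    /* Hypothetical, for illustration only: read-modify-write of a
     * channel register via the accessors defined at the lines above. */
    static inline void tdc_update(struct tegra_dma_channel *tdc,
                                  u32 reg, u32 mask, u32 val)
    {
            u32 v = tdc_read(tdc, reg);     /* readl(tdc->chan_addr + reg) */

            v = (v & ~mask) | (val & mask);
            tdc_write(tdc, reg, v);         /* writel() back to the window */
    }
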
271 static struct tegra_dma_desc *tegra_dma_desc_get(struct tegra_dma_channel *tdc)
276 spin_lock_irqsave(&tdc->lock, flags);
279 list_for_each_entry(dma_desc, &tdc->free_dma_desc, node) {
282 spin_unlock_irqrestore(&tdc->lock, flags);
288 spin_unlock_irqrestore(&tdc->lock, flags);
295 dma_async_tx_descriptor_init(&dma_desc->txd, &tdc->dma_chan);
302 static void tegra_dma_desc_put(struct tegra_dma_channel *tdc,
307 spin_lock_irqsave(&tdc->lock, flags);
309 list_splice_init(&dma_desc->tx_list, &tdc->free_sg_req);
310 list_add_tail(&dma_desc->node, &tdc->free_dma_desc);
311 spin_unlock_irqrestore(&tdc->lock, flags);
315 tegra_dma_sg_req_get(struct tegra_dma_channel *tdc)
320 spin_lock_irqsave(&tdc->lock, flags);
321 if (!list_empty(&tdc->free_sg_req)) {
322 sg_req = list_first_entry(&tdc->free_sg_req, typeof(*sg_req),
325 spin_unlock_irqrestore(&tdc->lock, flags);
328 spin_unlock_irqrestore(&tdc->lock, flags);
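
tegra_dma_desc_get()/_put() and tegra_dma_sg_req_get() recycle completed descriptors and sg requests through per-channel free lists rather than allocating on every transfer. A condensed sketch of the sg-req get side (the descriptor variant additionally checks DMA_CTRL_ACK before reuse):

    spin_lock_irqsave(&tdc->lock, flags);
    if (!list_empty(&tdc->free_sg_req)) {
            /* Fast path: reuse a recycled request under the channel lock. */
            sg_req = list_first_entry(&tdc->free_sg_req,
                                      typeof(*sg_req), node);
            list_del(&sg_req->node);
            spin_unlock_irqrestore(&tdc->lock, flags);
            return sg_req;
    }
    spin_unlock_irqrestore(&tdc->lock, flags);

    /* Slow path: atomic allocation, as prep hooks may run in atomic context. */
    return kzalloc(sizeof(*sg_req), GFP_NOWAIT);
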
338 struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
340 if (!list_empty(&tdc->pending_sg_req)) {
341 dev_err(tdc2dev(tdc), "Configuration not allowed\n");
345 memcpy(&tdc->dma_sconfig, sconfig, sizeof(*sconfig));
346 if (tdc->slave_id == TEGRA_APBDMA_SLAVE_ID_INVALID &&
350 tdc->slave_id = sconfig->slave_id;
352 tdc->config_init = true;
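
tegra_dma_slave_config() refuses reconfiguration while requests are pending and latches an optional slave ID. A consumer reaches it through the standard dmaengine call; a minimal sketch, where chan and fifo_phys are hypothetical client variables:

    struct dma_slave_config cfg = {
            .dst_addr       = fifo_phys,    /* hypothetical APB FIFO address */
            .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
            .dst_maxburst   = 8,
    };
    int ret = dmaengine_slave_config(chan, &cfg); /* -> tegra_dma_slave_config() */
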
357 static void tegra_dma_global_pause(struct tegra_dma_channel *tdc,
360 struct tegra_dma *tdma = tdc->tdma;
364 if (tdc->tdma->global_pause_count == 0) {
370 tdc->tdma->global_pause_count++;
375 static void tegra_dma_global_resume(struct tegra_dma_channel *tdc)
377 struct tegra_dma *tdma = tdc->tdma;
381 if (WARN_ON(tdc->tdma->global_pause_count == 0))
384 if (--tdc->tdma->global_pause_count == 0)
392 static void tegra_dma_pause(struct tegra_dma_channel *tdc,
395 struct tegra_dma *tdma = tdc->tdma;
398 tdc_write(tdc, TEGRA_APBDMA_CHAN_CSRE,
403 tegra_dma_global_pause(tdc, wait_for_burst_complete);
407 static void tegra_dma_resume(struct tegra_dma_channel *tdc)
409 struct tegra_dma *tdma = tdc->tdma;
412 tdc_write(tdc, TEGRA_APBDMA_CHAN_CSRE, 0);
414 tegra_dma_global_resume(tdc);
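
tegra_dma_global_pause()/_resume() refcount the controller-wide pause so overlapping pausers on different channels cannot re-enable the hardware early; tegra_dma_pause()/_resume() use the per-channel CSRE register instead on chips that support channel pause. The refcount core, condensed (serialization under tdma->global_lock omitted):

    /* pause side: only the first pauser touches the hardware */
    if (tdc->tdma->global_pause_count++ == 0)
            tdma_write(tdma, TEGRA_APBDMA_GENERAL, 0);   /* stop all channels */

    /* resume side: only the last resumer re-enables */
    if (--tdc->tdma->global_pause_count == 0)
            tdma_write(tdma, TEGRA_APBDMA_GENERAL,
                       TEGRA_APBDMA_GENERAL_ENABLE);
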
417 static void tegra_dma_stop(struct tegra_dma_channel *tdc)
422 csr = tdc_read(tdc, TEGRA_APBDMA_CHAN_CSR);
424 tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, csr);
428 tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, csr);
431 status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
433 dev_dbg(tdc2dev(tdc), "%s():clearing interrupt\n", __func__);
434 tdc_write(tdc, TEGRA_APBDMA_CHAN_STATUS, status);
436 tdc->busy = false;
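
tegra_dma_stop() masks the channel's EOC interrupt before clearing the enable bit, then acknowledges any status that raced in, so a stale EOC cannot fire once the channel is considered idle. The sequence, reassembled from the lines above:

    csr = tdc_read(tdc, TEGRA_APBDMA_CHAN_CSR);
    csr &= ~TEGRA_APBDMA_CSR_IE_EOC;        /* 1. mask the interrupt */
    tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, csr);
    csr &= ~TEGRA_APBDMA_CSR_ENB;           /* 2. stop the channel */
    tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, csr);
    status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
    if (status & TEGRA_APBDMA_STATUS_ISE_EOC)
            tdc_write(tdc, TEGRA_APBDMA_CHAN_STATUS, status); /* 3. ack race */
    tdc->busy = false;
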
439 static void tegra_dma_start(struct tegra_dma_channel *tdc,
444 tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, ch_regs->csr);
445 tdc_write(tdc, TEGRA_APBDMA_CHAN_APBSEQ, ch_regs->apb_seq);
446 tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, ch_regs->apb_ptr);
447 tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBSEQ, ch_regs->ahb_seq);
448 tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, ch_regs->ahb_ptr);
449 if (tdc->tdma->chip_data->support_separate_wcount_reg)
450 tdc_write(tdc, TEGRA_APBDMA_CHAN_WCOUNT, ch_regs->wcount);
453 tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR,
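
tegra_dma_start() programs APBSEQ/APBPTR/AHBSEQ/AHBPTR (plus WCOUNT on chips with a separate word-count register) first, and sets the enable bit only in that final CSR write, so the engine can never start from a half-programmed state:

    tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR,
              ch_regs->csr | TEGRA_APBDMA_CSR_ENB);  /* last write starts DMA */
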
457 static void tegra_dma_configure_for_next(struct tegra_dma_channel *tdc,
473 tegra_dma_pause(tdc, false);
474 status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
481 dev_err(tdc2dev(tdc),
483 tegra_dma_resume(tdc);
488 tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, nsg_req->ch_regs.apb_ptr);
489 tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, nsg_req->ch_regs.ahb_ptr);
490 if (tdc->tdma->chip_data->support_separate_wcount_reg)
491 tdc_write(tdc, TEGRA_APBDMA_CHAN_WCOUNT,
493 tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR,
498 tegra_dma_resume(tdc);
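
tegra_dma_configure_for_next() hot-swaps the next buffer into a running channel: pause without waiting for the burst, bail out if the in-flight buffer already raised EOC (reprogramming would come too late), otherwise rewrite the pointers and re-arm. Condensed; the error message is abbreviated here:

    tegra_dma_pause(tdc, false);
    status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
    if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
            /* Too late: the ISR will reprogram via its normal path. */
            dev_err(tdc2dev(tdc), "Skipping new configuration\n");
            tegra_dma_resume(tdc);
            return;
    }
    tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, nsg_req->ch_regs.apb_ptr);
    tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, nsg_req->ch_regs.ahb_ptr);
    tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR,
              nsg_req->ch_regs.csr | TEGRA_APBDMA_CSR_ENB);
    nsg_req->configured = true;
    tegra_dma_resume(tdc);
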
501 static void tdc_start_head_req(struct tegra_dma_channel *tdc)
505 sg_req = list_first_entry(&tdc->pending_sg_req, typeof(*sg_req), node);
506 tegra_dma_start(tdc, sg_req);
509 tdc->busy = true;
512 static void tdc_configure_next_head_desc(struct tegra_dma_channel *tdc)
516 hsgreq = list_first_entry(&tdc->pending_sg_req, typeof(*hsgreq), node);
517 if (!list_is_last(&hsgreq->node, &tdc->pending_sg_req)) {
520 tegra_dma_configure_for_next(tdc, hnsgreq);
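
tdc_start_head_req() and tdc_configure_next_head_desc() together implement the double buffering: start the head of pending_sg_req, then pre-load its successor while the hardware runs. The head-start body, as in the upstream driver:

    sg_req = list_first_entry(&tdc->pending_sg_req, typeof(*sg_req), node);
    tegra_dma_start(tdc, sg_req);
    sg_req->configured = true;      /* satisfies the continuous-mode check */
    sg_req->words_xferred = 0;
    tdc->busy = true;
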
525 get_current_xferred_count(struct tegra_dma_channel *tdc,
532 static void tegra_dma_abort_all(struct tegra_dma_channel *tdc)
537 while (!list_empty(&tdc->pending_sg_req)) {
538 sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq),
540 list_move_tail(&sgreq->node, &tdc->free_sg_req);
544 list_add_tail(&dma_desc->node, &tdc->free_dma_desc);
549 &tdc->cb_desc);
553 tdc->isr_handler = NULL;
556 static bool handle_continuous_head_request(struct tegra_dma_channel *tdc,
566 hsgreq = list_first_entry(&tdc->pending_sg_req, typeof(*hsgreq), node);
568 tegra_dma_stop(tdc);
569 pm_runtime_put(tdc->tdma->dev);
570 dev_err(tdc2dev(tdc), "DMA transfer underflow, aborting DMA\n");
571 tegra_dma_abort_all(tdc);
577 tdc_configure_next_head_desc(tdc);
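
handle_continuous_head_request() enforces the cyclic-mode invariant behind that flag: the head request must already be configured into the hardware by the time the previous one completes; if not, the engine has run past valid buffers and everything is torn down. Condensed from the upstream function:

    hsgreq = list_first_entry(&tdc->pending_sg_req, typeof(*hsgreq), node);
    if (!hsgreq->configured) {
            /* Underflow: the next buffer was never programmed. */
            tegra_dma_stop(tdc);
            pm_runtime_put(tdc->tdma->dev);
            dev_err(tdc2dev(tdc), "DMA transfer underflow, aborting DMA\n");
            tegra_dma_abort_all(tdc);
            return false;
    }
    if (!to_terminate)
            tdc_configure_next_head_desc(tdc);   /* keep the pipeline primed */
    return true;
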
582 static void handle_once_dma_done(struct tegra_dma_channel *tdc,
588 tdc->busy = false;
589 sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq), node);
598 list_add_tail(&dma_desc->cb_node, &tdc->cb_desc);
600 list_add_tail(&dma_desc->node, &tdc->free_dma_desc);
602 list_add_tail(&sgreq->node, &tdc->free_sg_req);
608 if (list_empty(&tdc->pending_sg_req)) {
609 pm_runtime_put(tdc->tdma->dev);
613 tdc_start_head_req(tdc);
616 static void handle_cont_sngl_cycle_dma_done(struct tegra_dma_channel *tdc,
623 sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq), node);
632 list_add_tail(&dma_desc->cb_node, &tdc->cb_desc);
638 if (!list_is_last(&sgreq->node, &tdc->pending_sg_req)) {
639 list_move_tail(&sgreq->node, &tdc->pending_sg_req);
641 st = handle_continuous_head_request(tdc, to_terminate);
649 struct tegra_dma_channel *tdc = from_tasklet(tdc, t, tasklet);
655 spin_lock_irqsave(&tdc->lock, flags);
656 while (!list_empty(&tdc->cb_desc)) {
657 dma_desc = list_first_entry(&tdc->cb_desc, typeof(*dma_desc),
663 trace_tegra_dma_complete_cb(&tdc->dma_chan, cb_count,
665 spin_unlock_irqrestore(&tdc->lock, flags);
668 spin_lock_irqsave(&tdc->lock, flags);
670 spin_unlock_irqrestore(&tdc->lock, flags);
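
tegra_dma_tasklet() recovers its channel with from_tasklet() (a container_of() on the embedded tasklet_struct) and drains tdc->cb_desc, dropping the lock around each client callback because callbacks routinely resubmit descriptors and re-enter the driver. The drain loop, simplified (per-descriptor callback counting omitted):

    spin_lock_irqsave(&tdc->lock, flags);
    while (!list_empty(&tdc->cb_desc)) {
            dma_desc = list_first_entry(&tdc->cb_desc,
                                        typeof(*dma_desc), cb_node);
            list_del(&dma_desc->cb_node);
            spin_unlock_irqrestore(&tdc->lock, flags);
            /* Client callback runs unlocked and may resubmit work. */
            dmaengine_desc_get_callback_invoke(&dma_desc->txd, NULL);
            spin_lock_irqsave(&tdc->lock, flags);
    }
    spin_unlock_irqrestore(&tdc->lock, flags);
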
675 struct tegra_dma_channel *tdc = dev_id;
678 spin_lock(&tdc->lock);
680 trace_tegra_dma_isr(&tdc->dma_chan, irq);
681 status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
683 tdc_write(tdc, TEGRA_APBDMA_CHAN_STATUS, status);
684 tdc->isr_handler(tdc, false);
685 tasklet_schedule(&tdc->tasklet);
686 wake_up_all(&tdc->wq);
687 spin_unlock(&tdc->lock);
691 spin_unlock(&tdc->lock);
692 dev_info(tdc2dev(tdc), "Interrupt already served status 0x%08x\n",
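
The hard IRQ path acknowledges EOC by writing the read status back (the STATUS register is write-one-to-clear), runs the mode-specific isr_handler under the channel lock, and defers all callback work to the tasklet. Condensed:

    spin_lock(&tdc->lock);
    status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
    if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
            tdc_write(tdc, TEGRA_APBDMA_CHAN_STATUS, status); /* W1C ack */
            tdc->isr_handler(tdc, false);    /* once-mode or cyclic handler */
            tasklet_schedule(&tdc->tasklet); /* callbacks run later */
            wake_up_all(&tdc->wq);           /* unblocks device_synchronize() */
            spin_unlock(&tdc->lock);
            return IRQ_HANDLED;
    }
    spin_unlock(&tdc->lock);
    return IRQ_NONE;                         /* spurious / already served */
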
701 struct tegra_dma_channel *tdc = to_tegra_dma_chan(txd->chan);
705 spin_lock_irqsave(&tdc->lock, flags);
708 list_splice_tail_init(&dma_desc->tx_list, &tdc->pending_sg_req);
709 spin_unlock_irqrestore(&tdc->lock, flags);
716 struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
720 spin_lock_irqsave(&tdc->lock, flags);
721 if (list_empty(&tdc->pending_sg_req)) {
722 dev_err(tdc2dev(tdc), "No DMA request\n");
725 if (!tdc->busy) {
726 err = pm_runtime_resume_and_get(tdc->tdma->dev);
728 dev_err(tdc2dev(tdc), "Failed to enable DMA\n");
732 tdc_start_head_req(tdc);
735 if (tdc->cyclic) {
741 tdc_configure_next_head_desc(tdc);
745 spin_unlock_irqrestore(&tdc->lock, flags);
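
tegra_dma_issue_pending() powers the controller up only on the idle-to-busy transition, and in cyclic mode immediately pre-programs the second buffer so the hardware never starves between periods. Condensed:

    if (!tdc->busy) {
            err = pm_runtime_resume_and_get(tdc->tdma->dev);
            if (err < 0) {
                    dev_err(tdc2dev(tdc), "Failed to enable DMA\n");
                    goto end;                /* unlock and return */
            }
            tdc_start_head_req(tdc);         /* program + enable the head */
            if (tdc->cyclic)
                    tdc_configure_next_head_desc(tdc);
    }
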
750 struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
757 spin_lock_irqsave(&tdc->lock, flags);
759 if (!tdc->busy)
763 tegra_dma_pause(tdc, true);
765 status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
767 dev_dbg(tdc2dev(tdc), "%s():handling isr\n", __func__);
768 tdc->isr_handler(tdc, true);
769 status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
771 if (tdc->tdma->chip_data->support_separate_wcount_reg)
772 wcount = tdc_read(tdc, TEGRA_APBDMA_CHAN_WORD_TRANSFER);
776 was_busy = tdc->busy;
777 tegra_dma_stop(tdc);
779 if (!list_empty(&tdc->pending_sg_req) && was_busy) {
780 sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq),
783 get_current_xferred_count(tdc, sgreq, wcount);
785 tegra_dma_resume(tdc);
787 pm_runtime_put(tdc->tdma->dev);
788 wake_up_all(&tdc->wq);
791 tegra_dma_abort_all(tdc);
793 while (!list_empty(&tdc->cb_desc)) {
794 dma_desc = list_first_entry(&tdc->cb_desc, typeof(*dma_desc),
799 spin_unlock_irqrestore(&tdc->lock, flags);
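
tegra_dma_terminate_all() captures residue before killing the transfer: pause and wait for the burst to drain, service any EOC that raced in (the to_terminate = true call), sample the word count, stop, and fold the count into the interrupted request so a later tx_status() stays accurate. Condensed:

    tegra_dma_pause(tdc, true);              /* wait for burst completion */
    status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
    if (status & TEGRA_APBDMA_STATUS_ISE_EOC)
            tdc->isr_handler(tdc, true);     /* retire the finished request */
    if (tdc->tdma->chip_data->support_separate_wcount_reg)
            wcount = tdc_read(tdc, TEGRA_APBDMA_CHAN_WORD_TRANSFER);
    was_busy = tdc->busy;
    tegra_dma_stop(tdc);
    if (!list_empty(&tdc->pending_sg_req) && was_busy) {
            sgreq = list_first_entry(&tdc->pending_sg_req,
                                     typeof(*sgreq), node);
            sgreq->dma_desc->bytes_transferred +=
                    get_current_xferred_count(tdc, sgreq, wcount);
    }
    tegra_dma_resume(tdc);
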
804 static bool tegra_dma_eoc_interrupt_deasserted(struct tegra_dma_channel *tdc)
809 spin_lock_irqsave(&tdc->lock, flags);
810 status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
811 spin_unlock_irqrestore(&tdc->lock, flags);
818 struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
821 err = pm_runtime_resume_and_get(tdc->tdma->dev);
823 dev_err(tdc2dev(tdc), "Failed to synchronize DMA: %d\n", err);
832 wait_event(tdc->wq, tegra_dma_eoc_interrupt_deasserted(tdc));
834 tasklet_kill(&tdc->tasklet);
836 pm_runtime_put(tdc->tdma->dev);
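
tegra_dma_synchronize() must see EOC deasserted in hardware before killing the tasklet; otherwise an interrupt that has fired but not yet been serviced could re-schedule the tasklet after tasklet_kill(). The ordering, with its runtime-PM bracket (error path simplified):

    err = pm_runtime_resume_and_get(tdc->tdma->dev); /* keep registers live */
    if (err < 0)
            return;
    wait_event(tdc->wq, tegra_dma_eoc_interrupt_deasserted(tdc));
    tasklet_kill(&tdc->tasklet);             /* no further schedules possible */
    pm_runtime_put(tdc->tdma->dev);
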
839 static unsigned int tegra_dma_sg_bytes_xferred(struct tegra_dma_channel *tdc,
844 if (!list_is_first(&sg_req->node, &tdc->pending_sg_req))
847 if (tdc->tdma->chip_data->support_separate_wcount_reg)
848 wcount = tdc_read(tdc, TEGRA_APBDMA_CHAN_WORD_TRANSFER);
850 status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
852 if (!tdc->tdma->chip_data->support_separate_wcount_reg)
858 wcount = get_current_xferred_count(tdc, sg_req, wcount);
902 struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
914 spin_lock_irqsave(&tdc->lock, flags);
917 list_for_each_entry(dma_desc, &tdc->free_dma_desc, node) {
925 list_for_each_entry(sg_req, &tdc->pending_sg_req, node) {
928 bytes = tegra_dma_sg_bytes_xferred(tdc, sg_req);
934 dev_dbg(tdc2dev(tdc), "cookie %d not found\n", cookie);
945 trace_tegra_dma_tx_status(&tdc->dma_chan, cookie, txstate);
946 spin_unlock_irqrestore(&tdc->lock, flags);
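
On the consumer side, the residue assembled here (bytes_transferred plus the live word count from tegra_dma_sg_bytes_xferred()) is reported through the generic dmaengine query. A minimal sketch with hypothetical chan/cookie:

    struct dma_tx_state state;
    enum dma_status status;

    status = dmaengine_tx_status(chan, cookie, &state); /* -> tegra_dma_tx_status() */
    if (status != DMA_COMPLETE)
            pr_info("in flight, residue %u bytes\n", state.residue);
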
951 static inline unsigned int get_bus_width(struct tegra_dma_channel *tdc,
964 dev_warn(tdc2dev(tdc),
970 static inline unsigned int get_burst_size(struct tegra_dma_channel *tdc,
1001 static int get_transfer_param(struct tegra_dma_channel *tdc,
1011 *apb_addr = tdc->dma_sconfig.dst_addr;
1012 *apb_seq = get_bus_width(tdc, tdc->dma_sconfig.dst_addr_width);
1013 *burst_size = tdc->dma_sconfig.dst_maxburst;
1014 *slave_bw = tdc->dma_sconfig.dst_addr_width;
1019 *apb_addr = tdc->dma_sconfig.src_addr;
1020 *apb_seq = get_bus_width(tdc, tdc->dma_sconfig.src_addr_width);
1021 *burst_size = tdc->dma_sconfig.src_maxburst;
1022 *slave_bw = tdc->dma_sconfig.src_addr_width;
1027 dev_err(tdc2dev(tdc), "DMA direction is not supported\n");
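
get_transfer_param() is the direction dispatch shared by both prep paths: the APB-side address, bus width and burst come from the dst_* fields for MEM_TO_DEV and the src_* fields for DEV_TO_MEM; anything else is rejected. Condensed to the address/width pair:

    switch (direction) {
    case DMA_MEM_TO_DEV:
            *apb_addr = tdc->dma_sconfig.dst_addr;
            *slave_bw = tdc->dma_sconfig.dst_addr_width;
            break;
    case DMA_DEV_TO_MEM:
            *apb_addr = tdc->dma_sconfig.src_addr;
            *slave_bw = tdc->dma_sconfig.src_addr_width;
            break;
    default:
            dev_err(tdc2dev(tdc), "DMA direction is not supported\n");
            return -EINVAL;
    }
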
1034 static void tegra_dma_prep_wcount(struct tegra_dma_channel *tdc,
1040 if (tdc->tdma->chip_data->support_separate_wcount_reg)
1054 struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
1064 if (!tdc->config_init) {
1065 dev_err(tdc2dev(tdc), "DMA channel is not configured\n");
1069 dev_err(tdc2dev(tdc), "Invalid segment length %d\n", sg_len);
1073 if (get_transfer_param(tdc, direction, &apb_ptr, &apb_seq, &csr,
1086 if (tdc->slave_id != TEGRA_APBDMA_SLAVE_ID_INVALID) {
1088 csr |= tdc->slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT;
1100 dma_desc = tegra_dma_desc_get(tdc);
1102 dev_err(tdc2dev(tdc), "DMA descriptors not available\n");
1120 len > tdc->tdma->chip_data->max_dma_count) {
1121 dev_err(tdc2dev(tdc),
1123 tegra_dma_desc_put(tdc, dma_desc);
1127 sg_req = tegra_dma_sg_req_get(tdc);
1129 dev_err(tdc2dev(tdc), "DMA sg-req not available\n");
1130 tegra_dma_desc_put(tdc, dma_desc);
1134 ahb_seq |= get_burst_size(tdc, burst_size, slave_bw, len);
1140 tegra_dma_prep_wcount(tdc, &sg_req->ch_regs, len);
1158 if (!tdc->isr_handler) {
1159 tdc->isr_handler = handle_once_dma_done;
1160 tdc->cyclic = false;
1162 if (tdc->cyclic) {
1163 dev_err(tdc2dev(tdc), "DMA configured in cyclic mode\n");
1164 tegra_dma_desc_put(tdc, dma_desc);
1179 struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
1189 dev_err(tdc2dev(tdc), "Invalid buffer/period len\n");
1193 if (!tdc->config_init) {
1194 dev_err(tdc2dev(tdc), "DMA slave is not configured\n");
1204 if (tdc->busy) {
1205 dev_err(tdc2dev(tdc), "Request not allowed when DMA running\n");
1214 dev_err(tdc2dev(tdc), "buf_len is not multiple of period_len\n");
1220 len > tdc->tdma->chip_data->max_dma_count) {
1221 dev_err(tdc2dev(tdc), "Req len/mem address is not correct\n");
1225 if (get_transfer_param(tdc, direction, &apb_ptr, &apb_seq, &csr,
1234 if (tdc->slave_id != TEGRA_APBDMA_SLAVE_ID_INVALID) {
1236 csr |= tdc->slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT;
1248 dma_desc = tegra_dma_desc_get(tdc);
1250 dev_err(tdc2dev(tdc), "not enough descriptors available\n");
1264 sg_req = tegra_dma_sg_req_get(tdc);
1266 dev_err(tdc2dev(tdc), "DMA sg-req not available\n");
1267 tegra_dma_desc_put(tdc, dma_desc);
1271 ahb_seq |= get_burst_size(tdc, burst_size, slave_bw, len);
1275 tegra_dma_prep_wcount(tdc, &sg_req->ch_regs, len);
1295 if (!tdc->isr_handler) {
1296 tdc->isr_handler = handle_cont_sngl_cycle_dma_done;
1297 tdc->cyclic = true;
1299 if (!tdc->cyclic) {
1300 dev_err(tdc2dev(tdc), "DMA configuration conflict\n");
1301 tegra_dma_desc_put(tdc, dma_desc);
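
A typical consumer of this cyclic path, e.g. an audio driver draining a FIFO once per period, drives it through the generic dmaengine wrappers (desc, chan, buf_phys, period_elapsed_cb and priv are hypothetical client names):

    desc = dmaengine_prep_dma_cyclic(chan, buf_phys, buf_len, period_len,
                                     DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
    if (!desc)
            return -EINVAL;
    desc->callback = period_elapsed_cb;   /* invoked once per period */
    desc->callback_param = priv;
    cookie = dmaengine_submit(desc);      /* -> tegra_dma_tx_submit() */
    dma_async_issue_pending(chan);        /* -> tegra_dma_issue_pending() */
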
1311 struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
1313 dma_cookie_init(&tdc->dma_chan);
1320 struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
1329 dev_dbg(tdc2dev(tdc), "Freeing channel %d\n", tdc->id);
1332 tasklet_kill(&tdc->tasklet);
1334 list_splice_init(&tdc->pending_sg_req, &sg_req_list);
1335 list_splice_init(&tdc->free_sg_req, &sg_req_list);
1336 list_splice_init(&tdc->free_dma_desc, &dma_desc_list);
1337 INIT_LIST_HEAD(&tdc->cb_desc);
1338 tdc->config_init = false;
1339 tdc->isr_handler = NULL;
1354 tdc->slave_id = TEGRA_APBDMA_SLAVE_ID_INVALID;
1361 struct tegra_dma_channel *tdc;
1373 tdc = to_tegra_dma_chan(chan);
1374 tdc->slave_id = dma_spec->args[0];
1495 struct tegra_dma_channel *tdc = &tdma->channels[i];
1498 tdc->chan_addr = tdma->base_addr +
1508 snprintf(tdc->name, sizeof(tdc->name), "apbdma.%d", i);
1510 tdc->name, tdc);
1518 tdc->dma_chan.device = &tdma->dma_dev;
1519 dma_cookie_init(&tdc->dma_chan);
1520 list_add_tail(&tdc->dma_chan.device_node,
1522 tdc->tdma = tdma;
1523 tdc->id = i;
1524 tdc->slave_id = TEGRA_APBDMA_SLAVE_ID_INVALID;
1526 tasklet_setup(&tdc->tasklet, tegra_dma_tasklet);
1527 spin_lock_init(&tdc->lock);
1528 init_waitqueue_head(&tdc->wq);
1530 INIT_LIST_HEAD(&tdc->pending_sg_req);
1531 INIT_LIST_HEAD(&tdc->free_sg_req);
1532 INIT_LIST_HEAD(&tdc->free_dma_desc);
1533 INIT_LIST_HEAD(&tdc->cb_desc);
1632 struct tegra_dma_channel *tdc = &tdma->channels[i];
1634 tasklet_kill(&tdc->tasklet);
1636 spin_lock_irqsave(&tdc->lock, flags);
1637 busy = tdc->busy;
1638 spin_unlock_irqrestore(&tdc->lock, flags);