Lines Matching defs:tdc (NVIDIA Tegra APB DMA driver, drivers/dma/tegra20-apb-dma.c)
176 typedef void (*dma_isr_handler)(struct tegra_dma_channel *tdc,
235 static inline void tdc_write(struct tegra_dma_channel *tdc,
238 writel(val, tdc->chan_addr + reg);
241 static inline u32 tdc_read(struct tegra_dma_channel *tdc, u32 reg)
243 return readl(tdc->chan_addr + reg);
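The pair at 235-243 funnels all channel MMIO through one read helper and one write helper, keyed by a register offset rather than a raw pointer. A minimal sketch of the same pattern; my_chan and the my_chan_* names are illustrative stand-ins, not driver symbols:

#include <linux/io.h>
#include <linux/types.h>

struct my_chan {
	void __iomem *chan_addr;	/* this channel's register window */
};

/* Every register access goes through these two helpers, so it is
 * always relative to the channel base and easy to trace. */
static inline void my_chan_write(struct my_chan *c, u32 reg, u32 val)
{
	writel(val, c->chan_addr + reg);
}

static inline u32 my_chan_read(struct my_chan *c, u32 reg)
{
	return readl(c->chan_addr + reg);
}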
257 static inline struct device *tdc2dev(struct tegra_dma_channel *tdc)
259 return &tdc->dma_chan.dev->device;
265 static struct tegra_dma_desc *tegra_dma_desc_get(struct tegra_dma_channel *tdc)
270 spin_lock_irqsave(&tdc->lock, flags);
273 list_for_each_entry(dma_desc, &tdc->free_dma_desc, node) {
276 spin_unlock_irqrestore(&tdc->lock, flags);
282 spin_unlock_irqrestore(&tdc->lock, flags);
289 dma_async_tx_descriptor_init(&dma_desc->txd, &tdc->dma_chan);
296 static void tegra_dma_desc_put(struct tegra_dma_channel *tdc,
301 spin_lock_irqsave(&tdc->lock, flags);
303 list_splice_init(&dma_desc->tx_list, &tdc->free_sg_req);
304 list_add_tail(&dma_desc->node, &tdc->free_dma_desc);
305 spin_unlock_irqrestore(&tdc->lock, flags);
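tegra_dma_desc_get() and tegra_dma_desc_put() (265-305) recycle descriptors through a spinlock-protected free list rather than allocating one per transfer; the real get also skips entries the client has not yet ACKed. A reduced model of the recycling, with my_desc/my_chan as stand-in types:

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct my_desc {
	struct list_head node;
};

struct my_chan {
	spinlock_t lock;
	struct list_head free_list;	/* descriptors ready for reuse */
};

/* Take a recycled descriptor if one is free, else allocate fresh.
 * GFP_NOWAIT because prep callbacks may run in atomic context. */
static struct my_desc *my_desc_get(struct my_chan *c)
{
	struct my_desc *d = NULL;
	unsigned long flags;

	spin_lock_irqsave(&c->lock, flags);
	if (!list_empty(&c->free_list)) {
		d = list_first_entry(&c->free_list, struct my_desc, node);
		list_del(&d->node);
	}
	spin_unlock_irqrestore(&c->lock, flags);

	return d ? d : kzalloc(sizeof(*d), GFP_NOWAIT);
}

/* Hand a finished descriptor back for later reuse. */
static void my_desc_put(struct my_chan *c, struct my_desc *d)
{
	unsigned long flags;

	spin_lock_irqsave(&c->lock, flags);
	list_add_tail(&d->node, &c->free_list);
	spin_unlock_irqrestore(&c->lock, flags);
}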
309 tegra_dma_sg_req_get(struct tegra_dma_channel *tdc)
314 spin_lock_irqsave(&tdc->lock, flags);
315 if (!list_empty(&tdc->free_sg_req)) {
316 sg_req = list_first_entry(&tdc->free_sg_req, typeof(*sg_req),
319 spin_unlock_irqrestore(&tdc->lock, flags);
322 spin_unlock_irqrestore(&tdc->lock, flags);
332 struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
334 if (!list_empty(&tdc->pending_sg_req)) {
335 dev_err(tdc2dev(tdc), "Configuration not allowed\n");
339 memcpy(&tdc->dma_sconfig, sconfig, sizeof(*sconfig));
340 tdc->config_init = true;
345 static void tegra_dma_global_pause(struct tegra_dma_channel *tdc,
348 struct tegra_dma *tdma = tdc->tdma;
352 if (tdc->tdma->global_pause_count == 0) {
358 tdc->tdma->global_pause_count++;
363 static void tegra_dma_global_resume(struct tegra_dma_channel *tdc)
365 struct tegra_dma *tdma = tdc->tdma;
369 if (WARN_ON(tdc->tdma->global_pause_count == 0))
372 if (--tdc->tdma->global_pause_count == 0)
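Lines 345-372 are the reason global_pause_count exists: pausing this controller is a controller-wide operation, so concurrent pausers from different channels are refcounted and only the first pause and the last resume actually touch the hardware. The counting reduces to:

#include <linux/bug.h>
#include <linux/spinlock.h>

struct my_dma {
	spinlock_t global_lock;
	unsigned int global_pause_count;
};

static void my_global_pause(struct my_dma *d)
{
	spin_lock(&d->global_lock);
	if (d->global_pause_count++ == 0) {
		/* First pauser: stop the controller here. */
	}
	spin_unlock(&d->global_lock);
}

static void my_global_resume(struct my_dma *d)
{
	spin_lock(&d->global_lock);
	if (WARN_ON(d->global_pause_count == 0))
		goto out;			/* unbalanced resume */
	if (--d->global_pause_count == 0) {
		/* Last resumer: restart the controller here. */
	}
out:
	spin_unlock(&d->global_lock);
}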
380 static void tegra_dma_pause(struct tegra_dma_channel *tdc,
383 struct tegra_dma *tdma = tdc->tdma;
386 tdc_write(tdc, TEGRA_APBDMA_CHAN_CSRE,
391 tegra_dma_global_pause(tdc, wait_for_burst_complete);
395 static void tegra_dma_resume(struct tegra_dma_channel *tdc)
397 struct tegra_dma *tdma = tdc->tdma;
400 tdc_write(tdc, TEGRA_APBDMA_CHAN_CSRE, 0);
402 tegra_dma_global_resume(tdc);
405 static void tegra_dma_stop(struct tegra_dma_channel *tdc)
410 csr = tdc_read(tdc, TEGRA_APBDMA_CHAN_CSR);
412 tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, csr);
416 tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, csr);
419 status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
421 dev_dbg(tdc2dev(tdc), "%s():clearing interrupt\n", __func__);
422 tdc_write(tdc, TEGRA_APBDMA_CHAN_STATUS, status);
424 tdc->busy = false;
427 static void tegra_dma_start(struct tegra_dma_channel *tdc,
432 tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, ch_regs->csr);
433 tdc_write(tdc, TEGRA_APBDMA_CHAN_APBSEQ, ch_regs->apb_seq);
434 tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, ch_regs->apb_ptr);
435 tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBSEQ, ch_regs->ahb_seq);
436 tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, ch_regs->ahb_ptr);
437 if (tdc->tdma->chip_data->support_separate_wcount_reg)
438 tdc_write(tdc, TEGRA_APBDMA_CHAN_WCOUNT, ch_regs->wcount);
441 tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR,
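The write at 441 is cut off by the listing, but it is the actual start of the transfer: tegra_dma_start() first programs CSR, both sequence registers, both bus pointers, and (where support_separate_wcount_reg is set) the word count, and only then rewrites CSR with the enable bit, so the channel never runs half-configured. In the driver the continuation ORs in the enable flag:

/* Program everything above with the channel disabled, then start it
 * by rewriting CSR with the enable bit set. */
tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR,
	  ch_regs->csr | TEGRA_APBDMA_CSR_ENB);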
445 static void tegra_dma_configure_for_next(struct tegra_dma_channel *tdc,
461 tegra_dma_pause(tdc, false);
462 status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
469 dev_err(tdc2dev(tdc),
471 tegra_dma_resume(tdc);
476 tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, nsg_req->ch_regs.apb_ptr);
477 tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, nsg_req->ch_regs.ahb_ptr);
478 if (tdc->tdma->chip_data->support_separate_wcount_reg)
479 tdc_write(tdc, TEGRA_APBDMA_CHAN_WCOUNT,
481 tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR,
486 tegra_dma_resume(tdc);
489 static void tdc_start_head_req(struct tegra_dma_channel *tdc)
493 sg_req = list_first_entry(&tdc->pending_sg_req, typeof(*sg_req), node);
494 tegra_dma_start(tdc, sg_req);
497 tdc->busy = true;
500 static void tdc_configure_next_head_desc(struct tegra_dma_channel *tdc)
504 hsgreq = list_first_entry(&tdc->pending_sg_req, typeof(*hsgreq), node);
505 if (!list_is_last(&hsgreq->node, &tdc->pending_sg_req)) {
508 tegra_dma_configure_for_next(tdc, hnsgreq);
513 get_current_xferred_count(struct tegra_dma_channel *tdc,
520 static void tegra_dma_abort_all(struct tegra_dma_channel *tdc)
525 while (!list_empty(&tdc->pending_sg_req)) {
526 sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq),
528 list_move_tail(&sgreq->node, &tdc->free_sg_req);
532 list_add_tail(&dma_desc->node, &tdc->free_dma_desc);
537 &tdc->cb_desc);
541 tdc->isr_handler = NULL;
544 static bool handle_continuous_head_request(struct tegra_dma_channel *tdc,
554 hsgreq = list_first_entry(&tdc->pending_sg_req, typeof(*hsgreq), node);
556 tegra_dma_stop(tdc);
557 pm_runtime_put(tdc->tdma->dev);
558 dev_err(tdc2dev(tdc), "DMA transfer underflow, aborting DMA\n");
559 tegra_dma_abort_all(tdc);
565 tdc_configure_next_head_desc(tdc);
570 static void handle_once_dma_done(struct tegra_dma_channel *tdc,
576 tdc->busy = false;
577 sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq), node);
586 list_add_tail(&dma_desc->cb_node, &tdc->cb_desc);
588 list_add_tail(&dma_desc->node, &tdc->free_dma_desc);
590 list_add_tail(&sgreq->node, &tdc->free_sg_req);
596 if (list_empty(&tdc->pending_sg_req)) {
597 pm_runtime_put(tdc->tdma->dev);
601 tdc_start_head_req(tdc);
604 static void handle_cont_sngl_cycle_dma_done(struct tegra_dma_channel *tdc,
611 sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq), node);
620 list_add_tail(&dma_desc->cb_node, &tdc->cb_desc);
626 if (!list_is_last(&sgreq->node, &tdc->pending_sg_req)) {
627 list_move_tail(&sgreq->node, &tdc->pending_sg_req);
629 st = handle_continuous_head_request(tdc, to_terminate);
637 struct tegra_dma_channel *tdc = from_tasklet(tdc, t, tasklet);
643 spin_lock_irqsave(&tdc->lock, flags);
644 while (!list_empty(&tdc->cb_desc)) {
645 dma_desc = list_first_entry(&tdc->cb_desc, typeof(*dma_desc),
651 trace_tegra_dma_complete_cb(&tdc->dma_chan, cb_count,
653 spin_unlock_irqrestore(&tdc->lock, flags);
656 spin_lock_irqsave(&tdc->lock, flags);
658 spin_unlock_irqrestore(&tdc->lock, flags);
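The tasklet walk at 643-658 is the classic drop-the-lock-around-the-callback dance: the channel lock protects cb_desc, but a client callback may re-enter the driver (to queue the next transfer, for instance), so the lock is released for each invocation and retaken before touching the list again. Stripped down (the driver additionally repeats each callback cb_count times to cover coalesced cyclic periods), with my_cb/my_chan as illustrative types:

#include <linux/list.h>
#include <linux/spinlock.h>

struct my_cb {
	struct list_head node;
	void (*fn)(void *arg);
	void *arg;
};

struct my_chan {
	spinlock_t lock;
	struct list_head cb_list;	/* completions awaiting callback */
};

static void my_run_callbacks(struct my_chan *c)
{
	unsigned long flags;

	spin_lock_irqsave(&c->lock, flags);
	while (!list_empty(&c->cb_list)) {
		struct my_cb *cb = list_first_entry(&c->cb_list,
						    struct my_cb, node);

		list_del(&cb->node);
		/* Drop the lock: the callback may call back into us. */
		spin_unlock_irqrestore(&c->lock, flags);
		cb->fn(cb->arg);
		spin_lock_irqsave(&c->lock, flags);
	}
	spin_unlock_irqrestore(&c->lock, flags);
}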
663 struct tegra_dma_channel *tdc = dev_id;
666 spin_lock(&tdc->lock);
668 trace_tegra_dma_isr(&tdc->dma_chan, irq);
669 status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
671 tdc_write(tdc, TEGRA_APBDMA_CHAN_STATUS, status);
672 tdc->isr_handler(tdc, false);
673 tasklet_schedule(&tdc->tasklet);
674 wake_up_all(&tdc->wq);
675 spin_unlock(&tdc->lock);
679 spin_unlock(&tdc->lock);
680 dev_info(tdc2dev(tdc), "Interrupt already served status 0x%08x\n",
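The handler at 663-681 is the usual ack-then-defer interrupt shape: read the status register, write the completion bit back to clear it, do only the minimal bookkeeping under the channel spinlock, and push callback work to the tasklet; anything else is reported as an already-served interrupt. Condensed, minus the per-mode isr_handler dispatch and the waitqueue wake, with MY_* constants standing in for the TEGRA_APBDMA_* ones:

#include <linux/bits.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/spinlock.h>

#define MY_REG_STATUS	0x004		/* illustrative offset */
#define MY_STATUS_DONE	BIT(30)		/* stand-in for ISE_EOC */

struct my_chan {
	spinlock_t lock;
	void __iomem *chan_addr;
	struct tasklet_struct tasklet;
};

static irqreturn_t my_dma_isr(int irq, void *dev_id)
{
	struct my_chan *c = dev_id;
	u32 status;

	spin_lock(&c->lock);
	status = readl(c->chan_addr + MY_REG_STATUS);
	if (status & MY_STATUS_DONE) {
		/* Write-one-to-clear: ack now, defer the heavy work. */
		writel(status, c->chan_addr + MY_REG_STATUS);
		tasklet_schedule(&c->tasklet);
		spin_unlock(&c->lock);
		return IRQ_HANDLED;
	}
	spin_unlock(&c->lock);
	return IRQ_NONE;	/* spurious or already served */
}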
689 struct tegra_dma_channel *tdc = to_tegra_dma_chan(txd->chan);
693 spin_lock_irqsave(&tdc->lock, flags);
696 list_splice_tail_init(&dma_desc->tx_list, &tdc->pending_sg_req);
697 spin_unlock_irqrestore(&tdc->lock, flags);
704 struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
708 spin_lock_irqsave(&tdc->lock, flags);
709 if (list_empty(&tdc->pending_sg_req)) {
710 dev_err(tdc2dev(tdc), "No DMA request\n");
713 if (!tdc->busy) {
714 err = pm_runtime_resume_and_get(tdc->tdma->dev);
716 dev_err(tdc2dev(tdc), "Failed to enable DMA\n");
720 tdc_start_head_req(tdc);
723 if (tdc->cyclic) {
729 tdc_configure_next_head_desc(tdc);
733 spin_unlock_irqrestore(&tdc->lock, flags);
738 struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
745 spin_lock_irqsave(&tdc->lock, flags);
747 if (!tdc->busy)
751 tegra_dma_pause(tdc, true);
753 status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
755 dev_dbg(tdc2dev(tdc), "%s():handling isr\n", __func__);
756 tdc->isr_handler(tdc, true);
757 status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
759 if (tdc->tdma->chip_data->support_separate_wcount_reg)
760 wcount = tdc_read(tdc, TEGRA_APBDMA_CHAN_WORD_TRANSFER);
764 was_busy = tdc->busy;
765 tegra_dma_stop(tdc);
767 if (!list_empty(&tdc->pending_sg_req) && was_busy) {
768 sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq),
771 get_current_xferred_count(tdc, sgreq, wcount);
773 tegra_dma_resume(tdc);
775 pm_runtime_put(tdc->tdma->dev);
776 wake_up_all(&tdc->wq);
779 tegra_dma_abort_all(tdc);
781 while (!list_empty(&tdc->cb_desc)) {
782 dma_desc = list_first_entry(&tdc->cb_desc, typeof(*dma_desc),
787 spin_unlock_irqrestore(&tdc->lock, flags);
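terminate_all (738-787) is careful about ordering: pause first so a racing end-of-transfer interrupt can still be handled as a normal completion, capture the hardware word count into the head request so a later tx_status() can report an honest residue, and only then stop the channel and abort the queue. As an outline, every my_* helper here is a hypothetical stand-in for the corresponding driver step:

static void my_terminate(struct my_chan *c)
{
	unsigned long flags;

	spin_lock_irqsave(&c->lock, flags);
	if (!c->busy)
		goto empty;

	/* 1. Pause, so the FIFO drains and status stops changing. */
	my_pause(c);

	/* 2. A completion that fired meanwhile is handled normally,
	 *    not thrown away. */
	if (readl(c->chan_addr + MY_REG_STATUS) & MY_STATUS_DONE)
		my_handle_done(c);

	/* 3. Stop, then record the residual word count for tx_status(). */
	my_stop(c);
	c->head_words_done = readl(c->chan_addr + MY_REG_XFER_COUNT);

	my_resume(c);
empty:
	/* 4. Abort everything still queued. */
	my_abort_all(c);
	spin_unlock_irqrestore(&c->lock, flags);
}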
792 static bool tegra_dma_eoc_interrupt_deasserted(struct tegra_dma_channel *tdc)
797 spin_lock_irqsave(&tdc->lock, flags);
798 status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
799 spin_unlock_irqrestore(&tdc->lock, flags);
806 struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
809 err = pm_runtime_resume_and_get(tdc->tdma->dev);
811 dev_err(tdc2dev(tdc), "Failed to synchronize DMA: %d\n", err);
820 wait_event(tdc->wq, tegra_dma_eoc_interrupt_deasserted(tdc));
822 tasklet_kill(&tdc->tasklet);
824 pm_runtime_put(tdc->tdma->dev);
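device_synchronize (806-824) pairs a wait_event() on the channel waitqueue with the wake_up_all() calls in the ISR and terminate paths: it sleeps until the completion interrupt has genuinely deasserted, then kills the tasklet so no client callback can run after it returns. The waiting side, assuming the my_chan from the ISR sketch also carries a wait_queue_head_t wq:

#include <linux/wait.h>

/* Condition helper: sample the status register under the lock. */
static bool my_done_deasserted(struct my_chan *c)
{
	unsigned long flags;
	u32 status;

	spin_lock_irqsave(&c->lock, flags);
	status = readl(c->chan_addr + MY_REG_STATUS);
	spin_unlock_irqrestore(&c->lock, flags);

	return !(status & MY_STATUS_DONE);
}

static void my_dma_synchronize(struct my_chan *c)
{
	/* Sleep until the ISR (which calls wake_up_all()) has seen and
	 * cleared the completion interrupt... */
	wait_event(c->wq, my_done_deasserted(c));
	/* ...then flush any completion work it scheduled. */
	tasklet_kill(&c->tasklet);
}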
827 static unsigned int tegra_dma_sg_bytes_xferred(struct tegra_dma_channel *tdc,
832 if (!list_is_first(&sg_req->node, &tdc->pending_sg_req))
835 if (tdc->tdma->chip_data->support_separate_wcount_reg)
836 wcount = tdc_read(tdc, TEGRA_APBDMA_CHAN_WORD_TRANSFER);
838 status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
840 if (!tdc->tdma->chip_data->support_separate_wcount_reg)
846 wcount = get_current_xferred_count(tdc, sg_req, wcount);
890 struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
902 spin_lock_irqsave(&tdc->lock, flags);
905 list_for_each_entry(dma_desc, &tdc->free_dma_desc, node) {
913 list_for_each_entry(sg_req, &tdc->pending_sg_req, node) {
916 bytes = tegra_dma_sg_bytes_xferred(tdc, sg_req);
922 dev_dbg(tdc2dev(tdc), "cookie %d not found\n", cookie);
933 trace_tegra_dma_tx_status(&tdc->dma_chan, cookie, txstate);
934 spin_unlock_irqrestore(&tdc->lock, flags);
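tx_status (890-934) resolves a cookie by scanning the completed descriptors on free_dma_desc first and the pending sg requests second; only the request at the head of the queue gets the live hardware word count added to its progress. The residue arithmetic itself is one line, and the modulo is what makes it hold for cyclic transfers, where bytes_transferred keeps growing past a single buffer length:

/* Bytes left in the current pass over the buffer; fold the running
 * total back into [0, requested) before subtracting. */
static unsigned int my_residue(unsigned int requested,
			       unsigned int transferred)
{
	return requested - (transferred % requested);
}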
939 static inline unsigned int get_bus_width(struct tegra_dma_channel *tdc,
952 dev_warn(tdc2dev(tdc),
958 static inline unsigned int get_burst_size(struct tegra_dma_channel *tdc,
989 static int get_transfer_param(struct tegra_dma_channel *tdc,
999 *apb_addr = tdc->dma_sconfig.dst_addr;
1000 *apb_seq = get_bus_width(tdc, tdc->dma_sconfig.dst_addr_width);
1001 *burst_size = tdc->dma_sconfig.dst_maxburst;
1002 *slave_bw = tdc->dma_sconfig.dst_addr_width;
1007 *apb_addr = tdc->dma_sconfig.src_addr;
1008 *apb_seq = get_bus_width(tdc, tdc->dma_sconfig.src_addr_width);
1009 *burst_size = tdc->dma_sconfig.src_maxburst;
1010 *slave_bw = tdc->dma_sconfig.src_addr_width;
1015 dev_err(tdc2dev(tdc), "DMA direction is not supported\n");
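get_transfer_param (989-1015) is the standard dmaengine direction switch: for DMA_MEM_TO_DEV the device-side address, bus width, and burst come from the dst_* half of the channel's dma_slave_config, for DMA_DEV_TO_MEM from the src_* half, and any other direction is rejected. In outline (my_get_transfer_param is a stand-in and omits the width/seq fields):

#include <linux/dmaengine.h>
#include <linux/errno.h>

static int my_get_transfer_param(struct dma_slave_config *cfg,
				 enum dma_transfer_direction dir,
				 dma_addr_t *dev_addr, u32 *burst)
{
	switch (dir) {
	case DMA_MEM_TO_DEV:	/* memory -> peripheral: use dst_* */
		*dev_addr = cfg->dst_addr;
		*burst = cfg->dst_maxburst;
		return 0;
	case DMA_DEV_TO_MEM:	/* peripheral -> memory: use src_* */
		*dev_addr = cfg->src_addr;
		*burst = cfg->src_maxburst;
		return 0;
	default:
		return -EINVAL;
	}
}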
1022 static void tegra_dma_prep_wcount(struct tegra_dma_channel *tdc,
1028 if (tdc->tdma->chip_data->support_separate_wcount_reg)
1042 struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
1052 if (!tdc->config_init) {
1053 dev_err(tdc2dev(tdc), "DMA channel is not configured\n");
1057 dev_err(tdc2dev(tdc), "Invalid segment length %d\n", sg_len);
1061 if (get_transfer_param(tdc, direction, &apb_ptr, &apb_seq, &csr,
1074 if (tdc->slave_id != TEGRA_APBDMA_SLAVE_ID_INVALID) {
1076 csr |= tdc->slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT;
1088 dma_desc = tegra_dma_desc_get(tdc);
1090 dev_err(tdc2dev(tdc), "DMA descriptors not available\n");
1108 len > tdc->tdma->chip_data->max_dma_count) {
1109 dev_err(tdc2dev(tdc),
1111 tegra_dma_desc_put(tdc, dma_desc);
1115 sg_req = tegra_dma_sg_req_get(tdc);
1117 dev_err(tdc2dev(tdc), "DMA sg-req not available\n");
1118 tegra_dma_desc_put(tdc, dma_desc);
1122 ahb_seq |= get_burst_size(tdc, burst_size, slave_bw, len);
1128 tegra_dma_prep_wcount(tdc, &sg_req->ch_regs, len);
1146 if (!tdc->isr_handler) {
1147 tdc->isr_handler = handle_once_dma_done;
1148 tdc->cyclic = false;
1150 if (tdc->cyclic) {
1151 dev_err(tdc2dev(tdc), "DMA configured in cyclic mode\n");
1152 tegra_dma_desc_put(tdc, dma_desc);
1167 struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
1177 dev_err(tdc2dev(tdc), "Invalid buffer/period len\n");
1181 if (!tdc->config_init) {
1182 dev_err(tdc2dev(tdc), "DMA slave is not configured\n");
1192 if (tdc->busy) {
1193 dev_err(tdc2dev(tdc), "Request not allowed when DMA running\n");
1202 dev_err(tdc2dev(tdc), "buf_len is not multiple of period_len\n");
1208 len > tdc->tdma->chip_data->max_dma_count) {
1209 dev_err(tdc2dev(tdc), "Req len/mem address is not correct\n");
1213 if (get_transfer_param(tdc, direction, &apb_ptr, &apb_seq, &csr,
1222 if (tdc->slave_id != TEGRA_APBDMA_SLAVE_ID_INVALID) {
1224 csr |= tdc->slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT;
1236 dma_desc = tegra_dma_desc_get(tdc);
1238 dev_err(tdc2dev(tdc), "not enough descriptors available\n");
1252 sg_req = tegra_dma_sg_req_get(tdc);
1254 dev_err(tdc2dev(tdc), "DMA sg-req not available\n");
1255 tegra_dma_desc_put(tdc, dma_desc);
1259 ahb_seq |= get_burst_size(tdc, burst_size, slave_bw, len);
1263 tegra_dma_prep_wcount(tdc, &sg_req->ch_regs, len);
1283 if (!tdc->isr_handler) {
1284 tdc->isr_handler = handle_cont_sngl_cycle_dma_done;
1285 tdc->cyclic = true;
1287 if (!tdc->cyclic) {
1288 dev_err(tdc2dev(tdc), "DMA configuration conflict\n");
1289 tegra_dma_desc_put(tdc, dma_desc);
1299 struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
1301 dma_cookie_init(&tdc->dma_chan);
1308 struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
1317 dev_dbg(tdc2dev(tdc), "Freeing channel %d\n", tdc->id);
1320 tasklet_kill(&tdc->tasklet);
1322 list_splice_init(&tdc->pending_sg_req, &sg_req_list);
1323 list_splice_init(&tdc->free_sg_req, &sg_req_list);
1324 list_splice_init(&tdc->free_dma_desc, &dma_desc_list);
1325 INIT_LIST_HEAD(&tdc->cb_desc);
1326 tdc->config_init = false;
1327 tdc->isr_handler = NULL;
1342 tdc->slave_id = TEGRA_APBDMA_SLAVE_ID_INVALID;
1349 struct tegra_dma_channel *tdc;
1361 tdc = to_tegra_dma_chan(chan);
1362 tdc->slave_id = dma_spec->args[0];
1483 struct tegra_dma_channel *tdc = &tdma->channels[i];
1486 tdc->chan_addr = tdma->base_addr +
1496 snprintf(tdc->name, sizeof(tdc->name), "apbdma.%d", i);
1498 tdc->name, tdc);
1506 tdc->dma_chan.device = &tdma->dma_dev;
1507 dma_cookie_init(&tdc->dma_chan);
1508 list_add_tail(&tdc->dma_chan.device_node,
1510 tdc->tdma = tdma;
1511 tdc->id = i;
1512 tdc->slave_id = TEGRA_APBDMA_SLAVE_ID_INVALID;
1514 tasklet_setup(&tdc->tasklet, tegra_dma_tasklet);
1515 spin_lock_init(&tdc->lock);
1516 init_waitqueue_head(&tdc->wq);
1518 INIT_LIST_HEAD(&tdc->pending_sg_req);
1519 INIT_LIST_HEAD(&tdc->free_sg_req);
1520 INIT_LIST_HEAD(&tdc->free_dma_desc);
1521 INIT_LIST_HEAD(&tdc->cb_desc);
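The probe loop at 1483-1521 gives every channel its register window, IRQ, tasklet, lock, waitqueue, and empty request/descriptor lists before the controller is registered with the dmaengine core. The same initialization order in miniature; the stride constant is illustrative and my_chan reuses the fields from the sketches above:

#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

#define MY_CHANNEL_STRIDE	0x20	/* illustrative register stride */

static void my_dma_tasklet(struct tasklet_struct *t);

static void my_chan_init(struct my_chan *c, void __iomem *base,
			 unsigned int id)
{
	c->chan_addr = base + MY_CHANNEL_STRIDE * id;

	tasklet_setup(&c->tasklet, my_dma_tasklet);
	spin_lock_init(&c->lock);
	init_waitqueue_head(&c->wq);
	INIT_LIST_HEAD(&c->cb_list);
	/* The driver then requests the per-channel IRQ and links the
	 * channel into the dma_device before registering it. */
}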
1620 struct tegra_dma_channel *tdc = &tdma->channels[i];
1622 tasklet_kill(&tdc->tasklet);
1624 spin_lock_irqsave(&tdc->lock, flags);
1625 busy = tdc->busy;
1626 spin_unlock_irqrestore(&tdc->lock, flags);
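The suspend check at 1620-1626 kills each channel's tasklet, then samples tdc->busy under the channel lock; a channel still marked busy has a live transfer, and the driver refuses to suspend. Per channel, roughly (a bool busy field is assumed on my_chan):

#include <linux/errno.h>

static int my_suspend_check(struct my_chan *c)
{
	unsigned long flags;
	bool busy;

	tasklet_kill(&c->tasklet);	/* no completion work in flight */

	spin_lock_irqsave(&c->lock, flags);
	busy = c->busy;
	spin_unlock_irqrestore(&c->lock, flags);

	return busy ? -EBUSY : 0;
}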