Lines Matching refs:mdma
117 struct mdc_dma *mdma;
148 static inline u32 mdc_readl(struct mdc_dma *mdma, u32 reg)
150 return readl(mdma->regs + reg);
153 static inline void mdc_writel(struct mdc_dma *mdma, u32 val, u32 reg)
155 writel(val, mdma->regs + reg);
160 return mdc_readl(mchan->mdma, mchan->chan_nr * 0x040 + reg);
165 mdc_writel(mchan->mdma, val, mchan->chan_nr * 0x040 + reg);
180 static inline struct device *mdma2dev(struct mdc_dma *mdma)
182 return mdma->dma_dev.dev;
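The accessor references above (driver lines 148-182) layer plain readl()/writel() wrappers under per-channel helpers that add a fixed 0x040-byte stride per hardware channel. A minimal sketch of how those pieces fit together; the struct layouts and the per-channel helper names are reconstructed from the fields referenced in this listing, not copied from the driver:

    struct mdc_dma {
            void __iomem *regs;             /* global MDC register block */
            struct dma_device dma_dev;      /* dmaengine device exposed to clients */
            /* ... */
    };

    struct mdc_chan {
            struct mdc_dma *mdma;           /* back-pointer to the controller */
            unsigned int chan_nr;           /* hardware channel index */
            /* ... */
    };

    /* Per-channel registers sit at a 0x040-byte stride from the base. */
    static inline u32 mdc_chan_readl(struct mdc_chan *mchan, u32 reg)
    {
            return mdc_readl(mchan->mdma, mchan->chan_nr * 0x040 + reg);
    }

    static inline void mdc_chan_writel(struct mdc_chan *mchan, u32 val, u32 reg)
    {
            mdc_writel(mchan->mdma, val, mchan->chan_nr * 0x040 + reg);
    }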
209 struct mdc_dma *mdma = mchan->mdma;
228 if (IS_ALIGNED(dst, mdma->bus_width) &&
229 IS_ALIGNED(src, mdma->bus_width))
230 max_burst = mdma->bus_width * mdma->max_burst_mult;
232 max_burst = mdma->bus_width * (mdma->max_burst_mult - 1);
237 mdc_set_read_width(ldesc, mdma->bus_width);
245 mdc_set_write_width(ldesc, mdma->bus_width);
251 mdc_set_read_width(ldesc, mdma->bus_width);
252 mdc_set_write_width(ldesc, mdma->bus_width);
261 struct mdc_dma *mdma = mdesc->chan->mdma;
270 dma_pool_free(mdma->desc_pool, curr, curr_phys);
289 struct mdc_dma *mdma = mchan->mdma;
306 curr = dma_pool_alloc(mdma->desc_pool, GFP_NOWAIT, &curr_phys);
318 xfer_size = min_t(size_t, mdma->max_xfer_size, len);
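The dma_pool_alloc() and min_t() references above (driver lines 289-318, in the memcpy preparation path) imply a loop that chains pool-allocated hardware descriptors, each covering at most max_xfer_size bytes. A sketch of that pattern, assuming a mdc_hw_list_desc descriptor type, a free_desc unwind label, and src/dst/len coming from the prep callback arguments; programming and linking of each node is elided:

    struct mdc_hw_list_desc *curr;
    dma_addr_t curr_phys;
    size_t xfer_size;

    while (len > 0) {
            curr = dma_pool_alloc(mdma->desc_pool, GFP_NOWAIT, &curr_phys);
            if (!curr)
                    goto free_desc;         /* unwind via mdc_desc_free(), as listed above */

            /* Each hardware descriptor moves at most max_xfer_size bytes. */
            xfer_size = min_t(size_t, mdma->max_xfer_size, len);

            /* ... program src/dst/xfer_size and link curr after the previous node ... */

            src += xfer_size;
            dst += xfer_size;
            len -= xfer_size;
    }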
359 if (width > mchan->mdma->bus_width)
371 struct mdc_dma *mdma = mchan->mdma;
392 mdma->max_xfer_size);
400 curr = dma_pool_alloc(mdma->desc_pool, GFP_NOWAIT,
413 xfer_size = min_t(size_t, mdma->max_xfer_size,
452 struct mdc_dma *mdma = mchan->mdma;
480 curr = dma_pool_alloc(mdma->desc_pool, GFP_NOWAIT,
493 xfer_size = min_t(size_t, mdma->max_xfer_size,
525 struct mdc_dma *mdma = mchan->mdma;
539 dev_dbg(mdma2dev(mdma), "Issuing descriptor on channel %d\n",
542 mdma->soc->enable_chan(mchan);
739 struct device *dev = mdma2dev(mchan->mdma);
747 struct mdc_dma *mdma = mchan->mdma;
748 struct device *dev = mdma2dev(mdma);
751 mdma->soc->disable_chan(mchan);
763 dev_dbg(mdma2dev(mchan->mdma), "IRQ on channel %d\n", mchan->chan_nr);
772 dev_warn(mdma2dev(mchan->mdma),
810 struct mdc_dma *mdma = ofdma->of_dma_data;
816 list_for_each_entry(chan, &mdma->dma_dev.channels, device_node) {
837 struct mdc_dma *mdma = mchan->mdma;
839 regmap_update_bits(mdma->periph_regs,
849 struct mdc_dma *mdma = mchan->mdma;
851 regmap_update_bits(mdma->periph_regs,
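The regmap_update_bits() references above (driver lines 837-851) belong to the SoC-specific enable/disable hooks: the channel is routed and gated through a syscon block ("periph_regs") rather than the MDC registers themselves. A sketch of the pattern; PERIPH_REG(), PERIPH_MASK and the mchan->periph field are illustrative placeholders, not names taken from the driver:

    static void example_mdc_enable_chan(struct mdc_chan *mchan)
    {
            struct mdc_dma *mdma = mchan->mdma;

            /* Select the requesting peripheral in the per-channel syscon
             * register; only the peripheral-select field is touched. */
            regmap_update_bits(mdma->periph_regs,
                               PERIPH_REG(mchan->chan_nr),
                               PERIPH_MASK,
                               mchan->periph);
    }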
871 struct mdc_dma *mdma = dev_get_drvdata(dev);
873 clk_disable_unprepare(mdma->clk);
880 struct mdc_dma *mdma = dev_get_drvdata(dev);
882 return clk_prepare_enable(mdma->clk);
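The clk references above (driver lines 871-882) pair up as runtime-PM callbacks: the "sys" clock obtained in probe is gated while the controller is idle and re-enabled before any register access. A short sketch; the callback names are assumed:

    static int example_mdc_runtime_suspend(struct device *dev)
    {
            struct mdc_dma *mdma = dev_get_drvdata(dev);

            clk_disable_unprepare(mdma->clk);       /* gate "sys" while idle */

            return 0;
    }

    static int example_mdc_runtime_resume(struct device *dev)
    {
            struct mdc_dma *mdma = dev_get_drvdata(dev);

            /* must succeed before mdc_readl()/mdc_writel() are used again */
            return clk_prepare_enable(mdma->clk);
    }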
887 struct mdc_dma *mdma;
892 mdma = devm_kzalloc(&pdev->dev, sizeof(*mdma), GFP_KERNEL);
893 if (!mdma)
895 platform_set_drvdata(pdev, mdma);
897 mdma->soc = of_device_get_match_data(&pdev->dev);
899 mdma->regs = devm_platform_ioremap_resource(pdev, 0);
900 if (IS_ERR(mdma->regs))
901 return PTR_ERR(mdma->regs);
903 mdma->periph_regs = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
905 if (IS_ERR(mdma->periph_regs))
906 return PTR_ERR(mdma->periph_regs);
908 mdma->clk = devm_clk_get(&pdev->dev, "sys");
909 if (IS_ERR(mdma->clk))
910 return PTR_ERR(mdma->clk);
912 dma_cap_zero(mdma->dma_dev.cap_mask);
913 dma_cap_set(DMA_SLAVE, mdma->dma_dev.cap_mask);
914 dma_cap_set(DMA_PRIVATE, mdma->dma_dev.cap_mask);
915 dma_cap_set(DMA_CYCLIC, mdma->dma_dev.cap_mask);
916 dma_cap_set(DMA_MEMCPY, mdma->dma_dev.cap_mask);
918 val = mdc_readl(mdma, MDC_GLOBAL_CONFIG_A);
919 mdma->nr_channels = (val >> MDC_GLOBAL_CONFIG_A_DMA_CONTEXTS_SHIFT) &
921 mdma->nr_threads =
924 mdma->bus_width =
936 mdma->max_xfer_size = MDC_TRANSFER_SIZE_MASK + 1 - mdma->bus_width;
939 &mdma->nr_channels);
942 &mdma->max_burst_mult);
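The probe references above (driver lines 918-942) first decode the controller's capabilities from MDC_GLOBAL_CONFIG_A, then let device-tree properties adjust them. A sketch of that two-step pattern; the mask macro and the property names are assumptions following the naming visible in the listing:

    u32 val;

    /* Limits reported by the hardware itself... */
    val = mdc_readl(mdma, MDC_GLOBAL_CONFIG_A);
    mdma->nr_channels = (val >> MDC_GLOBAL_CONFIG_A_DMA_CONTEXTS_SHIFT) &
                        MDC_GLOBAL_CONFIG_A_DMA_CONTEXTS_MASK;

    /* ...optionally narrowed or tuned by the device tree. */
    of_property_read_u32(pdev->dev.of_node, "dma-channels",
                         &mdma->nr_channels);
    of_property_read_u32(pdev->dev.of_node, "img,max-burst-multiplier",
                         &mdma->max_burst_mult);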
946 mdma->dma_dev.dev = &pdev->dev;
947 mdma->dma_dev.device_prep_slave_sg = mdc_prep_slave_sg;
948 mdma->dma_dev.device_prep_dma_cyclic = mdc_prep_dma_cyclic;
949 mdma->dma_dev.device_prep_dma_memcpy = mdc_prep_dma_memcpy;
950 mdma->dma_dev.device_alloc_chan_resources = mdc_alloc_chan_resources;
951 mdma->dma_dev.device_free_chan_resources = mdc_free_chan_resources;
952 mdma->dma_dev.device_tx_status = mdc_tx_status;
953 mdma->dma_dev.device_issue_pending = mdc_issue_pending;
954 mdma->dma_dev.device_terminate_all = mdc_terminate_all;
955 mdma->dma_dev.device_synchronize = mdc_synchronize;
956 mdma->dma_dev.device_config = mdc_slave_config;
958 mdma->dma_dev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
959 mdma->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
960 for (i = 1; i <= mdma->bus_width; i <<= 1) {
961 mdma->dma_dev.src_addr_widths |= BIT(i);
962 mdma->dma_dev.dst_addr_widths |= BIT(i);
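The loop above (driver lines 960-962) turns the detected bus width into the dmaengine address-width capability bitmaps. Since the dma_slave_buswidth enum values equal their byte counts, setting BIT(i) for every power of two up to bus_width yields exactly the supported widths; for a 4-byte bus the loop is equivalent to:

    /* Equivalent expansion for mdma->bus_width == 4. */
    mdma->dma_dev.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
                                    BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
                                    BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
    mdma->dma_dev.dst_addr_widths = mdma->dma_dev.src_addr_widths;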
965 INIT_LIST_HEAD(&mdma->dma_dev.channels);
966 for (i = 0; i < mdma->nr_channels; i++) {
967 struct mdc_chan *mchan = &mdma->channels[i];
969 mchan->mdma = mdma;
982 vchan_init(&mchan->vc, &mdma->dma_dev);
985 mdma->desc_pool = dmam_pool_create(dev_name(&pdev->dev), &pdev->dev,
988 if (!mdma->desc_pool)
998 ret = dma_async_device_register(&mdma->dma_dev);
1002 ret = of_dma_controller_register(pdev->dev.of_node, mdc_of_xlate, mdma);
1007 mdma->nr_channels, mdma->nr_threads);
1012 dma_async_device_unregister(&mdma->dma_dev);
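The tail of probe above (driver lines 998-1012) registers with the dmaengine core first and with the OF DMA helper second, so the error path only has to unregister the dma_device when of_dma_controller_register() fails. A sketch of that ordering; the goto labels are assumptions:

    ret = dma_async_device_register(&mdma->dma_dev);
    if (ret)
            goto suspend;                   /* undo clock/runtime-PM setup */

    ret = of_dma_controller_register(pdev->dev.of_node, mdc_of_xlate, mdma);
    if (ret)
            goto unregister;

    dev_info(&pdev->dev, "MDC with %u channels and %u threads\n",
             mdma->nr_channels, mdma->nr_threads);

    return 0;

    unregister:
            dma_async_device_unregister(&mdma->dma_dev);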
1022 struct mdc_dma *mdma = platform_get_drvdata(pdev);
1026 dma_async_device_unregister(&mdma->dma_dev);
1028 list_for_each_entry_safe(mchan, next, &mdma->dma_dev.channels,
1047 struct mdc_dma *mdma = dev_get_drvdata(dev);
1051 for (i = 0; i < mdma->nr_channels; i++) {
1052 struct mdc_chan *mchan = &mdma->channels[i];