Lines Matching defs:tdmac
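(Identifier search results: definitions and uses of tdmac in the Linux kernel's MMP two-channel DMA driver, drivers/dma/mmp_tdma.c. The number on each line is its position in that file; the indented C sketches after each group of matches are illustrative reconstructions, not lines from the driver.)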
134 struct mmp_tdma_chan *tdmac[TDMA_CHANNEL_NUM];
143 static void mmp_tdma_chan_set_desc(struct mmp_tdma_chan *tdmac, dma_addr_t phys)
145 writel(phys, tdmac->reg_base + TDNDPR);
146 writel(readl(tdmac->reg_base + TDCR) | TDCR_FETCHND,
147 tdmac->reg_base + TDCR);
150 static void mmp_tdma_enable_irq(struct mmp_tdma_chan *tdmac, bool enable)
153 writel(TDIMR_COMP, tdmac->reg_base + TDIMR);
155 writel(0, tdmac->reg_base + TDIMR);
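mmp_tdma_chan_set_desc() (lines 143-147) writes the physical address of the first descriptor into the next-descriptor-pointer register (TDNDPR), then does a read-modify-write of the control register (TDCR) to set the fetch bit so the engine loads that descriptor. mmp_tdma_enable_irq() (lines 150-155) unmasks the completion interrupt by writing TDIMR_COMP to the mask register, or masks it by writing 0. A minimal sketch of both MMIO patterns; the register offsets and bit positions below are illustrative assumptions, the real values are macros in mmp_tdma.c:

    #include <stdint.h>

    #define TDNDPR        0x30u       /* next-descriptor pointer (assumed offset) */
    #define TDCR          0x40u       /* channel control (assumed offset) */
    #define TDIMR         0x80u       /* interrupt mask (assumed offset) */
    #define TDCR_FETCHND  (1u << 13)  /* "fetch next descriptor" (assumed bit) */
    #define TDIMR_COMP    (1u << 0)   /* completion interrupt (assumed bit) */

    static inline uint32_t rd(volatile uint8_t *base, uint32_t off)
    {
            return *(volatile uint32_t *)(base + off);
    }

    static inline void wr(volatile uint8_t *base, uint32_t off, uint32_t v)
    {
            *(volatile uint32_t *)(base + off) = v;
    }

    static void chan_set_desc(volatile uint8_t *reg_base, uint32_t desc_phys)
    {
            wr(reg_base, TDNDPR, desc_phys);                        /* ring head */
            wr(reg_base, TDCR, rd(reg_base, TDCR) | TDCR_FETCHND);  /* start fetch */
    }

    static void enable_irq(volatile uint8_t *reg_base, int enable)
    {
            wr(reg_base, TDIMR, enable ? TDIMR_COMP : 0);
    }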
158 static void mmp_tdma_enable_chan(struct mmp_tdma_chan *tdmac)
161 writel(readl(tdmac->reg_base + TDCR) | TDCR_CHANEN,
162 tdmac->reg_base + TDCR);
163 tdmac->status = DMA_IN_PROGRESS;
168 struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
171 tdcr = readl(tdmac->reg_base + TDCR);
174 writel(tdcr, tdmac->reg_base + TDCR);
176 tdmac->status = DMA_COMPLETE;
183 struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
185 writel(readl(tdmac->reg_base + TDCR) | TDCR_CHANEN,
186 tdmac->reg_base + TDCR);
187 tdmac->status = DMA_IN_PROGRESS;
194 struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
196 writel(readl(tdmac->reg_base + TDCR) & ~TDCR_CHANEN,
197 tdmac->reg_base + TDCR);
198 tdmac->status = DMA_PAUSED;
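Enable (lines 158-163), disable (lines 168-176), resume (lines 183-187), and pause (lines 194-198) all toggle the same TDCR_CHANEN bit; what distinguishes them is the dmaengine status the channel is left in: DMA_IN_PROGRESS, DMA_COMPLETE, or DMA_PAUSED. A compact sketch of the pause/resume pair, reusing rd()/wr() from above and an assumed CHANEN bit position:

    #define TDCR_CHANEN (1u << 12)  /* channel enable (assumed bit position) */

    enum chan_status { IN_PROGRESS, COMPLETE, PAUSED }; /* mirrors dmaengine's states */

    struct chan {
            volatile uint8_t *reg_base;
            enum chan_status status;
    };

    static void resume_chan(struct chan *c)
    {
            wr(c->reg_base, TDCR, rd(c->reg_base, TDCR) | TDCR_CHANEN);
            c->status = IN_PROGRESS;
    }

    static void pause_chan(struct chan *c)
    {
            wr(c->reg_base, TDCR, rd(c->reg_base, TDCR) & ~TDCR_CHANEN);
            c->status = PAUSED;  /* disable (lines 171-176) does the same kind of
                                  * write but marks the channel COMPLETE */
    }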
205 struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
210 if (tdmac->dir == DMA_MEM_TO_DEV)
212 else if (tdmac->dir == DMA_DEV_TO_MEM)
215 if (tdmac->type == MMP_AUD_TDMA) {
218 switch (tdmac->burst_sz) {
238 dev_err(tdmac->dev, "unknown burst size.\n");
242 switch (tdmac->buswidth) {
253 dev_err(tdmac->dev, "unknown bus size.\n");
256 } else if (tdmac->type == PXA910_SQU) {
259 switch (tdmac->burst_sz) {
279 dev_err(tdmac->dev, "unknown burst size.\n");
284 writel(tdcr, tdmac->reg_base + TDCR);
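mmp_tdma_config_chan() (lines 205-284) assembles a TDCR value from the cached direction, burst size, and bus width, with separate encodings for the audio TDMA (MMP_AUD_TDMA) and PXA910 SQU variants, and rejects unsupported values through the dev_err() calls above. A sketch of that validate-and-encode pattern; the field encodings here are made up, standing in for the driver's real TDCR_* burst macros:

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical encodings: one control-register field value per burst size. */
    static int burst_field(uint32_t maxburst, uint32_t *field)
    {
            switch (maxburst) {
            case 4:  *field = 0x0u << 6; return 0;
            case 8:  *field = 0x1u << 6; return 0;
            case 16: *field = 0x2u << 6; return 0;
            case 32: *field = 0x3u << 6; return 0;
            default:
                    fprintf(stderr, "unknown burst size.\n"); /* as at lines 238/279 */
                    return -1;
            }
    }

    /* Usage: OR the returned field into the TDCR value before the final write
     * (line 284), alongside the direction and bus-width fields. */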
288 static int mmp_tdma_clear_chan_irq(struct mmp_tdma_chan *tdmac)
290 u32 reg = readl(tdmac->reg_base + TDISR);
295 writel(reg, tdmac->reg_base + TDISR);
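mmp_tdma_clear_chan_irq() (lines 288-295) reads the interrupt status register and writes a value back to acknowledge the interrupt, returning 0 only when this channel actually fired. The matches omit lines 291-294, so the sketch below (reusing rd()/wr() from above) assumes the completion flag is tested and cleared between the read and the write-back:

    #define TDISR       0xa0u      /* interrupt status (assumed offset) */
    #define TDISR_COMP  (1u << 0)  /* completion flag (assumed bit) */

    static int clear_chan_irq(volatile uint8_t *reg_base)
    {
            uint32_t reg = rd(reg_base, TDISR);

            if (reg & TDISR_COMP) {
                    reg &= ~TDISR_COMP;        /* assumed acknowledge scheme */
                    wr(reg_base, TDISR, reg);
                    return 0;                  /* this channel raised the IRQ */
            }
            return -1;                         /* not ours */
    }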
302 static size_t mmp_tdma_get_pos(struct mmp_tdma_chan *tdmac)
306 if (tdmac->idx == 0) {
307 reg = __raw_readl(tdmac->reg_base + TDSAR);
308 reg -= tdmac->desc_arr[0].src_addr;
309 } else if (tdmac->idx == 1) {
310 reg = __raw_readl(tdmac->reg_base + TDDAR);
311 reg -= tdmac->desc_arr[0].dst_addr;
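mmp_tdma_get_pos() (lines 302-311) computes the current position in the cyclic buffer by reading the channel's live address register, TDSAR (source) for channel 0 or TDDAR (destination) for channel 1, and subtracting the base address recorded in the first descriptor. A sketch of the arithmetic, reusing rd() from above, with an illustrative descriptor layout:

    #define TDSAR 0x10u  /* current source address (assumed offset) */
    #define TDDAR 0x20u  /* current destination address (assumed offset) */

    struct desc {        /* illustrative layout of struct mmp_tdma_desc */
            uint32_t byte_cnt;
            uint32_t src_addr;
            uint32_t dst_addr;
            uint32_t nxt_desc;
    };

    /* Bytes consumed since the start of the ring, or -1 for an unknown channel. */
    static long get_pos(volatile uint8_t *reg_base, int idx, const struct desc *ring)
    {
            if (idx == 0)
                    return (long)(rd(reg_base, TDSAR) - ring[0].src_addr);
            if (idx == 1)
                    return (long)(rd(reg_base, TDDAR) - ring[0].dst_addr);
            return -1;
    }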
320 struct mmp_tdma_chan *tdmac = dev_id;
322 if (mmp_tdma_clear_chan_irq(tdmac) == 0) {
323 tasklet_schedule(&tdmac->tasklet);
336 struct mmp_tdma_chan *tdmac = tdev->tdmac[i];
338 ret = mmp_tdma_chan_handler(irq, tdmac);
351 struct mmp_tdma_chan *tdmac = from_tasklet(tdmac, t, tasklet);
353 dmaengine_desc_get_callback_invoke(&tdmac->desc, NULL);
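Interrupt handling is split three ways: mmp_tdma_chan_handler() (lines 320-323) acknowledges one channel and schedules its tasklet, the device-level handler (lines 336-338) fans the shared interrupt out over every channel in tdev->tdmac[], and dma_do_tasklet() (lines 351-353) later invokes the client's completion callback outside hard-IRQ context. A sketch of the fan-out, reusing struct chan and clear_chan_irq() from the sketches above:

    /* Try every channel; report IRQ_HANDLED (1) if any of them claimed it. */
    static int dev_int_handler(struct chan **chans, int nchans)
    {
            int i, handled = 0;

            for (i = 0; i < nchans; i++) {
                    if (clear_chan_irq(chans[i]->reg_base) == 0) {
                            /* the driver schedules chans[i]'s tasklet here */
                            handled++;
                    }
            }
            return handled ? 1 : 0;  /* IRQ_HANDLED : IRQ_NONE */
    }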
356 static void mmp_tdma_free_descriptor(struct mmp_tdma_chan *tdmac)
359 int size = tdmac->desc_num * sizeof(struct mmp_tdma_desc);
361 gpool = tdmac->pool;
362 if (gpool && tdmac->desc_arr)
363 gen_pool_free(gpool, (unsigned long)tdmac->desc_arr,
365 tdmac->desc_arr = NULL;
366 if (tdmac->status == DMA_ERROR)
367 tdmac->status = DMA_COMPLETE;
374 struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(tx->chan);
376 mmp_tdma_chan_set_desc(tdmac, tdmac->desc_arr_phys);
383 struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
386 dma_async_tx_descriptor_init(&tdmac->desc, chan);
387 tdmac->desc.tx_submit = mmp_tdma_tx_submit;
389 if (tdmac->irq) {
390 ret = devm_request_irq(tdmac->dev, tdmac->irq,
391 mmp_tdma_chan_handler, 0, "tdma", tdmac);
400 struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
402 if (tdmac->irq)
403 devm_free_irq(tdmac->dev, tdmac->irq, tdmac);
404 mmp_tdma_free_descriptor(tdmac);
408 static struct mmp_tdma_desc *mmp_tdma_alloc_descriptor(struct mmp_tdma_chan *tdmac)
411 int size = tdmac->desc_num * sizeof(struct mmp_tdma_desc);
413 gpool = tdmac->pool;
417 tdmac->desc_arr = gen_pool_dma_alloc(gpool, size, &tdmac->desc_arr_phys);
419 return tdmac->desc_arr;
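Descriptor memory comes from a gen_pool (SRAM on these SoCs): mmp_tdma_alloc_descriptor() (lines 408-419) carves out desc_num descriptors with gen_pool_dma_alloc(), which hands back both the CPU pointer (desc_arr) and the DMA address (desc_arr_phys) the hardware will chase, while mmp_tdma_free_descriptor() (lines 356-367) returns the region with gen_pool_free(), which needs the same size that was allocated. The async_tx hook tx_submit (lines 374-376) then simply passes desc_arr_phys to mmp_tdma_chan_set_desc(), and alloc/free_chan_resources (lines 383-404) wire up and tear down the per-channel IRQ around that lifecycle. A sketch of the alloc/free pairing, reusing struct desc from above with a stand-in allocator:

    #include <stdint.h>
    #include <stdlib.h>

    struct chan_descs {
            struct desc *desc_arr;    /* CPU view of the ring */
            uint32_t desc_arr_phys;   /* device view (faked below) */
            int desc_num;
    };

    /* Stand-in for gen_pool_dma_alloc(): one call yields both views. */
    static int alloc_descs(struct chan_descs *c, int num)
    {
            size_t size = num * sizeof(struct desc);

            c->desc_arr = malloc(size);
            if (!c->desc_arr)
                    return -1;
            c->desc_arr_phys = (uint32_t)(uintptr_t)c->desc_arr; /* fake DMA addr */
            c->desc_num = num;
            return 0;
    }

    static void free_descs(struct chan_descs *c)
    {
            /* gen_pool_free() needs desc_num * sizeof(struct desc); free() doesn't */
            free(c->desc_arr);
            c->desc_arr = NULL;
    }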
427 struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
433 dev_err(tdmac->dev, "unsupported transfer direction\n");
437 if (tdmac->status != DMA_COMPLETE) {
438 dev_err(tdmac->dev, "controller busy");
443 dev_err(tdmac->dev,
449 tdmac->status = DMA_IN_PROGRESS;
450 tdmac->desc_num = num_periods;
451 desc = mmp_tdma_alloc_descriptor(tdmac);
455 if (mmp_tdma_config_write(chan, direction, &tdmac->slave_config))
459 desc = &tdmac->desc_arr[i];
462 desc->nxt_desc = tdmac->desc_arr_phys;
464 desc->nxt_desc = tdmac->desc_arr_phys +
469 desc->dst_addr = tdmac->dev_addr;
471 desc->src_addr = tdmac->dev_addr;
482 mmp_tdma_enable_irq(tdmac, true);
484 tdmac->buf_len = buf_len;
485 tdmac->period_len = period_len;
486 tdmac->pos = 0;
488 return &tdmac->desc;
491 tdmac->status = DMA_ERROR;
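mmp_tdma_prep_dma_cyclic() (lines 427-491) validates the direction and channel state, computes the number of periods (buf_len / period_len in the driver), allocates that many descriptors, and links them into a ring: each descriptor's nxt_desc holds the DMA address of the next descriptor and the last one wraps back to desc_arr_phys, so the transfer loops until terminated. For mem-to-dev the fixed device address is the destination (line 469); for dev-to-mem it is the source (line 471). On any failure the channel is marked DMA_ERROR (line 491). A sketch of the ring construction, reusing struct desc from above and assuming contiguous descriptors:

    /* Build a cyclic ring over [buf_phys, buf_phys + num_periods * period_len). */
    static void build_ring(struct desc *ring, uint32_t ring_phys, int num_periods,
                           uint32_t buf_phys, uint32_t period_len,
                           uint32_t dev_addr, int mem_to_dev)
    {
            int i;

            for (i = 0; i < num_periods; i++) {
                    struct desc *d = &ring[i];

                    /* last entry points back at the first: that makes it cyclic */
                    d->nxt_desc = (i == num_periods - 1) ?
                                  ring_phys :
                                  ring_phys + (uint32_t)((i + 1) * sizeof(*d));

                    if (mem_to_dev) {
                            d->src_addr = buf_phys + i * period_len;
                            d->dst_addr = dev_addr;  /* FIFO address, never moves */
                    } else {
                            d->src_addr = dev_addr;
                            d->dst_addr = buf_phys + i * period_len;
                    }
                    d->byte_cnt = period_len;
            }
    }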
497 struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
501 mmp_tdma_enable_irq(tdmac, false);
509 struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
511 memcpy(&tdmac->slave_config, dmaengine_cfg, sizeof(*dmaengine_cfg));
520 struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
523 tdmac->dev_addr = dmaengine_cfg->src_addr;
524 tdmac->burst_sz = dmaengine_cfg->src_maxburst;
525 tdmac->buswidth = dmaengine_cfg->src_addr_width;
527 tdmac->dev_addr = dmaengine_cfg->dst_addr;
528 tdmac->burst_sz = dmaengine_cfg->dst_maxburst;
529 tdmac->buswidth = dmaengine_cfg->dst_addr_width;
531 tdmac->dir = dir;
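Configuration is split in two: mmp_tdma_config() (lines 509-511) only caches the dmaengine_slave_config, mmp_tdma_config_write() (lines 520-531) later selects the source-side or destination-side triple (address, maxburst, addr_width) according to the transfer direction before the TDCR programming above, and mmp_tdma_terminate_all() (lines 497-501) masks the completion interrupt when the client tears the channel down. Sketch of the direction-based selection:

    struct slave_cfg {         /* the fields of dma_slave_config used here */
            uint32_t src_addr, dst_addr;
            uint32_t src_maxburst, dst_maxburst;
            uint32_t src_addr_width, dst_addr_width;
    };

    enum dir { DEV_TO_MEM, MEM_TO_DEV };  /* stand-in for dma_transfer_direction */

    struct chan_cfg { uint32_t dev_addr, burst_sz, buswidth; enum dir dir; };

    static void config_write(struct chan_cfg *c, enum dir dir,
                             const struct slave_cfg *cfg)
    {
            if (dir == DEV_TO_MEM) {          /* peripheral is the source */
                    c->dev_addr = cfg->src_addr;
                    c->burst_sz = cfg->src_maxburst;
                    c->buswidth = cfg->src_addr_width;
            } else {                          /* peripheral is the destination */
                    c->dev_addr = cfg->dst_addr;
                    c->burst_sz = cfg->dst_maxburst;
                    c->buswidth = cfg->dst_addr_width;
            }
            c->dir = dir;
    }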
539 struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
541 tdmac->pos = mmp_tdma_get_pos(tdmac);
543 tdmac->buf_len - tdmac->pos);
545 return tdmac->status;
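mmp_tdma_tx_status() (lines 539-545) refreshes tdmac->pos from the hardware via mmp_tdma_get_pos() and reports the residue as buf_len - pos, which is how clients such as the ALSA layer track the hardware pointer inside the cyclic buffer. A worked example of the arithmetic:

    /* e.g. a 16 KiB ring in which the source register has advanced 6 KiB:
     * pos = 0x1800, residue = 0x4000 - 0x1800 = 0x2800 bytes still pending. */
    static size_t residue(size_t buf_len, size_t pos)
    {
            return buf_len - pos;
    }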
550 struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
552 mmp_tdma_enable_chan(tdmac);
567 struct mmp_tdma_chan *tdmac;
575 tdmac = devm_kzalloc(tdev->dev, sizeof(*tdmac), GFP_KERNEL);
576 if (!tdmac)
580 tdmac->irq = irq;
581 tdmac->dev = tdev->dev;
582 tdmac->chan.device = &tdev->device;
583 tdmac->idx = idx;
584 tdmac->type = type;
585 tdmac->reg_base = tdev->base + idx * 4;
586 tdmac->pool = pool;
587 tdmac->status = DMA_COMPLETE;
588 tdev->tdmac[tdmac->idx] = tdmac;
589 tasklet_setup(&tdmac->tasklet, dma_do_tasklet);
592 list_add_tail(&tdmac->chan.device_node,
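Finally, mmp_tdma_issue_pending() (lines 550-552) maps straight onto mmp_tdma_enable_chan(), and mmp_tdma_chan_init() (lines 567-592) allocates each channel with devm_kzalloc(), records its IRQ, type, and gen_pool, derives its register window as tdev->base + idx * 4 (consecutive channels' copies of each register sit 4 bytes apart, rather than in separate banks), marks it idle, and adds it to the dmaengine channel list. Sketch of the per-channel setup, reusing enum chan_status from above:

    struct tdma_chan {
            volatile uint8_t *reg_base;
            int idx;
            int irq;
            enum chan_status status;
    };

    static void chan_init(struct tdma_chan *c, volatile uint8_t *dev_base,
                          int idx, int irq)
    {
            c->idx = idx;
            c->irq = irq;
            c->reg_base = dev_base + idx * 4;  /* interleaved channel registers */
            c->status = COMPLETE;              /* idle until a transfer is prepped */
    }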