Lines matching defs:pd_chan in drivers/dma/pch_dma.c (each match is prefixed with its line number in the file)
164 struct pch_dma_desc *pdc_first_active(struct pch_dma_chan *pd_chan)
166 return list_first_entry(&pd_chan->active_list,
171 struct pch_dma_desc *pdc_first_queued(struct pch_dma_chan *pd_chan)
173 return list_first_entry(&pd_chan->queue,
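For context, the two list helpers these matches come from read as the sketch below; the truncated list_first_entry() arguments are completed from the desc_node field seen elsewhere in this listing, so treat the unnumbered lines as reconstruction rather than verbatim source:

static struct pch_dma_desc *pdc_first_active(struct pch_dma_chan *pd_chan)
{
	/* oldest submitted descriptor, i.e. the one the hardware works on */
	return list_first_entry(&pd_chan->active_list,
				struct pch_dma_desc, desc_node);
}

static struct pch_dma_desc *pdc_first_queued(struct pch_dma_chan *pd_chan)
{
	/* next descriptor waiting behind the active chain */
	return list_first_entry(&pd_chan->queue,
				struct pch_dma_desc, desc_node);
}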
203 struct pch_dma_chan *pd_chan = to_pd_chan(chan);
217 if (pd_chan->dir == DMA_MEM_TO_DEV)
235 if (pd_chan->dir == DMA_MEM_TO_DEV)
283 static u32 pdc_get_status0(struct pch_dma_chan *pd_chan)
285 struct pch_dma *pd = to_pd(pd_chan->chan.device);
290 DMA_STATUS_BITS_PER_CH * pd_chan->chan.chan_id));
293 static u32 pdc_get_status2(struct pch_dma_chan *pd_chan)
295 struct pch_dma *pd = to_pd(pd_chan->chan.device);
300 DMA_STATUS_BITS_PER_CH * (pd_chan->chan.chan_id - 8)));
303 static bool pdc_is_idle(struct pch_dma_chan *pd_chan)
307 if (pd_chan->chan.chan_id < 8)
308 sts = pdc_get_status0(pd_chan);
310 sts = pdc_get_status2(pd_chan);
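The three status helpers fit together as sketched below: channels 0-7 are reported in the STS0 register and channels 8 and up in STS2, which is why pdc_get_status2() subtracts 8 from the channel id. The dma_readl() lines and the DMA_STATUS_IDLE comparison are assumptions filled in around the matched lines:

static u32 pdc_get_status0(struct pch_dma_chan *pd_chan)
{
	struct pch_dma *pd = to_pd(pd_chan->chan.device);
	u32 val;

	val = dma_readl(pd, STS0);	/* assumed register read */
	return DMA_STATUS_MASK_BITS & (val >> (DMA_STATUS_SHIFT_BITS +
			DMA_STATUS_BITS_PER_CH * pd_chan->chan.chan_id));
}

static u32 pdc_get_status2(struct pch_dma_chan *pd_chan)
{
	struct pch_dma *pd = to_pd(pd_chan->chan.device);
	u32 val;

	val = dma_readl(pd, STS2);	/* assumed register read */
	return DMA_STATUS_MASK_BITS & (val >> (DMA_STATUS_SHIFT_BITS +
			DMA_STATUS_BITS_PER_CH * (pd_chan->chan.chan_id - 8)));
}

static bool pdc_is_idle(struct pch_dma_chan *pd_chan)
{
	u32 sts;

	if (pd_chan->chan.chan_id < 8)
		sts = pdc_get_status0(pd_chan);
	else
		sts = pdc_get_status2(pd_chan);

	return sts == DMA_STATUS_IDLE;	/* assumed idle test */
}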
319 static void pdc_dostart(struct pch_dma_chan *pd_chan, struct pch_dma_desc* desc)
321 if (!pdc_is_idle(pd_chan)) {
322 dev_err(chan2dev(&pd_chan->chan),
327 dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> dev_addr: %x\n",
328 pd_chan->chan.chan_id, desc->regs.dev_addr);
329 dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> mem_addr: %x\n",
330 pd_chan->chan.chan_id, desc->regs.mem_addr);
331 dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> size: %x\n",
332 pd_chan->chan.chan_id, desc->regs.size);
333 dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> next: %x\n",
334 pd_chan->chan.chan_id, desc->regs.next);
337 channel_writel(pd_chan, DEV_ADDR, desc->regs.dev_addr);
338 channel_writel(pd_chan, MEM_ADDR, desc->regs.mem_addr);
339 channel_writel(pd_chan, SIZE, desc->regs.size);
340 channel_writel(pd_chan, NEXT, desc->regs.next);
341 pdc_set_mode(&pd_chan->chan, DMA_CTL0_ONESHOT);
343 channel_writel(pd_chan, NEXT, desc->txd.phys);
344 pdc_set_mode(&pd_chan->chan, DMA_CTL0_SG);
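Read together, these matches show pdc_dostart() programming either a one-shot transfer (channel registers written directly from desc->regs) or a scatter/gather chain (only NEXT is written, pointing at the descriptor's DMA address). A sketch; the list_empty(&desc->tx_list) test and the error string are assumptions around the matched lines:

static void pdc_dostart(struct pch_dma_chan *pd_chan, struct pch_dma_desc *desc)
{
	if (!pdc_is_idle(pd_chan)) {
		dev_err(chan2dev(&pd_chan->chan),
			"BUG: Attempt to start non-idle channel\n"); /* assumed string */
		return;
	}

	/* four dev_dbg() lines dumping dev_addr/mem_addr/size/next (327-334) */

	if (list_empty(&desc->tx_list)) {
		/* one-shot: program the channel registers directly */
		channel_writel(pd_chan, DEV_ADDR, desc->regs.dev_addr);
		channel_writel(pd_chan, MEM_ADDR, desc->regs.mem_addr);
		channel_writel(pd_chan, SIZE, desc->regs.size);
		channel_writel(pd_chan, NEXT, desc->regs.next);
		pdc_set_mode(&pd_chan->chan, DMA_CTL0_ONESHOT);
	} else {
		/* scatter/gather: hardware follows the descriptor chain */
		channel_writel(pd_chan, NEXT, desc->txd.phys);
		pdc_set_mode(&pd_chan->chan, DMA_CTL0_SG);
	}
}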
348 static void pdc_chain_complete(struct pch_dma_chan *pd_chan,
355 list_splice_init(&desc->tx_list, &pd_chan->free_list);
356 list_move(&desc->desc_node, &pd_chan->free_list);
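pdc_chain_complete() recycles a finished chain back onto the free list and then invokes the client's completion callback. The callback handling around the two matched list operations is an assumption, using the generic dmaengine_desc_callback helpers of this driver's era:

static void pdc_chain_complete(struct pch_dma_chan *pd_chan,
			       struct pch_dma_desc *desc)
{
	struct dma_async_tx_descriptor *txd = &desc->txd;
	struct dmaengine_desc_callback cb;	/* assumed callback plumbing */

	dmaengine_desc_get_callback(txd, &cb);

	/* return the whole chain, then the head, to the free list */
	list_splice_init(&desc->tx_list, &pd_chan->free_list);
	list_move(&desc->desc_node, &pd_chan->free_list);

	dmaengine_desc_callback_invoke(&cb, NULL);
}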
361 static void pdc_complete_all(struct pch_dma_chan *pd_chan)
366 BUG_ON(!pdc_is_idle(pd_chan));
368 if (!list_empty(&pd_chan->queue))
369 pdc_dostart(pd_chan, pdc_first_queued(pd_chan));
371 list_splice_init(&pd_chan->active_list, &list);
372 list_splice_init(&pd_chan->queue, &pd_chan->active_list);
375 pdc_chain_complete(pd_chan, desc);
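pdc_complete_all() first kicks off whatever is queued, then swaps the lists so completion callbacks run against a private list, detached from channel state. The local LIST_HEAD, declarations, and final loop are filled-in assumptions:

static void pdc_complete_all(struct pch_dma_chan *pd_chan)
{
	struct pch_dma_desc *desc, *_d;
	LIST_HEAD(list);

	BUG_ON(!pdc_is_idle(pd_chan));

	/* restart the hardware on the next queued descriptor, if any */
	if (!list_empty(&pd_chan->queue))
		pdc_dostart(pd_chan, pdc_first_queued(pd_chan));

	/* detach finished work, promote the queue to active */
	list_splice_init(&pd_chan->active_list, &list);
	list_splice_init(&pd_chan->queue, &pd_chan->active_list);

	list_for_each_entry_safe(desc, _d, &list, desc_node)
		pdc_chain_complete(pd_chan, desc);
}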
378 static void pdc_handle_error(struct pch_dma_chan *pd_chan)
382 bad_desc = pdc_first_active(pd_chan);
385 list_splice_init(&pd_chan->queue, pd_chan->active_list.prev);
387 if (!list_empty(&pd_chan->active_list))
388 pdc_dostart(pd_chan, pdc_first_active(pd_chan));
390 dev_crit(chan2dev(&pd_chan->chan), "Bad descriptor submitted\n");
391 dev_crit(chan2dev(&pd_chan->chan), "descriptor cookie: %d\n",
394 pdc_chain_complete(pd_chan, bad_desc);
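On an error interrupt, the first active descriptor is taken to be the bad one: it is unlinked, the queue is spliced in behind the remaining active work (note the active_list.prev insertion point at line 385), the channel is restarted if anything is left, and the bad descriptor is still completed so its callback runs. A sketch; the list_del() and the cookie argument are assumed:

static void pdc_handle_error(struct pch_dma_chan *pd_chan)
{
	struct pch_dma_desc *bad_desc;

	bad_desc = pdc_first_active(pd_chan);
	list_del(&bad_desc->desc_node);		/* assumed unlink */

	/* requeue pending work behind whatever is still active */
	list_splice_init(&pd_chan->queue, pd_chan->active_list.prev);

	if (!list_empty(&pd_chan->active_list))
		pdc_dostart(pd_chan, pdc_first_active(pd_chan));

	dev_crit(chan2dev(&pd_chan->chan), "Bad descriptor submitted\n");
	dev_crit(chan2dev(&pd_chan->chan), "descriptor cookie: %d\n",
		 bad_desc->txd.cookie);	/* assumed argument */

	pdc_chain_complete(pd_chan, bad_desc);
}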
397 static void pdc_advance_work(struct pch_dma_chan *pd_chan)
399 if (list_empty(&pd_chan->active_list) ||
400 list_is_singular(&pd_chan->active_list)) {
401 pdc_complete_all(pd_chan);
403 pdc_chain_complete(pd_chan, pdc_first_active(pd_chan));
404 pdc_dostart(pd_chan, pdc_first_active(pd_chan));
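pdc_advance_work() is the small scheduler shared by the tasklet and the issue_pending path: with zero or one active descriptor it completes everything (pdc_complete_all() restarts from the queue itself); otherwise it retires the finished head and starts the next. Only the else branch below is assumed:

static void pdc_advance_work(struct pch_dma_chan *pd_chan)
{
	if (list_empty(&pd_chan->active_list) ||
	    list_is_singular(&pd_chan->active_list)) {
		pdc_complete_all(pd_chan);
	} else {
		pdc_chain_complete(pd_chan, pdc_first_active(pd_chan));
		pdc_dostart(pd_chan, pdc_first_active(pd_chan));
	}
}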
411 struct pch_dma_chan *pd_chan = to_pd_chan(txd->chan);
413 spin_lock(&pd_chan->lock);
415 if (list_empty(&pd_chan->active_list)) {
416 list_add_tail(&desc->desc_node, &pd_chan->active_list);
417 pdc_dostart(pd_chan, desc);
419 list_add_tail(&desc->desc_node, &pd_chan->queue);
422 spin_unlock(&pd_chan->lock);
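The submit hook either starts the descriptor immediately (empty active list, so the channel is idle) or parks it on the queue. A sketch, assuming the surrounding declarations via the driver's to_pd_desc() helper and the plain return 0 of recent kernels:

static dma_cookie_t pd_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct pch_dma_desc *desc = to_pd_desc(txd);	/* assumed helper */
	struct pch_dma_chan *pd_chan = to_pd_chan(txd->chan);

	spin_lock(&pd_chan->lock);

	if (list_empty(&pd_chan->active_list)) {
		list_add_tail(&desc->desc_node, &pd_chan->active_list);
		pdc_dostart(pd_chan, desc);
	} else {
		list_add_tail(&desc->desc_node, &pd_chan->queue);
	}

	spin_unlock(&pd_chan->lock);
	return 0;
}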
444 static struct pch_dma_desc *pdc_desc_get(struct pch_dma_chan *pd_chan)
450 spin_lock(&pd_chan->lock);
451 list_for_each_entry_safe(desc, _d, &pd_chan->free_list, desc_node) {
458 dev_dbg(chan2dev(&pd_chan->chan), "desc %p not ACKed\n", desc);
460 spin_unlock(&pd_chan->lock);
461 dev_dbg(chan2dev(&pd_chan->chan), "scanned %d descriptors\n", i);
464 ret = pdc_alloc_desc(&pd_chan->chan, GFP_ATOMIC);
466 spin_lock(&pd_chan->lock);
467 pd_chan->descs_allocated++;
468 spin_unlock(&pd_chan->lock);
470 dev_err(chan2dev(&pd_chan->chan),
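pdc_desc_get() scans the free list for an ACKed descriptor to reuse and falls back to a GFP_ATOMIC allocation when none is available. The loop body and the fallback branch structure are assumptions around the matched lines:

static struct pch_dma_desc *pdc_desc_get(struct pch_dma_chan *pd_chan)
{
	struct pch_dma_desc *desc, *_d;
	struct pch_dma_desc *ret = NULL;
	int i = 0;

	spin_lock(&pd_chan->lock);
	list_for_each_entry_safe(desc, _d, &pd_chan->free_list, desc_node) {
		i++;
		if (async_tx_test_ack(&desc->txd)) {	/* assumed reuse test */
			list_del(&desc->desc_node);
			ret = desc;
			break;
		}
		dev_dbg(chan2dev(&pd_chan->chan), "desc %p not ACKed\n", desc);
	}
	spin_unlock(&pd_chan->lock);
	dev_dbg(chan2dev(&pd_chan->chan), "scanned %d descriptors\n", i);

	if (!ret) {
		ret = pdc_alloc_desc(&pd_chan->chan, GFP_ATOMIC);
		if (ret) {
			spin_lock(&pd_chan->lock);
			pd_chan->descs_allocated++;
			spin_unlock(&pd_chan->lock);
		} else {
			dev_err(chan2dev(&pd_chan->chan),
				"failed to alloc desc\n");	/* assumed string */
		}
	}

	return ret;
}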
478 static void pdc_desc_put(struct pch_dma_chan *pd_chan,
482 spin_lock(&pd_chan->lock);
483 list_splice_init(&desc->tx_list, &pd_chan->free_list);
484 list_add(&desc->desc_node, &pd_chan->free_list);
485 spin_unlock(&pd_chan->lock);
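The matching put side returns a descriptor and its whole tx_list chain to the free list; only the NULL guard is assumed:

static void pdc_desc_put(struct pch_dma_chan *pd_chan,
			 struct pch_dma_desc *desc)
{
	if (desc) {	/* assumed guard */
		spin_lock(&pd_chan->lock);
		list_splice_init(&desc->tx_list, &pd_chan->free_list);
		list_add(&desc->desc_node, &pd_chan->free_list);
		spin_unlock(&pd_chan->lock);
	}
}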
491 struct pch_dma_chan *pd_chan = to_pd_chan(chan);
496 if (!pdc_is_idle(pd_chan)) {
501 if (!list_empty(&pd_chan->free_list))
502 return pd_chan->descs_allocated;
516 spin_lock_irq(&pd_chan->lock);
517 list_splice(&tmp_list, &pd_chan->free_list);
518 pd_chan->descs_allocated = i;
520 spin_unlock_irq(&pd_chan->lock);
524 return pd_chan->descs_allocated;
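The matches from pd_alloc_chan_resources() (lines 491-524) follow the usual dmaengine pattern: refuse a busy channel, return early if descriptors already exist, pre-allocate a batch on a temporary list, then splice it in under the lock. The allocation loop, the init_nr_desc_per_channel count, and the dma_cookie_init()/pdc_enable_irq() calls are assumptions:

static int pd_alloc_chan_resources(struct dma_chan *chan)
{
	struct pch_dma_chan *pd_chan = to_pd_chan(chan);
	struct pch_dma_desc *desc;
	LIST_HEAD(tmp_list);
	int i;

	if (!pdc_is_idle(pd_chan)) {
		dev_dbg(chan2dev(chan), "DMA channel not idle ?\n");
		return -EIO;	/* assumed error code */
	}

	if (!list_empty(&pd_chan->free_list))
		return pd_chan->descs_allocated;

	/* assumed pre-allocation loop */
	for (i = 0; i < init_nr_desc_per_channel; i++) {
		desc = pdc_alloc_desc(chan, GFP_KERNEL);
		if (!desc)
			break;
		list_add_tail(&desc->desc_node, &tmp_list);
	}

	spin_lock_irq(&pd_chan->lock);
	list_splice(&tmp_list, &pd_chan->free_list);
	pd_chan->descs_allocated = i;
	dma_cookie_init(chan);	/* assumed, would be line 519 */
	spin_unlock_irq(&pd_chan->lock);

	pdc_enable_irq(chan, 1);	/* assumed */

	return pd_chan->descs_allocated;
}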
529 struct pch_dma_chan *pd_chan = to_pd_chan(chan);
534 BUG_ON(!pdc_is_idle(pd_chan));
535 BUG_ON(!list_empty(&pd_chan->active_list));
536 BUG_ON(!list_empty(&pd_chan->queue));
538 spin_lock_irq(&pd_chan->lock);
539 list_splice_init(&pd_chan->free_list, &tmp_list);
540 pd_chan->descs_allocated = 0;
541 spin_unlock_irq(&pd_chan->lock);
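Teardown asserts the channel is quiescent, detaches the free list under the lock, then returns each descriptor to the DMA pool; the declarations, the final loop, and the dma_pool_free()/pdc_enable_irq() calls are assumed:

static void pd_free_chan_resources(struct dma_chan *chan)
{
	struct pch_dma_chan *pd_chan = to_pd_chan(chan);
	struct pch_dma *pd = to_pd(chan->device);
	struct pch_dma_desc *desc, *_d;
	LIST_HEAD(tmp_list);

	BUG_ON(!pdc_is_idle(pd_chan));
	BUG_ON(!list_empty(&pd_chan->active_list));
	BUG_ON(!list_empty(&pd_chan->queue));

	spin_lock_irq(&pd_chan->lock);
	list_splice_init(&pd_chan->free_list, &tmp_list);
	pd_chan->descs_allocated = 0;
	spin_unlock_irq(&pd_chan->lock);

	/* assumed: free each descriptor back to the device's dma_pool */
	list_for_each_entry_safe(desc, _d, &tmp_list, desc_node)
		dma_pool_free(pd->pool, desc, desc->txd.phys);

	pdc_enable_irq(chan, 0);	/* assumed */
}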
557 struct pch_dma_chan *pd_chan = to_pd_chan(chan);
559 if (pdc_is_idle(pd_chan)) {
560 spin_lock(&pd_chan->lock);
561 pdc_advance_work(pd_chan);
562 spin_unlock(&pd_chan->lock);
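issue_pending only nudges an idle channel; if the hardware is busy, the completion path advances the work itself. Only the function signature is assumed here:

static void pd_issue_pending(struct dma_chan *chan)
{
	struct pch_dma_chan *pd_chan = to_pd_chan(chan);

	if (pdc_is_idle(pd_chan)) {
		spin_lock(&pd_chan->lock);
		pdc_advance_work(pd_chan);
		spin_unlock(&pd_chan->lock);
	}
}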
571 struct pch_dma_chan *pd_chan = to_pd_chan(chan);
592 pd_chan->dir = direction;
596 desc = pdc_desc_get(pd_chan);
648 pdc_desc_put(pd_chan, first);
654 struct pch_dma_chan *pd_chan = to_pd_chan(chan);
658 spin_lock_irq(&pd_chan->lock);
660 pdc_set_mode(&pd_chan->chan, DMA_CTL0_DISABLE);
662 list_splice_init(&pd_chan->active_list, &list);
663 list_splice_init(&pd_chan->queue, &list);
666 pdc_chain_complete(pd_chan, desc);
668 spin_unlock_irq(&pd_chan->lock);
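The terminate hook disables the channel, then drains both lists through pdc_chain_complete() on a private list; the declarations, the completion loop, and the return value are assumed:

static int pd_device_terminate_all(struct dma_chan *chan)
{
	struct pch_dma_chan *pd_chan = to_pd_chan(chan);
	struct pch_dma_desc *desc, *_d;
	LIST_HEAD(list);

	spin_lock_irq(&pd_chan->lock);

	pdc_set_mode(&pd_chan->chan, DMA_CTL0_DISABLE);

	list_splice_init(&pd_chan->active_list, &list);
	list_splice_init(&pd_chan->queue, &list);

	list_for_each_entry_safe(desc, _d, &list, desc_node)
		pdc_chain_complete(pd_chan, desc);

	spin_unlock_irq(&pd_chan->lock);

	return 0;
}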
675 struct pch_dma_chan *pd_chan = from_tasklet(pd_chan, t, tasklet);
678 if (!pdc_is_idle(pd_chan)) {
679 dev_err(chan2dev(&pd_chan->chan),
684 spin_lock_irqsave(&pd_chan->lock, flags);
685 if (test_and_clear_bit(0, &pd_chan->err_status))
686 pdc_handle_error(pd_chan);
688 pdc_advance_work(pd_chan);
689 spin_unlock_irqrestore(&pd_chan->lock, flags);
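The tasklet dispatches between the error path and normal completion based on bit 0 of err_status, which the IRQ handler below sets; note the from_tasklet() pairing with the tasklet_setup() call at line 885. Sketch with an assumed else on the error test and an assumed error string:

static void pdc_tasklet(struct tasklet_struct *t)
{
	struct pch_dma_chan *pd_chan = from_tasklet(pd_chan, t, tasklet);
	unsigned long flags;

	if (!pdc_is_idle(pd_chan)) {
		dev_err(chan2dev(&pd_chan->chan),
			"BUG: handle non-idle channel in tasklet\n"); /* assumed */
		return;
	}

	spin_lock_irqsave(&pd_chan->lock, flags);
	if (test_and_clear_bit(0, &pd_chan->err_status))
		pdc_handle_error(pd_chan);
	else
		pdc_advance_work(pd_chan);
	spin_unlock_irqrestore(&pd_chan->lock, flags);
}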
695 struct pch_dma_chan *pd_chan;
708 pd_chan = &pd->channels[i];
713 set_bit(0, &pd_chan->err_status);
715 tasklet_schedule(&pd_chan->tasklet);
721 set_bit(0, &pd_chan->err_status);
723 tasklet_schedule(&pd_chan->tasklet);
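The IRQ handler walks all channels, consulting STS0 for channels 0-7 and STS2 for the rest (mirroring pdc_get_status0/2), latches the error bit, and defers real work to the per-channel tasklet. A condensed sketch; the exact DMA_STATUS_IRQ/DMA_STATUS0_ERR/DMA_STATUS2_ERR macro names and the status write-back are assumptions:

static irqreturn_t pd_irq(int irq, void *devid)
{
	struct pch_dma *pd = devid;
	struct pch_dma_chan *pd_chan;
	u32 sts0 = dma_readl(pd, STS0);
	u32 sts2 = dma_readl(pd, STS2);
	int ret0 = IRQ_NONE, ret2 = IRQ_NONE;
	int i;

	for (i = 0; i < pd->dma.chancnt; i++) {
		pd_chan = &pd->channels[i];

		if (i < 8) {
			if (sts0 & DMA_STATUS_IRQ(i)) {		/* assumed macro */
				if (sts0 & DMA_STATUS0_ERR(i))	/* assumed macro */
					set_bit(0, &pd_chan->err_status);
				tasklet_schedule(&pd_chan->tasklet);
				ret0 = IRQ_HANDLED;
			}
		} else {
			if (sts2 & DMA_STATUS_IRQ(i - 8)) {	/* assumed macro */
				if (sts2 & DMA_STATUS2_ERR(i))	/* assumed macro */
					set_bit(0, &pd_chan->err_status);
				tasklet_schedule(&pd_chan->tasklet);
				ret2 = IRQ_HANDLED;
			}
		}
	}

	/* assumed: acknowledge by writing the latched status back */
	if (ret0)
		dma_writel(pd, STS0, sts0);
	if (ret2)
		dma_writel(pd, STS2, sts2);

	return ret0 | ret2;
}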
740 struct pch_dma_chan *pd_chan;
750 pd_chan = to_pd_chan(chan);
752 pd->ch_regs[i].dev_addr = channel_readl(pd_chan, DEV_ADDR);
753 pd->ch_regs[i].mem_addr = channel_readl(pd_chan, MEM_ADDR);
754 pd->ch_regs[i].size = channel_readl(pd_chan, SIZE);
755 pd->ch_regs[i].next = channel_readl(pd_chan, NEXT);
763 struct pch_dma_chan *pd_chan;
773 pd_chan = to_pd_chan(chan);
775 channel_writel(pd_chan, DEV_ADDR, pd->ch_regs[i].dev_addr);
776 channel_writel(pd_chan, MEM_ADDR, pd->ch_regs[i].mem_addr);
777 channel_writel(pd_chan, SIZE, pd->ch_regs[i].size);
778 channel_writel(pd_chan, NEXT, pd->ch_regs[i].next);
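Both suspend/resume helpers iterate pd->dma.channels the same way. Here is the restore side, with assumed loop scaffolding and assumed CTL0-CTL3 global-register handling around the four matched channel_writel() lines; the save side (lines 752-755) mirrors it with channel_readl():

static void pch_dma_restore_regs(struct pch_dma *pd)
{
	struct pch_dma_chan *pd_chan;
	struct dma_chan *chan, *_c;
	int i = 0;

	/* assumed: restore the global control registers first */
	dma_writel(pd, CTL0, pd->regs.dma_ctl0);
	dma_writel(pd, CTL1, pd->regs.dma_ctl1);
	dma_writel(pd, CTL2, pd->regs.dma_ctl2);
	dma_writel(pd, CTL3, pd->regs.dma_ctl3);

	list_for_each_entry_safe(chan, _c, &pd->dma.channels, device_node) {
		pd_chan = to_pd_chan(chan);

		channel_writel(pd_chan, DEV_ADDR, pd->ch_regs[i].dev_addr);
		channel_writel(pd_chan, MEM_ADDR, pd->ch_regs[i].mem_addr);
		channel_writel(pd_chan, SIZE, pd->ch_regs[i].size);
		channel_writel(pd_chan, NEXT, pd->ch_regs[i].next);

		i++;
	}
}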
872 struct pch_dma_chan *pd_chan = &pd->channels[i];
874 pd_chan->chan.device = &pd->dma;
875 dma_cookie_init(&pd_chan->chan);
877 pd_chan->membase = &regs->desc[i];
879 spin_lock_init(&pd_chan->lock);
881 INIT_LIST_HEAD(&pd_chan->active_list);
882 INIT_LIST_HEAD(&pd_chan->queue);
883 INIT_LIST_HEAD(&pd_chan->free_list);
885 tasklet_setup(&pd_chan->tasklet, pdc_tasklet);
886 list_add_tail(&pd_chan->chan.device_node, &pd->dma.channels);
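The probe-time channel setup at lines 872-886 reads as one loop; the matched lines cover nearly all of it, so only the loop header and its bound are assumed:

	for (i = 0; i < nr_channels; i++) {	/* assumed header */
		struct pch_dma_chan *pd_chan = &pd->channels[i];

		pd_chan->chan.device = &pd->dma;
		dma_cookie_init(&pd_chan->chan);

		/* per-channel descriptor register block in BAR space */
		pd_chan->membase = &regs->desc[i];

		spin_lock_init(&pd_chan->lock);

		INIT_LIST_HEAD(&pd_chan->active_list);
		INIT_LIST_HEAD(&pd_chan->queue);
		INIT_LIST_HEAD(&pd_chan->free_list);

		tasklet_setup(&pd_chan->tasklet, pdc_tasklet);
		list_add_tail(&pd_chan->chan.device_node, &pd->dma.channels);
	}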
926 struct pch_dma_chan *pd_chan;
936 pd_chan = to_pd_chan(chan);
938 tasklet_kill(&pd_chan->tasklet);