Lines matching refs: td_chan (see the sketches after the listing)

103 static struct timb_dma *tdchantotd(struct timb_dma_chan *td_chan)
105 int id = td_chan->chan.chan_id;
106 return (struct timb_dma *)((u8 *)td_chan -
111 static void __td_enable_chan_irq(struct timb_dma_chan *td_chan)
113 int id = td_chan->chan.chan_id;
114 struct timb_dma *td = tdchantotd(td_chan);
120 dev_dbg(chan2dev(&td_chan->chan), "Enabling irq: %d, IER: 0x%x\n", id,
126 static bool __td_dma_done_ack(struct timb_dma_chan *td_chan)
128 int id = td_chan->chan.chan_id;
129 struct timb_dma *td = (struct timb_dma *)((u8 *)td_chan -
134 dev_dbg(chan2dev(&td_chan->chan), "Checking irq: %d, td: %p\n", id, td);
145 static int td_fill_desc(struct timb_dma_chan *td_chan, u8 *dma_desc,
149 dev_err(chan2dev(&td_chan->chan), "Too big sg element\n");
155 dev_err(chan2dev(&td_chan->chan), "Incorrect length: %d\n",
160 dev_dbg(chan2dev(&td_chan->chan), "desc: %p, addr: 0x%llx\n",
178 static void __td_start_dma(struct timb_dma_chan *td_chan)
182 if (td_chan->ongoing) {
183 dev_err(chan2dev(&td_chan->chan),
188 td_desc = list_entry(td_chan->active_list.next, struct timb_dma_desc,
191 dev_dbg(chan2dev(&td_chan->chan),
192 "td_chan: %p, chan: %d, membase: %p\n",
193 td_chan, td_chan->chan.chan_id, td_chan->membase);
195 if (td_chan->direction == DMA_DEV_TO_MEM) {
198 iowrite32(0, td_chan->membase + TIMBDMA_OFFS_RX_DHAR);
199 iowrite32(td_desc->txd.phys, td_chan->membase +
202 iowrite32(td_chan->bytes_per_line, td_chan->membase +
205 iowrite32(TIMBDMA_RX_EN, td_chan->membase + TIMBDMA_OFFS_RX_ER);
208 iowrite32(0, td_chan->membase + TIMBDMA_OFFS_TX_DHAR);
209 iowrite32(td_desc->txd.phys, td_chan->membase +
213 td_chan->ongoing = true;
216 __td_enable_chan_irq(td_chan);
219 static void __td_finish(struct timb_dma_chan *td_chan)
226 if (list_empty(&td_chan->active_list))
229 td_desc = list_entry(td_chan->active_list.next, struct timb_dma_desc,
233 dev_dbg(chan2dev(&td_chan->chan), "descriptor %u complete\n",
237 if (td_chan->direction == DMA_DEV_TO_MEM)
238 iowrite32(0, td_chan->membase + TIMBDMA_OFFS_RX_ER);
241 iowrite32(0, td_chan->membase + TIMBDMA_OFFS_TX_DLAR);
244 td_chan->ongoing = false;
248 list_move(&td_desc->desc_node, &td_chan->free_list);
264 struct timb_dma_chan *td_chan = td->channels + i;
265 if (td_chan->ongoing) {
267 list_entry(td_chan->active_list.next,
277 static void __td_start_next(struct timb_dma_chan *td_chan)
281 BUG_ON(list_empty(&td_chan->queue));
282 BUG_ON(td_chan->ongoing);
284 td_desc = list_entry(td_chan->queue.next, struct timb_dma_desc,
287 dev_dbg(chan2dev(&td_chan->chan), "%s: started %u\n",
290 list_move(&td_desc->desc_node, &td_chan->active_list);
291 __td_start_dma(td_chan);
298 struct timb_dma_chan *td_chan = container_of(txd->chan,
302 spin_lock_bh(&td_chan->lock);
305 if (list_empty(&td_chan->active_list)) {
308 list_add_tail(&td_desc->desc_node, &td_chan->active_list);
309 __td_start_dma(td_chan);
314 list_add_tail(&td_desc->desc_node, &td_chan->queue);
317 spin_unlock_bh(&td_chan->lock);
322 static struct timb_dma_desc *td_alloc_init_desc(struct timb_dma_chan *td_chan)
324 struct dma_chan *chan = &td_chan->chan;
332 td_desc->desc_list_len = td_chan->desc_elems * TIMB_DMA_DESC_SIZE;
370 static void td_desc_put(struct timb_dma_chan *td_chan,
373 dev_dbg(chan2dev(&td_chan->chan), "Putting desc: %p\n", td_desc);
375 spin_lock_bh(&td_chan->lock);
376 list_add(&td_desc->desc_node, &td_chan->free_list);
377 spin_unlock_bh(&td_chan->lock);
380 static struct timb_dma_desc *td_desc_get(struct timb_dma_chan *td_chan)
385 spin_lock_bh(&td_chan->lock);
386 list_for_each_entry_safe(td_desc, _td_desc, &td_chan->free_list,
393 dev_dbg(chan2dev(&td_chan->chan), "desc %p not ACKed\n",
396 spin_unlock_bh(&td_chan->lock);
403 struct timb_dma_chan *td_chan =
409 BUG_ON(!list_empty(&td_chan->free_list));
410 for (i = 0; i < td_chan->descs; i++) {
411 struct timb_dma_desc *td_desc = td_alloc_init_desc(td_chan);
422 td_desc_put(td_chan, td_desc);
425 spin_lock_bh(&td_chan->lock);
427 spin_unlock_bh(&td_chan->lock);
434 struct timb_dma_chan *td_chan =
442 BUG_ON(!list_empty(&td_chan->active_list));
443 BUG_ON(!list_empty(&td_chan->queue));
445 spin_lock_bh(&td_chan->lock);
446 list_splice_init(&td_chan->free_list, &list);
447 spin_unlock_bh(&td_chan->lock);
472 struct timb_dma_chan *td_chan =
476 spin_lock_bh(&td_chan->lock);
478 if (!list_empty(&td_chan->active_list))
480 if (__td_dma_done_ack(td_chan))
481 __td_finish(td_chan);
483 if (list_empty(&td_chan->active_list) && !list_empty(&td_chan->queue))
484 __td_start_next(td_chan);
486 spin_unlock_bh(&td_chan->lock);
494 struct timb_dma_chan *td_chan =
507 if (td_chan->direction != direction) {
513 td_desc = td_desc_get(td_chan);
528 err = td_fill_desc(td_chan, td_desc->desc_list + desc_usage, sg,
533 td_desc_put(td_chan, td_desc);
547 struct timb_dma_chan *td_chan =
554 spin_lock_bh(&td_chan->lock);
555 list_for_each_entry_safe(td_desc, _td_desc, &td_chan->queue,
557 list_move(&td_desc->desc_node, &td_chan->free_list);
560 __td_finish(td_chan);
561 spin_unlock_bh(&td_chan->lock);
582 struct timb_dma_chan *td_chan = td->channels + i;
583 spin_lock(&td_chan->lock);
584 __td_finish(td_chan);
585 if (!list_empty(&td_chan->queue))
586 __td_start_next(td_chan);
587 spin_unlock(&td_chan->lock);
684 struct timb_dma_chan *td_chan = &td->channels[i];
695 td_chan->chan.device = &td->dma;
696 dma_cookie_init(&td_chan->chan);
697 spin_lock_init(&td_chan->lock);
698 INIT_LIST_HEAD(&td_chan->active_list);
699 INIT_LIST_HEAD(&td_chan->queue);
700 INIT_LIST_HEAD(&td_chan->free_list);
702 td_chan->descs = pchan->descriptors;
703 td_chan->desc_elems = pchan->descriptor_elements;
704 td_chan->bytes_per_line = pchan->bytes_per_line;
705 td_chan->direction = pchan->rx ? DMA_DEV_TO_MEM :
708 td_chan->membase = td->membase +
713 i, td_chan->membase);
715 list_add_tail(&td_chan->chan.device_node, &td->dma.channels);
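
The matches at source lines 103-106 and 128-129 both turn a channel pointer back into its parent struct timb_dma with a cast and pointer arithmetic. Below is a minimal, self-contained sketch of that pattern, assuming the channels are laid out inline directly after the parent structure so channel i sits at a fixed offset from it; the struct layouts and the names chan, dma_dev and chan_to_dev are simplified stand-ins for illustration, not the driver's real definitions.

#include <stdio.h>
#include <stdlib.h>

struct chan {
	int chan_id;			/* index of this channel in its parent */
};

struct dma_dev {
	const char *name;
	struct chan channels[];		/* channels allocated inline after the header */
};

/* Step back over the preceding channels and the parent header, the same
 * idea as tdchantotd() in the listing above. */
static struct dma_dev *chan_to_dev(struct chan *c)
{
	return (struct dma_dev *)((char *)c -
		c->chan_id * sizeof(struct chan) - sizeof(struct dma_dev));
}

int main(void)
{
	struct dma_dev *dev = malloc(sizeof(*dev) + 4 * sizeof(struct chan));
	int i;

	if (!dev)
		return 1;
	dev->name = "sketch-dma";
	for (i = 0; i < 4; i++)
		dev->channels[i].chan_id = i;

	/* Every channel resolves back to the same parent device. */
	for (i = 0; i < 4; i++)
		printf("chan %d -> %s\n", i, chan_to_dev(&dev->channels[i])->name);

	free(dev);
	return 0;
}

The related container_of() conversion at source line 298 goes the other way, from the embedded struct dma_chan back to the struct timb_dma_chan that contains it.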
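The rest of the listing is dominated by descriptors moving between three per-channel lists under the channel lock: td_desc_get() pulls from free_list, the submit path either starts a descriptor on active_list or parks it on queue, and the completion path recycles it to free_list before starting the next queued one. The sketch below models that lifecycle in userspace; the single "active" slot, the pthread mutex standing in for spin_lock_bh(), and all struct and function names are illustrative assumptions, not the driver's code.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct desc {
	int id;
	struct desc *next;
};

struct chan {
	pthread_mutex_t lock;	/* stands in for the channel spinlock */
	struct desc *free_list;	/* descriptors ready for reuse */
	struct desc *queue;	/* submitted, waiting for the hardware */
	struct desc *active;	/* in flight (at most one in this model) */
	bool ongoing;
};

static struct desc *pop(struct desc **head)
{
	struct desc *d = *head;
	if (d)
		*head = d->next;
	return d;
}

static void push(struct desc **head, struct desc *d)
{
	d->next = *head;
	*head = d;
}

/* Take a reusable descriptor, as td_desc_get() does from free_list. */
static struct desc *desc_get(struct chan *c)
{
	struct desc *d;
	pthread_mutex_lock(&c->lock);
	d = pop(&c->free_list);
	pthread_mutex_unlock(&c->lock);
	return d;
}

/* Submit: start at once if the channel is idle, otherwise queue. */
static void submit(struct chan *c, struct desc *d)
{
	pthread_mutex_lock(&c->lock);
	if (!c->ongoing) {
		c->active = d;
		c->ongoing = true;		/* __td_start_dma() analogue */
		printf("start desc %d\n", d->id);
	} else {
		push(&c->queue, d);
		printf("queue desc %d\n", d->id);
	}
	pthread_mutex_unlock(&c->lock);
}

/* Completion: recycle the finished descriptor, then start the next one. */
static void finish(struct chan *c)
{
	pthread_mutex_lock(&c->lock);
	if (c->active) {
		push(&c->free_list, c->active);	/* back to free_list */
		c->active = NULL;
		c->ongoing = false;
	}
	c->active = pop(&c->queue);		/* __td_start_next() analogue */
	if (c->active) {
		c->ongoing = true;
		printf("start desc %d\n", c->active->id);
	}
	pthread_mutex_unlock(&c->lock);
}

int main(void)
{
	struct desc descs[3] = { { .id = 0 }, { .id = 1 }, { .id = 2 } };
	struct chan c = { .lock = PTHREAD_MUTEX_INITIALIZER };
	int i;

	/* Pre-populate free_list, roughly what the channel allocation path does. */
	for (i = 0; i < 3; i++)
		push(&c.free_list, &descs[i]);

	submit(&c, desc_get(&c));	/* starts immediately */
	submit(&c, desc_get(&c));	/* queued behind the first */
	finish(&c);			/* first done, second started */
	finish(&c);			/* second done, channel idle */
	return 0;
}

In the driver each list can hold several descriptors at once and the lock also fences the completion paths shown around source lines 472-486 and 582-587; the sketch only preserves the state transitions between the lists.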