Lines Matching defs:td_desc in drivers/dma/timb_dma.c (Timberdale FPGA DMA engine driver)
180 struct timb_dma_desc *td_desc;
188 td_desc = list_entry(td_chan->active_list.next, struct timb_dma_desc,
199 iowrite32(td_desc->txd.phys, td_chan->membase +
209 iowrite32(td_desc->txd.phys, td_chan->membase +
215 if (td_desc->interrupt)
223 struct timb_dma_desc *td_desc;
229 td_desc = list_entry(td_chan->active_list.next, struct timb_dma_desc,
231 txd = &td_desc->txd;
248 list_move(&td_desc->desc_node, &td_chan->free_list);
266 struct timb_dma_desc *td_desc =
269 if (td_desc->interrupt)
279 struct timb_dma_desc *td_desc;
284 td_desc = list_entry(td_chan->queue.next, struct timb_dma_desc,
288 __func__, td_desc->txd.cookie);
290 list_move(&td_desc->desc_node, &td_chan->active_list);
296 struct timb_dma_desc *td_desc = container_of(txd, struct timb_dma_desc,
308 list_add_tail(&td_desc->desc_node, &td_chan->active_list);
314 list_add_tail(&td_desc->desc_node, &td_chan->queue);
325 struct timb_dma_desc *td_desc;
328 td_desc = kzalloc(sizeof(struct timb_dma_desc), GFP_KERNEL);
329 if (!td_desc)
332 td_desc->desc_list_len = td_chan->desc_elems * TIMB_DMA_DESC_SIZE;
334 td_desc->desc_list = kzalloc(td_desc->desc_list_len, GFP_KERNEL);
335 if (!td_desc->desc_list)
338 dma_async_tx_descriptor_init(&td_desc->txd, chan);
339 td_desc->txd.tx_submit = td_tx_submit;
340 td_desc->txd.flags = DMA_CTRL_ACK;
342 td_desc->txd.phys = dma_map_single(chan2dmadev(chan),
343 td_desc->desc_list, td_desc->desc_list_len, DMA_TO_DEVICE);
345 err = dma_mapping_error(chan2dmadev(chan), td_desc->txd.phys);
351 return td_desc;
353 kfree(td_desc->desc_list);
354 kfree(td_desc);
360 static void td_free_desc(struct timb_dma_desc *td_desc)
362 dev_dbg(chan2dev(td_desc->txd.chan), "Freeing desc: %p\n", td_desc);
363 dma_unmap_single(chan2dmadev(td_desc->txd.chan), td_desc->txd.phys,
364 td_desc->desc_list_len, DMA_TO_DEVICE);
366 kfree(td_desc->desc_list);
367 kfree(td_desc);
371 struct timb_dma_desc *td_desc)
373 dev_dbg(chan2dev(&td_chan->chan), "Putting desc: %p\n", td_desc);
376 list_add(&td_desc->desc_node, &td_chan->free_list);
382 struct timb_dma_desc *td_desc, *_td_desc;
386 list_for_each_entry_safe(td_desc, _td_desc, &td_chan->free_list,
388 if (async_tx_test_ack(&td_desc->txd)) {
389 list_del(&td_desc->desc_node);
390 ret = td_desc;
394 td_desc);
411 struct timb_dma_desc *td_desc = td_alloc_init_desc(td_chan);
412 if (!td_desc) {
422 td_desc_put(td_chan, td_desc);
436 struct timb_dma_desc *td_desc, *_td_desc;
449 list_for_each_entry_safe(td_desc, _td_desc, &list, desc_node) {
451 td_desc);
452 td_free_desc(td_desc);
496 struct timb_dma_desc *td_desc;
513 td_desc = td_desc_get(td_chan);
514 if (!td_desc) {
519 td_desc->interrupt = (flags & DMA_PREP_INTERRUPT) != 0;
523 if (desc_usage > td_desc->desc_list_len) {
528 err = td_fill_desc(td_chan, td_desc->desc_list + desc_usage, sg,
533 td_desc_put(td_chan, td_desc);
539 dma_sync_single_for_device(chan2dmadev(chan), td_desc->txd.phys,
540 td_desc->desc_list_len, DMA_TO_DEVICE);
542 return &td_desc->txd;
549 struct timb_dma_desc *td_desc, *_td_desc;
555 list_for_each_entry_safe(td_desc, _td_desc, &td_chan->queue,
557 list_move(&td_desc->desc_node, &td_chan->free_list);
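Read together, the matches trace one descriptor-recycling pattern: each td_desc is set up once in td_alloc_init_desc (lines 325-354) with DMA_CTRL_ACK pre-set (line 340), parked on the channel's free_list via td_desc_put (line 376), handed out by td_desc_get only if the client has ACKed it (async_tx_test_ack, lines 388-390), filled and submitted, and finally moved back to free_list when the transfer completes or is terminated (lines 248 and 557). The fragment below is a minimal, self-contained userspace sketch of that ACK-gated free-list discipline, not the driver's own code: demo_desc, demo_chan, the acked flag, and the pool size are illustrative stand-ins for struct timb_dma_desc, the channel's free_list, and async_tx_test_ack.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Illustrative stand-in for struct timb_dma_desc: only the fields the
 * free-list discipline needs (a link and an "ACKed by client" flag). */
struct demo_desc {
	struct demo_desc *next;
	bool acked;
	int id;
};

struct demo_chan {
	struct demo_desc *free_list;	/* models td_chan->free_list */
};

/* Models td_desc_put(): return a descriptor to the channel's free list. */
static void demo_desc_put(struct demo_chan *chan, struct demo_desc *desc)
{
	desc->next = chan->free_list;
	chan->free_list = desc;
}

/* Models td_desc_get(): walk the free list and hand out the first
 * descriptor the client has ACKed; unACKed descriptors stay put. */
static struct demo_desc *demo_desc_get(struct demo_chan *chan)
{
	struct demo_desc **link = &chan->free_list;

	for (; *link; link = &(*link)->next) {
		if ((*link)->acked) {
			struct demo_desc *desc = *link;

			*link = desc->next;	/* list_del() equivalent */
			desc->next = NULL;
			return desc;
		}
	}
	return NULL;	/* the prep path would bail out here */
}

int main(void)
{
	struct demo_desc descs[2] = {
		{ .acked = true,  .id = 0 },
		{ .acked = false, .id = 1 },	/* still owned by the client */
	};
	struct demo_chan chan = { .free_list = NULL };
	struct demo_desc *d;

	demo_desc_put(&chan, &descs[0]);
	demo_desc_put(&chan, &descs[1]);

	d = demo_desc_get(&chan);	/* skips desc 1, returns desc 0 */
	printf("got desc %d\n", d ? d->id : -1);
	return 0;
}

The point of the gate is visible in the matched lines themselves: because td_alloc_init_desc initializes txd.flags with DMA_CTRL_ACK, a freshly allocated descriptor passes the async_tx_test_ack check immediately, while a descriptor a client still holds is only skipped on the free list, never reclaimed out from under it.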