Lines Matching defs:async_desc
355 struct adm_async_desc *async_desc;
405 async_desc = kzalloc(sizeof(*async_desc), GFP_NOWAIT);
406 if (!async_desc) {
407 dev_err(adev->dev, "not enough memory for async_desc struct\n");
411 async_desc->mux = achan->mux ? ADM_CRCI_CTL_MUX_SEL : 0;
412 async_desc->crci = crci;
413 async_desc->blk_size = blk_size;
414 async_desc->dma_len = single_count * sizeof(struct adm_desc_hw_single) +
418 async_desc->cpl = kzalloc(async_desc->dma_len, GFP_NOWAIT);
419 if (!async_desc->cpl) {
424 async_desc->adev = adev;
427 cple = PTR_ALIGN(async_desc->cpl, ADM_DESC_ALIGN);
431 async_desc->length += sg_dma_len(sg);
441 async_desc->dma_addr = dma_map_single(adev->dev, async_desc->cpl,
442 async_desc->dma_len,
444 if (dma_mapping_error(adev->dev, async_desc->dma_addr)) {
449 cple_addr = async_desc->dma_addr + ((void *)cple - async_desc->cpl);
455 *cple |= (async_desc->dma_addr + ADM_DESC_ALIGN) >> 3;
459 return vchan_tx_prep(&achan->vc, &async_desc->vd, flags);
462 kfree(async_desc);
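
The matches above (source lines 355-462) all fall in the slave-sg prep path: the descriptor bookkeeping is allocated with GFP_NOWAIT, one staging buffer for the hardware command list is sized, aligned and DMA-mapped, and both allocations are unwound on failure. A condensed sketch of that flow follows; the goto labels, the NULL returns, and the locals sgl, sg_len, sg, i, cple and cple_addr are assumptions inferred from the matched lines, and statements whose continuations did not match (e.g. line 414) are abbreviated.

	/* Sketch of the prep-path body implied by lines 405-462 (assumptions noted). */
	async_desc = kzalloc(sizeof(*async_desc), GFP_NOWAIT);	/* atomic context: no sleeping alloc */
	if (!async_desc)
		return NULL;					/* line 407 also prints a dev_err() */

	async_desc->mux = achan->mux ? ADM_CRCI_CTL_MUX_SEL : 0;
	async_desc->crci = crci;
	async_desc->blk_size = blk_size;
	/* dma_len sizes one buffer covering every hardware descriptor plus alignment
	 * slack; the continuation of line 414 is not a match, so it is not listed. */

	async_desc->cpl = kzalloc(async_desc->dma_len, GFP_NOWAIT);
	if (!async_desc->cpl)
		goto free_async;				/* assumed label */

	async_desc->adev = adev;
	cple = PTR_ALIGN(async_desc->cpl, ADM_DESC_ALIGN);	/* command pointer list entry */

	for_each_sg(sgl, sg, sg_len, i)
		async_desc->length += sg_dma_len(sg);		/* line 431: total transfer length */

	async_desc->dma_addr = dma_map_single(adev->dev, async_desc->cpl,
					      async_desc->dma_len, DMA_TO_DEVICE);
	if (dma_mapping_error(adev->dev, async_desc->dma_addr))
		goto free_cpl;					/* assumed label */

	/* Bus address of the CPLE word, presumably for cache maintenance around
	 * the write below (any dma_sync_single_*() calls are not matches). */
	cple_addr = async_desc->dma_addr + ((void *)cple - async_desc->cpl);
	*cple |= (async_desc->dma_addr + ADM_DESC_ALIGN) >> 3;	/* CPLE = first desc bus addr >> 3 */

	return vchan_tx_prep(&achan->vc, &async_desc->vd, flags);

free_cpl:
	kfree(async_desc->cpl);
free_async:
	kfree(async_desc);					/* line 462 */
	return NULL;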
518 struct adm_async_desc *async_desc;
528 async_desc = container_of(vd, struct adm_async_desc, vd);
529 achan->curr_txd = async_desc;
549 if (async_desc->crci) {
550 writel(async_desc->mux | async_desc->blk_size,
551 adev->regs + ADM_CRCI_CTL(async_desc->crci, adev->ee));
558 writel(ALIGN(async_desc->dma_addr, ADM_DESC_ALIGN) >> 3,
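
The next group (source lines 518-558) is the issue path: the head of the virt-dma queue is converted back into the driver descriptor, remembered as the in-flight transaction, the CRCI flow-control register is programmed when needed, and the 8-byte-aligned bus address of the command list is written to the channel's command pointer register shifted right by 3. A minimal sketch, assuming the usual vchan_next_desc() pattern and a hypothetical ADM_CH_CMD_PTR register macro (neither appears in the matches):

	/* Sketch of the issue path implied by lines 518-558. */
	struct virt_dma_desc *vd = vchan_next_desc(&achan->vc);	/* assumed: standard virt-dma lookup */

	if (!vd)
		return;

	async_desc = container_of(vd, struct adm_async_desc, vd);	/* line 528 */
	achan->curr_txd = async_desc;					/* line 529: remember in-flight txd */

	/* Flow-controlled transfers program the CRCI control register first. */
	if (async_desc->crci)
		writel(async_desc->mux | async_desc->blk_size,
		       adev->regs + ADM_CRCI_CTL(async_desc->crci, adev->ee));

	/* Hand the pre-built command list to the hardware: aligned bus address >> 3. */
	writel(ALIGN(async_desc->dma_addr, ADM_DESC_ALIGN) >> 3,
	       adev->regs + ADM_CH_CMD_PTR(achan->id, adev->ee));	/* register macro assumed */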
573 struct adm_async_desc *async_desc;
603 async_desc = achan->curr_txd;
607 if (async_desc) {
608 vchan_cookie_complete(&async_desc->vd);
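
In the interrupt-handler group (source lines 573-608) the in-flight descriptor is looked up and completed, which queues it on the virt-dma tasklet and eventually hands it to the free callback below. A minimal sketch of that step, assuming the channel lock is held and curr_txd is cleared as in the usual virt-dma completion pattern (only lines 603, 607 and 608 are from the file):

	/* Sketch of the completion step implied by lines 603-608. */
	spin_lock_irqsave(&achan->vc.lock, flags);
	async_desc = achan->curr_txd;			/* line 603 */
	achan->curr_txd = NULL;				/* assumed: clear the in-flight pointer */

	if (async_desc)
		vchan_cookie_complete(&async_desc->vd);	/* line 608: actual free deferred to desc_free */
	spin_unlock_irqrestore(&achan->vc.lock, flags);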
688 struct adm_async_desc *async_desc = container_of(vd,
691 dma_unmap_single(async_desc->adev->dev, async_desc->dma_addr,
692 async_desc->dma_len, DMA_TO_DEVICE);
693 kfree(async_desc->cpl);
694 kfree(async_desc);
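
The final group (source lines 688-694) is the descriptor free callback, which undoes the prep path: it unmaps the staging buffer mapped at line 441 and releases both allocations. Reassembled as a complete virt-dma desc_free hook; only the function name and signature are assumptions, the body is the listed lines:

static void adm_dma_free_desc(struct virt_dma_desc *vd)	/* name/signature assumed */
{
	struct adm_async_desc *async_desc = container_of(vd,
			struct adm_async_desc, vd);

	/* Undo the dma_map_single() from the prep path (line 441). */
	dma_unmap_single(async_desc->adev->dev, async_desc->dma_addr,
			 async_desc->dma_len, DMA_TO_DEVICE);
	kfree(async_desc->cpl);				/* staging buffer allocated at line 418 */
	kfree(async_desc);
}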