Lines matching refs:dma in dma.c
3 * Filename: dma.c
74 struct rsxx_dma *dma;
114 static unsigned int get_dma_size(struct rsxx_dma *dma)
116 if (dma->sub_page.cnt)
117 return dma->sub_page.cnt << 9;
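The two fragments above are the whole size helper: sub_page.cnt counts 512-byte sectors, hence the << 9. A minimal reconstruction, assuming the driver falls back to its full hardware block size (RSXX_HW_BLK_SIZE) when no sub-page range is set:

static unsigned int get_dma_size(struct rsxx_dma *dma)
{
	/* Partial-block DMAs carry their length in 512-byte sectors. */
	if (dma->sub_page.cnt)
		return dma->sub_page.cnt << 9;
	else
		return RSXX_HW_BLK_SIZE;	/* assumed fallback: one full block */
}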
126 struct rsxx_dma *dma)
128 trackers->list[tag].dma = dma;
134 return trackers->list[tag].dma;
157 trackers->list[tag].dma = NULL;
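Lines 126-157 belong to the tag-tracker helpers: each DMA issued to hardware is filed in a table slot indexed by its command tag, so the completion interrupt can map a returned tag back to its descriptor. A sketch consistent with the fragments; the container type name and any locking around the free path are assumptions:

static void set_tracker_dma(struct dma_tracker_list *trackers, int tag,
			    struct rsxx_dma *dma)
{
	trackers->list[tag].dma = dma;
}

static struct rsxx_dma *get_tracker_dma(struct dma_tracker_list *trackers,
					int tag)
{
	return trackers->list[tag].dma;
}

static void free_tracker(struct dma_tracker_list *trackers, int tag)
{
	/* Clearing the slot (line 157) makes the tag reusable. */
	trackers->list[tag].dma = NULL;
}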
210 static void rsxx_free_dma(struct rsxx_dma_ctrl *ctrl, struct rsxx_dma *dma)
212 if (dma->cmd != HW_CMD_BLK_DISCARD) {
213 if (!dma_mapping_error(&ctrl->card->dev->dev, dma->dma_addr)) {
214 dma_unmap_page(&ctrl->card->dev->dev, dma->dma_addr,
215 get_dma_size(dma),
216 dma->cmd == HW_CMD_BLK_WRITE ?
222 kmem_cache_free(rsxx_dma_pool, dma);
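Lines 210-222 are rsxx_free_dma(): discards carry no data page, so only read/write DMAs need their mapping torn down before the descriptor returns to the rsxx_dma_pool slab cache. The ternary truncated at line 216 presumably completes to the usual direction pair:

static void rsxx_free_dma(struct rsxx_dma_ctrl *ctrl, struct rsxx_dma *dma)
{
	if (dma->cmd != HW_CMD_BLK_DISCARD) {
		if (!dma_mapping_error(&ctrl->card->dev->dev, dma->dma_addr)) {
			dma_unmap_page(&ctrl->card->dev->dev, dma->dma_addr,
				       get_dma_size(dma),
				       dma->cmd == HW_CMD_BLK_WRITE ?
				       DMA_TO_DEVICE : DMA_FROM_DEVICE);
		}
	}

	kmem_cache_free(rsxx_dma_pool, dma);
}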
226 struct rsxx_dma *dma,
236 if (dma->cb)
237 dma->cb(ctrl->card, dma->cb_data, status ? 1 : 0);
239 rsxx_free_dma(ctrl, dma);
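Lines 226-239: completion collapses any non-zero status to 1 for the caller's callback, then frees the descriptor. A sketch, assuming the status parameter type; any error counters sitting between lines 226 and 236 are omitted:

static void rsxx_complete_dma(struct rsxx_dma_ctrl *ctrl,
			      struct rsxx_dma *dma,
			      unsigned int status)
{
	if (dma->cb)
		dma->cb(ctrl->card, dma->cb_data, status ? 1 : 0);

	rsxx_free_dma(ctrl, dma);
}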
245 struct rsxx_dma *dma;
249 list_for_each_entry_safe(dma, tmp, q, list) {
250 list_del(&dma->list);
252 rsxx_complete_dma(ctrl, dma, DMA_CANCELLED);
254 rsxx_free_dma(ctrl, dma);
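Lines 245-254 drain a pending queue with list_for_each_entry_safe() (safe, because each entry is unlinked inside the loop). Both branches appear in the fragments, so the function evidently either completes each DMA as cancelled or frees it silently; the flag choosing between them is an assumption:

static int rsxx_cleanup_dma_queue(struct rsxx_dma_ctrl *ctrl,
				  struct list_head *q, int complete)
{
	struct rsxx_dma *dma;
	struct rsxx_dma *tmp;
	int cnt = 0;

	list_for_each_entry_safe(dma, tmp, q, list) {
		list_del(&dma->list);
		if (complete)	/* assumed selector */
			rsxx_complete_dma(ctrl, dma, DMA_CANCELLED);
		else
			rsxx_free_dma(ctrl, dma);
		cnt++;
	}

	return cnt;
}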
262 struct rsxx_dma *dma)
270 list_add(&dma->list, &ctrl->queue);
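Lines 262-270: note list_add() rather than list_add_tail(), so a requeued DMA (for example a read being retried as a reconstructed read) jumps to the head of the software queue and is reissued ahead of new work. A sketch, with the queue lock assumed:

static void rsxx_requeue_dma(struct rsxx_dma_ctrl *ctrl,
			     struct rsxx_dma *dma)
{
	spin_lock_bh(&ctrl->queue_lock);	/* assumed lock name */
	list_add(&dma->list, &ctrl->queue);	/* head insert, not tail */
	spin_unlock_bh(&ctrl->queue_lock);
}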
275 struct rsxx_dma *dma,
283 dma->cmd, dma->laddr, hw_st);
292 switch (dma->cmd) {
296 dma->cmd = HW_CMD_BLK_RECON_READ;
331 dma->cmd, dma->laddr, hw_st);
338 rsxx_requeue_dma(ctrl, dma);
340 rsxx_complete_dma(ctrl, dma, status);
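Lines 275-340 form the error handler: it logs the hardware status, switches on dma->cmd, and can downgrade a failed HW_CMD_BLK_READ to HW_CMD_BLK_RECON_READ (line 296), a reconstructed read retried via requeue; anything unrecoverable is completed with an error status (line 340). A condensed sketch; which status bits trigger the retry, and the fallback status flag, are assumptions:

static void rsxx_handle_dma_error(struct rsxx_dma_ctrl *ctrl,
				  struct rsxx_dma *dma, u8 hw_st)
{
	unsigned int status = 0;
	int requeue_cmd = 0;

	switch (dma->cmd) {
	case HW_CMD_BLK_READ:
		/* assumed policy: retry recoverable reads as recon reads */
		dma->cmd = HW_CMD_BLK_RECON_READ;
		requeue_cmd = 1;
		break;
	default:
		status |= DMA_HW_FAULT;		/* assumed status flag */
		break;
	}

	if (requeue_cmd)
		rsxx_requeue_dma(ctrl, dma);
	else
		rsxx_complete_dma(ctrl, dma, status);
}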
354 * The dma engine was stalled because the SW_CMD_IDX write
384 struct rsxx_dma *dma;
409 dma = list_entry(ctrl->queue.next, struct rsxx_dma, list);
410 list_del(&dma->list);
421 rsxx_complete_dma(ctrl, dma, DMA_CANCELLED);
425 if (dma->cmd != HW_CMD_BLK_DISCARD) {
426 if (dma->cmd == HW_CMD_BLK_WRITE)
441 dma->dma_addr = dma_map_page(&ctrl->card->dev->dev, dma->page,
442 dma->pg_off, dma->sub_page.cnt << 9, dir);
443 if (dma_mapping_error(&ctrl->card->dev->dev, dma->dma_addr)) {
445 rsxx_complete_dma(ctrl, dma, DMA_CANCELLED);
450 set_tracker_dma(ctrl->trackers, tag, dma);
451 hw_cmd_buf[ctrl->cmd.idx].command = dma->cmd;
455 ((dma->sub_page.cnt & 0x7) << 4) |
456 (dma->sub_page.off & 0x7);
459 cpu_to_le32(dma->laddr);
462 cpu_to_le64(dma->dma_addr);
466 ctrl->id, dma->laddr, tag, ctrl->cmd.idx);
471 if (dma->cmd == HW_CMD_BLK_WRITE)
473 else if (dma->cmd == HW_CMD_BLK_DISCARD)
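Lines 384-473 are the issue loop: the next DMA is popped off the software queue (line 409), its page is mapped unless it is a discard (lines 425-443, with a cancel on mapping failure at 445), it is filed under a free tag (line 450), and a hardware command descriptor is filled in. Line 455 packs the sub-page geometry into one byte, a 3-bit sector count in bits 4-6 and a 3-bit sector offset in bits 0-2, and both addresses are stored little-endian. A sketch of the descriptor fill; surrounding declarations are omitted and the tag field name is an assumption:

	dir = (dma->cmd == HW_CMD_BLK_WRITE) ? DMA_TO_DEVICE
					     : DMA_FROM_DEVICE;
	dma->dma_addr = dma_map_page(&ctrl->card->dev->dev, dma->page,
				     dma->pg_off, dma->sub_page.cnt << 9,
				     dir);
	if (dma_mapping_error(&ctrl->card->dev->dev, dma->dma_addr)) {
		rsxx_complete_dma(ctrl, dma, DMA_CANCELLED);
		continue;
	}

	set_tracker_dma(ctrl->trackers, tag, dma);
	hw_cmd_buf[ctrl->cmd.idx].command  = dma->cmd;
	hw_cmd_buf[ctrl->cmd.idx].tag      = tag;	/* assumed field */
	hw_cmd_buf[ctrl->cmd.idx].sub_page =
			((dma->sub_page.cnt & 0x7) << 4) |
			 (dma->sub_page.off & 0x7);
	hw_cmd_buf[ctrl->cmd.idx].device_addr = cpu_to_le32(dma->laddr);
	hw_cmd_buf[ctrl->cmd.idx].host_addr   = cpu_to_le64(dma->dma_addr);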
496 struct rsxx_dma *dma;
525 dma = get_tracker_dma(ctrl->trackers, tag);
526 if (dma == NULL) {
541 ctrl->id, dma->laddr, tag, status, count,
550 rsxx_handle_dma_error(ctrl, dma, status);
552 rsxx_complete_dma(ctrl, dma, 0);
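Lines 496-552 are the completion side: the tag read back from the hardware status buffer indexes the tracker table; a NULL entry (line 526) means the card returned a tag the driver never issued, which is treated as fatal. Otherwise a non-zero status is routed to the error handler and a clean finish completes with status 0. A sketch of that dispatch; the variables come from the enclosing loop, and the tracker-release ordering is an assumption:

	dma = get_tracker_dma(ctrl->trackers, tag);
	if (dma == NULL) {
		/* assumed: log the stray tag and halt the card */
		return;
	}

	free_tracker(ctrl->trackers, tag);

	if (status)
		rsxx_handle_dma_error(ctrl, dma, status);
	else
		rsxx_complete_dma(ctrl, dma, 0);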
606 struct rsxx_dma *dma;
608 dma = kmem_cache_alloc(rsxx_dma_pool, GFP_KERNEL);
609 if (!dma)
612 dma->cmd = HW_CMD_BLK_DISCARD;
613 dma->laddr = laddr;
614 dma->dma_addr = 0;
615 dma->sub_page.off = 0;
616 dma->sub_page.cnt = 0;
617 dma->page = NULL;
618 dma->pg_off = 0;
619 dma->cb = cb;
620 dma->cb_data = cb_data;
622 dev_dbg(CARD_TO_DEV(card), "Queuing[D] laddr %x\n", dma->laddr);
624 list_add_tail(&dma->list, q);
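Lines 606-624 build a discard descriptor: no page, no mapping, no sub-page range; only the logical address matters, which is why rsxx_free_dma() can skip dma_unmap_page() for discards. A reconstruction; the signature, return values, and callback typedef are assumptions:

static int rsxx_queue_discard(struct rsxx_cardinfo *card,
			      struct list_head *q, unsigned int laddr,
			      rsxx_dma_cb cb, void *cb_data)
{
	struct rsxx_dma *dma;

	dma = kmem_cache_alloc(rsxx_dma_pool, GFP_KERNEL);
	if (!dma)
		return -ENOMEM;

	dma->cmd          = HW_CMD_BLK_DISCARD;
	dma->laddr        = laddr;
	dma->dma_addr     = 0;
	dma->sub_page.off = 0;
	dma->sub_page.cnt = 0;
	dma->page         = NULL;
	dma->pg_off       = 0;
	dma->cb           = cb;
	dma->cb_data      = cb_data;

	dev_dbg(CARD_TO_DEV(card), "Queuing[D] laddr %x\n", dma->laddr);

	list_add_tail(&dma->list, q);

	return 0;
}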
640 struct rsxx_dma *dma;
642 dma = kmem_cache_alloc(rsxx_dma_pool, GFP_KERNEL);
643 if (!dma)
646 dma->cmd = dir ? HW_CMD_BLK_WRITE : HW_CMD_BLK_READ;
647 dma->laddr = laddr;
648 dma->sub_page.off = (dma_off >> 9);
649 dma->sub_page.cnt = (dma_len >> 9);
650 dma->page = page;
651 dma->pg_off = pg_off;
652 dma->cb = cb;
653 dma->cb_data = cb_data;
657 dir ? 'W' : 'R', dma->laddr, dma->sub_page.off,
658 dma->sub_page.cnt, dma->page, dma->pg_off);
661 list_add_tail(&dma->list, q);
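Lines 640-661 are the read/write counterpart: dir selects the command, and the byte offset/length arguments are converted to 512-byte sectors with >> 9 before being stored in sub_page. The tail insert keeps the queue FIFO, unlike the head insert used for requeues. A sketch of the field setup under the same signature assumptions as the discard case:

	dma->cmd          = dir ? HW_CMD_BLK_WRITE : HW_CMD_BLK_READ;
	dma->laddr        = laddr;
	dma->sub_page.off = (dma_off >> 9);	/* bytes -> 512B sectors */
	dma->sub_page.cnt = (dma_len >> 9);
	dma->page         = page;
	dma->pg_off       = pg_off;
	dma->cb           = cb;
	dma->cb_data      = cb_data;

	list_add_tail(&dma->list, q);		/* FIFO: tail insert */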
818 ctrl->trackers->list[i].dma = NULL;
964 struct rsxx_dma *dma;
970 dma = get_tracker_dma(ctrl->trackers, i);
971 if (dma) {
973 rsxx_complete_dma(ctrl, dma, DMA_CANCELLED);
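Lines 964-973: during teardown every tracker slot is scanned and any DMA still outstanding in hardware is completed as cancelled. A sketch; the loop bound and the tag-release call are assumptions:

	for (i = 0; i < RSXX_MAX_OUTSTANDING_CMDS; i++) {	/* assumed bound */
		dma = get_tracker_dma(ctrl->trackers, i);
		if (dma) {
			rsxx_complete_dma(ctrl, dma, DMA_CANCELLED);
			free_tracker(ctrl->trackers, i);	/* assumed */
		}
	}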
1024 struct rsxx_dma *dma;
1036 dma = get_tracker_dma(card->ctrl[i].trackers, j);
1037 if (dma == NULL)
1040 if (dma->cmd == HW_CMD_BLK_WRITE)
1042 else if (dma->cmd == HW_CMD_BLK_DISCARD)
1047 if (dma->cmd != HW_CMD_BLK_DISCARD) {
1048 dma_unmap_page(&card->dev->dev, dma->dma_addr,
1049 get_dma_size(dma),
1050 dma->cmd == HW_CMD_BLK_WRITE ?
1055 list_add_tail(&dma->list, &issued_dmas[i]);
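Lines 1024-1055 (the PCI/EEH error-recovery path) harvest every in-flight DMA out of each channel's trackers: the per-command issue counter is rolled back (lines 1040-1042), the page is unmapped unless it was a discard (lines 1047-1050), and the descriptor is parked on a per-channel issued_dmas list for reissue once the slot recovers. A sketch of the inner loop body; the counter names are assumptions:

	dma = get_tracker_dma(card->ctrl[i].trackers, j);
	if (dma == NULL)
		continue;

	if (dma->cmd == HW_CMD_BLK_WRITE)
		card->ctrl[i].stats.writes_issued--;	/* assumed counters */
	else if (dma->cmd == HW_CMD_BLK_DISCARD)
		card->ctrl[i].stats.discards_issued--;
	else
		card->ctrl[i].stats.reads_issued--;

	if (dma->cmd != HW_CMD_BLK_DISCARD)
		dma_unmap_page(&card->dev->dev, dma->dma_addr,
			       get_dma_size(dma),
			       dma->cmd == HW_CMD_BLK_WRITE ?
			       DMA_TO_DEVICE : DMA_FROM_DEVICE);

	list_add_tail(&dma->list, &issued_dmas[i]);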