Lines Matching defs:iop_chan (drivers/dma/iop-adma.c)
37 * Caller must hold &iop_chan->lock while calling this function
53 struct iop_adma_chan *iop_chan, dma_cookie_t cookie)
80 struct iop_adma_chan *iop_chan)
91 if (desc->chain_node.next == &iop_chan->chain)
94 dev_dbg(iop_chan->device->common.dev,
104 static void __iop_adma_slot_cleanup(struct iop_adma_chan *iop_chan)
108 u32 current_desc = iop_chan_get_current_descriptor(iop_chan);
109 int busy = iop_chan_is_busy(iop_chan);
112 dev_dbg(iop_chan->device->common.dev, "%s\n", __func__);
116 list_for_each_entry_safe(iter, _iter, &iop_chan->chain,
173 &iop_chan->chain, chain_node) {
191 &iop_chan->chain, chain_node) {
193 grp_iter, iop_chan, cookie);
197 iop_chan);
221 iter, iop_chan, cookie);
223 if (iop_adma_clean_slot(iter, iop_chan))
228 iop_chan->common.completed_cookie = cookie;
234 iop_adma_slot_cleanup(struct iop_adma_chan *iop_chan)
236 spin_lock_bh(&iop_chan->lock);
237 __iop_adma_slot_cleanup(iop_chan);
238 spin_unlock_bh(&iop_chan->lock);
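Lines 234-238 show the locked-wrapper convention this driver uses throughout: a double-underscore variant that assumes iop_chan->lock is already held (matching the comment at line 37), plus a public entry point that takes the lock in _bh form. A minimal sketch of the pattern, with hypothetical foo_* names standing in for the driver's own:

#include <linux/spinlock.h>
#include <linux/list.h>

struct foo_chan {
	spinlock_t lock;		/* protects the descriptor chain */
	struct list_head chain;		/* in-flight descriptor groups */
	int pending;			/* submitted but not yet appended */
};

/* caller must hold chan->lock (compare the comment at line 37) */
static void __foo_slot_cleanup(struct foo_chan *chan)
{
	/* walk chan->chain and reap completed descriptors */
}

static void foo_slot_cleanup(struct foo_chan *chan)
{
	spin_lock_bh(&chan->lock);	/* _bh: the cleanup tasklet contends */
	__foo_slot_cleanup(chan);
	spin_unlock_bh(&chan->lock);
}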
243 struct iop_adma_chan *iop_chan = from_tasklet(iop_chan, t,
251 spin_lock_nested(&iop_chan->lock, SINGLE_DEPTH_NESTING);
252 __iop_adma_slot_cleanup(iop_chan);
253 spin_unlock(&iop_chan->lock);
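The tasklet at lines 243-253 recovers its channel with from_tasklet() and takes the same lock with SINGLE_DEPTH_NESTING, presumably so lockdep tolerates completion callbacks that acquire a lock of the same class. A hedged sketch of the pairing, reusing the hypothetical foo_chan above (assume it also embeds a struct tasklet_struct irq_tasklet):

#include <linux/interrupt.h>

static void foo_tasklet(struct tasklet_struct *t)
{
	/* container_of() back to the channel that embeds the tasklet */
	struct foo_chan *chan = from_tasklet(chan, t, irq_tasklet);

	spin_lock_nested(&chan->lock, SINGLE_DEPTH_NESTING);
	__foo_slot_cleanup(chan);
	spin_unlock(&chan->lock);
}

/* registered at init time, as at line 1354 in this listing:
 *	tasklet_setup(&chan->irq_tasklet, foo_tasklet);
 */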
257 iop_adma_alloc_slots(struct iop_adma_chan *iop_chan, int num_slots,
271 iter = iop_chan->last_used;
273 iter = list_entry(&iop_chan->all_slots,
278 iter, _iter, &iop_chan->all_slots, slot_node) {
308 dev_dbg(iop_chan->device->common.dev,
335 iop_chan->last_used = last_used;
345 __iop_adma_slot_cleanup(iop_chan);
350 static void iop_adma_check_threshold(struct iop_adma_chan *iop_chan)
352 dev_dbg(iop_chan->device->common.dev, "pending: %d\n",
353 iop_chan->pending);
355 if (iop_chan->pending >= IOP_ADMA_THRESHOLD) {
356 iop_chan->pending = 0;
357 iop_chan_append(iop_chan);
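iop_adma_check_threshold (lines 350-357) implements a simple batching policy: descriptor submission only bumps a pending counter, and the engine is kicked once the count crosses IOP_ADMA_THRESHOLD. Sketch with an illustrative threshold value:

#define FOO_THRESHOLD 4	/* illustrative; the driver uses IOP_ADMA_THRESHOLD */

static void foo_chan_append(struct foo_chan *chan);	/* pokes the hardware */

static void foo_check_threshold(struct foo_chan *chan)
{
	if (chan->pending >= FOO_THRESHOLD) {
		chan->pending = 0;	/* batch flushed */
		foo_chan_append(chan);
	}
}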
365 struct iop_adma_chan *iop_chan = to_iop_adma_chan(tx->chan);
374 spin_lock_bh(&iop_chan->lock);
377 old_chain_tail = list_entry(iop_chan->chain.prev,
395 iop_chan->pending += slot_cnt;
396 iop_adma_check_threshold(iop_chan);
397 spin_unlock_bh(&iop_chan->lock);
399 dev_dbg(iop_chan->device->common.dev, "%s cookie: %d slot: %d\n",
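tx_submit (lines 365-399) links the new descriptor group behind the current chain tail under the channel lock, then defers the hardware kick to the threshold check above. Hedged sketch; struct foo_desc and to_foo_chan() are hypothetical, while dma_cookie_assign() is the real helper from the dmaengine-internal drivers/dma/dmaengine.h:

static dma_cookie_t foo_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct foo_desc *sw_desc = container_of(tx, struct foo_desc, async_tx);
	struct foo_chan *chan = to_foo_chan(tx->chan);
	struct foo_desc *old_tail;
	dma_cookie_t cookie;

	spin_lock_bh(&chan->lock);
	cookie = dma_cookie_assign(tx);

	/* link the new group behind the current chain tail, then fix
	 * up the tail's hardware next-descriptor pointer */
	old_tail = list_entry(chan->chain.prev, struct foo_desc, chain_node);
	list_splice_init(&sw_desc->tx_list, &chan->chain);
	/* ... program old_tail's hw next pointer to point at sw_desc ... */

	chan->pending += sw_desc->slot_cnt;	/* slots this tx consumed */
	foo_check_threshold(chan);		/* may kick the engine */
	spin_unlock_bh(&chan->lock);

	return cookie;
}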
405 static void iop_chan_start_null_memcpy(struct iop_adma_chan *iop_chan);
406 static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan);
412 * Note: We keep the slots for 1 operation on iop_chan->chain at all times. To
422 struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
424 int init = iop_chan->slots_allocated ? 0 : 1;
426 dev_get_platdata(&iop_chan->device->pdev->dev);
431 idx = iop_chan->slots_allocated;
441 hw_desc = (char *) iop_chan->device->dma_desc_pool_virt;
449 dma_desc = iop_chan->device->dma_desc_pool;
453 spin_lock_bh(&iop_chan->lock);
454 iop_chan->slots_allocated++;
455 list_add_tail(&slot->slot_node, &iop_chan->all_slots);
456 spin_unlock_bh(&iop_chan->lock);
457 } while (iop_chan->slots_allocated < num_descs_in_pool);
459 if (idx && !iop_chan->last_used)
460 iop_chan->last_used = list_entry(iop_chan->all_slots.next,
464 dev_dbg(iop_chan->device->common.dev,
466 iop_chan->slots_allocated, iop_chan->last_used);
471 iop_chan->device->common.cap_mask))
472 iop_chan_start_null_memcpy(iop_chan);
474 iop_chan->device->common.cap_mask))
475 iop_chan_start_null_xor(iop_chan);
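alloc_chan_resources (lines 412-475) carves the coherent descriptor pool into software slots, then primes the idle channel with a no-op descriptor matching its capability, which is what the Note at line 412 alludes to. A sketch of the capability-gated tail of the function; dma_has_cap() is the real dmaengine macro, the rest is hypothetical:

	/* after the allocation loop, with chan->slots_allocated populated */
	if (dma_has_cap(DMA_MEMCPY, chan->common.device->cap_mask))
		foo_chan_start_null_memcpy(chan);
	else if (dma_has_cap(DMA_XOR, chan->common.device->cap_mask))
		foo_chan_start_null_xor(chan);

	return chan->slots_allocated ? chan->slots_allocated : -ENOMEM;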
486 struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
490 dev_dbg(iop_chan->device->common.dev, "%s\n", __func__);
492 spin_lock_bh(&iop_chan->lock);
493 slot_cnt = iop_chan_interrupt_slot_count(&slots_per_op, iop_chan);
494 sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
497 iop_desc_init_interrupt(grp_start, iop_chan);
500 spin_unlock_bh(&iop_chan->lock);
509 struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
517 dev_dbg(iop_chan->device->common.dev, "%s len: %zu\n",
520 spin_lock_bh(&iop_chan->lock);
522 sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
526 iop_desc_set_byte_count(grp_start, iop_chan, len);
527 iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
531 spin_unlock_bh(&iop_chan->lock);
541 struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
549 dev_dbg(iop_chan->device->common.dev,
553 spin_lock_bh(&iop_chan->lock);
555 sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
559 iop_desc_set_byte_count(grp_start, iop_chan, len);
560 iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
566 spin_unlock_bh(&iop_chan->lock);
576 struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
583 dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %zu\n",
586 spin_lock_bh(&iop_chan->lock);
588 sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
601 spin_unlock_bh(&iop_chan->lock);
611 struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
620 dev_dbg(iop_chan->device->common.dev,
631 spin_lock_bh(&iop_chan->lock);
633 sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
638 iop_desc_set_byte_count(g, iop_chan, len);
665 spin_unlock_bh(&iop_chan->lock);
676 struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
684 dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %zu\n",
687 spin_lock_bh(&iop_chan->lock);
689 sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
709 spin_unlock_bh(&iop_chan->lock);
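All five prep_* routines above (memcpy at 509 and the xor/pq variants that follow) share one shape: take the lock, grab a slot group, program the group's first hardware descriptor, unlock, and hand back the embedded async_tx. A hedged composite sketch using the memcpy case, hypothetical names as before:

static struct dma_async_tx_descriptor *
foo_prep_dma_memcpy(struct dma_chan *c, dma_addr_t dest,
		    dma_addr_t src, size_t len, unsigned long flags)
{
	struct foo_chan *chan = to_foo_chan(c);
	struct foo_desc *sw_desc;
	int slot_cnt, slots_per_op;

	if (unlikely(!len))
		return NULL;

	spin_lock_bh(&chan->lock);
	slot_cnt = foo_memcpy_slot_count(len, &slots_per_op);
	sw_desc = foo_alloc_slots(chan, slot_cnt, slots_per_op);
	if (sw_desc) {
		/* program byte count, dest and src into the group's
		 * first hardware descriptor here */
		sw_desc->async_tx.flags = flags;
	}
	spin_unlock_bh(&chan->lock);

	return sw_desc ? &sw_desc->async_tx : NULL;
}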
716 struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
720 iop_adma_slot_cleanup(iop_chan);
722 spin_lock_bh(&iop_chan->lock);
723 list_for_each_entry_safe(iter, _iter, &iop_chan->chain,
729 iter, _iter, &iop_chan->all_slots, slot_node) {
732 iop_chan->slots_allocated--;
734 iop_chan->last_used = NULL;
736 dev_dbg(iop_chan->device->common.dev, "%s slots_allocated %d\n",
737 __func__, iop_chan->slots_allocated);
738 spin_unlock_bh(&iop_chan->lock);
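free_chan_resources (lines 716-738) is the teardown mirror of the allocator: reap outstanding completions first, then unlink every slot from both lists before dropping it. Sketch of the loop body, assuming foo_chan also carries the all_slots list and last_used cursor seen above:

	foo_slot_cleanup(chan);			/* drain completions first */

	spin_lock_bh(&chan->lock);
	list_for_each_entry_safe(iter, _iter, &chan->chain, chain_node)
		list_del(&iter->chain_node);
	list_for_each_entry_safe(iter, _iter, &chan->all_slots, slot_node) {
		list_del(&iter->slot_node);
		kfree(iter);			/* slots were kzalloc'd one by one */
		chan->slots_allocated--;
	}
	chan->last_used = NULL;
	spin_unlock_bh(&chan->lock);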
756 struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
763 iop_adma_slot_cleanup(iop_chan);
818 struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
820 if (iop_chan->pending) {
821 iop_chan->pending = 0;
822 iop_chan_append(iop_chan);
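issue_pending (lines 818-822) deliberately bypasses the threshold: anything queued is flushed to the engine immediately, which is the dmaengine contract for device_issue_pending. Sketch:

static void foo_issue_pending(struct dma_chan *c)
{
	struct foo_chan *chan = to_foo_chan(c);

	if (chan->pending) {
		chan->pending = 0;
		foo_chan_append(chan);	/* unconditional kick */
	}
}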
840 struct iop_adma_chan *iop_chan;
886 iop_chan = to_iop_adma_chan(dma_chan);
887 dma_sync_single_for_cpu(&iop_chan->device->pdev->dev, dest_dma,
921 struct iop_adma_chan *iop_chan;
986 iop_chan = to_iop_adma_chan(dma_chan);
987 dma_sync_single_for_cpu(&iop_chan->device->pdev->dev, dest_dma,
998 dma_sync_single_for_device(&iop_chan->device->pdev->dev, dest_dma,
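The two self-tests (around 840 and 921) map the destination buffer for device DMA, so the CPU must sync before inspecting the result and sync back before the buffer is reused, which is what lines 887, 987 and 998 do. A minimal sketch of that fence; dev, dest_dma, src, dest and len are hypothetical locals:

	dma_sync_single_for_cpu(dev, dest_dma, len, DMA_FROM_DEVICE);
	if (memcmp(src, dest, len))
		err = -ENODEV;		/* engine produced wrong data */
	dma_sync_single_for_device(dev, dest_dma, len, DMA_FROM_DEVICE);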
1246 struct iop_adma_chan *iop_chan;
1256 iop_chan = to_iop_adma_chan(chan);
1258 kfree(iop_chan);
1270 struct iop_adma_chan *iop_chan;
1341 iop_chan = kzalloc(sizeof(*iop_chan), GFP_KERNEL);
1342 if (!iop_chan) {
1346 iop_chan->device = adev;
1348 iop_chan->mmr_base = devm_ioremap(&pdev->dev, res->start,
1350 if (!iop_chan->mmr_base) {
1354 tasklet_setup(&iop_chan->irq_tasklet, iop_adma_tasklet);
1357 iop_adma_device_clear_err_status(iop_chan);
1371 handler[i], 0, pdev->name, iop_chan);
1377 spin_lock_init(&iop_chan->lock);
1378 INIT_LIST_HEAD(&iop_chan->chain);
1379 INIT_LIST_HEAD(&iop_chan->all_slots);
1380 iop_chan->common.device = dma_dev;
1381 dma_cookie_init(&iop_chan->common);
1382 list_add_tail(&iop_chan->common.device_node, &dma_dev->channels);
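Probe-time channel bring-up (lines 1341-1382) follows a fixed order: allocate, map MMIO, install the tasklet and IRQ handlers, clear stale error status, then initialize lock/list/cookie state and register with the dmaengine core. An annotated restatement of that final registration step; dma_cookie_init() comes from the dmaengine-internal header:

	spin_lock_init(&chan->lock);
	INIT_LIST_HEAD(&chan->chain);
	INIT_LIST_HEAD(&chan->all_slots);
	chan->common.device = dma_dev;	/* back-pointer used by to_foo_chan() */
	dma_cookie_init(&chan->common);	/* cookie and completed_cookie start at 1 */
	list_add_tail(&chan->common.device_node, &dma_dev->channels);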
1425 kfree(iop_chan);
1435 static void iop_chan_start_null_memcpy(struct iop_adma_chan *iop_chan)
1441 dev_dbg(iop_chan->device->common.dev, "%s\n", __func__);
1443 spin_lock_bh(&iop_chan->lock);
1445 sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
1449 list_splice_init(&sw_desc->tx_list, &iop_chan->chain);
1452 iop_desc_set_byte_count(grp_start, iop_chan, 0);
1453 iop_desc_set_dest_addr(grp_start, iop_chan, 0);
1461 iop_chan->common.completed_cookie = cookie - 1;
1464 BUG_ON(iop_chan_is_busy(iop_chan));
1467 iop_adma_device_clear_err_status(iop_chan);
1470 iop_chan_disable(iop_chan);
1473 iop_chan_set_next_descriptor(iop_chan, sw_desc->async_tx.phys);
1481 iop_chan_enable(iop_chan);
1483 dev_err(iop_chan->device->common.dev,
1485 spin_unlock_bh(&iop_chan->lock);
1488 static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan)
1494 dev_dbg(iop_chan->device->common.dev, "%s\n", __func__);
1496 spin_lock_bh(&iop_chan->lock);
1498 sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
1501 list_splice_init(&sw_desc->tx_list, &iop_chan->chain);
1504 iop_desc_set_byte_count(grp_start, iop_chan, 0);
1505 iop_desc_set_dest_addr(grp_start, iop_chan, 0);
1514 iop_chan->common.completed_cookie = cookie - 1;
1517 BUG_ON(iop_chan_is_busy(iop_chan));
1520 iop_adma_device_clear_err_status(iop_chan);
1523 iop_chan_disable(iop_chan);
1526 iop_chan_set_next_descriptor(iop_chan, sw_desc->async_tx.phys);
1534 iop_chan_enable(iop_chan);
1536 dev_err(iop_chan->device->common.dev,
1538 spin_unlock_bh(&iop_chan->lock);
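Both null-start routines (1435-1485 and 1488-1538) share the same priming sequence: splice a zero-length descriptor onto the chain, initialize completed_cookie just below the assigned cookie, then point the idle engine at the descriptor and start it. A composite sketch, hypothetical names as throughout:

	spin_lock_bh(&chan->lock);
	sw_desc = foo_alloc_slots(chan, slot_cnt, slots_per_op);
	if (sw_desc) {
		list_splice_init(&sw_desc->tx_list, &chan->chain);
		/* zero-length op: byte count and destination both 0 */
		cookie = dma_cookie_assign(&sw_desc->async_tx);
		/* one below the assigned cookie, so the null op completes
		 * the moment the hardware retires it */
		chan->common.completed_cookie = cookie - 1;

		foo_chan_disable(chan);		/* engine must be idle first */
		foo_chan_set_next_descriptor(chan, sw_desc->async_tx.phys);
		foo_chan_enable(chan);		/* run the null descriptor */
	} else {
		dev_err(chan->common.device->dev,
			"failed to allocate null descriptor\n");
	}
	spin_unlock_bh(&chan->lock);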