Lines Matching defs:schan
51 /* Called under spin_lock_irq(&schan->chan_lock) */
52 static void shdma_chan_xfer_ld_queue(struct shdma_chan *schan)
54 struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
59 if (ops->channel_busy(schan))
63 list_for_each_entry(sdesc, &schan->ld_queue, node)
65 ops->start_xfer(schan, sdesc);
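The fragments above (lines 51-65) make up the queue-kick helper. A minimal reconstruction of how they fit together is sketched below; the descriptor-state test (DESC_SUBMITTED) and the early return are inferred from context, not shown in the matches.

	/* Sketch: reconstruction of shdma_chan_xfer_ld_queue() from the
	 * matched lines; the DESC_SUBMITTED check is an assumption. */
	static void shdma_chan_xfer_ld_queue(struct shdma_chan *schan)
	{
		struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
		const struct shdma_ops *ops = sdev->ops;
		struct shdma_desc *sdesc;

		/* Nothing to do while the hardware channel is still busy */
		if (ops->channel_busy(schan))
			return;

		/* Start the first submitted but not yet issued descriptor */
		list_for_each_entry(sdesc, &schan->ld_queue, node)
			if (sdesc->mark == DESC_SUBMITTED) {
				ops->start_xfer(schan, sdesc);
				break;
			}
	}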
74 struct shdma_chan *schan = to_shdma_chan(tx->chan);
79 spin_lock_irq(&schan->chan_lock);
81 power_up = list_empty(&schan->ld_queue);
94 &chunk->node == &schan->ld_free))
105 list_move_tail(&chunk->node, &schan->ld_queue);
107 dev_dbg(schan->dev, "submit #%d@%p on %d\n",
108 tx->cookie, &chunk->async_tx, schan->id);
113 schan->pm_state = SHDMA_PM_BUSY;
115 ret = pm_runtime_get(schan->dev);
117 spin_unlock_irq(&schan->chan_lock);
119 dev_err(schan->dev, "%s(): GET = %d\n", __func__, ret);
121 pm_runtime_barrier(schan->dev);
123 spin_lock_irq(&schan->chan_lock);
126 if (schan->pm_state != SHDMA_PM_ESTABLISHED) {
128 to_shdma_dev(schan->dma_chan.device);
130 dev_dbg(schan->dev, "Bring up channel %d\n",
131 schan->id);
137 ops->setup_xfer(schan, schan->slave_id);
139 if (schan->pm_state == SHDMA_PM_PENDING)
140 shdma_chan_xfer_ld_queue(schan);
141 schan->pm_state = SHDMA_PM_ESTABLISHED;
148 schan->pm_state = SHDMA_PM_PENDING;
151 spin_unlock_irq(&schan->chan_lock);
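The submit-path fragments (lines 74-151) revolve around a small per-channel runtime-PM state machine. The summary below is an interpretation of lines 81-148; the enum itself is declared in the shdma-base header, not in this listing.

	/*
	 * PM states as used in the submit path above (interpretation):
	 *
	 *  SHDMA_PM_ESTABLISHED - channel powered up; the queue can be
	 *                         kicked directly
	 *  SHDMA_PM_BUSY        - first descriptor queued; pm_runtime_get()
	 *                         is in flight
	 *  SHDMA_PM_PENDING     - work queued while powered down; started
	 *                         later by issue_pending or the bring-up path
	 */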
157 static struct shdma_desc *shdma_get_desc(struct shdma_chan *schan)
161 list_for_each_entry(sdesc, &schan->ld_free, node)
171 static int shdma_setup_slave(struct shdma_chan *schan, dma_addr_t slave_addr)
173 struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
177 if (schan->dev->of_node) {
178 match = schan->hw_req;
179 ret = ops->set_slave(schan, match, slave_addr, true);
183 match = schan->real_slave_id;
186 if (schan->real_slave_id < 0 || schan->real_slave_id >= slave_num)
189 if (test_and_set_bit(schan->real_slave_id, shdma_slave_used))
192 ret = ops->set_slave(schan, match, slave_addr, false);
194 clear_bit(schan->real_slave_id, shdma_slave_used);
198 schan->slave_id = schan->real_slave_id;
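The shdma_setup_slave() fragments (lines 171-198) show two paths: a DT path that hands schan->hw_req straight to ops->set_slave(), and a legacy path that range-checks real_slave_id and claims it in the shdma_slave_used bitmap. A compact sketch of that branch structure follows; the specific error codes are assumptions.

	/* Sketch of shdma_setup_slave(); error codes are assumed. */
	static int shdma_setup_slave(struct shdma_chan *schan, dma_addr_t slave_addr)
	{
		struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
		const struct shdma_ops *ops = sdev->ops;
		int match, ret;

		if (schan->dev->of_node) {
			/* DT: the hardware request line is already resolved */
			match = schan->hw_req;
			ret = ops->set_slave(schan, match, slave_addr, true);
			if (ret < 0)
				return ret;
		} else {
			/* Legacy: validate the slave ID and claim it globally */
			match = schan->real_slave_id;

			if (schan->real_slave_id < 0 || schan->real_slave_id >= slave_num)
				return -EINVAL;

			if (test_and_set_bit(schan->real_slave_id, shdma_slave_used))
				return -EBUSY;

			ret = ops->set_slave(schan, match, slave_addr, false);
			if (ret < 0) {
				clear_bit(schan->real_slave_id, shdma_slave_used);
				return ret;
			}
		}

		schan->slave_id = schan->real_slave_id;
		return 0;
	}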
205 struct shdma_chan *schan = to_shdma_chan(chan);
206 struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
218 schan->real_slave_id = slave->slave_id;
219 ret = shdma_setup_slave(schan, 0);
224 schan->slave_id = -EINVAL;
227 schan->desc = kcalloc(NR_DESCS_PER_CHANNEL,
229 if (!schan->desc) {
233 schan->desc_num = NR_DESCS_PER_CHANNEL;
236 desc = ops->embedded_desc(schan->desc, i);
238 &schan->dma_chan);
242 list_add(&desc->node, &schan->ld_free);
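Lines 205-242 belong to channel resource allocation: the descriptor pool is one kcalloc'd array, and ops->embedded_desc() returns the i-th element so the core never needs the glue driver's descriptor layout. A fragment-style sketch of that loop; the desc_size field and the tx_submit hookup are assumptions taken from context.

	/* Sketch of the descriptor-pool setup (lines 227-242). */
	schan->desc = kcalloc(NR_DESCS_PER_CHANNEL, sdev->desc_size, GFP_KERNEL);
	if (!schan->desc)
		return -ENOMEM;
	schan->desc_num = NR_DESCS_PER_CHANNEL;

	for (i = 0; i < NR_DESCS_PER_CHANNEL; i++) {
		struct shdma_desc *desc = ops->embedded_desc(schan->desc, i);

		dma_async_tx_descriptor_init(&desc->async_tx, &schan->dma_chan);
		desc->async_tx.tx_submit = shdma_tx_submit;	/* assumed hookup */

		list_add(&desc->node, &schan->ld_free);
	}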
276 struct shdma_chan *schan;
286 schan = to_shdma_chan(chan);
290 * For DT, the schan->slave_id field is generated by the
295 if (schan->dev->of_node) {
296 ret = sdev->ops->set_slave(schan, slave_id, 0, true);
300 schan->real_slave_id = schan->slave_id;
313 ret = sdev->ops->set_slave(schan, slave_id, 0, true);
317 schan->real_slave_id = slave_id;
323 static dma_async_tx_callback __ld_cleanup(struct shdma_chan *schan, bool all)
335 spin_lock_irqsave(&schan->chan_lock, flags);
336 list_for_each_entry_safe(desc, _desc, &schan->ld_queue, node) {
357 if (schan->dma_chan.completed_cookie != desc->cookie - 1)
358 dev_dbg(schan->dev,
361 schan->dma_chan.completed_cookie + 1);
362 schan->dma_chan.completed_cookie = desc->cookie;
370 dev_dbg(schan->dev, "descriptor #%d@%p on %d callback\n",
371 tx->cookie, tx, schan->id);
393 dev_dbg(schan->dev, "descriptor %p #%d completed.\n",
403 list_move(&desc->node, &schan->ld_free);
410 if (list_empty(&schan->ld_queue)) {
411 dev_dbg(schan->dev, "Bring down channel %d\n", schan->id);
412 pm_runtime_put(schan->dev);
413 schan->pm_state = SHDMA_PM_ESTABLISHED;
414 } else if (schan->pm_state == SHDMA_PM_PENDING) {
415 shdma_chan_xfer_ld_queue(schan);
425 schan->dma_chan.completed_cookie = schan->dma_chan.cookie;
427 list_splice_tail(&cyclic_list, &schan->ld_queue);
429 spin_unlock_irqrestore(&schan->chan_lock, flags);
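Lines 323-429 form the descriptor completion path. Summarized as pseudocode below; this is an interpretation of the matches, not the full function body.

	/* Pseudocode summary of __ld_cleanup() (lines 335-429). */
	spin_lock_irqsave(&schan->chan_lock, flags);
	list_for_each_entry_safe(desc, _desc, &schan->ld_queue, node) {
		/* advance completed_cookie, run the descriptor callback,
		 * and move finished descriptors back onto ld_free */
	}
	if (list_empty(&schan->ld_queue)) {
		pm_runtime_put(schan->dev);		/* last one done: power down */
		schan->pm_state = SHDMA_PM_ESTABLISHED;
	} else if (schan->pm_state == SHDMA_PM_PENDING) {
		shdma_chan_xfer_ld_queue(schan);	/* work arrived meanwhile */
	}
	list_splice_tail(&cyclic_list, &schan->ld_queue);	/* requeue cyclic descriptors */
	spin_unlock_irqrestore(&schan->chan_lock, flags);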
441 static void shdma_chan_ld_cleanup(struct shdma_chan *schan, bool all)
443 while (__ld_cleanup(schan, all))
452 struct shdma_chan *schan = to_shdma_chan(chan);
458 spin_lock_irq(&schan->chan_lock);
459 ops->halt_channel(schan);
460 spin_unlock_irq(&schan->chan_lock);
465 if (!list_empty(&schan->ld_queue))
466 shdma_chan_ld_cleanup(schan, true);
468 if (schan->slave_id >= 0) {
470 clear_bit(schan->slave_id, shdma_slave_used);
474 schan->real_slave_id = 0;
476 spin_lock_irq(&schan->chan_lock);
478 list_splice_init(&schan->ld_free, &list);
479 schan->desc_num = 0;
481 spin_unlock_irq(&schan->chan_lock);
483 kfree(schan->desc);
488 * @schan: DMA channel
501 static struct shdma_desc *shdma_add_desc(struct shdma_chan *schan,
505 struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
514 new = shdma_get_desc(schan);
516 dev_err(schan->dev, "No free link descriptor available\n");
520 ops->desc_setup(schan, new, *src, *dst, &copy_size);
531 dev_dbg(schan->dev,
560 static struct dma_async_tx_descriptor *shdma_prep_sg(struct shdma_chan *schan,
572 chunks += DIV_ROUND_UP(sg_dma_len(sg), schan->max_xfer_len);
575 spin_lock_irqsave(&schan->chan_lock, irq_flags);
596 dev_dbg(schan->dev, "Add SG #%d@%p[%zu], dma %pad\n",
600 new = shdma_add_desc(schan, flags,
604 new = shdma_add_desc(schan, flags,
623 list_splice_tail(&tx_list, &schan->ld_free);
625 spin_unlock_irqrestore(&schan->chan_lock, irq_flags);
632 list_splice(&tx_list, &schan->ld_free);
634 spin_unlock_irqrestore(&schan->chan_lock, irq_flags);
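shdma_prep_sg() (lines 560-634) sizes the transfer in chunks of at most schan->max_xfer_len and builds one shdma_desc per chunk via shdma_add_desc(). The chunk accounting from line 572, shown in its loop for context:

	/* Chunk accounting used by shdma_prep_sg() (line 572). */
	for_each_sg(sgl, sg, sg_len, i)
		chunks += DIV_ROUND_UP(sg_dma_len(sg), schan->max_xfer_len);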
643 struct shdma_chan *schan = to_shdma_chan(chan);
649 BUG_ON(!schan->desc_num);
657 return shdma_prep_sg(schan, &sg, 1, &dma_dest, DMA_MEM_TO_MEM,
665 struct shdma_chan *schan = to_shdma_chan(chan);
666 struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
668 int slave_id = schan->slave_id;
674 BUG_ON(!schan->desc_num);
678 dev_warn(schan->dev, "%s: bad parameter: len=%d, id=%d\n",
683 slave_addr = ops->slave_addr(schan);
685 return shdma_prep_sg(schan, sgl, sg_len, &slave_addr,
696 struct shdma_chan *schan = to_shdma_chan(chan);
697 struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
701 int slave_id = schan->slave_id;
709 BUG_ON(!schan->desc_num);
712 dev_err(schan->dev, "sg length %d exceeds limit %d",
719 dev_warn(schan->dev,
725 slave_addr = ops->slave_addr(schan);
746 desc = shdma_prep_sg(schan, sgl, sg_len, &slave_addr,
755 struct shdma_chan *schan = to_shdma_chan(chan);
760 spin_lock_irqsave(&schan->chan_lock, flags);
761 ops->halt_channel(schan);
763 if (ops->get_partial && !list_empty(&schan->ld_queue)) {
765 struct shdma_desc *desc = list_first_entry(&schan->ld_queue,
767 desc->partial = ops->get_partial(schan, desc);
770 spin_unlock_irqrestore(&schan->chan_lock, flags);
772 shdma_chan_ld_cleanup(schan, true);
780 struct shdma_chan *schan = to_shdma_chan(chan);
793 return shdma_setup_slave(schan,
800 struct shdma_chan *schan = to_shdma_chan(chan);
802 spin_lock_irq(&schan->chan_lock);
803 if (schan->pm_state == SHDMA_PM_ESTABLISHED)
804 shdma_chan_xfer_ld_queue(schan);
806 schan->pm_state = SHDMA_PM_PENDING;
807 spin_unlock_irq(&schan->chan_lock);
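Lines 800-807 are nearly the whole issue_pending callback; only the elided line 805 is missing. Reassembled below, with the "else" on that line assumed.

	/* Reassembly of the issue_pending callback from lines 800-807. */
	static void shdma_issue_pending(struct dma_chan *chan)
	{
		struct shdma_chan *schan = to_shdma_chan(chan);

		spin_lock_irq(&schan->chan_lock);
		if (schan->pm_state == SHDMA_PM_ESTABLISHED)
			shdma_chan_xfer_ld_queue(schan);
		else
			schan->pm_state = SHDMA_PM_PENDING;
		spin_unlock_irq(&schan->chan_lock);
	}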
814 struct shdma_chan *schan = to_shdma_chan(chan);
818 shdma_chan_ld_cleanup(schan, false);
820 spin_lock_irqsave(&schan->chan_lock, flags);
831 list_for_each_entry(sdesc, &schan->ld_queue, node)
838 spin_unlock_irqrestore(&schan->chan_lock, flags);
847 struct shdma_chan *schan;
852 shdma_for_each_chan(schan, sdev, i) {
856 if (!schan)
859 spin_lock(&schan->chan_lock);
862 ops->halt_channel(schan);
864 list_splice_init(&schan->ld_queue, &dl);
867 dev_dbg(schan->dev, "Bring down channel %d\n", schan->id);
868 pm_runtime_put(schan->dev);
870 schan->pm_state = SHDMA_PM_ESTABLISHED;
872 spin_unlock(&schan->chan_lock);
882 spin_lock(&schan->chan_lock);
883 list_splice(&dl, &schan->ld_free);
884 spin_unlock(&schan->chan_lock);
895 struct shdma_chan *schan = dev;
897 to_shdma_dev(schan->dma_chan.device)->ops;
900 spin_lock(&schan->chan_lock);
902 ret = ops->chan_irq(schan, irq) ? IRQ_WAKE_THREAD : IRQ_NONE;
904 spin_unlock(&schan->chan_lock);
911 struct shdma_chan *schan = dev;
913 to_shdma_dev(schan->dma_chan.device)->ops;
916 spin_lock_irq(&schan->chan_lock);
917 list_for_each_entry(sdesc, &schan->ld_queue, node) {
919 ops->desc_completed(schan, sdesc)) {
920 dev_dbg(schan->dev, "done #%d@%p\n",
927 shdma_chan_xfer_ld_queue(schan);
928 spin_unlock_irq(&schan->chan_lock);
930 shdma_chan_ld_cleanup(schan, false);
935 int shdma_request_irq(struct shdma_chan *schan, int irq,
938 int ret = devm_request_threaded_irq(schan->dev, irq, chan_irq,
939 chan_irqt, flags, name, schan);
941 schan->irq = ret < 0 ? ret : irq;
948 struct shdma_chan *schan, int id)
950 schan->pm_state = SHDMA_PM_ESTABLISHED;
953 schan->dma_chan.device = &sdev->dma_dev;
954 dma_cookie_init(&schan->dma_chan);
956 schan->dev = sdev->dma_dev.dev;
957 schan->id = id;
959 if (!schan->max_xfer_len)
960 schan->max_xfer_len = PAGE_SIZE;
962 spin_lock_init(&schan->chan_lock);
965 INIT_LIST_HEAD(&schan->ld_queue);
966 INIT_LIST_HEAD(&schan->ld_free);
969 list_add_tail(&schan->dma_chan.device_node,
971 sdev->schan[id] = schan;
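shdma_request_irq() and shdma_chan_probe() (lines 935-971) are the entry points a glue driver uses to register a channel with this core. A hypothetical usage sketch follows; my_chan, my_chan_probe, the IRQ flags and the "my-dma" name are invented for illustration and are not part of the listing.

	/* Hypothetical glue-driver channel setup; my_* names are invented. */
	static int my_chan_probe(struct shdma_dev *sdev, struct my_chan *mchan,
				 int id, int irq)
	{
		struct shdma_chan *schan = &mchan->shdma_chan;
		int ret;

		/* Hook the channel into the shdma core (lock, lists, cookie, id) */
		shdma_chan_probe(sdev, schan, id);

		/* Let the core manage the per-channel IRQ */
		ret = shdma_request_irq(schan, irq, IRQF_SHARED, "my-dma");
		if (ret < 0)
			shdma_chan_remove(schan);

		return ret;
	}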
975 void shdma_chan_remove(struct shdma_chan *schan)
977 list_del(&schan->dma_chan.device_node);
1003 sdev->schan = kcalloc(chan_num, sizeof(*sdev->schan), GFP_KERNEL);
1004 if (!sdev->schan)
1031 kfree(sdev->schan);
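Every ops-> call in this listing goes through the shdma_ops vtable supplied by the glue driver. The callbacks actually exercised by the matches are collected below as a summary; the authoritative prototypes live in the shdma-base header and may differ in detail.

	/* Callback surface exercised above (summary, not the header definition). */
	ops->channel_busy(schan);		/* line 59:  is the HW still transferring?   */
	ops->start_xfer(schan, sdesc);		/* line 65:  kick one descriptor             */
	ops->setup_xfer(schan, slave_id);	/* line 137: configure channel for a slave   */
	ops->set_slave(schan, match, addr, try);/* 179/192/296/313: bind a request line      */
	ops->embedded_desc(schan->desc, i);	/* line 236: i-th descriptor in the pool     */
	ops->desc_setup(schan, new, src, dst, &copy_size); /* line 520: fill one chunk       */
	ops->slave_addr(schan);			/* 683/725:  FIFO address for slave DMA      */
	ops->halt_channel(schan);		/* 459/761/862: stop the HW channel          */
	ops->get_partial(schan, desc);		/* line 767: residue of an aborted descriptor */
	ops->chan_irq(schan, irq);		/* line 902: hard-IRQ half                   */
	ops->desc_completed(schan, sdesc);	/* line 919: threaded-IRQ completion test    */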