Lines Matching defs:schan

51 /* Called under spin_lock_irq(&schan->chan_lock) */
52 static void shdma_chan_xfer_ld_queue(struct shdma_chan *schan)
54 struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
59 if (ops->channel_busy(schan))
63 list_for_each_entry(sdesc, &schan->ld_queue, node)
65 ops->start_xfer(schan, sdesc);
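
The fragments at 51-65 are the queue dispatcher that every other path calls once it holds the channel lock. A minimal sketch of how those pieces fit together, assuming the sdesc->mark field and the DESC_SUBMITTED value from <linux/shdma-base.h>; anything not quoted in the listing above is reconstructed and may differ from the file.

/* Sketch only: the caller must hold schan->chan_lock (see the comment at 51) */
static void xfer_ld_queue_sketch(struct shdma_chan *schan)
{
	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
	const struct shdma_ops *ops = sdev->ops;
	struct shdma_desc *sdesc;

	/* Hardware still busy with the previous transfer: nothing to do */
	if (ops->channel_busy(schan))
		return;

	/* Start the first descriptor that was submitted but not yet loaded */
	list_for_each_entry(sdesc, &schan->ld_queue, node)
		if (sdesc->mark == DESC_SUBMITTED) {
			ops->start_xfer(schan, sdesc);
			break;
		}
}
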
74 struct shdma_chan *schan = to_shdma_chan(tx->chan);
79 spin_lock_irq(&schan->chan_lock);
81 power_up = list_empty(&schan->ld_queue);
94 &chunk->node == &schan->ld_free))
105 list_move_tail(&chunk->node, &schan->ld_queue);
107 dev_dbg(schan->dev, "submit #%d@%p on %d\n",
108 tx->cookie, &chunk->async_tx, schan->id);
113 schan->pm_state = SHDMA_PM_BUSY;
115 ret = pm_runtime_get(schan->dev);
117 spin_unlock_irq(&schan->chan_lock);
119 dev_err(schan->dev, "%s(): GET = %d\n", __func__, ret);
121 pm_runtime_barrier(schan->dev);
123 spin_lock_irq(&schan->chan_lock);
126 if (schan->pm_state != SHDMA_PM_ESTABLISHED) {
128 to_shdma_dev(schan->dma_chan.device);
130 dev_dbg(schan->dev, "Bring up channel %d\n",
131 schan->id);
137 ops->setup_xfer(schan, schan->slave_id);
139 if (schan->pm_state == SHDMA_PM_PENDING)
140 shdma_chan_xfer_ld_queue(schan);
141 schan->pm_state = SHDMA_PM_ESTABLISHED;
148 schan->pm_state = SHDMA_PM_PENDING;
151 spin_unlock_irq(&schan->chan_lock);
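
Lines 74-151 are the submit path; the interesting part is the runtime-PM handshake done for the first descriptor queued on an idle channel. A hedged sketch of that bring-up branch, reconstructed around the quoted lines (cookie assignment, cyclic handling and the error path are simplified):

/* Sketch of the power-up branch of tx_submit; entered with chan_lock held */
static void channel_power_up_sketch(struct shdma_chan *schan)
{
	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
	const struct shdma_ops *ops = sdev->ops;
	int ret;

	schan->pm_state = SHDMA_PM_BUSY;

	/* Asynchronous get: the device may still be resuming when this returns */
	ret = pm_runtime_get(schan->dev);

	spin_unlock_irq(&schan->chan_lock);
	if (ret < 0)
		dev_err(schan->dev, "%s(): GET = %d\n", __func__, ret);

	/* Let any outstanding runtime-PM transition finish before re-checking */
	pm_runtime_barrier(schan->dev);
	spin_lock_irq(&schan->chan_lock);

	/* The runtime-resume callback may already have established the channel */
	if (schan->pm_state != SHDMA_PM_ESTABLISHED) {
		dev_dbg(schan->dev, "Bring up channel %d\n", schan->id);
		ops->setup_xfer(schan, schan->slave_id);

		/* issue_pending() raced with us: start the queue it deferred */
		if (schan->pm_state == SHDMA_PM_PENDING)
			shdma_chan_xfer_ld_queue(schan);
		schan->pm_state = SHDMA_PM_ESTABLISHED;
	}
}
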
157 static struct shdma_desc *shdma_get_desc(struct shdma_chan *schan)
161 list_for_each_entry(sdesc, &schan->ld_free, node)
171 static int shdma_setup_slave(struct shdma_chan *schan, dma_addr_t slave_addr)
173 struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
177 if (schan->dev->of_node) {
178 match = schan->hw_req;
179 ret = ops->set_slave(schan, match, slave_addr, true);
183 match = schan->real_slave_id;
186 if (schan->real_slave_id < 0 || schan->real_slave_id >= slave_num)
189 if (test_and_set_bit(schan->real_slave_id, shdma_slave_used))
192 ret = ops->set_slave(schan, match, slave_addr, false);
194 clear_bit(schan->real_slave_id, shdma_slave_used);
198 schan->slave_id = schan->real_slave_id;
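
Lines 171-198 show the non-DT slave selection: validate the requested request line, claim it atomically in the global shdma_slave_used bitmap, and release it again if the driver's set_slave() refuses it. A sketch under the assumption that slave_num is the module-wide table size referenced at line 186:

static int setup_slave_sketch(struct shdma_chan *schan, dma_addr_t slave_addr)
{
	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
	const struct shdma_ops *ops = sdev->ops;
	int ret;

	if (schan->real_slave_id < 0 || schan->real_slave_id >= slave_num)
		return -EINVAL;

	/* Atomically reserve the request line; fail if it is already in use */
	if (test_and_set_bit(schan->real_slave_id, shdma_slave_used))
		return -EBUSY;

	ret = ops->set_slave(schan, schan->real_slave_id, slave_addr, false);
	if (ret < 0) {
		clear_bit(schan->real_slave_id, shdma_slave_used);
		return ret;
	}

	schan->slave_id = schan->real_slave_id;
	return 0;
}
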
205 struct shdma_chan *schan = to_shdma_chan(chan);
206 struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
218 schan->real_slave_id = slave->slave_id;
219 ret = shdma_setup_slave(schan, 0);
224 schan->slave_id = -EINVAL;
227 schan->desc = kcalloc(NR_DESCS_PER_CHANNEL,
229 if (!schan->desc) {
233 schan->desc_num = NR_DESCS_PER_CHANNEL;
236 desc = ops->embedded_desc(schan->desc, i);
238 &schan->dma_chan);
242 list_add(&desc->node, &schan->ld_free);
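
Lines 227-242 are the per-channel descriptor pool setup inside alloc_chan_resources. A hedged reconstruction of the loop, assuming sdev->desc_size and the DESC_IDLE mark from <linux/shdma-base.h>; the tx_submit hookup is the usual dmaengine wiring here, but treat it as reconstructed rather than quoted:

static int desc_pool_alloc_sketch(struct shdma_chan *schan)
{
	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
	const struct shdma_ops *ops = sdev->ops;
	struct shdma_desc *desc;
	int i;

	/* One flat allocation; the glue driver knows the real descriptor size */
	schan->desc = kcalloc(NR_DESCS_PER_CHANNEL, sdev->desc_size, GFP_KERNEL);
	if (!schan->desc)
		return -ENOMEM;
	schan->desc_num = NR_DESCS_PER_CHANNEL;

	for (i = 0; i < NR_DESCS_PER_CHANNEL; i++) {
		/* embedded_desc() returns the generic part of the i-th descriptor */
		desc = ops->embedded_desc(schan->desc, i);
		dma_async_tx_descriptor_init(&desc->async_tx, &schan->dma_chan);
		desc->async_tx.tx_submit = shdma_tx_submit;
		desc->mark = DESC_IDLE;
		list_add(&desc->node, &schan->ld_free);
	}
	return NR_DESCS_PER_CHANNEL;
}
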
276 struct shdma_chan *schan;
286 schan = to_shdma_chan(chan);
290 * For DT, the schan->slave_id field is generated by the
295 if (schan->dev->of_node) {
296 ret = sdev->ops->set_slave(schan, slave_id, 0, true);
300 schan->real_slave_id = schan->slave_id;
313 ret = sdev->ops->set_slave(schan, slave_id, 0, true);
317 schan->real_slave_id = slave_id;
323 static dma_async_tx_callback __ld_cleanup(struct shdma_chan *schan, bool all)
335 spin_lock_irqsave(&schan->chan_lock, flags);
336 list_for_each_entry_safe(desc, _desc, &schan->ld_queue, node) {
357 if (schan->dma_chan.completed_cookie != desc->cookie - 1)
358 dev_dbg(schan->dev,
361 schan->dma_chan.completed_cookie + 1);
362 schan->dma_chan.completed_cookie = desc->cookie;
370 dev_dbg(schan->dev, "descriptor #%d@%p on %d callback\n",
371 tx->cookie, tx, schan->id);
393 dev_dbg(schan->dev, "descriptor %p #%d completed.\n",
403 list_move(&desc->node, &schan->ld_free);
410 if (list_empty(&schan->ld_queue)) {
411 dev_dbg(schan->dev, "Bring down channel %d\n", schan->id);
412 pm_runtime_put(schan->dev);
413 schan->pm_state = SHDMA_PM_ESTABLISHED;
414 } else if (schan->pm_state == SHDMA_PM_PENDING) {
415 shdma_chan_xfer_ld_queue(schan);
425 schan->dma_chan.completed_cookie = schan->dma_chan.cookie;
427 list_splice_tail(&cyclic_list, &schan->ld_queue);
429 spin_unlock_irqrestore(&schan->chan_lock, flags);
441 static void shdma_chan_ld_cleanup(struct shdma_chan *schan, bool all)
443 while (__ld_cleanup(schan, all))
452 struct shdma_chan *schan = to_shdma_chan(chan);
458 spin_lock_irq(&schan->chan_lock);
459 ops->halt_channel(schan);
460 spin_unlock_irq(&schan->chan_lock);
465 if (!list_empty(&schan->ld_queue))
466 shdma_chan_ld_cleanup(schan, true);
468 if (schan->slave_id >= 0) {
470 clear_bit(schan->slave_id, shdma_slave_used);
474 schan->real_slave_id = 0;
476 spin_lock_irq(&schan->chan_lock);
478 list_splice_init(&schan->ld_free, &list);
479 schan->desc_num = 0;
481 spin_unlock_irq(&schan->chan_lock);
483 kfree(schan->desc);
488 * @schan: DMA channel
501 static struct shdma_desc *shdma_add_desc(struct shdma_chan *schan,
505 struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
514 new = shdma_get_desc(schan);
516 dev_err(schan->dev, "No free link descriptor available\n");
520 ops->desc_setup(schan, new, *src, *dst, &copy_size);
531 dev_dbg(schan->dev,
560 static struct dma_async_tx_descriptor *shdma_prep_sg(struct shdma_chan *schan,
572 chunks += DIV_ROUND_UP(sg_dma_len(sg), schan->max_xfer_len);
575 spin_lock_irqsave(&schan->chan_lock, irq_flags);
596 dev_dbg(schan->dev, "Add SG #%d@%p[%zu], dma %pad\n",
600 new = shdma_add_desc(schan, flags,
604 new = shdma_add_desc(schan, flags,
623 list_splice_tail(&tx_list, &schan->ld_free);
625 spin_unlock_irqrestore(&schan->chan_lock, irq_flags);
632 list_splice(&tx_list, &schan->ld_free);
634 spin_unlock_irqrestore(&schan->chan_lock, irq_flags);
643 struct shdma_chan *schan = to_shdma_chan(chan);
649 BUG_ON(!schan->desc_num);
657 return shdma_prep_sg(schan, &sg, 1, &dma_dest, DMA_MEM_TO_MEM,
665 struct shdma_chan *schan = to_shdma_chan(chan);
666 struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
668 int slave_id = schan->slave_id;
674 BUG_ON(!schan->desc_num);
678 dev_warn(schan->dev, "%s: bad parameter: len=%d, id=%d\n",
683 slave_addr = ops->slave_addr(schan);
685 return shdma_prep_sg(schan, sgl, sg_len, &slave_addr,
696 struct shdma_chan *schan = to_shdma_chan(chan);
697 struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
701 int slave_id = schan->slave_id;
709 BUG_ON(!schan->desc_num);
712 dev_err(schan->dev, "sg length %d exceeds limit %d",
719 dev_warn(schan->dev,
725 slave_addr = ops->slave_addr(schan);
746 desc = shdma_prep_sg(schan, sgl, sg_len, &slave_addr,
755 struct shdma_chan *schan = to_shdma_chan(chan);
760 spin_lock_irqsave(&schan->chan_lock, flags);
761 ops->halt_channel(schan);
763 if (ops->get_partial && !list_empty(&schan->ld_queue)) {
765 struct shdma_desc *desc = list_first_entry(&schan->ld_queue,
767 desc->partial = ops->get_partial(schan, desc);
770 spin_unlock_irqrestore(&schan->chan_lock, flags);
772 shdma_chan_ld_cleanup(schan, true);
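
Lines 755-772 are device_terminate_all: halt the hardware under the lock, remember how much of the in-flight descriptor was left undone (when the driver implements get_partial), then flush the queue. A sketch of the whole callback, reconstructed from the quoted lines:

static int terminate_all_sketch(struct dma_chan *chan)
{
	struct shdma_chan *schan = to_shdma_chan(chan);
	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
	const struct shdma_ops *ops = sdev->ops;
	unsigned long flags;

	spin_lock_irqsave(&schan->chan_lock, flags);
	ops->halt_channel(schan);

	if (ops->get_partial && !list_empty(&schan->ld_queue)) {
		/* Record the residue of the descriptor that was being transferred */
		struct shdma_desc *desc = list_first_entry(&schan->ld_queue,
						struct shdma_desc, node);
		desc->partial = ops->get_partial(schan, desc);
	}
	spin_unlock_irqrestore(&schan->chan_lock, flags);

	/* Complete/free everything still on ld_queue */
	shdma_chan_ld_cleanup(schan, true);
	return 0;
}
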
780 struct shdma_chan *schan = to_shdma_chan(chan);
794 config->slave_id != schan->real_slave_id))
795 schan->real_slave_id = config->slave_id;
801 return shdma_setup_slave(schan,
808 struct shdma_chan *schan = to_shdma_chan(chan);
810 spin_lock_irq(&schan->chan_lock);
811 if (schan->pm_state == SHDMA_PM_ESTABLISHED)
812 shdma_chan_xfer_ld_queue(schan);
814 schan->pm_state = SHDMA_PM_PENDING;
815 spin_unlock_irq(&schan->chan_lock);
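
Lines 808-815 are device_issue_pending, and together with the submit path above they explain the SHDMA_PM_PENDING state: if the channel is already powered the queue is started immediately, otherwise the start is deferred to the bring-up code. Reconstructed from the quoted lines:

static void issue_pending_sketch(struct dma_chan *chan)
{
	struct shdma_chan *schan = to_shdma_chan(chan);

	spin_lock_irq(&schan->chan_lock);
	if (schan->pm_state == SHDMA_PM_ESTABLISHED)
		shdma_chan_xfer_ld_queue(schan);	/* powered: start right away */
	else
		schan->pm_state = SHDMA_PM_PENDING;	/* deferred to bring-up */
	spin_unlock_irq(&schan->chan_lock);
}
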
822 struct shdma_chan *schan = to_shdma_chan(chan);
826 shdma_chan_ld_cleanup(schan, false);
828 spin_lock_irqsave(&schan->chan_lock, flags);
839 list_for_each_entry(sdesc, &schan->ld_queue, node)
846 spin_unlock_irqrestore(&schan->chan_lock, flags);
855 struct shdma_chan *schan;
860 shdma_for_each_chan(schan, sdev, i) {
864 if (!schan)
867 spin_lock(&schan->chan_lock);
870 ops->halt_channel(schan);
872 list_splice_init(&schan->ld_queue, &dl);
875 dev_dbg(schan->dev, "Bring down channel %d\n", schan->id);
876 pm_runtime_put(schan->dev);
878 schan->pm_state = SHDMA_PM_ESTABLISHED;
880 spin_unlock(&schan->chan_lock);
890 spin_lock(&schan->chan_lock);
891 list_splice(&dl, &schan->ld_free);
892 spin_unlock(&schan->chan_lock);
903 struct shdma_chan *schan = dev;
905 to_shdma_dev(schan->dma_chan.device)->ops;
908 spin_lock(&schan->chan_lock);
910 ret = ops->chan_irq(schan, irq) ? IRQ_WAKE_THREAD : IRQ_NONE;
912 spin_unlock(&schan->chan_lock);
919 struct shdma_chan *schan = dev;
921 to_shdma_dev(schan->dma_chan.device)->ops;
924 spin_lock_irq(&schan->chan_lock);
925 list_for_each_entry(sdesc, &schan->ld_queue, node) {
927 ops->desc_completed(schan, sdesc)) {
928 dev_dbg(schan->dev, "done #%d@%p\n",
935 shdma_chan_xfer_ld_queue(schan);
936 spin_unlock_irq(&schan->chan_lock);
938 shdma_chan_ld_cleanup(schan, false);
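
Lines 903-938 split interrupt handling between a hard handler and a threaded handler: the hard half only asks the driver whether this channel fired and wakes the thread, while the threaded half marks completed descriptors, restarts the queue, and runs the callbacks outside the spinlock. A sketch of the hard half, reconstructed from the quoted lines:

static irqreturn_t chan_irq_sketch(int irq, void *dev)
{
	struct shdma_chan *schan = dev;
	const struct shdma_ops *ops = to_shdma_dev(schan->dma_chan.device)->ops;
	irqreturn_t ret;

	spin_lock(&schan->chan_lock);
	/* Driver-specific check/ack; IRQ_WAKE_THREAD defers the heavy work */
	ret = ops->chan_irq(schan, irq) ? IRQ_WAKE_THREAD : IRQ_NONE;
	spin_unlock(&schan->chan_lock);

	return ret;
}
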
943 int shdma_request_irq(struct shdma_chan *schan, int irq,
946 int ret = devm_request_threaded_irq(schan->dev, irq, chan_irq,
947 chan_irqt, flags, name, schan);
949 schan->irq = ret < 0 ? ret : irq;
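
Lines 943-949 define the helper glue drivers use to hook that handler pair up. A hedged usage sketch; err and irq are assumed to be declared in the surrounding probe code, and the flag and name string are illustrative, not from the file:

	/* In a glue driver's channel probe, after the shdma_chan is set up */
	err = shdma_request_irq(schan, irq, IRQF_SHARED, "my-dma-chan");
	if (err < 0)
		dev_err(schan->dev, "DMA channel request_irq failed: %d\n", err);
	/* On success schan->irq records the line; the devm_* core frees it */
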
956 struct shdma_chan *schan, int id)
958 schan->pm_state = SHDMA_PM_ESTABLISHED;
961 schan->dma_chan.device = &sdev->dma_dev;
962 dma_cookie_init(&schan->dma_chan);
964 schan->dev = sdev->dma_dev.dev;
965 schan->id = id;
967 if (!schan->max_xfer_len)
968 schan->max_xfer_len = PAGE_SIZE;
970 spin_lock_init(&schan->chan_lock);
973 INIT_LIST_HEAD(&schan->ld_queue);
974 INIT_LIST_HEAD(&schan->ld_free);
977 list_add_tail(&schan->dma_chan.device_node,
979 sdev->schan[id] = schan;
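
Lines 956-979 initialise one channel's generic state. A hedged sketch of how a glue driver typically embeds and registers a channel; struct my_dmae_chan, my_chan_probe() and the 64 KiB limit are hypothetical, and per lines 967-968 a max_xfer_len left at zero falls back to PAGE_SIZE:

struct my_dmae_chan {
	struct shdma_chan shdma_chan;	/* generic part, recovered via container_of() */
	void __iomem *base;		/* controller-specific state (illustrative) */
};

static int my_chan_probe(struct shdma_dev *sdev, int id, struct device *dev)
{
	struct my_dmae_chan *mchan;
	struct shdma_chan *schan;

	mchan = devm_kzalloc(dev, sizeof(*mchan), GFP_KERNEL);
	if (!mchan)
		return -ENOMEM;

	schan = &mchan->shdma_chan;
	schan->max_xfer_len = SZ_64K;	/* hypothetical hardware limit; 0 => PAGE_SIZE */

	/* Sets up chan_lock, ld_queue/ld_free, the dma cookie and dma_dev linkage */
	shdma_chan_probe(sdev, schan, id);
	return 0;
}
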
983 void shdma_chan_remove(struct shdma_chan *schan)
985 list_del(&schan->dma_chan.device_node);
1011 sdev->schan = kcalloc(chan_num, sizeof(*sdev->schan), GFP_KERNEL);
1012 if (!sdev->schan)
1039 kfree(sdev->schan);
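
Lines 1011-1039 belong to the controller-level init/cleanup pair that allocates and frees the sdev->schan pointer array. A hedged usage sketch from a glue driver's probe/remove; the prototypes are from include/linux/shdma-base.h as I recall them, so treat the exact signatures as assumptions:

	/* probe: register the controller with chan_num channels */
	err = shdma_init(&pdev->dev, &my_dev->shdma_dev, chan_num);
	if (err < 0)
		return err;

	/* ... per-channel setup, dma_async_device_register(), etc. ... */

	/* remove: undo shdma_init(), freeing the sdev->schan array */
	shdma_cleanup(&my_dev->shdma_dev);
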