Lines matching refs: chan
66 axi_chan_iowrite32(struct axi_dma_chan *chan, u32 reg, u32 val)
68 iowrite32(val, chan->chan_regs + reg);
71 static inline u32 axi_chan_ioread32(struct axi_dma_chan *chan, u32 reg)
73 return ioread32(chan->chan_regs + reg);
77 axi_chan_iowrite64(struct axi_dma_chan *chan, u32 reg, u64 val)
83 iowrite32(lower_32_bits(val), chan->chan_regs + reg);
84 iowrite32(upper_32_bits(val), chan->chan_regs + reg + 4);
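These accessors are nearly complete in the listing. A minimal reconstruction of the per-channel MMIO helpers, with the 64-bit write split into two 32-bit writes (low word first) as the fragments show; all sketches below assume the driver's own headers (linux/io.h, linux/dmapool.h and the dw-axi-dmac private header) are in scope:

static inline void axi_chan_iowrite32(struct axi_dma_chan *chan, u32 reg, u32 val)
{
	iowrite32(val, chan->chan_regs + reg);
}

static inline u32 axi_chan_ioread32(struct axi_dma_chan *chan, u32 reg)
{
	return ioread32(chan->chan_regs + reg);
}

static inline void
axi_chan_iowrite64(struct axi_dma_chan *chan, u32 reg, u64 val)
{
	/* Split into two 32-bit writes: low word first, high word at reg + 4 */
	iowrite32(lower_32_bits(val), chan->chan_regs + reg);
	iowrite32(upper_32_bits(val), chan->chan_regs + reg + 4);
}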
87 static inline void axi_chan_config_write(struct axi_dma_chan *chan,
94 if (chan->chip->dw->hdata->reg_map_8_channels &&
95 !chan->chip->dw->hdata->use_cfg2) {
110 axi_chan_iowrite32(chan, CH_CFG_L, cfg_lo);
111 axi_chan_iowrite32(chan, CH_CFG_H, cfg_hi);
150 static inline void axi_chan_irq_disable(struct axi_dma_chan *chan, u32 irq_mask)
155 axi_chan_iowrite32(chan, CH_INTSTATUS_ENA, DWAXIDMAC_IRQ_NONE);
157 val = axi_chan_ioread32(chan, CH_INTSTATUS_ENA);
159 axi_chan_iowrite32(chan, CH_INTSTATUS_ENA, val);
163 static inline void axi_chan_irq_set(struct axi_dma_chan *chan, u32 irq_mask)
165 axi_chan_iowrite32(chan, CH_INTSTATUS_ENA, irq_mask);
168 static inline void axi_chan_irq_sig_set(struct axi_dma_chan *chan, u32 irq_mask)
170 axi_chan_iowrite32(chan, CH_INTSIGNAL_ENA, irq_mask);
173 static inline void axi_chan_irq_clear(struct axi_dma_chan *chan, u32 irq_mask)
175 axi_chan_iowrite32(chan, CH_INTCLEAR, irq_mask);
178 static inline u32 axi_chan_irq_read(struct axi_dma_chan *chan)
180 return axi_chan_ioread32(chan, CH_INTSTATUS);
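A hedged sketch of the interrupt-disable helper around CH_INTSTATUS_ENA; the branch condition and the bit-clearing step between the read and the write are inferred from the fragments and the function name, not shown in the listing:

static inline void axi_chan_irq_disable(struct axi_dma_chan *chan, u32 irq_mask)
{
	u32 val;

	if (likely(irq_mask == DWAXIDMAC_IRQ_ALL)) {
		/* Fast path: drop every interrupt source at once */
		axi_chan_iowrite32(chan, CH_INTSTATUS_ENA, DWAXIDMAC_IRQ_NONE);
	} else {
		val = axi_chan_ioread32(chan, CH_INTSTATUS_ENA);
		val &= ~irq_mask;	/* inferred: clear only the requested sources */
		axi_chan_iowrite32(chan, CH_INTSTATUS_ENA, val);
	}
}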
183 static inline void axi_chan_disable(struct axi_dma_chan *chan)
187 val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
188 val &= ~(BIT(chan->id) << DMAC_CHAN_EN_SHIFT);
189 if (chan->chip->dw->hdata->reg_map_8_channels)
190 val |= BIT(chan->id) << DMAC_CHAN_EN_WE_SHIFT;
192 val |= BIT(chan->id) << DMAC_CHAN_EN2_WE_SHIFT;
193 axi_dma_iowrite32(chan->chip, DMAC_CHEN, val);
196 static inline void axi_chan_enable(struct axi_dma_chan *chan)
200 val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
201 if (chan->chip->dw->hdata->reg_map_8_channels)
202 val |= BIT(chan->id) << DMAC_CHAN_EN_SHIFT |
203 BIT(chan->id) << DMAC_CHAN_EN_WE_SHIFT;
205 val |= BIT(chan->id) << DMAC_CHAN_EN_SHIFT |
206 BIT(chan->id) << DMAC_CHAN_EN2_WE_SHIFT;
207 axi_dma_iowrite32(chan->chip, DMAC_CHEN, val);
210 static inline bool axi_chan_is_hw_enable(struct axi_dma_chan *chan)
214 val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
216 return !!(val & (BIT(chan->id) << DMAC_CHAN_EN_SHIFT));
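On the 8-channel register map a channel is enabled by setting its enable bit together with the matching write-enable bit in DMAC_CHEN; newer maps use the EN2 write-enable field instead. A sketch assembled from the fragments above (the disable path mirrors it with the enable bit cleared):

static inline void axi_chan_enable(struct axi_dma_chan *chan)
{
	u32 val;

	val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
	if (chan->chip->dw->hdata->reg_map_8_channels)
		val |= BIT(chan->id) << DMAC_CHAN_EN_SHIFT |
		       BIT(chan->id) << DMAC_CHAN_EN_WE_SHIFT;
	else
		val |= BIT(chan->id) << DMAC_CHAN_EN_SHIFT |
		       BIT(chan->id) << DMAC_CHAN_EN2_WE_SHIFT;
	axi_dma_iowrite32(chan->chip, DMAC_CHEN, val);
}

static inline bool axi_chan_is_hw_enable(struct axi_dma_chan *chan)
{
	u32 val;

	val = axi_dma_ioread32(chan->chip, DMAC_CHEN);

	/* Only the plain enable bit reflects the running state */
	return !!(val & (BIT(chan->id) << DMAC_CHAN_EN_SHIFT));
}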
225 axi_chan_irq_disable(&chip->dw->chan[i], DWAXIDMAC_IRQ_ALL);
226 axi_chan_disable(&chip->dw->chan[i]);
233 static u32 axi_chan_get_xfer_width(struct axi_dma_chan *chan, dma_addr_t src,
236 u32 max_width = chan->chip->dw->hdata->m_data_width;
241 static inline const char *axi_chan_name(struct axi_dma_chan *chan)
243 return dma_chan_name(&chan->vc.chan);
263 static struct axi_dma_lli *axi_desc_get(struct axi_dma_chan *chan,
269 lli = dma_pool_zalloc(chan->desc_pool, GFP_NOWAIT, &phys);
271 dev_err(chan2dev(chan), "%s: not enough descriptors available\n",
272 axi_chan_name(chan));
276 atomic_inc(&chan->descs_allocated);
284 struct axi_dma_chan *chan = desc->chan;
285 int count = atomic_read(&chan->descs_allocated);
291 dma_pool_free(chan->desc_pool, hw_desc->lli, hw_desc->llp);
296 atomic_sub(descs_put, &chan->descs_allocated);
297 dev_vdbg(chan2dev(chan), "%s: %d descs put, %d still allocated\n",
298 axi_chan_name(chan), descs_put,
299 atomic_read(&chan->descs_allocated));
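Descriptor bookkeeping, reconstructed from the fragments: link-list items come from a per-channel dma_pool and an atomic counter tracks how many are outstanding. The allocation half is close to verbatim; the return of the DMA address through *addr is an assumption:

static struct axi_dma_lli *axi_desc_get(struct axi_dma_chan *chan,
					dma_addr_t *addr)
{
	struct axi_dma_lli *lli;
	dma_addr_t phys;

	lli = dma_pool_zalloc(chan->desc_pool, GFP_NOWAIT, &phys);
	if (unlikely(!lli)) {
		dev_err(chan2dev(chan), "%s: not enough descriptors available\n",
			axi_chan_name(chan));
		return NULL;
	}

	atomic_inc(&chan->descs_allocated);
	*addr = phys;

	return lli;
}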
311 struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
325 spin_lock_irqsave(&chan->vc.lock, flags);
327 vdesc = vchan_find_desc(&chan->vc, cookie);
336 spin_unlock_irqrestore(&chan->vc.lock, flags);
347 static void write_chan_llp(struct axi_dma_chan *chan, dma_addr_t adr)
349 axi_chan_iowrite64(chan, CH_LLP, adr);
352 static void dw_axi_dma_set_byte_halfword(struct axi_dma_chan *chan, bool set)
357 if (!chan->chip->apb_regs) {
358 dev_dbg(chan->chip->dev, "apb_regs not initialized\n");
362 reg_width = __ffs(chan->config.dst_addr_width);
366 val = ioread32(chan->chip->apb_regs + offset);
369 val |= BIT(chan->id);
371 val &= ~BIT(chan->id);
373 iowrite32(val, chan->chip->apb_regs + offset);
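write_chan_llp programs the link-list pointer through the 64-bit accessor, and dw_axi_dma_set_byte_halfword toggles the channel's bit in a vendor APB register. A sketch assuming the DMAC_APB_BYTE_WR_CH_EN / DMAC_APB_HALFWORD_WR_CH_EN offsets from the driver header; the offset selection is not visible in the listing:

static void write_chan_llp(struct axi_dma_chan *chan, dma_addr_t adr)
{
	axi_chan_iowrite64(chan, CH_LLP, adr);
}

static void dw_axi_dma_set_byte_halfword(struct axi_dma_chan *chan, bool set)
{
	u32 offset = DMAC_APB_BYTE_WR_CH_EN;	/* assumed default offset */
	u32 reg_width, val;

	if (!chan->chip->apb_regs) {
		dev_dbg(chan->chip->dev, "apb_regs not initialized\n");
		return;
	}

	reg_width = __ffs(chan->config.dst_addr_width);
	if (reg_width == DWAXIDMAC_TRANS_WIDTH_16)	/* assumed */
		offset = DMAC_APB_HALFWORD_WR_CH_EN;

	val = ioread32(chan->chip->apb_regs + offset);
	if (set)
		val |= BIT(chan->id);
	else
		val &= ~BIT(chan->id);
	iowrite32(val, chan->chip->apb_regs + offset);
}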
375 /* Called in chan locked context */
376 static void axi_chan_block_xfer_start(struct axi_dma_chan *chan,
379 u32 priority = chan->chip->dw->hdata->priority[chan->id];
384 if (unlikely(axi_chan_is_hw_enable(chan))) {
385 dev_err(chan2dev(chan), "%s is non-idle!\n",
386 axi_chan_name(chan));
391 axi_dma_enable(chan->chip);
399 switch (chan->direction) {
401 dw_axi_dma_set_byte_halfword(chan, true);
402 config.tt_fc = chan->config.device_fc ?
405 if (chan->chip->apb_regs)
406 config.dst_per = chan->id;
408 config.dst_per = chan->hw_handshake_num;
411 config.tt_fc = chan->config.device_fc ?
414 if (chan->chip->apb_regs)
415 config.src_per = chan->id;
417 config.src_per = chan->hw_handshake_num;
422 axi_chan_config_write(chan, &config);
424 write_chan_llp(chan, first->hw_desc[0].llp | lms);
427 axi_chan_irq_sig_set(chan, irq_mask);
431 axi_chan_irq_set(chan, irq_mask);
433 axi_chan_enable(chan);
436 static void axi_chan_start_first_queued(struct axi_dma_chan *chan)
441 vd = vchan_next_desc(&chan->vc);
446 dev_vdbg(chan2dev(chan), "%s: started %u\n", axi_chan_name(chan),
448 axi_chan_block_xfer_start(chan, desc);
453 struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
456 spin_lock_irqsave(&chan->vc.lock, flags);
457 if (vchan_issue_pending(&chan->vc))
458 axi_chan_start_first_queued(chan);
459 spin_unlock_irqrestore(&chan->vc.lock, flags);
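issue_pending moves queued virt-dma descriptors to the issued list under the channel lock and kicks the first one. A reconstruction; the vd_to_axi_desc() step and the wrapper's name are assumptions:

static void axi_chan_start_first_queued(struct axi_dma_chan *chan)
{
	struct axi_dma_desc *desc;
	struct virt_dma_desc *vd;

	vd = vchan_next_desc(&chan->vc);
	if (!vd)
		return;

	desc = vd_to_axi_desc(vd);
	dev_vdbg(chan2dev(chan), "%s: started %u\n", axi_chan_name(chan),
		 vd->tx.cookie);
	axi_chan_block_xfer_start(chan, desc);
}

static void dma_chan_issue_pending(struct dma_chan *dchan)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
	unsigned long flags;

	spin_lock_irqsave(&chan->vc.lock, flags);
	if (vchan_issue_pending(&chan->vc))
		axi_chan_start_first_queued(chan);
	spin_unlock_irqrestore(&chan->vc.lock, flags);
}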
464 struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
466 vchan_synchronize(&chan->vc);
471 struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
474 if (axi_chan_is_hw_enable(chan)) {
475 dev_err(chan2dev(chan), "%s is non-idle!\n",
476 axi_chan_name(chan));
481 chan->desc_pool = dma_pool_create(dev_name(chan2dev(chan)),
482 chan->chip->dev,
485 if (!chan->desc_pool) {
486 dev_err(chan2dev(chan), "No memory for descriptors\n");
489 dev_vdbg(dchan2dev(dchan), "%s: allocating\n", axi_chan_name(chan));
491 pm_runtime_get(chan->chip->dev);
498 struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
501 if (axi_chan_is_hw_enable(chan))
503 axi_chan_name(chan));
505 axi_chan_disable(chan);
506 axi_chan_irq_disable(chan, DWAXIDMAC_IRQ_ALL);
508 vchan_free_chan_resources(&chan->vc);
510 dma_pool_destroy(chan->desc_pool);
511 chan->desc_pool = NULL;
514 axi_chan_name(chan), atomic_read(&chan->descs_allocated));
516 pm_runtime_put(chan->chip->dev);
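Channel resources are a per-channel dma_pool of hardware link-list items plus a runtime-PM reference on the controller. A sketch of the allocation side under those assumptions; the error codes and the pool size/alignment are not shown in the listing:

static int dma_chan_alloc_chan_resources(struct dma_chan *dchan)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);

	/* The channel must be idle before software takes it over */
	if (axi_chan_is_hw_enable(chan)) {
		dev_err(chan2dev(chan), "%s is non-idle!\n",
			axi_chan_name(chan));
		return -EBUSY;
	}

	/* Pool of hardware link-list items; geometry is an assumption */
	chan->desc_pool = dma_pool_create(dev_name(chan2dev(chan)),
					  chan->chip->dev,
					  sizeof(struct axi_dma_lli),
					  64, 0);
	if (!chan->desc_pool) {
		dev_err(chan2dev(chan), "No memory for descriptors\n");
		return -ENOMEM;
	}
	dev_vdbg(dchan2dev(dchan), "%s: allocating\n", axi_chan_name(chan));

	pm_runtime_get(chan->chip->dev);

	return 0;
}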
519 static void dw_axi_dma_set_hw_channel(struct axi_dma_chan *chan, bool set)
521 struct axi_dma_chip *chip = chan->chip;
535 val = chan->hw_handshake_num;
545 (chan->id * DMA_APB_HS_SEL_BIT_SIZE));
546 reg_value |= (val << (chan->id * DMA_APB_HS_SEL_BIT_SIZE));
594 if (desc->chan->chip->dw->hdata->nr_masters > 1)
602 static int dw_axi_dma_set_hw_desc(struct axi_dma_chan *chan,
606 unsigned int data_width = BIT(chan->chip->dw->hdata->m_data_width);
615 axi_block_ts = chan->chip->dw->hdata->block_size[chan->id];
622 dev_err(chan->chip->dev, "invalid buffer alignment\n");
626 switch (chan->direction) {
628 reg_width = __ffs(chan->config.dst_addr_width);
629 device_addr = chan->config.dst_addr;
637 reg_width = __ffs(chan->config.src_addr_width);
638 device_addr = chan->config.src_addr;
652 hw_desc->lli = axi_desc_get(chan, &hw_desc->llp);
658 if (chan->chip->dw->hdata->restrict_axi_burst_len) {
659 burst_len = chan->chip->dw->hdata->axi_rw_burst_len;
667 if (chan->direction == DMA_MEM_TO_DEV) {
687 static size_t calculate_block_len(struct axi_dma_chan *chan,
694 axi_block_ts = chan->chip->dw->hdata->block_size[chan->id];
698 data_width = BIT(chan->chip->dw->hdata->m_data_width);
706 reg_width = __ffs(chan->config.src_addr_width);
722 struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
737 axi_block_len = calculate_block_len(chan, dma_addr, buf_len, direction);
750 chan->direction = direction;
751 desc->chan = chan;
752 chan->cyclic = true;
759 status = dw_axi_dma_set_hw_desc(chan, hw_desc, src_addr,
782 dw_axi_dma_set_hw_channel(chan, true);
784 return vchan_tx_prep(&chan->vc, &desc->vd, flags);
799 struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
819 axi_block_len = calculate_block_len(chan, mem, len, direction);
830 desc->chan = chan;
832 chan->direction = direction;
842 status = dw_axi_dma_set_hw_desc(chan, hw_desc, mem, segment_len);
862 dw_axi_dma_set_hw_channel(chan, true);
864 return vchan_tx_prep(&chan->vc, &desc->vd, flags);
877 struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
885 dev_dbg(chan2dev(chan), "%s: memcpy: src: %pad dst: %pad length: %zd flags: %#lx",
886 axi_chan_name(chan), &src_adr, &dst_adr, len, flags);
888 max_block_ts = chan->chip->dw->hdata->block_size[chan->id];
889 xfer_width = axi_chan_get_xfer_width(chan, src_adr, dst_adr, len);
895 desc->chan = chan;
907 xfer_width = axi_chan_get_xfer_width(chan, src_adr, dst_adr, xfer_len);
920 hw_desc->lli = axi_desc_get(chan, &hw_desc->llp);
929 if (chan->chip->dw->hdata->restrict_axi_burst_len) {
930 u32 burst_len = chan->chip->dw->hdata->axi_rw_burst_len;
968 return vchan_tx_prep(&chan->vc, &desc->vd, flags);
979 struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
981 memcpy(&chan->config, config, sizeof(*config));
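device_config only caches the caller's dma_slave_config in the channel; it is consulted later when a slave transfer is prepared. A minimal sketch (the function name is an assumption):

static int dw_axi_dma_chan_slave_config(struct dma_chan *dchan,
					struct dma_slave_config *config)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);

	memcpy(&chan->config, config, sizeof(*config));

	return 0;
}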
986 static void axi_chan_dump_lli(struct axi_dma_chan *chan,
990 dev_err(dchan2dev(&chan->vc.chan), "NULL LLI\n");
994 dev_err(dchan2dev(&chan->vc.chan),
1004 static void axi_chan_list_dump_lli(struct axi_dma_chan *chan,
1007 int count = atomic_read(&chan->descs_allocated);
1011 axi_chan_dump_lli(chan, &desc_head->hw_desc[i]);
1014 static noinline void axi_chan_handle_err(struct axi_dma_chan *chan, u32 status)
1019 spin_lock_irqsave(&chan->vc.lock, flags);
1021 axi_chan_disable(chan);
1024 vd = vchan_next_desc(&chan->vc);
1026 dev_err(chan2dev(chan), "BUG: %s, IRQ with no descriptors\n",
1027 axi_chan_name(chan));
1034 dev_err(chan2dev(chan),
1036 axi_chan_name(chan), vd->tx.cookie, status);
1037 axi_chan_list_dump_lli(chan, vd_to_axi_desc(vd));
1042 axi_chan_start_first_queued(chan);
1045 spin_unlock_irqrestore(&chan->vc.lock, flags);
1048 static void axi_chan_block_xfer_complete(struct axi_dma_chan *chan)
1050 int count = atomic_read(&chan->descs_allocated);
1058 spin_lock_irqsave(&chan->vc.lock, flags);
1059 if (unlikely(axi_chan_is_hw_enable(chan))) {
1060 dev_err(chan2dev(chan), "BUG: %s caught DWAXIDMAC_IRQ_DMA_TRF, but channel not idle!\n",
1061 axi_chan_name(chan));
1062 axi_chan_disable(chan);
1066 vd = vchan_next_desc(&chan->vc);
1068 dev_err(chan2dev(chan), "BUG: %s, IRQ with no descriptors\n",
1069 axi_chan_name(chan));
1073 if (chan->cyclic) {
1076 llp = lo_hi_readq(chan->chan_regs + CH_LLP);
1080 axi_chan_irq_clear(chan, hw_desc->lli->status_lo);
1090 axi_chan_enable(chan);
1098 axi_chan_start_first_queued(chan);
1102 spin_unlock_irqrestore(&chan->vc.lock, flags);
1109 struct axi_dma_chan *chan;
1118 chan = &dw->chan[i];
1119 status = axi_chan_irq_read(chan);
1120 axi_chan_irq_clear(chan, status);
1123 axi_chan_name(chan), i, status);
1126 axi_chan_handle_err(chan, status);
1128 axi_chan_block_xfer_complete(chan);
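The interrupt handler walks every channel, reads and clears its status, and dispatches either the error path or the transfer-complete path. A hedged sketch; the handler name, the error mask, and any masking of the controller-level interrupt around the loop are assumptions:

static irqreturn_t dw_axi_dma_interrupt(int irq, void *dev_id)
{
	struct axi_dma_chip *chip = dev_id;
	struct dw_axi_dma *dw = chip->dw;
	struct axi_dma_chan *chan;
	u32 status, i;

	/* (assumed) the controller-level IRQ is masked here and restored below */

	for (i = 0; i < dw->hdata->nr_channels; i++) {
		chan = &dw->chan[i];
		status = axi_chan_irq_read(chan);
		axi_chan_irq_clear(chan, status);

		dev_vdbg(chip->dev, "%s %u IRQ status: 0x%08x\n",
			 axi_chan_name(chan), i, status);

		if (status & DWAXIDMAC_IRQ_ALL_ERR)
			axi_chan_handle_err(chan, status);
		else if (status & DWAXIDMAC_IRQ_DMA_TRF)
			axi_chan_block_xfer_complete(chan);
	}

	return IRQ_HANDLED;
}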
1139 struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
1140 u32 chan_active = BIT(chan->id) << DMAC_CHAN_EN_SHIFT;
1146 axi_chan_disable(chan);
1148 ret = readl_poll_timeout_atomic(chan->chip->regs + DMAC_CHEN, val,
1152 "%s failed to stop\n", axi_chan_name(chan));
1154 if (chan->direction != DMA_MEM_TO_MEM)
1155 dw_axi_dma_set_hw_channel(chan, false);
1156 if (chan->direction == DMA_MEM_TO_DEV)
1157 dw_axi_dma_set_byte_halfword(chan, false);
1159 spin_lock_irqsave(&chan->vc.lock, flags);
1161 vchan_get_all_descriptors(&chan->vc, &head);
1163 chan->cyclic = false;
1164 spin_unlock_irqrestore(&chan->vc.lock, flags);
1166 vchan_dma_desc_free_list(&chan->vc, &head);
1168 dev_vdbg(dchan2dev(dchan), "terminated: %s\n", axi_chan_name(chan));
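terminate_all disables the channel, polls DMAC_CHEN until the hardware reports it stopped, undoes the handshake and byte/halfword setup for slave directions, and then reaps every virt-dma descriptor. A sketch from the fragments; the poll delay/timeout values and return code are assumptions:

static int dma_chan_terminate_all(struct dma_chan *dchan)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
	u32 chan_active = BIT(chan->id) << DMAC_CHAN_EN_SHIFT;
	unsigned long flags;
	u32 val;
	int ret;
	LIST_HEAD(head);

	axi_chan_disable(chan);

	ret = readl_poll_timeout_atomic(chan->chip->regs + DMAC_CHEN, val,
					!(val & chan_active), 1000, 50000);
	if (ret == -ETIMEDOUT)
		dev_warn(dchan2dev(dchan),
			 "%s failed to stop\n", axi_chan_name(chan));

	if (chan->direction != DMA_MEM_TO_MEM)
		dw_axi_dma_set_hw_channel(chan, false);
	if (chan->direction == DMA_MEM_TO_DEV)
		dw_axi_dma_set_byte_halfword(chan, false);

	spin_lock_irqsave(&chan->vc.lock, flags);
	vchan_get_all_descriptors(&chan->vc, &head);
	chan->cyclic = false;
	spin_unlock_irqrestore(&chan->vc.lock, flags);

	vchan_dma_desc_free_list(&chan->vc, &head);

	dev_vdbg(dchan2dev(dchan), "terminated: %s\n", axi_chan_name(chan));

	return 0;
}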
1175 struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
1180 spin_lock_irqsave(&chan->vc.lock, flags);
1182 if (chan->chip->dw->hdata->reg_map_8_channels) {
1183 val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
1184 val |= BIT(chan->id) << DMAC_CHAN_SUSP_SHIFT |
1185 BIT(chan->id) << DMAC_CHAN_SUSP_WE_SHIFT;
1186 axi_dma_iowrite32(chan->chip, DMAC_CHEN, val);
1188 val = axi_dma_ioread32(chan->chip, DMAC_CHSUSPREG);
1189 val |= BIT(chan->id) << DMAC_CHAN_SUSP2_SHIFT |
1190 BIT(chan->id) << DMAC_CHAN_SUSP2_WE_SHIFT;
1191 axi_dma_iowrite32(chan->chip, DMAC_CHSUSPREG, val);
1195 if (axi_chan_irq_read(chan) & DWAXIDMAC_IRQ_SUSPENDED)
1201 axi_chan_irq_clear(chan, DWAXIDMAC_IRQ_SUSPENDED);
1203 chan->is_paused = true;
1205 spin_unlock_irqrestore(&chan->vc.lock, flags);
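Pause sets the suspend bit (plus its write-enable bit) in DMAC_CHEN on the legacy 8-channel map, or in DMAC_CHSUSPREG otherwise, then waits for the SUSPENDED interrupt flag. A sketch; the retry budget, delay, and return values are assumptions:

static int dma_chan_pause(struct dma_chan *dchan)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
	unsigned long flags;
	unsigned int timeout = 20;	/* assumed retry budget */
	u32 val;

	spin_lock_irqsave(&chan->vc.lock, flags);

	if (chan->chip->dw->hdata->reg_map_8_channels) {
		val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
		val |= BIT(chan->id) << DMAC_CHAN_SUSP_SHIFT |
		       BIT(chan->id) << DMAC_CHAN_SUSP_WE_SHIFT;
		axi_dma_iowrite32(chan->chip, DMAC_CHEN, val);
	} else {
		val = axi_dma_ioread32(chan->chip, DMAC_CHSUSPREG);
		val |= BIT(chan->id) << DMAC_CHAN_SUSP2_SHIFT |
		       BIT(chan->id) << DMAC_CHAN_SUSP2_WE_SHIFT;
		axi_dma_iowrite32(chan->chip, DMAC_CHSUSPREG, val);
	}

	/* Wait for the hardware to acknowledge the suspend request */
	do {
		if (axi_chan_irq_read(chan) & DWAXIDMAC_IRQ_SUSPENDED)
			break;
		udelay(2);
	} while (--timeout);

	axi_chan_irq_clear(chan, DWAXIDMAC_IRQ_SUSPENDED);

	chan->is_paused = true;

	spin_unlock_irqrestore(&chan->vc.lock, flags);

	return timeout ? 0 : -EAGAIN;
}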
1210 /* Called in chan locked context */
1211 static inline void axi_chan_resume(struct axi_dma_chan *chan)
1215 if (chan->chip->dw->hdata->reg_map_8_channels) {
1216 val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
1217 val &= ~(BIT(chan->id) << DMAC_CHAN_SUSP_SHIFT);
1218 val |= (BIT(chan->id) << DMAC_CHAN_SUSP_WE_SHIFT);
1219 axi_dma_iowrite32(chan->chip, DMAC_CHEN, val);
1221 val = axi_dma_ioread32(chan->chip, DMAC_CHSUSPREG);
1222 val &= ~(BIT(chan->id) << DMAC_CHAN_SUSP2_SHIFT);
1223 val |= (BIT(chan->id) << DMAC_CHAN_SUSP2_WE_SHIFT);
1224 axi_dma_iowrite32(chan->chip, DMAC_CHSUSPREG, val);
1227 chan->is_paused = false;
1232 struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
1235 spin_lock_irqsave(&chan->vc.lock, flags);
1237 if (chan->is_paused)
1238 axi_chan_resume(chan);
1240 spin_unlock_irqrestore(&chan->vc.lock, flags);
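Resume is the mirror image of pause: the suspend bit is cleared while its write-enable bit is set, and is_paused is dropped; the resume callback only invokes it when the channel was actually paused. The register half is shown verbatim in the fragments; the wrapper below is a sketch with an assumed name:

static int dma_chan_resume(struct dma_chan *dchan)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
	unsigned long flags;

	spin_lock_irqsave(&chan->vc.lock, flags);

	if (chan->is_paused)
		axi_chan_resume(chan);

	spin_unlock_irqrestore(&chan->vc.lock, flags);

	return 0;
}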
1292 struct axi_dma_chan *chan;
1299 chan = dchan_to_axi_dma_chan(dchan);
1300 chan->hw_handshake_num = dma_spec->args[0];
1439 dw->chan = devm_kcalloc(chip->dev, hdata->nr_channels,
1440 sizeof(*dw->chan), GFP_KERNEL);
1441 if (!dw->chan)
1451 struct axi_dma_chan *chan = &dw->chan[i];
1453 chan->chip = chip;
1454 chan->id = i;
1455 chan->chan_regs = chip->regs + COMMON_REG_LEN + i * CHAN_REG_LEN;
1456 atomic_set(&chan->descs_allocated, 0);
1458 chan->vc.desc_free = vchan_desc_put;
1459 vchan_init(&chan->vc, &dw->dma);
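Probe-time channel setup, as implied by the fragments: each channel gets its chip back-pointer, index, register window carved out after the common register block, and a virt-dma channel registered with the DMA device. A sketch of just that loop; the surrounding probe logic is omitted:

for (i = 0; i < hdata->nr_channels; i++) {
	struct axi_dma_chan *chan = &dw->chan[i];

	chan->chip = chip;
	chan->id = i;
	chan->chan_regs = chip->regs + COMMON_REG_LEN + i * CHAN_REG_LEN;
	atomic_set(&chan->descs_allocated, 0);

	chan->vc.desc_free = vchan_desc_put;
	vchan_init(&chan->vc, &dw->dma);
}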
1542 struct axi_dma_chan *chan, *_chan;
1550 axi_chan_disable(&chip->dw->chan[i]);
1551 axi_chan_irq_disable(&chip->dw->chan[i], DWAXIDMAC_IRQ_ALL);
1562 list_for_each_entry_safe(chan, _chan, &dw->dma.channels,
1563 vc.chan.device_node) {
1564 list_del(&chan->vc.chan.device_node);
1565 tasklet_kill(&chan->vc.task);