Lines matching references to d40c in the ste_dma40 driver (ST-Ericsson DMA40 controller); each entry is prefixed with its line number in the source file.
603 static struct device *chan2dev(struct d40_chan *d40c)
605 return &d40c->chan.dev->device;
627 #define chan_err(d40c, format, arg...) \
628 d40_err(chan2dev(d40c), format, ## arg)
634 static int d40_pool_lli_alloc(struct d40_chan *d40c, struct d40_desc *d40d,
637 bool is_log = chan_is_logical(d40c);
669 d40d->lli_pool.dma_addr = dma_map_single(d40c->base->dev,
674 if (dma_mapping_error(d40c->base->dev,
686 static void d40_pool_lli_free(struct d40_chan *d40c, struct d40_desc *d40d)
689 dma_unmap_single(d40c->base->dev, d40d->lli_pool.dma_addr,
701 static int d40_lcla_alloc_one(struct d40_chan *d40c,
708 spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);
715 int idx = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP + i;
717 if (!d40c->base->lcla_pool.alloc_map[idx]) {
718 d40c->base->lcla_pool.alloc_map[idx] = d40d;
725 spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);
730 static int d40_lcla_free_all(struct d40_chan *d40c,
737 if (chan_is_physical(d40c))
740 spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);
743 int idx = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP + i;
745 if (d40c->base->lcla_pool.alloc_map[idx] == d40d) {
746 d40c->base->lcla_pool.alloc_map[idx] = NULL;
755 spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);
766 static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
770 if (!list_empty(&d40c->client)) {
774 list_for_each_entry_safe(d, _d, &d40c->client, node) {
785 desc = kmem_cache_zalloc(d40c->base->desc_slab, GFP_NOWAIT);
793 static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d)
796 d40_pool_lli_free(d40c, d40d);
797 d40_lcla_free_all(d40c, d40d);
798 kmem_cache_free(d40c->base->desc_slab, d40d);
801 static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc)
803 list_add_tail(&desc->node, &d40c->active);
823 static void d40_desc_done(struct d40_chan *d40c, struct d40_desc *desc)
825 list_add_tail(&desc->node, &d40c->done);
940 static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d)
942 if (chan_is_physical(d40c)) {
943 d40_phy_lli_load(d40c, d40d);
946 d40_log_lli_to_lcxa(d40c, d40d);
949 static struct d40_desc *d40_first_active_get(struct d40_chan *d40c)
951 return list_first_entry_or_null(&d40c->active, struct d40_desc, node);
955 static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc)
959 list_add_tail(&desc->node, &d40c->pending_queue);
962 static struct d40_desc *d40_first_pending(struct d40_chan *d40c)
964 return list_first_entry_or_null(&d40c->pending_queue, struct d40_desc,
968 static struct d40_desc *d40_first_queued(struct d40_chan *d40c)
970 return list_first_entry_or_null(&d40c->queue, struct d40_desc, node);
973 static struct d40_desc *d40_first_done(struct d40_chan *d40c)
975 return list_first_entry_or_null(&d40c->done, struct d40_desc, node);
1038 static int __d40_execute_command_phy(struct d40_chan *d40c,
1049 ret = __d40_execute_command_phy(d40c, D40_DMA_SUSPEND_REQ);
1054 spin_lock_irqsave(&d40c->base->execmd_lock, flags);
1056 if (d40c->phy_chan->num % 2 == 0)
1057 active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
1059 active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;
1063 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
1064 D40_CHAN_POS(d40c->phy_chan->num);
1070 wmask = 0xffffffff & ~(D40_CHAN_POS_MASK(d40c->phy_chan->num));
1071 writel(wmask | (command << D40_CHAN_POS(d40c->phy_chan->num)),
1078 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
1079 D40_CHAN_POS(d40c->phy_chan->num);
1094 chan_err(d40c,
1096 d40c->phy_chan->num, d40c->log_num,
1104 spin_unlock_irqrestore(&d40c->base->execmd_lock, flags);
1108 static void d40_term_all(struct d40_chan *d40c)
1114 while ((d40d = d40_first_done(d40c))) {
1116 d40_desc_free(d40c, d40d);
1120 while ((d40d = d40_first_active_get(d40c))) {
1122 d40_desc_free(d40c, d40d);
1126 while ((d40d = d40_first_queued(d40c))) {
1128 d40_desc_free(d40c, d40d);
1132 while ((d40d = d40_first_pending(d40c))) {
1134 d40_desc_free(d40c, d40d);
1138 if (!list_empty(&d40c->client))
1139 list_for_each_entry_safe(d40d, _d, &d40c->client, node) {
1141 d40_desc_free(d40c, d40d);
1145 if (!list_empty(&d40c->prepare_queue))
1147 &d40c->prepare_queue, node) {
1149 d40_desc_free(d40c, d40d);
1152 d40c->pending_tx = 0;
1155 static void __d40_config_set_event(struct d40_chan *d40c,
1159 void __iomem *addr = chan_base(d40c) + reg;
1199 chan_err(d40c,
1201 "status %x\n", d40c->phy_chan->num,
1202 d40c->log_num, status);
1223 dev_dbg(chan2dev(d40c),
1238 static void d40_config_set_event(struct d40_chan *d40c,
1241 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type);
1244 if ((d40c->dma_cfg.dir == DMA_DEV_TO_MEM) ||
1245 (d40c->dma_cfg.dir == DMA_DEV_TO_DEV))
1246 __d40_config_set_event(d40c, event_type, event,
1249 if (d40c->dma_cfg.dir != DMA_DEV_TO_MEM)
1250 __d40_config_set_event(d40c, event_type, event,
1254 static u32 d40_chan_has_events(struct d40_chan *d40c)
1256 void __iomem *chanbase = chan_base(d40c);
1266 __d40_execute_command_log(struct d40_chan *d40c, enum d40_command command)
1273 if (d40c->phy_chan->num % 2 == 0)
1274 active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
1276 active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;
1279 spin_lock_irqsave(&d40c->phy_chan->lock, flags);
1286 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
1287 D40_CHAN_POS(d40c->phy_chan->num);
1290 d40_config_set_event(d40c, D40_SUSPEND_REQ_EVENTLINE);
1292 d40_config_set_event(d40c, D40_DEACTIVATE_EVENTLINE);
1294 if (!d40_chan_has_events(d40c) && (command == D40_DMA_STOP))
1295 ret = __d40_execute_command_phy(d40c, command);
1301 d40_config_set_event(d40c, D40_ACTIVATE_EVENTLINE);
1302 ret = __d40_execute_command_phy(d40c, command);
1310 spin_unlock_irqrestore(&d40c->phy_chan->lock, flags);
1314 static int d40_channel_execute_command(struct d40_chan *d40c,
1317 if (chan_is_logical(d40c))
1318 return __d40_execute_command_log(d40c, command);
1320 return __d40_execute_command_phy(d40c, command);
1323 static u32 d40_get_prmo(struct d40_chan *d40c)
1342 if (chan_is_physical(d40c))
1343 return phy_map[d40c->dma_cfg.mode_opt];
1345 return log_map[d40c->dma_cfg.mode_opt];
1348 static void d40_config_write(struct d40_chan *d40c)
1354 addr_base = (d40c->phy_chan->num % 2) * 4;
1356 var = ((u32)(chan_is_logical(d40c)) + 1) <<
1357 D40_CHAN_POS(d40c->phy_chan->num);
1358 writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base);
1361 var = d40_get_prmo(d40c) << D40_CHAN_POS(d40c->phy_chan->num);
1363 writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base);
1365 if (chan_is_logical(d40c)) {
1366 int lidx = (d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS)
1368 void __iomem *chanbase = chan_base(d40c);
1371 writel(d40c->src_def_cfg, chanbase + D40_CHAN_REG_SSCFG);
1372 writel(d40c->dst_def_cfg, chanbase + D40_CHAN_REG_SDCFG);
1384 static u32 d40_residue(struct d40_chan *d40c)
1388 if (chan_is_logical(d40c))
1389 num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK)
1392 u32 val = readl(chan_base(d40c) + D40_CHAN_REG_SDELT);
1397 return num_elt * d40c->dma_cfg.dst_info.data_width;
1400 static bool d40_tx_is_linked(struct d40_chan *d40c)
1404 if (chan_is_logical(d40c))
1405 is_link = readl(&d40c->lcpa->lcsp3) & D40_MEM_LCSP3_DLOS_MASK;
1407 is_link = readl(chan_base(d40c) + D40_CHAN_REG_SDLNK)
1415 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
1419 if (d40c->phy_chan == NULL) {
1420 chan_err(d40c, "Channel is not allocated!\n");
1424 if (!d40c->busy)
1427 spin_lock_irqsave(&d40c->lock, flags);
1428 pm_runtime_get_sync(d40c->base->dev);
1430 res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
1432 pm_runtime_mark_last_busy(d40c->base->dev);
1433 pm_runtime_put_autosuspend(d40c->base->dev);
1434 spin_unlock_irqrestore(&d40c->lock, flags);
1440 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
1444 if (d40c->phy_chan == NULL) {
1445 chan_err(d40c, "Channel is not allocated!\n");
1449 if (!d40c->busy)
1452 spin_lock_irqsave(&d40c->lock, flags);
1453 pm_runtime_get_sync(d40c->base->dev);
1456 if (d40_residue(d40c) || d40_tx_is_linked(d40c))
1457 res = d40_channel_execute_command(d40c, D40_DMA_RUN);
1459 pm_runtime_mark_last_busy(d40c->base->dev);
1460 pm_runtime_put_autosuspend(d40c->base->dev);
1461 spin_unlock_irqrestore(&d40c->lock, flags);
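The two fragments above come from the driver's pause and resume paths: the first suspends a busy channel with D40_DMA_SUSPEND_REQ, the second re-issues D40_DMA_RUN only when d40_residue() or d40_tx_is_linked() reports outstanding work, and both bracket the hardware access with runtime-PM get/put. From a client driver these are reached through the generic dmaengine helpers; a minimal, hedged sketch (chan is assumed to be a channel already obtained from this controller):

#include <linux/dmaengine.h>

/* Hedged sketch: pausing and resuming an in-flight transfer from a client.
 * dmaengine_pause()/dmaengine_resume() invoke the controller's
 * device_pause/device_resume hooks, i.e. the suspend/run paths listed above. */
static int example_pause_resume(struct dma_chan *chan)
{
        int ret;

        ret = dmaengine_pause(chan);
        if (ret)
                return ret;

        /* ... inspect or reconfigure the peripheral while the channel is held ... */

        return dmaengine_resume(chan);
}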
1467 struct d40_chan *d40c = container_of(tx->chan,
1474 spin_lock_irqsave(&d40c->lock, flags);
1476 d40_desc_queue(d40c, d40d);
1477 spin_unlock_irqrestore(&d40c->lock, flags);
1482 static int d40_start(struct d40_chan *d40c)
1484 return d40_channel_execute_command(d40c, D40_DMA_RUN);
1487 static struct d40_desc *d40_queue_start(struct d40_chan *d40c)
1493 d40d = d40_first_queued(d40c);
1496 if (!d40c->busy) {
1497 d40c->busy = true;
1498 pm_runtime_get_sync(d40c->base->dev);
1505 d40_desc_submit(d40c, d40d);
1508 d40_desc_load(d40c, d40d);
1511 err = d40_start(d40c);
1521 static void dma_tc_handle(struct d40_chan *d40c)
1526 d40d = d40_first_active_get(d40c);
1539 && !d40_tx_is_linked(d40c)
1540 && !d40_residue(d40c)) {
1541 d40_lcla_free_all(d40c, d40d);
1542 d40_desc_load(d40c, d40d);
1543 (void) d40_start(d40c);
1549 d40_lcla_free_all(d40c, d40d);
1552 d40_desc_load(d40c, d40d);
1554 (void) d40_start(d40c);
1558 if (d40_queue_start(d40c) == NULL) {
1559 d40c->busy = false;
1561 pm_runtime_mark_last_busy(d40c->base->dev);
1562 pm_runtime_put_autosuspend(d40c->base->dev);
1566 d40_desc_done(d40c, d40d);
1569 d40c->pending_tx++;
1570 tasklet_schedule(&d40c->tasklet);
1576 struct d40_chan *d40c = from_tasklet(d40c, t, tasklet);
1582 spin_lock_irqsave(&d40c->lock, flags);
1585 d40d = d40_first_done(d40c);
1588 d40d = d40_first_active_get(d40c);
1600 if (d40c->pending_tx == 0) {
1601 spin_unlock_irqrestore(&d40c->lock, flags);
1612 d40_desc_free(d40c, d40d);
1615 d40_lcla_free_all(d40c, d40d);
1616 list_add_tail(&d40d->node, &d40c->client);
1621 d40c->pending_tx--;
1623 if (d40c->pending_tx)
1624 tasklet_schedule(&d40c->tasklet);
1626 spin_unlock_irqrestore(&d40c->lock, flags);
1634 if (d40c->pending_tx > 0)
1635 d40c->pending_tx--;
1636 spin_unlock_irqrestore(&d40c->lock, flags);
1645 struct d40_chan *d40c;
1671 d40c = base->lookup_phy_chans[idx];
1673 d40c = base->lookup_log_chans[il[row].offset + idx];
1675 if (!d40c) {
1686 spin_lock(&d40c->lock);
1689 dma_tc_handle(d40c);
1694 spin_unlock(&d40c->lock);
1702 static int d40_validate_conf(struct d40_chan *d40c,
1709 chan_err(d40c, "Invalid direction.\n");
1713 if ((is_log && conf->dev_type > d40c->base->num_log_chans) ||
1714 (!is_log && conf->dev_type > d40c->base->num_phy_chans) ||
1716 chan_err(d40c, "Invalid device type (%d)\n", conf->dev_type);
1725 chan_err(d40c, "periph to periph not supported\n");
1738 chan_err(d40c, "src (burst x width) != dst (burst x width)\n");
1832 static int d40_allocate_channel(struct d40_chan *d40c, bool *first_phy_user)
1834 int dev_type = d40c->dma_cfg.dev_type;
1843 bool is_log = d40c->dma_cfg.mode == STEDMA40_MODE_LOGICAL;
1845 phys = d40c->base->phy_res;
1846 num_phy_chans = d40c->base->num_phy_chans;
1848 if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM) {
1851 } else if (d40c->dma_cfg.dir == DMA_MEM_TO_DEV ||
1852 d40c->dma_cfg.dir == DMA_MEM_TO_MEM) {
1863 if (d40c->dma_cfg.dir == DMA_MEM_TO_MEM) {
1865 if (d40c->dma_cfg.use_fixed_channel) {
1866 i = d40c->dma_cfg.phy_channel;
1880 for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
1893 d40c->phy_chan = &phys[i];
1894 d40c->log_num = D40_PHY_CHAN;
1901 for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
1904 if (d40c->dma_cfg.use_fixed_channel) {
1905 i = d40c->dma_cfg.phy_channel;
1908 dev_err(chan2dev(d40c),
1917 dev_err(chan2dev(d40c),
1946 d40c->phy_chan = &phys[i];
1947 d40c->log_num = log_num;
1951 d40c->base->lookup_log_chans[d40c->log_num] = d40c;
1953 d40c->base->lookup_phy_chans[d40c->phy_chan->num] = d40c;
1959 static int d40_config_memcpy(struct d40_chan *d40c)
1961 dma_cap_mask_t cap = d40c->chan.device->cap_mask;
1964 d40c->dma_cfg = dma40_memcpy_conf_log;
1965 d40c->dma_cfg.dev_type = dma40_memcpy_channels[d40c->chan.chan_id];
1967 d40_log_cfg(&d40c->dma_cfg,
1968 &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
1972 d40c->dma_cfg = dma40_memcpy_conf_phy;
1975 d40c->dst_def_cfg |= BIT(D40_SREG_CFG_TIM_POS);
1978 d40c->src_def_cfg |= BIT(D40_SREG_CFG_EIM_POS);
1979 d40c->dst_def_cfg |= BIT(D40_SREG_CFG_EIM_POS);
1982 chan_err(d40c, "No memcpy\n");
1989 static int d40_free_dma(struct d40_chan *d40c)
1993 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type);
1994 struct d40_phy_res *phy = d40c->phy_chan;
1998 d40_term_all(d40c);
2001 chan_err(d40c, "phy == null\n");
2007 chan_err(d40c, "channel already free\n");
2011 if (d40c->dma_cfg.dir == DMA_MEM_TO_DEV ||
2012 d40c->dma_cfg.dir == DMA_MEM_TO_MEM)
2014 else if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM)
2017 chan_err(d40c, "Unknown direction\n");
2021 pm_runtime_get_sync(d40c->base->dev);
2022 res = d40_channel_execute_command(d40c, D40_DMA_STOP);
2024 chan_err(d40c, "stop failed\n");
2028 d40_alloc_mask_free(phy, is_src, chan_is_logical(d40c) ? event : 0);
2030 if (chan_is_logical(d40c))
2031 d40c->base->lookup_log_chans[d40c->log_num] = NULL;
2033 d40c->base->lookup_phy_chans[phy->num] = NULL;
2035 if (d40c->busy) {
2036 pm_runtime_mark_last_busy(d40c->base->dev);
2037 pm_runtime_put_autosuspend(d40c->base->dev);
2040 d40c->busy = false;
2041 d40c->phy_chan = NULL;
2042 d40c->configured = false;
2044 pm_runtime_mark_last_busy(d40c->base->dev);
2045 pm_runtime_put_autosuspend(d40c->base->dev);
2049 static bool d40_is_paused(struct d40_chan *d40c)
2051 void __iomem *chanbase = chan_base(d40c);
2056 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type);
2058 spin_lock_irqsave(&d40c->lock, flags);
2060 if (chan_is_physical(d40c)) {
2061 if (d40c->phy_chan->num % 2 == 0)
2062 active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
2064 active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;
2067 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
2068 D40_CHAN_POS(d40c->phy_chan->num);
2074 if (d40c->dma_cfg.dir == DMA_MEM_TO_DEV ||
2075 d40c->dma_cfg.dir == DMA_MEM_TO_MEM) {
2077 } else if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM) {
2080 chan_err(d40c, "Unknown direction\n");
2090 spin_unlock_irqrestore(&d40c->lock, flags);
2097 struct d40_chan *d40c =
2102 spin_lock_irqsave(&d40c->lock, flags);
2103 bytes_left = d40_residue(d40c);
2104 spin_unlock_irqrestore(&d40c->lock, flags);
2275 struct d40_chan *d40c =
2280 err = d40_validate_conf(d40c, info);
2282 d40c->dma_cfg = *info;
2284 err = d40_config_memcpy(d40c);
2287 d40c->configured = true;
2293 static void __d40_set_prio_rt(struct d40_chan *d40c, int dev_type, bool src)
2295 bool realtime = d40c->dma_cfg.realtime;
2296 bool highprio = d40c->dma_cfg.high_priority;
2302 struct d40_gen_dmac *dmac = &d40c->base->gen_dmac;
2313 if (!src && chan_is_logical(d40c))
2322 writel(bit, d40c->base->virtbase + prioreg + group * 4);
2323 writel(bit, d40c->base->virtbase + rtreg + group * 4);
2326 static void d40_set_prio_realtime(struct d40_chan *d40c)
2328 if (d40c->base->rev < 3)
2331 if ((d40c->dma_cfg.dir == DMA_DEV_TO_MEM) ||
2332 (d40c->dma_cfg.dir == DMA_DEV_TO_DEV))
2333 __d40_set_prio_rt(d40c, d40c->dma_cfg.dev_type, true);
2335 if ((d40c->dma_cfg.dir == DMA_MEM_TO_DEV) ||
2336 (d40c->dma_cfg.dir == DMA_DEV_TO_DEV))
2337 __d40_set_prio_rt(d40c, d40c->dma_cfg.dev_type, false);
2393 struct d40_chan *d40c =
2396 spin_lock_irqsave(&d40c->lock, flags);
2401 if (!d40c->configured) {
2402 err = d40_config_memcpy(d40c);
2404 chan_err(d40c, "Failed to configure memcpy channel\n");
2409 err = d40_allocate_channel(d40c, &is_free_phy);
2411 chan_err(d40c, "Failed to allocate channel\n");
2412 d40c->configured = false;
2416 pm_runtime_get_sync(d40c->base->dev);
2418 d40_set_prio_realtime(d40c);
2420 if (chan_is_logical(d40c)) {
2421 if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM)
2422 d40c->lcpa = d40c->base->lcpa_base +
2423 d40c->dma_cfg.dev_type * D40_LCPA_CHAN_SIZE;
2425 d40c->lcpa = d40c->base->lcpa_base +
2426 d40c->dma_cfg.dev_type *
2430 d40c->src_def_cfg |= BIT(D40_SREG_CFG_LOG_GIM_POS);
2431 d40c->dst_def_cfg |= BIT(D40_SREG_CFG_LOG_GIM_POS);
2434 dev_dbg(chan2dev(d40c), "allocated %s channel (phy %d%s)\n",
2435 chan_is_logical(d40c) ? "logical" : "physical",
2436 d40c->phy_chan->num,
2437 d40c->dma_cfg.use_fixed_channel ? ", fixed" : "");
2446 d40_config_write(d40c);
2448 pm_runtime_mark_last_busy(d40c->base->dev);
2449 pm_runtime_put_autosuspend(d40c->base->dev);
2450 spin_unlock_irqrestore(&d40c->lock, flags);
2456 struct d40_chan *d40c =
2461 if (d40c->phy_chan == NULL) {
2462 chan_err(d40c, "Cannot free unallocated channel\n");
2466 spin_lock_irqsave(&d40c->lock, flags);
2468 err = d40_free_dma(d40c);
2471 chan_err(d40c, "Failed to free channel\n");
2472 spin_unlock_irqrestore(&d40c->lock, flags);
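The fragments around source lines 2393-2472 are the channel allocation and free callbacks: allocation falls back to a memcpy configuration when none was set, grabs a physical or logical channel, sets the priority/realtime bits and the LCPA pointer, and writes the channel configuration; the free path tears all of that down. Clients never call these directly, they arrive via channel request/release. A minimal sketch, assuming a DT/ACPI-described consumer and a hypothetical request name "rx":

#include <linux/dmaengine.h>

static struct dma_chan *example_get_channel(struct device *dev)
{
        /* Resolving and taking the channel ends up in the controller's
         * device_alloc_chan_resources callback (the allocation path above).
         * Callers must check the result with IS_ERR(). */
        return dma_request_chan(dev, "rx");
}

static void example_put_channel(struct dma_chan *chan)
{
        /* Releasing it runs the device_free_chan_resources callback. */
        dma_release_channel(chan);
}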
2542 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2545 if (d40c->phy_chan == NULL) {
2546 chan_err(d40c, "Cannot read status of unallocated channel\n");
2554 if (d40_is_paused(d40c))
2562 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2565 if (d40c->phy_chan == NULL) {
2566 chan_err(d40c, "Channel is not allocated!\n");
2570 spin_lock_irqsave(&d40c->lock, flags);
2572 list_splice_tail_init(&d40c->pending_queue, &d40c->queue);
2575 if (!d40c->busy)
2576 (void) d40_queue_start(d40c);
2578 spin_unlock_irqrestore(&d40c->lock, flags);
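Submission is split into the usual dmaengine two-step: the tx_submit fragment queues the descriptor onto pending_queue under the channel lock, and the issue_pending fragment splices pending_queue onto queue and calls d40_queue_start() if the channel is idle. A hedged client-side sketch of driving that path for a device-to-memory transfer (buf, len and the completion callback are placeholders):

#include <linux/dmaengine.h>

static int example_start_rx(struct dma_chan *chan, dma_addr_t buf, size_t len,
                            dma_async_tx_callback done, void *done_arg)
{
        struct dma_async_tx_descriptor *desc;
        dma_cookie_t cookie;

        desc = dmaengine_prep_slave_single(chan, buf, len, DMA_DEV_TO_MEM,
                                           DMA_PREP_INTERRUPT);
        if (!desc)
                return -ENOMEM;

        desc->callback = done;
        desc->callback_param = done_arg;

        /* tx_submit: the descriptor goes onto the channel's pending_queue. */
        cookie = dmaengine_submit(desc);
        if (dma_submit_error(cookie))
                return -EINVAL;

        /* issue_pending: pending_queue is spliced onto queue and, if the
         * channel is idle, the first descriptor is loaded and started. */
        dma_async_issue_pending(chan);
        return 0;
}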
2584 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2587 if (d40c->phy_chan == NULL) {
2588 chan_err(d40c, "Channel is not allocated!\n");
2592 spin_lock_irqsave(&d40c->lock, flags);
2594 pm_runtime_get_sync(d40c->base->dev);
2595 ret = d40_channel_execute_command(d40c, D40_DMA_STOP);
2597 chan_err(d40c, "Failed to stop channel\n");
2599 d40_term_all(d40c);
2600 pm_runtime_mark_last_busy(d40c->base->dev);
2601 pm_runtime_put_autosuspend(d40c->base->dev);
2602 if (d40c->busy) {
2603 pm_runtime_mark_last_busy(d40c->base->dev);
2604 pm_runtime_put_autosuspend(d40c->base->dev);
2606 d40c->busy = false;
2608 spin_unlock_irqrestore(&d40c->lock, flags);
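The terminate fragment stops the channel with D40_DMA_STOP, flushes every descriptor list through d40_term_all(), and drops the extra runtime-PM reference if the channel was marked busy. Client code reaches it through the terminate helpers; a minimal sketch:

#include <linux/dmaengine.h>

static void example_abort(struct dma_chan *chan)
{
        /* device_terminate_all (the fragment above) plus a wait for any
         * in-flight completion callbacks to finish. */
        dmaengine_terminate_sync(chan);
}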
2613 dma40_config_to_halfchannel(struct d40_chan *d40c,
2619 if (chan_is_logical(d40c)) {
2648 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2650 memcpy(&d40c->slave_config, config, sizeof(*config));
2660 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2661 struct stedma40_chan_cfg *cfg = &d40c->dma_cfg;
2667 if (d40c->phy_chan == NULL) {
2668 chan_err(d40c, "Channel is not allocated!\n");
2681 dev_dbg(d40c->base->dev,
2697 dev_dbg(d40c->base->dev,
2709 dev_err(d40c->base->dev,
2716 dev_err(d40c->base->dev, "no address supplied\n");
2721 dev_err(d40c->base->dev,
2750 ret = dma40_config_to_halfchannel(d40c, &cfg->src_info,
2755 ret = dma40_config_to_halfchannel(d40c, &cfg->dst_info,
2761 if (chan_is_logical(d40c))
2762 d40_log_cfg(cfg, &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
2764 d40_phy_cfg(cfg, &d40c->src_def_cfg, &d40c->dst_def_cfg);
2767 d40c->runtime_addr = config_addr;
2768 d40c->runtime_direction = direction;
2769 dev_dbg(d40c->base->dev,
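The runtime-configuration fragments show device_config caching the dma_slave_config into d40c->slave_config, plus a larger body (source lines ~2660-2769) that validates the direction and addresses, converts each half-channel's burst and width via dma40_config_to_halfchannel() into logical (lcsp1/lcsp3) or physical (src_def_cfg/dst_def_cfg) settings, and records runtime_addr/runtime_direction. A hedged consumer-side sketch of supplying that configuration (the FIFO address and widths are placeholders for a real peripheral):

#include <linux/dmaengine.h>

static int example_config_rx(struct dma_chan *chan, dma_addr_t fifo_addr)
{
        struct dma_slave_config cfg = {
                .src_addr       = fifo_addr,
                .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
                .src_maxburst   = 8,
        };

        /* Lands in the device_config hook; the cached settings are applied to
         * the channel when the next transfer is prepared. */
        return dmaengine_slave_config(chan, &cfg);
}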
2787 struct d40_chan *d40c;
2792 d40c = &chans[i];
2793 d40c->base = base;
2794 d40c->chan.device = dma;
2796 spin_lock_init(&d40c->lock);
2798 d40c->log_num = D40_PHY_CHAN;
2800 INIT_LIST_HEAD(&d40c->done);
2801 INIT_LIST_HEAD(&d40c->active);
2802 INIT_LIST_HEAD(&d40c->queue);
2803 INIT_LIST_HEAD(&d40c->pending_queue);
2804 INIT_LIST_HEAD(&d40c->client);
2805 INIT_LIST_HEAD(&d40c->prepare_queue);
2807 tasklet_setup(&d40c->tasklet, dma_tasklet);
2809 list_add_tail(&d40c->chan.device_node,