Lines matching refs: d40c
628 static struct device *chan2dev(struct d40_chan *d40c)
630 return &d40c->chan.dev->device;
652 #define chan_err(d40c, format, arg...) \
653 d40_err(chan2dev(d40c), format, ## arg)
659 static int d40_pool_lli_alloc(struct d40_chan *d40c, struct d40_desc *d40d,
662 bool is_log = chan_is_logical(d40c);
694 d40d->lli_pool.dma_addr = dma_map_single(d40c->base->dev,
699 if (dma_mapping_error(d40c->base->dev,
711 static void d40_pool_lli_free(struct d40_chan *d40c, struct d40_desc *d40d)
714 dma_unmap_single(d40c->base->dev, d40d->lli_pool.dma_addr,
726 static int d40_lcla_alloc_one(struct d40_chan *d40c,
733 spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);
740 int idx = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP + i;
742 if (!d40c->base->lcla_pool.alloc_map[idx]) {
743 d40c->base->lcla_pool.alloc_map[idx] = d40d;
750 spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);
755 static int d40_lcla_free_all(struct d40_chan *d40c,
762 if (chan_is_physical(d40c))
765 spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);
768 int idx = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP + i;
770 if (d40c->base->lcla_pool.alloc_map[idx] == d40d) {
771 d40c->base->lcla_pool.alloc_map[idx] = NULL;
780 spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);
791 static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
795 if (!list_empty(&d40c->client)) {
799 list_for_each_entry_safe(d, _d, &d40c->client, node) {
810 desc = kmem_cache_zalloc(d40c->base->desc_slab, GFP_NOWAIT);
818 static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d)
821 d40_pool_lli_free(d40c, d40d);
822 d40_lcla_free_all(d40c, d40d);
823 kmem_cache_free(d40c->base->desc_slab, d40d);
826 static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc)
828 list_add_tail(&desc->node, &d40c->active);
848 static void d40_desc_done(struct d40_chan *d40c, struct d40_desc *desc)
850 list_add_tail(&desc->node, &d40c->done);
965 static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d)
967 if (chan_is_physical(d40c)) {
968 d40_phy_lli_load(d40c, d40d);
971 d40_log_lli_to_lcxa(d40c, d40d);
974 static struct d40_desc *d40_first_active_get(struct d40_chan *d40c)
976 return list_first_entry_or_null(&d40c->active, struct d40_desc, node);
980 static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc)
984 list_add_tail(&desc->node, &d40c->pending_queue);
987 static struct d40_desc *d40_first_pending(struct d40_chan *d40c)
989 return list_first_entry_or_null(&d40c->pending_queue, struct d40_desc,
993 static struct d40_desc *d40_first_queued(struct d40_chan *d40c)
995 return list_first_entry_or_null(&d40c->queue, struct d40_desc, node);
998 static struct d40_desc *d40_first_done(struct d40_chan *d40c)
1000 return list_first_entry_or_null(&d40c->done, struct d40_desc, node);
1063 static int __d40_execute_command_phy(struct d40_chan *d40c,
1074 ret = __d40_execute_command_phy(d40c, D40_DMA_SUSPEND_REQ);
1079 spin_lock_irqsave(&d40c->base->execmd_lock, flags);
1081 if (d40c->phy_chan->num % 2 == 0)
1082 active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
1084 active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;
1088 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
1089 D40_CHAN_POS(d40c->phy_chan->num);
1095 wmask = 0xffffffff & ~(D40_CHAN_POS_MASK(d40c->phy_chan->num));
1096 writel(wmask | (command << D40_CHAN_POS(d40c->phy_chan->num)),
1103 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
1104 D40_CHAN_POS(d40c->phy_chan->num);
1119 chan_err(d40c,
1121 d40c->phy_chan->num, d40c->log_num,
1129 spin_unlock_irqrestore(&d40c->base->execmd_lock, flags);
1133 static void d40_term_all(struct d40_chan *d40c)
1139 while ((d40d = d40_first_done(d40c))) {
1141 d40_desc_free(d40c, d40d);
1145 while ((d40d = d40_first_active_get(d40c))) {
1147 d40_desc_free(d40c, d40d);
1151 while ((d40d = d40_first_queued(d40c))) {
1153 d40_desc_free(d40c, d40d);
1157 while ((d40d = d40_first_pending(d40c))) {
1159 d40_desc_free(d40c, d40d);
1163 if (!list_empty(&d40c->client))
1164 list_for_each_entry_safe(d40d, _d, &d40c->client, node) {
1166 d40_desc_free(d40c, d40d);
1170 if (!list_empty(&d40c->prepare_queue))
1172 &d40c->prepare_queue, node) {
1174 d40_desc_free(d40c, d40d);
1177 d40c->pending_tx = 0;
1180 static void __d40_config_set_event(struct d40_chan *d40c,
1184 void __iomem *addr = chan_base(d40c) + reg;
1224 chan_err(d40c,
1226 "status %x\n", d40c->phy_chan->num,
1227 d40c->log_num, status);
1248 dev_dbg(chan2dev(d40c),
1263 static void d40_config_set_event(struct d40_chan *d40c,
1266 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type);
1269 if ((d40c->dma_cfg.dir == DMA_DEV_TO_MEM) ||
1270 (d40c->dma_cfg.dir == DMA_DEV_TO_DEV))
1271 __d40_config_set_event(d40c, event_type, event,
1274 if (d40c->dma_cfg.dir != DMA_DEV_TO_MEM)
1275 __d40_config_set_event(d40c, event_type, event,
1279 static u32 d40_chan_has_events(struct d40_chan *d40c)
1281 void __iomem *chanbase = chan_base(d40c);
1291 __d40_execute_command_log(struct d40_chan *d40c, enum d40_command command)
1298 if (d40c->phy_chan->num % 2 == 0)
1299 active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
1301 active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;
1304 spin_lock_irqsave(&d40c->phy_chan->lock, flags);
1311 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
1312 D40_CHAN_POS(d40c->phy_chan->num);
1315 d40_config_set_event(d40c, D40_SUSPEND_REQ_EVENTLINE);
1317 d40_config_set_event(d40c, D40_DEACTIVATE_EVENTLINE);
1319 if (!d40_chan_has_events(d40c) && (command == D40_DMA_STOP))
1320 ret = __d40_execute_command_phy(d40c, command);
1326 d40_config_set_event(d40c, D40_ACTIVATE_EVENTLINE);
1327 ret = __d40_execute_command_phy(d40c, command);
1335 spin_unlock_irqrestore(&d40c->phy_chan->lock, flags);
1339 static int d40_channel_execute_command(struct d40_chan *d40c,
1342 if (chan_is_logical(d40c))
1343 return __d40_execute_command_log(d40c, command);
1345 return __d40_execute_command_phy(d40c, command);
1348 static u32 d40_get_prmo(struct d40_chan *d40c)
1367 if (chan_is_physical(d40c))
1368 return phy_map[d40c->dma_cfg.mode_opt];
1370 return log_map[d40c->dma_cfg.mode_opt];
1373 static void d40_config_write(struct d40_chan *d40c)
1379 addr_base = (d40c->phy_chan->num % 2) * 4;
1381 var = ((u32)(chan_is_logical(d40c)) + 1) <<
1382 D40_CHAN_POS(d40c->phy_chan->num);
1383 writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base);
1386 var = d40_get_prmo(d40c) << D40_CHAN_POS(d40c->phy_chan->num);
1388 writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base);
1390 if (chan_is_logical(d40c)) {
1391 int lidx = (d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS)
1393 void __iomem *chanbase = chan_base(d40c);
1396 writel(d40c->src_def_cfg, chanbase + D40_CHAN_REG_SSCFG);
1397 writel(d40c->dst_def_cfg, chanbase + D40_CHAN_REG_SDCFG);
1409 static u32 d40_residue(struct d40_chan *d40c)
1413 if (chan_is_logical(d40c))
1414 num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK)
1417 u32 val = readl(chan_base(d40c) + D40_CHAN_REG_SDELT);
1422 return num_elt * d40c->dma_cfg.dst_info.data_width;
1425 static bool d40_tx_is_linked(struct d40_chan *d40c)
1429 if (chan_is_logical(d40c))
1430 is_link = readl(&d40c->lcpa->lcsp3) & D40_MEM_LCSP3_DLOS_MASK;
1432 is_link = readl(chan_base(d40c) + D40_CHAN_REG_SDLNK)
1440 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
1444 if (d40c->phy_chan == NULL) {
1445 chan_err(d40c, "Channel is not allocated!\n");
1449 if (!d40c->busy)
1452 spin_lock_irqsave(&d40c->lock, flags);
1453 pm_runtime_get_sync(d40c->base->dev);
1455 res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
1457 pm_runtime_mark_last_busy(d40c->base->dev);
1458 pm_runtime_put_autosuspend(d40c->base->dev);
1459 spin_unlock_irqrestore(&d40c->lock, flags);
1465 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
1469 if (d40c->phy_chan == NULL) {
1470 chan_err(d40c, "Channel is not allocated!\n");
1474 if (!d40c->busy)
1477 spin_lock_irqsave(&d40c->lock, flags);
1478 pm_runtime_get_sync(d40c->base->dev);
1481 if (d40_residue(d40c) || d40_tx_is_linked(d40c))
1482 res = d40_channel_execute_command(d40c, D40_DMA_RUN);
1484 pm_runtime_mark_last_busy(d40c->base->dev);
1485 pm_runtime_put_autosuspend(d40c->base->dev);
1486 spin_unlock_irqrestore(&d40c->lock, flags);
1492 struct d40_chan *d40c = container_of(tx->chan,
1499 spin_lock_irqsave(&d40c->lock, flags);
1501 d40_desc_queue(d40c, d40d);
1502 spin_unlock_irqrestore(&d40c->lock, flags);
1507 static int d40_start(struct d40_chan *d40c)
1509 return d40_channel_execute_command(d40c, D40_DMA_RUN);
1512 static struct d40_desc *d40_queue_start(struct d40_chan *d40c)
1518 d40d = d40_first_queued(d40c);
1521 if (!d40c->busy) {
1522 d40c->busy = true;
1523 pm_runtime_get_sync(d40c->base->dev);
1530 d40_desc_submit(d40c, d40d);
1533 d40_desc_load(d40c, d40d);
1536 err = d40_start(d40c);
1546 static void dma_tc_handle(struct d40_chan *d40c)
1551 d40d = d40_first_active_get(d40c);
1564 && !d40_tx_is_linked(d40c)
1565 && !d40_residue(d40c)) {
1566 d40_lcla_free_all(d40c, d40d);
1567 d40_desc_load(d40c, d40d);
1568 (void) d40_start(d40c);
1574 d40_lcla_free_all(d40c, d40d);
1577 d40_desc_load(d40c, d40d);
1579 (void) d40_start(d40c);
1583 if (d40_queue_start(d40c) == NULL) {
1584 d40c->busy = false;
1586 pm_runtime_mark_last_busy(d40c->base->dev);
1587 pm_runtime_put_autosuspend(d40c->base->dev);
1591 d40_desc_done(d40c, d40d);
1594 d40c->pending_tx++;
1595 tasklet_schedule(&d40c->tasklet);
1601 struct d40_chan *d40c = from_tasklet(d40c, t, tasklet);
1607 spin_lock_irqsave(&d40c->lock, flags);
1610 d40d = d40_first_done(d40c);
1613 d40d = d40_first_active_get(d40c);
1625 if (d40c->pending_tx == 0) {
1626 spin_unlock_irqrestore(&d40c->lock, flags);
1637 d40_desc_free(d40c, d40d);
1640 d40_lcla_free_all(d40c, d40d);
1641 list_add_tail(&d40d->node, &d40c->client);
1646 d40c->pending_tx--;
1648 if (d40c->pending_tx)
1649 tasklet_schedule(&d40c->tasklet);
1651 spin_unlock_irqrestore(&d40c->lock, flags);
1659 if (d40c->pending_tx > 0)
1660 d40c->pending_tx--;
1661 spin_unlock_irqrestore(&d40c->lock, flags);
1670 struct d40_chan *d40c;
1695 d40c = base->lookup_phy_chans[idx];
1697 d40c = base->lookup_log_chans[il[row].offset + idx];
1699 if (!d40c) {
1710 spin_lock(&d40c->lock);
1713 dma_tc_handle(d40c);
1718 spin_unlock(&d40c->lock);
1726 static int d40_validate_conf(struct d40_chan *d40c,
1733 chan_err(d40c, "Invalid direction.\n");
1737 if ((is_log && conf->dev_type > d40c->base->num_log_chans) ||
1738 (!is_log && conf->dev_type > d40c->base->num_phy_chans) ||
1740 chan_err(d40c, "Invalid device type (%d)\n", conf->dev_type);
1749 chan_err(d40c, "periph to periph not supported\n");
1762 chan_err(d40c, "src (burst x width) != dst (burst x width)\n");
1856 static int d40_allocate_channel(struct d40_chan *d40c, bool *first_phy_user)
1858 int dev_type = d40c->dma_cfg.dev_type;
1867 bool is_log = d40c->dma_cfg.mode == STEDMA40_MODE_LOGICAL;
1869 phys = d40c->base->phy_res;
1870 num_phy_chans = d40c->base->num_phy_chans;
1872 if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM) {
1875 } else if (d40c->dma_cfg.dir == DMA_MEM_TO_DEV ||
1876 d40c->dma_cfg.dir == DMA_MEM_TO_MEM) {
1887 if (d40c->dma_cfg.dir == DMA_MEM_TO_MEM) {
1889 if (d40c->dma_cfg.use_fixed_channel) {
1890 i = d40c->dma_cfg.phy_channel;
1904 for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
1917 d40c->phy_chan = &phys[i];
1918 d40c->log_num = D40_PHY_CHAN;
1925 for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
1928 if (d40c->dma_cfg.use_fixed_channel) {
1929 i = d40c->dma_cfg.phy_channel;
1932 dev_err(chan2dev(d40c),
1941 dev_err(chan2dev(d40c),
1970 d40c->phy_chan = &phys[i];
1971 d40c->log_num = log_num;
1975 d40c->base->lookup_log_chans[d40c->log_num] = d40c;
1977 d40c->base->lookup_phy_chans[d40c->phy_chan->num] = d40c;
1983 static int d40_config_memcpy(struct d40_chan *d40c)
1985 dma_cap_mask_t cap = d40c->chan.device->cap_mask;
1988 d40c->dma_cfg = dma40_memcpy_conf_log;
1989 d40c->dma_cfg.dev_type = dma40_memcpy_channels[d40c->chan.chan_id];
1991 d40_log_cfg(&d40c->dma_cfg,
1992 &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
1996 d40c->dma_cfg = dma40_memcpy_conf_phy;
1999 d40c->dst_def_cfg |= BIT(D40_SREG_CFG_TIM_POS);
2002 d40c->src_def_cfg |= BIT(D40_SREG_CFG_EIM_POS);
2003 d40c->dst_def_cfg |= BIT(D40_SREG_CFG_EIM_POS);
2006 chan_err(d40c, "No memcpy\n");
2013 static int d40_free_dma(struct d40_chan *d40c)
2017 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type);
2018 struct d40_phy_res *phy = d40c->phy_chan;
2022 d40_term_all(d40c);
2025 chan_err(d40c, "phy == null\n");
2031 chan_err(d40c, "channel already free\n");
2035 if (d40c->dma_cfg.dir == DMA_MEM_TO_DEV ||
2036 d40c->dma_cfg.dir == DMA_MEM_TO_MEM)
2038 else if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM)
2041 chan_err(d40c, "Unknown direction\n");
2045 pm_runtime_get_sync(d40c->base->dev);
2046 res = d40_channel_execute_command(d40c, D40_DMA_STOP);
2048 chan_err(d40c, "stop failed\n");
2052 d40_alloc_mask_free(phy, is_src, chan_is_logical(d40c) ? event : 0);
2054 if (chan_is_logical(d40c))
2055 d40c->base->lookup_log_chans[d40c->log_num] = NULL;
2057 d40c->base->lookup_phy_chans[phy->num] = NULL;
2059 if (d40c->busy) {
2060 pm_runtime_mark_last_busy(d40c->base->dev);
2061 pm_runtime_put_autosuspend(d40c->base->dev);
2064 d40c->busy = false;
2065 d40c->phy_chan = NULL;
2066 d40c->configured = false;
2068 pm_runtime_mark_last_busy(d40c->base->dev);
2069 pm_runtime_put_autosuspend(d40c->base->dev);
2073 static bool d40_is_paused(struct d40_chan *d40c)
2075 void __iomem *chanbase = chan_base(d40c);
2080 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type);
2082 spin_lock_irqsave(&d40c->lock, flags);
2084 if (chan_is_physical(d40c)) {
2085 if (d40c->phy_chan->num % 2 == 0)
2086 active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
2088 active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;
2091 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
2092 D40_CHAN_POS(d40c->phy_chan->num);
2098 if (d40c->dma_cfg.dir == DMA_MEM_TO_DEV ||
2099 d40c->dma_cfg.dir == DMA_MEM_TO_MEM) {
2101 } else if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM) {
2104 chan_err(d40c, "Unknown direction\n");
2114 spin_unlock_irqrestore(&d40c->lock, flags);
2121 struct d40_chan *d40c =
2126 spin_lock_irqsave(&d40c->lock, flags);
2127 bytes_left = d40_residue(d40c);
2128 spin_unlock_irqrestore(&d40c->lock, flags);
2299 struct d40_chan *d40c =
2304 err = d40_validate_conf(d40c, info);
2306 d40c->dma_cfg = *info;
2308 err = d40_config_memcpy(d40c);
2311 d40c->configured = true;
2316 static void __d40_set_prio_rt(struct d40_chan *d40c, int dev_type, bool src)
2318 bool realtime = d40c->dma_cfg.realtime;
2319 bool highprio = d40c->dma_cfg.high_priority;
2325 struct d40_gen_dmac *dmac = &d40c->base->gen_dmac;
2336 if (!src && chan_is_logical(d40c))
2345 writel(bit, d40c->base->virtbase + prioreg + group * 4);
2346 writel(bit, d40c->base->virtbase + rtreg + group * 4);
2349 static void d40_set_prio_realtime(struct d40_chan *d40c)
2351 if (d40c->base->rev < 3)
2354 if ((d40c->dma_cfg.dir == DMA_DEV_TO_MEM) ||
2355 (d40c->dma_cfg.dir == DMA_DEV_TO_DEV))
2356 __d40_set_prio_rt(d40c, d40c->dma_cfg.dev_type, true);
2358 if ((d40c->dma_cfg.dir == DMA_MEM_TO_DEV) ||
2359 (d40c->dma_cfg.dir == DMA_DEV_TO_DEV))
2360 __d40_set_prio_rt(d40c, d40c->dma_cfg.dev_type, false);
2416 struct d40_chan *d40c =
2419 spin_lock_irqsave(&d40c->lock, flags);
2424 if (!d40c->configured) {
2425 err = d40_config_memcpy(d40c);
2427 chan_err(d40c, "Failed to configure memcpy channel\n");
2432 err = d40_allocate_channel(d40c, &is_free_phy);
2434 chan_err(d40c, "Failed to allocate channel\n");
2435 d40c->configured = false;
2439 pm_runtime_get_sync(d40c->base->dev);
2441 d40_set_prio_realtime(d40c);
2443 if (chan_is_logical(d40c)) {
2444 if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM)
2445 d40c->lcpa = d40c->base->lcpa_base +
2446 d40c->dma_cfg.dev_type * D40_LCPA_CHAN_SIZE;
2448 d40c->lcpa = d40c->base->lcpa_base +
2449 d40c->dma_cfg.dev_type *
2453 d40c->src_def_cfg |= BIT(D40_SREG_CFG_LOG_GIM_POS);
2454 d40c->dst_def_cfg |= BIT(D40_SREG_CFG_LOG_GIM_POS);
2457 dev_dbg(chan2dev(d40c), "allocated %s channel (phy %d%s)\n",
2458 chan_is_logical(d40c) ? "logical" : "physical",
2459 d40c->phy_chan->num,
2460 d40c->dma_cfg.use_fixed_channel ? ", fixed" : "");
2469 d40_config_write(d40c);
2471 pm_runtime_mark_last_busy(d40c->base->dev);
2472 pm_runtime_put_autosuspend(d40c->base->dev);
2473 spin_unlock_irqrestore(&d40c->lock, flags);
2479 struct d40_chan *d40c =
2484 if (d40c->phy_chan == NULL) {
2485 chan_err(d40c, "Cannot free unallocated channel\n");
2489 spin_lock_irqsave(&d40c->lock, flags);
2491 err = d40_free_dma(d40c);
2494 chan_err(d40c, "Failed to free channel\n");
2495 spin_unlock_irqrestore(&d40c->lock, flags);
2565 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2568 if (d40c->phy_chan == NULL) {
2569 chan_err(d40c, "Cannot read status of unallocated channel\n");
2577 if (d40_is_paused(d40c))
2585 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2588 if (d40c->phy_chan == NULL) {
2589 chan_err(d40c, "Channel is not allocated!\n");
2593 spin_lock_irqsave(&d40c->lock, flags);
2595 list_splice_tail_init(&d40c->pending_queue, &d40c->queue);
2598 if (!d40c->busy)
2599 (void) d40_queue_start(d40c);
2601 spin_unlock_irqrestore(&d40c->lock, flags);
2607 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2610 if (d40c->phy_chan == NULL) {
2611 chan_err(d40c, "Channel is not allocated!\n");
2615 spin_lock_irqsave(&d40c->lock, flags);
2617 pm_runtime_get_sync(d40c->base->dev);
2618 ret = d40_channel_execute_command(d40c, D40_DMA_STOP);
2620 chan_err(d40c, "Failed to stop channel\n");
2622 d40_term_all(d40c);
2623 pm_runtime_mark_last_busy(d40c->base->dev);
2624 pm_runtime_put_autosuspend(d40c->base->dev);
2625 if (d40c->busy) {
2626 pm_runtime_mark_last_busy(d40c->base->dev);
2627 pm_runtime_put_autosuspend(d40c->base->dev);
2629 d40c->busy = false;
2631 spin_unlock_irqrestore(&d40c->lock, flags);
2636 dma40_config_to_halfchannel(struct d40_chan *d40c,
2642 if (chan_is_logical(d40c)) {
2671 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2673 memcpy(&d40c->slave_config, config, sizeof(*config));
2683 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2684 struct stedma40_chan_cfg *cfg = &d40c->dma_cfg;
2690 if (d40c->phy_chan == NULL) {
2691 chan_err(d40c, "Channel is not allocated!\n");
2704 dev_dbg(d40c->base->dev,
2720 dev_dbg(d40c->base->dev,
2732 dev_err(d40c->base->dev,
2739 dev_err(d40c->base->dev, "no address supplied\n");
2744 dev_err(d40c->base->dev,
2773 ret = dma40_config_to_halfchannel(d40c, &cfg->src_info,
2778 ret = dma40_config_to_halfchannel(d40c, &cfg->dst_info,
2784 if (chan_is_logical(d40c))
2785 d40_log_cfg(cfg, &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
2787 d40_phy_cfg(cfg, &d40c->src_def_cfg, &d40c->dst_def_cfg);
2790 d40c->runtime_addr = config_addr;
2791 d40c->runtime_direction = direction;
2792 dev_dbg(d40c->base->dev,
2810 struct d40_chan *d40c;
2815 d40c = &chans[i];
2816 d40c->base = base;
2817 d40c->chan.device = dma;
2819 spin_lock_init(&d40c->lock);
2821 d40c->log_num = D40_PHY_CHAN;
2823 INIT_LIST_HEAD(&d40c->done);
2824 INIT_LIST_HEAD(&d40c->active);
2825 INIT_LIST_HEAD(&d40c->queue);
2826 INIT_LIST_HEAD(&d40c->pending_queue);
2827 INIT_LIST_HEAD(&d40c->client);
2828 INIT_LIST_HEAD(&d40c->prepare_queue);
2830 tasklet_setup(&d40c->tasklet, dma_tasklet);
2832 list_add_tail(&d40c->chan.device_node,