Lines Matching refs:base
327 * @base: Pointer to memory area when the pre_alloc_lli's are not large
331 * @size: The size in bytes of the memory at base or the size of pre_alloc_lli.
336 void *base;
347 * points into the lli_pool, to base if lli_len > 1 or to pre_alloc_lli if
383 * @base: The virtual address of LCLA. 18-bit aligned.
393 void *base;
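The kernel-doc fragments above (source lines 327-393) describe two pools: a per-descriptor LLI pool and the shared LCLA pool. A minimal reconstruction of the documented fields, inferred from the comments rather than copied from the driver (member order, the pre_alloc size, and the companion members are assumptions):

    #include <linux/types.h>
    #include <linux/spinlock.h>

    struct d40_desc;                        /* forward declaration for the sketch */

    struct hyp_lli_pool {
            void            *base;          /* NULL while pre_alloc_lli suffices */
            int              size;          /* bytes at base, or of pre_alloc_lli */
            dma_addr_t       dma_addr;      /* streaming mapping of the pool */
            u8               pre_alloc_lli[64];     /* inline fast path (size assumed) */
    };

    struct hyp_lcla_pool {
            void            *base;          /* virtual address of LCLA, 18-bit aligned */
            dma_addr_t       dma_addr;
            spinlock_t       lock;          /* guards alloc_map (lines 708-755) */
            struct d40_desc **alloc_map;    /* LCLA slot -> owning descriptor */
    };

The invariant worth noting: base stays NULL while the inline pre_alloc_lli storage suffices, so the free path can unconditionally kfree(base).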
448 * @base: Pointer to the device instance struct.
475 struct d40_base *base;
525 * @virtbase: The virtual base address of the DMA controller's registers.
620 return chan->base->virtbase + D40_DREG_PCBASE +
639 void *base;
647 base = d40d->lli_pool.pre_alloc_lli;
649 d40d->lli_pool.base = NULL;
653 base = kmalloc(d40d->lli_pool.size + align, GFP_NOWAIT);
654 d40d->lli_pool.base = base;
656 if (d40d->lli_pool.base == NULL)
661 d40d->lli_log.src = PTR_ALIGN(base, align);
666 d40d->lli_phy.src = PTR_ALIGN(base, align);
669 d40d->lli_pool.dma_addr = dma_map_single(d40c->base->dev,
674 if (dma_mapping_error(d40c->base->dev,
676 kfree(d40d->lli_pool.base);
677 d40d->lli_pool.base = NULL;
689 dma_unmap_single(d40c->base->dev, d40d->lli_pool.dma_addr,
692 kfree(d40d->lli_pool.base);
693 d40d->lli_pool.base = NULL;
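Source lines 639-693 trace the pool's allocate/align/map/unwind shape: small descriptors use the inline pre_alloc_lli storage, larger ones over-allocate with kmalloc so the result can be aligned by hand, and a failed dma_map_single unwinds the kmalloc. A sketch of that shape; the hyp_ names are placeholders and the DMA direction is assumed:

    #include <linux/slab.h>
    #include <linux/dma-mapping.h>

    struct hyp_pool {
            void    *base;                  /* kmalloc'd backing, or NULL */
            u8       pre_alloc[64];         /* inline storage for small runs */
    };

    static void *hyp_pool_alloc(struct device *dev, struct hyp_pool *pool,
                                size_t size, size_t align, dma_addr_t *dma)
    {
            void *raw;

            if (size <= sizeof(pool->pre_alloc)) {
                    pool->base = NULL;      /* nothing to kfree later */
                    raw = pool->pre_alloc;
            } else {
                    raw = kmalloc(size + align, GFP_NOWAIT);
                    pool->base = raw;       /* kept so the free path can kfree it */
                    if (!raw)
                            return NULL;
            }

            raw = PTR_ALIGN(raw, align);
            *dma = dma_map_single(dev, raw, size, DMA_TO_DEVICE);
            if (dma_mapping_error(dev, *dma)) {
                    kfree(pool->base);      /* safe no-op for the inline case */
                    pool->base = NULL;
                    return NULL;
            }
            return raw;
    }

The free path (lines 689-693) is the mirror image: dma_unmap_single, then kfree(base), which does nothing when the inline storage was used.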
708 spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);
717 if (!d40c->base->lcla_pool.alloc_map[idx]) {
718 d40c->base->lcla_pool.alloc_map[idx] = d40d;
725 spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);
740 spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);
745 if (d40c->base->lcla_pool.alloc_map[idx] == d40d) {
746 d40c->base->lcla_pool.alloc_map[idx] = NULL;
755 spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);
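Lines 708-755 show the LCLA slot discipline: every claim and release of alloc_map[idx] happens under lcla_pool.lock with interrupts disabled, and release checks ownership before clearing. A claim-side sketch (the idx computation is not in the listing, so it stays a parameter here):

    static bool hyp_lcla_claim(struct d40_chan *d40c, struct d40_desc *d40d,
                               int idx)
    {
            unsigned long flags;
            bool claimed = false;

            spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);
            if (!d40c->base->lcla_pool.alloc_map[idx]) {
                    d40c->base->lcla_pool.alloc_map[idx] = d40d;    /* claim slot */
                    claimed = true;
            }
            spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);

            return claimed;
    }

Release mirrors this, clearing the slot only when alloc_map[idx] == d40d (line 745), so a stale descriptor can never free another descriptor's slot.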
785 desc = kmem_cache_zalloc(d40c->base->desc_slab, GFP_NOWAIT);
798 kmem_cache_free(d40c->base->desc_slab, d40d);
810 void __iomem *base = chan_base(chan);
812 writel(lli_src->reg_cfg, base + D40_CHAN_REG_SSCFG);
813 writel(lli_src->reg_elt, base + D40_CHAN_REG_SSELT);
814 writel(lli_src->reg_ptr, base + D40_CHAN_REG_SSPTR);
815 writel(lli_src->reg_lnk, base + D40_CHAN_REG_SSLNK);
817 writel(lli_dst->reg_cfg, base + D40_CHAN_REG_SDCFG);
818 writel(lli_dst->reg_elt, base + D40_CHAN_REG_SDELT);
819 writel(lli_dst->reg_ptr, base + D40_CHAN_REG_SDPTR);
820 writel(lli_dst->reg_lnk, base + D40_CHAN_REG_SDLNK);
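Line 620 is the truncated head of the per-channel register window computation, and lines 810-820 then program one physical LLI through that window: four src registers (SSCFG/SSELT/SSPTR/SSLNK) followed by the four dst counterparts. The second line of the window arithmetic below is an assumption consistent with the fragment:

    static void __iomem *hyp_chan_base(struct d40_chan *chan)
    {
            /* Each physical channel owns one D40_DREG_PCDELTA-sized window. */
            return chan->base->virtbase + D40_DREG_PCBASE +
                   chan->phy_chan->num * D40_DREG_PCDELTA;
    }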
830 struct d40_lcla_pool *pool = &chan->base->lcla_pool;
837 bool use_esram_lcla = chan->base->plat_data->use_esram_lcla;
890 struct d40_log_lli *lcla = pool->base + lcla_offset;
924 dma_sync_single_range_for_device(chan->base->dev,
1054 spin_lock_irqsave(&d40c->base->execmd_lock, flags);
1057 active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
1059 active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;
1104 spin_unlock_irqrestore(&d40c->base->execmd_lock, flags);
1274 active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
1276 active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;
1358 writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base);
1363 writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base);
1428 pm_runtime_get_sync(d40c->base->dev);
1432 pm_runtime_mark_last_busy(d40c->base->dev);
1433 pm_runtime_put_autosuspend(d40c->base->dev);
1453 pm_runtime_get_sync(d40c->base->dev);
1459 pm_runtime_mark_last_busy(d40c->base->dev);
1460 pm_runtime_put_autosuspend(d40c->base->dev);
1498 pm_runtime_get_sync(d40c->base->dev);
1561 pm_runtime_mark_last_busy(d40c->base->dev);
1562 pm_runtime_put_autosuspend(d40c->base->dev);
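Lines 1428-1562 (and again around 2021-2045, 2416-2449 and 2594-2604) repeat one runtime-PM bracket around register access. The middle of the sketch stands for whatever each call site actually does:

    pm_runtime_get_sync(d40c->base->dev);   /* controller is powered from here */

    /* ... channel/controller register accesses ... */

    pm_runtime_mark_last_busy(d40c->base->dev);
    pm_runtime_put_autosuspend(d40c->base->dev); /* may suspend after the delay */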
1647 struct d40_base *base = data;
1648 u32 *regs = base->regs_interrupt;
1649 struct d40_interrupt_lookup *il = base->gen_dmac.il;
1650 u32 il_size = base->gen_dmac.il_size;
1652 spin_lock_irqsave(&base->interrupt_lock, flags);
1656 regs[i] = readl(base->virtbase + il[i].src);
1671 d40c = base->lookup_phy_chans[idx];
1673 d40c = base->lookup_log_chans[il[row].offset + idx];
1684 writel(BIT(idx), base->virtbase + il[row].clr);
1691 d40_err(base->dev, "IRQ chan: %ld offset %d idx %d\n",
1697 spin_unlock_irqrestore(&base->interrupt_lock, flags);
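Lines 1647-1697 outline the interrupt handler: latch every status register into regs_interrupt under interrupt_lock, then scan the cached copy, mapping each set bit to a channel and acking it. A skeleton with the per-bit body reduced to comments:

    #include <linux/interrupt.h>

    static irqreturn_t hyp_handle_interrupt(int irq, void *data)
    {
            struct d40_base *base = data;
            struct d40_interrupt_lookup *il = base->gen_dmac.il;
            u32 il_size = base->gen_dmac.il_size;
            u32 *regs = base->regs_interrupt;
            unsigned long flags;
            u32 i;

            spin_lock_irqsave(&base->interrupt_lock, flags);

            /* Latch every status register once, then scan the cached copy. */
            for (i = 0; i < il_size; i++)
                    regs[i] = readl(base->virtbase + il[i].src);

            /*
             * For each set bit: map it to a channel via lookup_phy_chans or
             * lookup_log_chans (lines 1671-1673), service it, and ack with
             * writel(BIT(idx), base->virtbase + il[row].clr).
             */

            spin_unlock_irqrestore(&base->interrupt_lock, flags);
            return IRQ_HANDLED;
    }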
1713 if ((is_log && conf->dev_type > d40c->base->num_log_chans) ||
1714 (!is_log && conf->dev_type > d40c->base->num_phy_chans) ||
1845 phys = d40c->base->phy_res;
1846 num_phy_chans = d40c->base->num_phy_chans;
1880 for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
1901 for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
1951 d40c->base->lookup_log_chans[d40c->log_num] = d40c;
1953 d40c->base->lookup_phy_chans[d40c->phy_chan->num] = d40c;
2021 pm_runtime_get_sync(d40c->base->dev);
2031 d40c->base->lookup_log_chans[d40c->log_num] = NULL;
2033 d40c->base->lookup_phy_chans[phy->num] = NULL;
2036 pm_runtime_mark_last_busy(d40c->base->dev);
2037 pm_runtime_put_autosuspend(d40c->base->dev);
2044 pm_runtime_mark_last_busy(d40c->base->dev);
2045 pm_runtime_put_autosuspend(d40c->base->dev);
2062 active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
2064 active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;
2164 dma_sync_single_for_device(chan->base->dev, desc->lli_pool.dma_addr,
2302 struct d40_gen_dmac *dmac = &d40c->base->gen_dmac;
2322 writel(bit, d40c->base->virtbase + prioreg + group * 4);
2323 writel(bit, d40c->base->virtbase + rtreg + group * 4);
2328 if (d40c->base->rev < 3)
2416 pm_runtime_get_sync(d40c->base->dev);
2422 d40c->lcpa = d40c->base->lcpa_base +
2425 d40c->lcpa = d40c->base->lcpa_base +
2448 pm_runtime_mark_last_busy(d40c->base->dev);
2449 pm_runtime_put_autosuspend(d40c->base->dev);
2594 pm_runtime_get_sync(d40c->base->dev);
2600 pm_runtime_mark_last_busy(d40c->base->dev);
2601 pm_runtime_put_autosuspend(d40c->base->dev);
2603 pm_runtime_mark_last_busy(d40c->base->dev);
2604 pm_runtime_put_autosuspend(d40c->base->dev);
2681 dev_dbg(d40c->base->dev,
2697 dev_dbg(d40c->base->dev,
2709 dev_err(d40c->base->dev,
2716 dev_err(d40c->base->dev, "no address supplied\n");
2721 dev_err(d40c->base->dev,
2769 dev_dbg(d40c->base->dev,
2782 static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
2793 d40c->base = base;
2814 static void d40_ops_init(struct d40_base *base, struct dma_device *dev)
2843 dev->dev = base->dev;
2846 static int __init d40_dmaengine_init(struct d40_base *base,
2851 d40_chan_init(base, &base->dma_slave, base->log_chans,
2852 0, base->num_log_chans);
2854 dma_cap_zero(base->dma_slave.cap_mask);
2855 dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask);
2856 dma_cap_set(DMA_CYCLIC, base->dma_slave.cap_mask);
2858 d40_ops_init(base, &base->dma_slave);
2860 err = dmaenginem_async_device_register(&base->dma_slave);
2863 d40_err(base->dev, "Failed to register slave channels\n");
2867 d40_chan_init(base, &base->dma_memcpy, base->log_chans,
2868 base->num_log_chans, base->num_memcpy_chans);
2870 dma_cap_zero(base->dma_memcpy.cap_mask);
2871 dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask);
2873 d40_ops_init(base, &base->dma_memcpy);
2875 err = dmaenginem_async_device_register(&base->dma_memcpy);
2878 d40_err(base->dev,
2883 d40_chan_init(base, &base->dma_both, base->phy_chans,
2886 dma_cap_zero(base->dma_both.cap_mask);
2887 dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask);
2888 dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask);
2889 dma_cap_set(DMA_CYCLIC, base->dma_both.cap_mask);
2891 d40_ops_init(base, &base->dma_both);
2892 err = dmaenginem_async_device_register(&base->dma_both);
2895 d40_err(base->dev,
2908 struct d40_base *base = dev_get_drvdata(dev);
2915 if (base->lcpa_regulator)
2916 ret = regulator_disable(base->lcpa_regulator);
2922 struct d40_base *base = dev_get_drvdata(dev);
2925 if (base->lcpa_regulator) {
2926 ret = regulator_enable(base->lcpa_regulator);
2951 static void d40_save_restore_registers(struct d40_base *base, bool save)
2956 for (i = 0; i < base->num_phy_chans; i++) {
2960 if (base->phy_res[i].reserved)
2963 addr = base->virtbase + D40_DREG_PCBASE + i * D40_DREG_PCDELTA;
2966 dma40_backup(addr, &base->reg_val_backup_chan[idx],
2973 dma40_backup(base->virtbase, base->reg_val_backup,
2978 if (base->gen_dmac.backup)
2979 dma40_backup(base->virtbase, base->reg_val_backup_v4,
2980 base->gen_dmac.backup,
2981 base->gen_dmac.backup_size,
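dma40_backup()'s signature is not visible in the listing; a helper with the save/restore duality the call sites imply (lines 2966-2981) might look like:

    static void hyp_backup(void __iomem *regs, u32 *store,
                           const u32 *offsets, int num, bool save)
    {
            int i;

            for (i = 0; i < num; i++) {
                    if (save)
                            store[i] = readl_relaxed(regs + offsets[i]);
                    else
                            writel_relaxed(store[i], regs + offsets[i]);
            }
    }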
2987 struct d40_base *base = dev_get_drvdata(dev);
2989 d40_save_restore_registers(base, true);
2992 if (base->rev != 1)
2993 writel_relaxed(base->gcc_pwr_off_mask,
2994 base->virtbase + D40_DREG_GCC);
3001 struct d40_base *base = dev_get_drvdata(dev);
3003 d40_save_restore_registers(base, false);
3006 base->virtbase + D40_DREG_GCC);
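Lines 2987-3006 order runtime suspend as "save context, then gate clocks via GCC", with rev 1 hardware skipping the GCC write (line 2992); resume is the inverse. A suspend-side sketch:

    static int hyp_runtime_suspend(struct device *dev)
    {
            struct d40_base *base = dev_get_drvdata(dev);

            d40_save_restore_registers(base, true); /* save channel context */
            if (base->rev != 1)                     /* rev 1: no GCC gating */
                    writel_relaxed(base->gcc_pwr_off_mask,
                                   base->virtbase + D40_DREG_GCC);
            return 0;
    }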
3020 static int __init d40_phy_res_init(struct d40_base *base)
3028 val[0] = readl(base->virtbase + D40_DREG_PRSME);
3029 val[1] = readl(base->virtbase + D40_DREG_PRSMO);
3031 for (i = 0; i < base->num_phy_chans; i++) {
3032 base->phy_res[i].num = i;
3036 base->phy_res[i].allocated_src = D40_ALLOC_PHY;
3037 base->phy_res[i].allocated_dst = D40_ALLOC_PHY;
3038 base->phy_res[i].reserved = true;
3046 base->phy_res[i].allocated_src = D40_ALLOC_FREE;
3047 base->phy_res[i].allocated_dst = D40_ALLOC_FREE;
3048 base->phy_res[i].reserved = false;
3051 spin_lock_init(&base->phy_res[i].lock);
3055 for (i = 0; base->plat_data->disabled_channels[i] != -1; i++) {
3056 int chan = base->plat_data->disabled_channels[i];
3058 base->phy_res[chan].allocated_src = D40_ALLOC_PHY;
3059 base->phy_res[chan].allocated_dst = D40_ALLOC_PHY;
3060 base->phy_res[chan].reserved = true;
3069 for (i = 0; i < base->plat_data->num_of_soft_lli_chans; i++) {
3070 int chan = base->plat_data->soft_lli_chans[i];
3072 base->phy_res[chan].use_soft_lli = true;
3075 dev_info(base->dev, "%d of %d physical DMA channels available\n",
3076 num_phy_chans_avail, base->num_phy_chans);
3079 val[0] = readl(base->virtbase + D40_DREG_PRTYP);
3081 for (i = 0; i < base->num_phy_chans; i++) {
3083 if (base->phy_res[i].allocated_src == D40_ALLOC_FREE &&
3085 dev_info(base->dev,
3098 writel(D40_DREG_GCC_ENABLE_ALL, base->virtbase + D40_DREG_GCC);
3099 base->gcc_pwr_off_mask = gcc;
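Lines 3028-3060 read PRSME/PRSMO and mark secure-mode and platform-disabled channels as permanently allocated. The two-bits-per-channel split across the even (PRSME) and odd (PRSMO) registers below is an assumed layout, consistent with the two registers being read:

    static void hyp_scan_secure_chans(struct d40_base *base)
    {
            u32 val[2];
            int i;

            val[0] = readl(base->virtbase + D40_DREG_PRSME);
            val[1] = readl(base->virtbase + D40_DREG_PRSMO);

            for (i = 0; i < base->num_phy_chans; i++) {
                    u32 mode = (val[i % 2] >> ((i / 2) * 2)) & 3;
                    bool secure = (mode == 1);      /* secure channels stay reserved */

                    base->phy_res[i].num = i;
                    base->phy_res[i].allocated_src =
                            secure ? D40_ALLOC_PHY : D40_ALLOC_FREE;
                    base->phy_res[i].allocated_dst =
                            secure ? D40_ALLOC_PHY : D40_ALLOC_FREE;
                    base->phy_res[i].reserved = secure;
                    spin_lock_init(&base->phy_res[i].lock);
            }
    }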
3110 struct d40_base *base;
3132 /* Get I/O for DMAC base address */
3133 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
3138 D40_NAME " I/O base") == NULL)
3196 base = kzalloc(ALIGN(sizeof(struct d40_base), 4) +
3200 if (base == NULL)
3203 base->rev = rev;
3204 base->clk = clk;
3205 base->num_memcpy_chans = num_memcpy_chans;
3206 base->num_phy_chans = num_phy_chans;
3207 base->num_log_chans = num_log_chans;
3208 base->phy_start = res->start;
3209 base->phy_size = resource_size(res);
3210 base->virtbase = virtbase;
3211 base->plat_data = plat_data;
3212 base->dev = &pdev->dev;
3213 base->phy_chans = ((void *)base) + ALIGN(sizeof(struct d40_base), 4);
3214 base->log_chans = &base->phy_chans[num_phy_chans];
3216 if (base->plat_data->num_of_phy_chans == 14) {
3217 base->gen_dmac.backup = d40_backup_regs_v4b;
3218 base->gen_dmac.backup_size = BACKUP_REGS_SZ_V4B;
3219 base->gen_dmac.interrupt_en = D40_DREG_CPCMIS;
3220 base->gen_dmac.interrupt_clear = D40_DREG_CPCICR;
3221 base->gen_dmac.realtime_en = D40_DREG_CRSEG1;
3222 base->gen_dmac.realtime_clear = D40_DREG_CRCEG1;
3223 base->gen_dmac.high_prio_en = D40_DREG_CPSEG1;
3224 base->gen_dmac.high_prio_clear = D40_DREG_CPCEG1;
3225 base->gen_dmac.il = il_v4b;
3226 base->gen_dmac.il_size = ARRAY_SIZE(il_v4b);
3227 base->gen_dmac.init_reg = dma_init_reg_v4b;
3228 base->gen_dmac.init_reg_size = ARRAY_SIZE(dma_init_reg_v4b);
3230 if (base->rev >= 3) {
3231 base->gen_dmac.backup = d40_backup_regs_v4a;
3232 base->gen_dmac.backup_size = BACKUP_REGS_SZ_V4A;
3234 base->gen_dmac.interrupt_en = D40_DREG_PCMIS;
3235 base->gen_dmac.interrupt_clear = D40_DREG_PCICR;
3236 base->gen_dmac.realtime_en = D40_DREG_RSEG1;
3237 base->gen_dmac.realtime_clear = D40_DREG_RCEG1;
3238 base->gen_dmac.high_prio_en = D40_DREG_PSEG1;
3239 base->gen_dmac.high_prio_clear = D40_DREG_PCEG1;
3240 base->gen_dmac.il = il_v4a;
3241 base->gen_dmac.il_size = ARRAY_SIZE(il_v4a);
3242 base->gen_dmac.init_reg = dma_init_reg_v4a;
3243 base->gen_dmac.init_reg_size = ARRAY_SIZE(dma_init_reg_v4a);
3246 base->phy_res = kcalloc(num_phy_chans,
3247 sizeof(*base->phy_res),
3249 if (!base->phy_res)
3252 base->lookup_phy_chans = kcalloc(num_phy_chans,
3253 sizeof(*base->lookup_phy_chans),
3255 if (!base->lookup_phy_chans)
3258 base->lookup_log_chans = kcalloc(num_log_chans,
3259 sizeof(*base->lookup_log_chans),
3261 if (!base->lookup_log_chans)
3264 base->reg_val_backup_chan = kmalloc_array(base->num_phy_chans,
3267 if (!base->reg_val_backup_chan)
3270 base->lcla_pool.alloc_map = kcalloc(num_phy_chans
3272 sizeof(*base->lcla_pool.alloc_map),
3274 if (!base->lcla_pool.alloc_map)
3277 base->regs_interrupt = kmalloc_array(base->gen_dmac.il_size,
3278 sizeof(*base->regs_interrupt),
3280 if (!base->regs_interrupt)
3283 base->desc_slab = kmem_cache_create(D40_NAME, sizeof(struct d40_desc),
3286 if (base->desc_slab == NULL)
3290 return base;
3292 kfree(base->regs_interrupt);
3294 kfree(base->lcla_pool.alloc_map);
3296 kfree(base->reg_val_backup_chan);
3298 kfree(base->lookup_log_chans);
3300 kfree(base->lookup_phy_chans);
3302 kfree(base->phy_res);
3304 kfree(base);
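Lines 3246-3304 are the classic kcalloc chain with a reverse-order unwind; the listing shows the kfree sequence but not the labels, so the ones below are invented:

    base->phy_res = kcalloc(num_phy_chans, sizeof(*base->phy_res),
                            GFP_KERNEL);
    if (!base->phy_res)
            goto free_base;

    base->lookup_phy_chans = kcalloc(num_phy_chans,
                                     sizeof(*base->lookup_phy_chans),
                                     GFP_KERNEL);
    if (!base->lookup_phy_chans)
            goto free_phy_res;

    /* ... each later allocation unwinds everything before it ... */

    free_phy_res:
            kfree(base->phy_res);
    free_base:
            kfree(base);
            return NULL;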
3318 static void __init d40_hw_init(struct d40_base *base)
3326 struct d40_reg_val *dma_init_reg = base->gen_dmac.init_reg;
3327 u32 reg_size = base->gen_dmac.init_reg_size;
3331 base->virtbase + dma_init_reg[i].reg);
3334 for (i = 0; i < base->num_phy_chans; i++) {
3338 if (base->phy_res[base->num_phy_chans - i - 1].allocated_src
3356 writel(prmseo[1], base->virtbase + D40_DREG_PRMSE);
3357 writel(prmseo[0], base->virtbase + D40_DREG_PRMSO);
3358 writel(activeo[1], base->virtbase + D40_DREG_ACTIVE);
3359 writel(activeo[0], base->virtbase + D40_DREG_ACTIVO);
3362 writel(pcmis, base->virtbase + base->gen_dmac.interrupt_en);
3365 writel(pcicr, base->virtbase + base->gen_dmac.interrupt_clear);
3368 base->gen_dmac.init_reg = NULL;
3369 base->gen_dmac.init_reg_size = 0;
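Line 3331 is the tail of the init-table write loop (lines 3326-3327 set it up); completing it assumes d40_reg_val pairs a register offset with a value, which the fragment implies but does not show:

    for (i = 0; i < reg_size; i++)
            writel(dma_init_reg[i].val,
                   base->virtbase + dma_init_reg[i].reg);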
3372 static int __init d40_lcla_allocate(struct d40_base *base)
3374 struct d40_lcla_pool *pool = &base->lcla_pool;
3391 base->lcla_pool.pages = SZ_1K * base->num_phy_chans / PAGE_SIZE;
3395 base->lcla_pool.pages);
3398 d40_err(base->dev, "Failed to allocate %d pages.\n",
3399 base->lcla_pool.pages);
3403 free_pages(page_list[j], base->lcla_pool.pages);
3413 free_pages(page_list[j], base->lcla_pool.pages);
3416 base->lcla_pool.base = (void *)page_list[i];
3422 dev_warn(base->dev,
3424 __func__, base->lcla_pool.pages);
3425 base->lcla_pool.base_unaligned = kmalloc(SZ_1K *
3426 base->num_phy_chans +
3429 if (!base->lcla_pool.base_unaligned) {
3434 base->lcla_pool.base = PTR_ALIGN(base->lcla_pool.base_unaligned,
3438 pool->dma_addr = dma_map_single(base->dev, pool->base,
3439 SZ_1K * base->num_phy_chans,
3441 if (dma_mapping_error(base->dev, pool->dma_addr)) {
3447 writel(virt_to_phys(base->lcla_pool.base),
3448 base->virtbase + D40_DREG_LCLA);
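Lines 3416-3448 show d40_lcla_allocate's two paths: page allocation when it happens to yield the required alignment, otherwise a kmalloc over-allocation aligned by hand, with the pool then mapped for streaming DMA and its address handed to the hardware via D40_DREG_LCLA. A sketch of the fallback path; HYP_LCLA_ALIGN stands in for the driver's alignment constant, which the listing truncates (SZ_256K assumed, matching the "18-bit aligned" note at source line 383):

    #include <linux/sizes.h>

    #define HYP_LCLA_ALIGN  SZ_256K         /* assumed: 2^18 alignment */

    pool->base_unaligned = kmalloc(SZ_1K * base->num_phy_chans +
                                   HYP_LCLA_ALIGN, GFP_KERNEL);
    if (!pool->base_unaligned)
            return -ENOMEM;

    pool->base = PTR_ALIGN(pool->base_unaligned, HYP_LCLA_ALIGN);
    pool->dma_addr = dma_map_single(base->dev, pool->base,
                                    SZ_1K * base->num_phy_chans,
                                    DMA_TO_DEVICE);
    if (dma_mapping_error(base->dev, pool->dma_addr))
            return -ENOMEM;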
3511 struct d40_base *base;
3528 base = d40_hw_detect_init(pdev);
3529 if (!base)
3532 num_reserved_chans = d40_phy_res_init(base);
3534 platform_set_drvdata(pdev, base);
3536 spin_lock_init(&base->interrupt_lock);
3537 spin_lock_init(&base->execmd_lock);
3546 base->lcpa_size = resource_size(res);
3547 base->phy_lcpa = res->start;
3557 val = readl(base->virtbase + D40_DREG_LCPA);
3563 writel(res->start, base->virtbase + D40_DREG_LCPA);
3565 base->lcpa_base = ioremap(res->start, resource_size(res));
3566 if (!base->lcpa_base) {
3572 if (base->plat_data->use_esram_lcla) {
3581 base->lcla_pool.base = ioremap(res->start,
3583 if (!base->lcla_pool.base) {
3588 writel(res->start, base->virtbase + D40_DREG_LCLA);
3591 ret = d40_lcla_allocate(base);
3598 spin_lock_init(&base->lcla_pool.lock);
3600 base->irq = platform_get_irq(pdev, 0);
3601 if (base->irq < 0) {
3602 ret = base->irq;
3606 ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base);
3612 if (base->plat_data->use_esram_lcla) {
3614 base->lcpa_regulator = regulator_get(base->dev, "lcla_esram");
3615 if (IS_ERR(base->lcpa_regulator)) {
3617 ret = PTR_ERR(base->lcpa_regulator);
3618 base->lcpa_regulator = NULL;
3622 ret = regulator_enable(base->lcpa_regulator);
3626 regulator_put(base->lcpa_regulator);
3627 base->lcpa_regulator = NULL;
3632 writel_relaxed(D40_DREG_GCC_ENABLE_ALL, base->virtbase + D40_DREG_GCC);
3634 pm_runtime_irq_safe(base->dev);
3635 pm_runtime_set_autosuspend_delay(base->dev, DMA40_AUTOSUSPEND_DELAY);
3636 pm_runtime_use_autosuspend(base->dev);
3637 pm_runtime_mark_last_busy(base->dev);
3638 pm_runtime_set_active(base->dev);
3639 pm_runtime_enable(base->dev);
3641 ret = d40_dmaengine_init(base, num_reserved_chans);
3645 ret = dma_set_max_seg_size(base->dev, STEDMA40_MAX_SEG_SIZE);
3651 d40_hw_init(base);
3660 dev_info(base->dev, "initialized\n");
3663 kmem_cache_destroy(base->desc_slab);
3664 if (base->virtbase)
3665 iounmap(base->virtbase);
3667 if (base->lcla_pool.base && base->plat_data->use_esram_lcla) {
3668 iounmap(base->lcla_pool.base);
3669 base->lcla_pool.base = NULL;
3672 if (base->lcla_pool.dma_addr)
3673 dma_unmap_single(base->dev, base->lcla_pool.dma_addr,
3674 SZ_1K * base->num_phy_chans,
3677 if (!base->lcla_pool.base_unaligned && base->lcla_pool.base)
3678 free_pages((unsigned long)base->lcla_pool.base,
3679 base->lcla_pool.pages);
3681 kfree(base->lcla_pool.base_unaligned);
3683 if (base->lcpa_base)
3684 iounmap(base->lcpa_base);
3686 if (base->phy_lcpa)
3687 release_mem_region(base->phy_lcpa,
3688 base->lcpa_size);
3689 if (base->phy_start)
3690 release_mem_region(base->phy_start,
3691 base->phy_size);
3692 if (base->clk) {
3693 clk_disable_unprepare(base->clk);
3694 clk_put(base->clk);
3697 if (base->lcpa_regulator) {
3698 regulator_disable(base->lcpa_regulator);
3699 regulator_put(base->lcpa_regulator);
3701 pm_runtime_disable(base->dev);
3703 kfree(base->lcla_pool.alloc_map);
3704 kfree(base->lookup_log_chans);
3705 kfree(base->lookup_phy_chans);
3706 kfree(base->phy_res);
3707 kfree(base);