Lines matching references to `od` (the driver's struct omap_dmadev pointer) in the Linux OMAP DMA engine driver. Each entry gives the source line number followed by the matching line.
253 struct omap_dmadev *od = to_omap_dma_dev(vd->tx.chan->device);
258 dma_pool_free(od->desc_pool, d->sg[i].t2_desc,
351 static void omap_dma_glbl_write(struct omap_dmadev *od, unsigned reg, unsigned val)
353 const struct omap_dma_reg *r = od->reg_map + reg;
357 omap_dma_write(val, r->type, od->base + r->offset);
360 static unsigned omap_dma_glbl_read(struct omap_dmadev *od, unsigned reg)
362 const struct omap_dma_reg *r = od->reg_map + reg;
366 return omap_dma_read(r->type, od->base + r->offset);
401 static void omap_dma_clear_lch(struct omap_dmadev *od, int lch)
406 c = od->lch_map[lch];
410 for (i = CSDP; i <= od->cfg->lch_end; i++)
414 static void omap_dma_assign(struct omap_dmadev *od, struct omap_chan *c,
417 c->channel_base = od->base + od->plat->channel_stride * lch;
419 od->lch_map[lch] = c;
424 struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
427 if (__dma_omap15xx(od->plat->dma_attr))
449 } else if (od->ll123_supported) {
487 struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
496 if (od->plat->errata & DMA_ERRATA_i541 && val & CCR_TRIGGER_SRC) {
499 sysconfig = omap_dma_glbl_read(od, OCP_SYSCONFIG);
502 omap_dma_glbl_write(od, OCP_SYSCONFIG, val);
511 omap_dma_glbl_write(od, OCP_SYSCONFIG, sysconfig);
525 if (!__dma_omap15xx(od->plat->dma_attr) && c->cyclic) {
633 struct omap_dmadev *od = devid;
636 spin_lock(&od->irq_lock);
638 status = omap_dma_glbl_read(od, IRQSTATUS_L1);
639 status &= od->irq_enable_mask;
641 spin_unlock(&od->irq_lock);
653 c = od->lch_map[channel];
656 dev_err(od->ddev.dev, "invalid channel %u\n", channel);
661 omap_dma_glbl_write(od, IRQSTATUS_L1, mask);
666 spin_unlock(&od->irq_lock);
671 static int omap_dma_get_lch(struct omap_dmadev *od, int *lch)
675 mutex_lock(&od->lch_lock);
676 channel = find_first_zero_bit(od->lch_bitmap, od->lch_count);
677 if (channel >= od->lch_count)
679 set_bit(channel, od->lch_bitmap);
680 mutex_unlock(&od->lch_lock);
682 omap_dma_clear_lch(od, channel);
688 mutex_unlock(&od->lch_lock);
694 static void omap_dma_put_lch(struct omap_dmadev *od, int lch)
696 omap_dma_clear_lch(od, lch);
697 mutex_lock(&od->lch_lock);
698 clear_bit(lch, od->lch_bitmap);
699 mutex_unlock(&od->lch_lock);
702 static inline bool omap_dma_legacy(struct omap_dmadev *od)
704 return IS_ENABLED(CONFIG_ARCH_OMAP1) && od->legacy;
709 struct omap_dmadev *od = to_omap_dma_dev(chan->device);
711 struct device *dev = od->ddev.dev;
714 if (omap_dma_legacy(od)) {
718 ret = omap_dma_get_lch(od, &c->dma_ch);
724 omap_dma_assign(od, c, c->dma_ch);
726 if (!omap_dma_legacy(od)) {
729 spin_lock_irq(&od->irq_lock);
731 omap_dma_glbl_write(od, IRQSTATUS_L1, val);
732 od->irq_enable_mask |= val;
733 omap_dma_glbl_write(od, IRQENABLE_L1, od->irq_enable_mask);
735 val = omap_dma_glbl_read(od, IRQENABLE_L0);
737 omap_dma_glbl_write(od, IRQENABLE_L0, val);
738 spin_unlock_irq(&od->irq_lock);
743 if (__dma_omap16xx(od->plat->dma_attr)) {
754 if (od->plat->errata & DMA_ERRATA_IFRAME_BUFFERING)
762 struct omap_dmadev *od = to_omap_dma_dev(chan->device);
765 if (!omap_dma_legacy(od)) {
766 spin_lock_irq(&od->irq_lock);
767 od->irq_enable_mask &= ~BIT(c->dma_ch);
768 omap_dma_glbl_write(od, IRQENABLE_L1, od->irq_enable_mask);
769 spin_unlock_irq(&od->irq_lock);
773 od->lch_map[c->dma_ch] = NULL;
776 if (omap_dma_legacy(od))
779 omap_dma_put_lch(od, c->dma_ch);
781 dev_dbg(od->ddev.dev, "freeing channel %u used for %u\n", c->dma_ch,
825 struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
829 if (val == 0 && od->plat->errata & DMA_ERRATA_3_3)
837 struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
840 if (__dma_omap15xx(od->plat->dma_attr)) {
863 struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
866 if (__dma_omap15xx(od->plat->dma_attr)) {
963 struct omap_dmadev *od = to_omap_dma_dev(chan->device);
1083 if (od->plat->errata & DMA_ERRATA_PARALLEL_CHANNELS)
1099 d->using_ll = od->ll123_supported;
1109 osg->t2_desc = dma_pool_alloc(od->desc_pool, GFP_ATOMIC,
1131 dma_pool_free(od->desc_pool, osg->t2_desc,
1145 struct omap_dmadev *od = to_omap_dma_dev(chan->device);
1232 if (__dma_omap15xx(od->plat->dma_attr))
1429 struct omap_dmadev *od = to_omap_dma_dev(chan->device);
1434 spin_lock_irqsave(&od->irq_lock, flags);
1474 spin_unlock_irqrestore(&od->irq_lock, flags);
1482 struct omap_dmadev *od = to_omap_dma_dev(chan->device);
1486 spin_lock_irqsave(&od->irq_lock, flags);
1498 spin_unlock_irqrestore(&od->irq_lock, flags);
1503 static int omap_dma_chan_init(struct omap_dmadev *od)
1511 c->reg_map = od->reg_map;
1513 vchan_init(&c->vc, &od->ddev);
1518 static void omap_dma_free(struct omap_dmadev *od)
1520 while (!list_empty(&od->ddev.channels)) {
1521 struct omap_chan *c = list_first_entry(&od->ddev.channels,
1531 static bool omap_dma_busy(struct omap_dmadev *od)
1537 lch = find_next_bit(od->lch_bitmap, od->lch_count, lch + 1);
1538 if (lch >= od->lch_count)
1540 c = od->lch_map[lch];
1554 struct omap_dmadev *od;
1556 od = container_of(nb, struct omap_dmadev, nb);
1560 if (omap_dma_busy(od))
1576 static void omap_dma_context_save(struct omap_dmadev *od)
1578 od->context.irqenable_l0 = omap_dma_glbl_read(od, IRQENABLE_L0);
1579 od->context.irqenable_l1 = omap_dma_glbl_read(od, IRQENABLE_L1);
1580 od->context.ocp_sysconfig = omap_dma_glbl_read(od, OCP_SYSCONFIG);
1581 od->context.gcr = omap_dma_glbl_read(od, GCR);
1584 static void omap_dma_context_restore(struct omap_dmadev *od)
1588 omap_dma_glbl_write(od, GCR, od->context.gcr);
1589 omap_dma_glbl_write(od, OCP_SYSCONFIG, od->context.ocp_sysconfig);
1590 omap_dma_glbl_write(od, IRQENABLE_L0, od->context.irqenable_l0);
1591 omap_dma_glbl_write(od, IRQENABLE_L1, od->context.irqenable_l1);
1594 if (od->plat->errata & DMA_ROMCODE_BUG)
1595 omap_dma_glbl_write(od, IRQSTATUS_L0, 0);
1598 for (i = 0; i < od->lch_count; i++)
1599 omap_dma_clear_lch(od, i);
1606 struct omap_dmadev *od;
1608 od = container_of(nb, struct omap_dmadev, nb);
1612 if (omap_dma_busy(od))
1614 omap_dma_context_save(od);
1619 omap_dma_context_restore(od);
1626 static void omap_dma_init_gcr(struct omap_dmadev *od, int arb_rate,
1632 if (!od->cfg->rw_priority)
1644 omap_dma_glbl_write(od, GCR, val);
1660 struct omap_dmadev *od;
1664 od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL);
1665 if (!od)
1668 od->base = devm_platform_ioremap_resource(pdev, 0);
1669 if (IS_ERR(od->base))
1670 return PTR_ERR(od->base);
1674 od->cfg = conf;
1675 od->plat = dev_get_platdata(&pdev->dev);
1676 if (!od->plat) {
1681 od->cfg = &default_cfg;
1683 od->plat = omap_get_plat_info();
1684 if (!od->plat)
1690 od->reg_map = od->plat->reg_map;
1692 dma_cap_set(DMA_SLAVE, od->ddev.cap_mask);
1693 dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask);
1694 dma_cap_set(DMA_MEMCPY, od->ddev.cap_mask);
1695 dma_cap_set(DMA_INTERLEAVE, od->ddev.cap_mask);
1696 od->ddev.device_alloc_chan_resources = omap_dma_alloc_chan_resources;
1697 od->ddev.device_free_chan_resources = omap_dma_free_chan_resources;
1698 od->ddev.device_tx_status = omap_dma_tx_status;
1699 od->ddev.device_issue_pending = omap_dma_issue_pending;
1700 od->ddev.device_prep_slave_sg = omap_dma_prep_slave_sg;
1701 od->ddev.device_prep_dma_cyclic = omap_dma_prep_dma_cyclic;
1702 od->ddev.device_prep_dma_memcpy = omap_dma_prep_dma_memcpy;
1703 od->ddev.device_prep_interleaved_dma = omap_dma_prep_dma_interleaved;
1704 od->ddev.device_config = omap_dma_slave_config;
1705 od->ddev.device_pause = omap_dma_pause;
1706 od->ddev.device_resume = omap_dma_resume;
1707 od->ddev.device_terminate_all = omap_dma_terminate_all;
1708 od->ddev.device_synchronize = omap_dma_synchronize;
1709 od->ddev.src_addr_widths = OMAP_DMA_BUSWIDTHS;
1710 od->ddev.dst_addr_widths = OMAP_DMA_BUSWIDTHS;
1711 od->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
1712 if (__dma_omap15xx(od->plat->dma_attr))
1713 od->ddev.residue_granularity =
1716 od->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
1717 od->ddev.max_burst = SZ_16M - 1; /* CCEN: 24bit unsigned */
1718 od->ddev.dev = &pdev->dev;
1719 INIT_LIST_HEAD(&od->ddev.channels);
1720 mutex_init(&od->lch_lock);
1721 spin_lock_init(&od->lock);
1722 spin_lock_init(&od->irq_lock);
1725 od->dma_requests = OMAP_SDMA_REQUESTS;
1728 &od->dma_requests)) {
1736 od->lch_count = od->plat->dma_attr->lch_count;
1737 if (unlikely(!od->lch_count))
1738 od->lch_count = OMAP_SDMA_CHANNELS;
1740 &od->lch_count)) {
1744 od->lch_count = OMAP_SDMA_CHANNELS;
1753 bitmap_from_arr32(od->lch_bitmap, &val, od->lch_count);
1755 if (od->plat->dma_attr->dev_caps & HS_CHANNELS_RESERVED)
1756 bitmap_set(od->lch_bitmap, 0, 2);
1758 od->lch_map = devm_kcalloc(&pdev->dev, od->lch_count,
1759 sizeof(*od->lch_map),
1761 if (!od->lch_map)
1764 for (i = 0; i < od->dma_requests; i++) {
1765 rc = omap_dma_chan_init(od);
1767 omap_dma_free(od);
1775 od->legacy = true;
1778 od->irq_enable_mask = 0;
1779 omap_dma_glbl_write(od, IRQENABLE_L1, 0);
1782 IRQF_SHARED, "omap-dma-engine", od);
1784 omap_dma_free(od);
1789 if (omap_dma_glbl_read(od, CAPS_0) & CAPS_0_SUPPORT_LL123)
1790 od->ll123_supported = true;
1792 od->ddev.filter.map = od->plat->slave_map;
1793 od->ddev.filter.mapcnt = od->plat->slavecnt;
1794 od->ddev.filter.fn = omap_dma_filter_fn;
1796 if (od->ll123_supported) {
1797 od->desc_pool = dma_pool_create(dev_name(&pdev->dev),
1801 if (!od->desc_pool) {
1804 od->ll123_supported = false;
1808 rc = dma_async_device_register(&od->ddev);
1812 omap_dma_free(od);
1816 platform_set_drvdata(pdev, od);
1819 omap_dma_info.dma_cap = od->ddev.cap_mask;
1826 dma_async_device_unregister(&od->ddev);
1827 omap_dma_free(od);
1831 omap_dma_init_gcr(od, DMA_DEFAULT_ARB_RATE, DMA_DEFAULT_FIFO_DEPTH, 0);
1833 if (od->cfg->needs_busy_check) {
1834 od->nb.notifier_call = omap_dma_busy_notifier;
1835 cpu_pm_register_notifier(&od->nb);
1836 } else if (od->cfg->may_lose_context) {
1837 od->nb.notifier_call = omap_dma_context_notifier;
1838 cpu_pm_register_notifier(&od->nb);
1842 od->ll123_supported ? " (LinkedList1/2/3 supported)" : "");
1849 struct omap_dmadev *od = platform_get_drvdata(pdev);
1852 if (od->cfg->may_lose_context)
1853 cpu_pm_unregister_notifier(&od->nb);
1859 devm_free_irq(&pdev->dev, irq, od);
1861 dma_async_device_unregister(&od->ddev);
1863 if (!omap_dma_legacy(od)) {
1865 omap_dma_glbl_write(od, IRQENABLE_L0, 0);
1868 if (od->ll123_supported)
1869 dma_pool_destroy(od->desc_pool);
1871 omap_dma_free(od);
1931 struct omap_dmadev *od = to_omap_dma_dev(chan->device);
1935 if (req <= od->dma_requests) {
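Nearly all of the matches above reach the device state through the to_omap_dma_dev() helper (for example the entries at source lines 253, 424, and 709), but that helper is not itself among the matched lines. The sketch below shows the container_of pattern it is assumed to follow; the field list of struct omap_dmadev is illustrative only, not the driver's full definition.

/*
 * Minimal sketch, assuming the conventional dmaengine layout: the driver
 * embeds a struct dma_device (ddev) inside its private struct omap_dmadev,
 * so any struct dma_device pointer handed back by the framework can be
 * converted back with container_of().  Private fields are elided.
 */
#include <linux/dmaengine.h>
#include <linux/kernel.h>

struct omap_dmadev {
	struct dma_device ddev;
	/* ... register map, lch bitmap, locks, etc. elided ... */
};

static inline struct omap_dmadev *to_omap_dma_dev(struct dma_device *d)
{
	return container_of(d, struct omap_dmadev, ddev);
}

Where the listing shows to_omap_dma_dev(chan->device) or to_omap_dma_dev(c->vc.chan.device), this is the same conversion applied to the dma_device pointer stored in the channel.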