Lines matching defs:sdmac

314  * @sdmac:		sdma_channel pointer
326 struct sdma_channel *sdmac;
623 static int sdma_config_ownership(struct sdma_channel *sdmac,
626 struct sdma_engine *sdma = sdmac->sdma;
627 int channel = sdmac->channel;
722 static void sdma_event_enable(struct sdma_channel *sdmac, unsigned int event)
724 struct sdma_engine *sdma = sdmac->sdma;
725 int channel = sdmac->channel;
734 static void sdma_event_disable(struct sdma_channel *sdmac, unsigned int event)
736 struct sdma_engine *sdma = sdmac->sdma;
737 int channel = sdmac->channel;
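
The pair of helpers above (lines 722-737) come from the i.MX SDMA dmaengine driver (drivers/dma/imx-sdma.c) and route a shared SDMA request event to a channel. A minimal sketch of the read-modify-write they perform, assuming the mainline chnenbl_ofs() helper that locates the per-event channel-enable register (not part of this listing):

    static void sdma_event_enable(struct sdma_channel *sdmac, unsigned int event)
    {
            struct sdma_engine *sdma = sdmac->sdma;
            int channel = sdmac->channel;
            unsigned long val;
            u32 chnenbl = chnenbl_ofs(sdma, event);  /* CHNENBLn offset */

            /* set this channel's bit in the event's channel-enable mask */
            val = readl_relaxed(sdma->regs + chnenbl);
            __set_bit(channel, &val);
            writel_relaxed(val, sdma->regs + chnenbl);
    }

sdma_event_disable() is the mirror image with __clear_bit().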
751 static void sdma_start_desc(struct sdma_channel *sdmac)
753 struct virt_dma_desc *vd = vchan_next_desc(&sdmac->vc);
755 struct sdma_engine *sdma = sdmac->sdma;
756 int channel = sdmac->channel;
759 sdmac->desc = NULL;
762 sdmac->desc = desc = to_sdma_desc(&vd->tx);
768 sdma_enable_channel(sdma, sdmac->channel);
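
sdma_start_desc() (lines 751-768) is the "kick" half of the virt-dma pattern: dequeue the next issued descriptor, make it the channel's active one, point the channel control block at its buffer-descriptor (BD) ring, and start the channel. Reconstructed around the lines above (the channel_control/bd_phys names are assumed from the mainline layout):

    static void sdma_start_desc(struct sdma_channel *sdmac)
    {
            struct virt_dma_desc *vd = vchan_next_desc(&sdmac->vc);
            struct sdma_desc *desc;
            struct sdma_engine *sdma = sdmac->sdma;
            int channel = sdmac->channel;

            if (!vd) {
                    sdmac->desc = NULL;     /* queue empty: channel idles */
                    return;
            }
            sdmac->desc = desc = to_sdma_desc(&vd->tx);
            list_del(&vd->node);            /* off the vc desc_issued list */

            /* hand the BD ring's DMA address to the SDMA channel context */
            sdma->channel_control[channel].base_bd_ptr = desc->bd_phys;
            sdma->channel_control[channel].current_bd_ptr = desc->bd_phys;
            sdma_enable_channel(sdma, sdmac->channel);
    }

Note that callers must hold sdmac->vc.lock.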
771 static void sdma_update_channel_loop(struct sdma_channel *sdmac)
775 enum dma_status old_status = sdmac->status;
781 while (sdmac->desc) {
782 struct sdma_desc *desc = sdmac->desc;
791 sdmac->status = DMA_ERROR;
812 spin_unlock(&sdmac->vc.lock);
814 spin_lock(&sdmac->vc.lock);
817 sdmac->status = old_status;
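
The unlock/lock pair at lines 812/814 is the notable part of sdma_update_channel_loop(): cyclic period callbacks are invoked straight from the interrupt path, so vc.lock is dropped around the callback and re-taken before sdmac->desc is touched again. Roughly, per recycled BD:

    /* vc.lock held on entry; desc = sdmac->desc */
    desc->chn_real_count = bd->mode.count;   /* bytes in this period */
    bd->mode.status |= BD_DONE;              /* hand the BD back to the engine */
    bd->mode.count = desc->period_len;

    spin_unlock(&sdmac->vc.lock);
    dmaengine_desc_get_callback_invoke(&desc->vd.tx, NULL);
    spin_lock(&sdmac->vc.lock);

    if (error)
            sdmac->status = old_status;      /* error seen by callback; restore (line 817) */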
823 struct sdma_channel *sdmac = (struct sdma_channel *) data;
827 sdmac->desc->chn_real_count = 0;
832 for (i = 0; i < sdmac->desc->num_bd; i++) {
833 bd = &sdmac->desc->bd[i];
837 sdmac->desc->chn_real_count += bd->mode.count;
841 sdmac->status = DMA_ERROR;
843 sdmac->status = DMA_COMPLETE;
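
mxc_sdma_handle_channel_normal() (lines 823-843) computes completion status for a non-cyclic transfer by walking every BD: a descriptor still flagged BD_DONE (never consumed) or flagged BD_RROR fails the transfer, and bd->mode.count is summed into chn_real_count for later residue reporting. Filled out from the lines above:

    static void mxc_sdma_handle_channel_normal(struct sdma_channel *data)
    {
            struct sdma_channel *sdmac = (struct sdma_channel *) data;
            struct sdma_buffer_descriptor *bd;
            int i, error = 0;

            sdmac->desc->chn_real_count = 0;
            /*
             * non-loop mode: check all BDs for errors
             * and accumulate the transferred byte count
             */
            for (i = 0; i < sdmac->desc->num_bd; i++) {
                    bd = &sdmac->desc->bd[i];
                    if (bd->mode.status & (BD_DONE | BD_RROR))
                            error = -EIO;
                    sdmac->desc->chn_real_count += bd->mode.count;
            }

            if (error)
                    sdmac->status = DMA_ERROR;
            else
                    sdmac->status = DMA_COMPLETE;
    }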
858 struct sdma_channel *sdmac = &sdma->channel[channel];
861 spin_lock(&sdmac->vc.lock);
862 desc = sdmac->desc;
864 if (sdmac->flags & IMX_DMA_SG_LOOP) {
865 sdma_update_channel_loop(sdmac);
867 mxc_sdma_handle_channel_normal(sdmac);
869 sdma_start_desc(sdmac);
873 spin_unlock(&sdmac->vc.lock);
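
Lines 858-873 are the per-channel body of the SDMA interrupt handler. Everything runs under vc.lock; cyclic channels (IMX_DMA_SG_LOOP) are advanced in place, while one-shot transfers complete their cookie and immediately start the next queued descriptor:

    struct sdma_channel *sdmac = &sdma->channel[channel];
    struct sdma_desc *desc;

    spin_lock(&sdmac->vc.lock);
    desc = sdmac->desc;
    if (desc) {
            if (sdmac->flags & IMX_DMA_SG_LOOP) {
                    /* cyclic: recycle BDs, fire period callbacks */
                    sdma_update_channel_loop(sdmac);
            } else {
                    /* one-shot: finish this cookie, kick the next desc */
                    mxc_sdma_handle_channel_normal(sdmac);
                    vchan_cookie_complete(&desc->vd);
                    sdma_start_desc(sdmac);
            }
    }
    spin_unlock(&sdmac->vc.lock);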
883 static void sdma_get_pc(struct sdma_channel *sdmac,
886 struct sdma_engine *sdma = sdmac->sdma;
894 sdmac->pc_from_device = 0;
895 sdmac->pc_to_device = 0;
896 sdmac->device_to_device = 0;
897 sdmac->pc_to_pc = 0;
971 sdmac->pc_from_device = per_2_emi;
972 sdmac->pc_to_device = emi_2_per;
973 sdmac->device_to_device = per_2_per;
974 sdmac->pc_to_pc = emi_2_emi;
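
sdma_get_pc() (lines 883-974) is a long switch over the peripheral type that resolves up to four script entry points and caches them on the channel; the assignments at lines 971-974 are its tail. Abbreviated shape (script_addrs field names follow the mainline sdma_script_start_addrs):

    int emi_2_emi = 0, per_2_emi = 0, emi_2_per = 0, per_2_per = 0;

    switch (peripheral_type) {
    case IMX_DMATYPE_MEMORY:
            emi_2_emi = sdma->script_addrs->ap_2_ap_addr;
            break;
    case IMX_DMATYPE_CSPI:
            per_2_emi = sdma->script_addrs->app_2_mcu_addr;
            emi_2_per = sdma->script_addrs->mcu_2_app_addr;
            break;
    /* ... one case per IMX_DMATYPE_* ... */
    }

    sdmac->pc_from_device = per_2_emi;
    sdmac->pc_to_device = emi_2_per;
    sdmac->device_to_device = per_2_per;
    sdmac->pc_to_pc = emi_2_emi;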
977 static int sdma_load_context(struct sdma_channel *sdmac)
979 struct sdma_engine *sdma = sdmac->sdma;
980 int channel = sdmac->channel;
987 if (sdmac->direction == DMA_DEV_TO_MEM)
988 load_address = sdmac->pc_from_device;
989 else if (sdmac->direction == DMA_DEV_TO_DEV)
990 load_address = sdmac->device_to_device;
991 else if (sdmac->direction == DMA_MEM_TO_MEM)
992 load_address = sdmac->pc_to_pc;
994 load_address = sdmac->pc_to_device;
1000 dev_dbg(sdma->dev, "wml = 0x%08x\n", (u32)sdmac->watermark_level);
1001 dev_dbg(sdma->dev, "shp_addr = 0x%08x\n", sdmac->shp_addr);
1002 dev_dbg(sdma->dev, "per_addr = 0x%08x\n", sdmac->per_addr);
1003 dev_dbg(sdma->dev, "event_mask0 = 0x%08x\n", (u32)sdmac->event_mask[0]);
1004 dev_dbg(sdma->dev, "event_mask1 = 0x%08x\n", (u32)sdmac->event_mask[1]);
1014 context->gReg[0] = sdmac->event_mask[1];
1015 context->gReg[1] = sdmac->event_mask[0];
1016 context->gReg[2] = sdmac->per_addr;
1017 context->gReg[6] = sdmac->shp_addr;
1018 context->gReg[7] = sdmac->watermark_level;
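
sdma_load_context() (lines 977-1018) first selects the script address by transfer direction (lines 987-994), then seeds the context image: event masks into r0/r1, peripheral addresses into r2/r6 and the watermark into r7 (lines 1014-1018). The image is then written into SDMA internal RAM through control channel 0; a sketch of that step, assuming the mainline bd0/context_phys bookkeeping and the C0_SETDM channel-0 command:

    /* DMA the prepared context image into SDMA RAM via channel 0 */
    bd0->mode.command = C0_SETDM;
    bd0->mode.status = BD_DONE | BD_WRAP | BD_EXTD;
    bd0->mode.count = sizeof(*context) / 4;   /* size in 32-bit words */
    bd0->buffer_addr = sdma->context_phys;
    bd0->ext_buffer_addr = 2048 + (sizeof(*context) / 4) * channel;
    ret = sdma_run_channel0(sdma);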
1039 struct sdma_channel *sdmac = to_sdma_chan(chan);
1040 struct sdma_engine *sdma = sdmac->sdma;
1041 int channel = sdmac->channel;
1044 sdmac->status = DMA_ERROR;
1050 struct sdma_channel *sdmac = container_of(work, struct sdma_channel,
1063 spin_lock_irqsave(&sdmac->vc.lock, flags);
1064 vchan_get_all_descriptors(&sdmac->vc, &head);
1065 spin_unlock_irqrestore(&sdmac->vc.lock, flags);
1066 vchan_dma_desc_free_list(&sdmac->vc, &head);
1071 struct sdma_channel *sdmac = to_sdma_chan(chan);
1074 spin_lock_irqsave(&sdmac->vc.lock, flags);
1078 if (sdmac->desc) {
1079 vchan_terminate_vdesc(&sdmac->desc->vd);
1080 sdmac->desc = NULL;
1081 schedule_work(&sdmac->terminate_worker);
1084 spin_unlock_irqrestore(&sdmac->vc.lock, flags);
1091 struct sdma_channel *sdmac = to_sdma_chan(chan);
1093 vchan_synchronize(&sdmac->vc);
1095 flush_work(&sdmac->terminate_worker);
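
Lines 1050-1095 implement the split teardown dmaengine expects: device_terminate_all() only detaches the active descriptor (vchan_terminate_vdesc(), line 1079) and defers the freeing to terminate_worker, while device_synchronize() (lines 1091-1095) waits for the virt-dma tasklet and that worker. From a client's perspective the pairing looks like this (hypothetical consumer code, not from this listing):

    /* stop the channel; safe where sleeping is not allowed */
    dmaengine_terminate_async(chan);

    /* later, in process context, before freeing client buffers: */
    dmaengine_synchronize(chan);   /* flushes callbacks + terminate_worker */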
1098 static void sdma_set_watermarklevel_for_p2p(struct sdma_channel *sdmac)
1100 struct sdma_engine *sdma = sdmac->sdma;
1102 int lwml = sdmac->watermark_level & SDMA_WATERMARK_LEVEL_LWML;
1103 int hwml = (sdmac->watermark_level & SDMA_WATERMARK_LEVEL_HWML) >> 16;
1105 set_bit(sdmac->event_id0 % 32, &sdmac->event_mask[1]);
1106 set_bit(sdmac->event_id1 % 32, &sdmac->event_mask[0]);
1108 if (sdmac->event_id0 > 31)
1109 sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_LWE;
1111 if (sdmac->event_id1 > 31)
1112 sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_HWE;
1120 sdmac->watermark_level &= ~(SDMA_WATERMARK_LEVEL_LWML |
1122 sdmac->watermark_level |= hwml;
1123 sdmac->watermark_level |= lwml << 16;
1124 swap(sdmac->event_mask[0], sdmac->event_mask[1]);
1127 if (sdmac->per_address2 >= sdma->spba_start_addr &&
1128 sdmac->per_address2 <= sdma->spba_end_addr)
1129 sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_SP;
1131 if (sdmac->per_address >= sdma->spba_start_addr &&
1132 sdmac->per_address <= sdma->spba_end_addr)
1133 sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_DP;
1135 sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_CONT;
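
sdma_set_watermarklevel_for_p2p() (lines 1098-1135) packs both watermarks for the device-to-device scripts into one field: low watermark in the low byte, high watermark in bits 23:16, plus event and SPBA flag bits; lines 1120-1124 swap the two halves (and the event masks) when the pair arrives inverted. The layout behind the masks, as defined in the mainline driver:

    #define SDMA_WATERMARK_LEVEL_LWML  0xFF          /* bits  7:0  low watermark  */
    #define SDMA_WATERMARK_LEVEL_SP    BIT(11)       /* source is on SPBA bus     */
    #define SDMA_WATERMARK_LEVEL_DP    BIT(12)       /* destination is on SPBA    */
    #define SDMA_WATERMARK_LEVEL_HWML  (0xFF << 16)  /* bits 23:16 high watermark */
    #define SDMA_WATERMARK_LEVEL_LWE   BIT(28)       /* low  event id > 31        */
    #define SDMA_WATERMARK_LEVEL_HWE   BIT(29)       /* high event id > 31        */
    #define SDMA_WATERMARK_LEVEL_CONT  BIT(31)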
1140 struct sdma_channel *sdmac = to_sdma_chan(chan);
1144 sdmac->event_mask[0] = 0;
1145 sdmac->event_mask[1] = 0;
1146 sdmac->shp_addr = 0;
1147 sdmac->per_addr = 0;
1149 switch (sdmac->peripheral_type) {
1151 sdma_config_ownership(sdmac, false, true, true);
1154 sdma_config_ownership(sdmac, false, true, false);
1157 sdma_config_ownership(sdmac, true, true, false);
1161 sdma_get_pc(sdmac, sdmac->peripheral_type);
1163 if ((sdmac->peripheral_type != IMX_DMATYPE_MEMORY) &&
1164 (sdmac->peripheral_type != IMX_DMATYPE_DSP)) {
1166 if (sdmac->event_id1) {
1167 if (sdmac->peripheral_type == IMX_DMATYPE_ASRC_SP ||
1168 sdmac->peripheral_type == IMX_DMATYPE_ASRC)
1169 sdma_set_watermarklevel_for_p2p(sdmac);
1171 __set_bit(sdmac->event_id0, sdmac->event_mask);
1174 sdmac->shp_addr = sdmac->per_address;
1175 sdmac->per_addr = sdmac->per_address2;
1177 sdmac->watermark_level = 0; /* FIXME: M3_BASE_ADDRESS */
1183 static int sdma_set_channel_priority(struct sdma_channel *sdmac,
1186 struct sdma_engine *sdma = sdmac->sdma;
1187 int channel = sdmac->channel;
1226 desc->bd = dma_alloc_coherent(desc->sdmac->sdma->dev, bd_size,
1240 dma_free_coherent(desc->sdmac->sdma->dev, bd_size, desc->bd,
1254 struct sdma_channel *sdmac = to_sdma_chan(chan);
1269 dev_dbg(sdmac->sdma->dev, "MEMCPY in case?\n");
1276 sdma_get_pc(sdmac, IMX_DMATYPE_MEMORY);
1292 sdmac->peripheral_type = data->peripheral_type;
1293 sdmac->event_id0 = data->dma_request;
1294 sdmac->event_id1 = data->dma_request2;
1296 ret = clk_enable(sdmac->sdma->clk_ipg);
1299 ret = clk_enable(sdmac->sdma->clk_ahb);
1303 ret = sdma_set_channel_priority(sdmac, prio);
1310 clk_disable(sdmac->sdma->clk_ahb);
1312 clk_disable(sdmac->sdma->clk_ipg);
1318 struct sdma_channel *sdmac = to_sdma_chan(chan);
1319 struct sdma_engine *sdma = sdmac->sdma;
1325 sdma_event_disable(sdmac, sdmac->event_id0);
1326 if (sdmac->event_id1)
1327 sdma_event_disable(sdmac, sdmac->event_id1);
1329 sdmac->event_id0 = 0;
1330 sdmac->event_id1 = 0;
1332 sdma_set_channel_priority(sdmac, 0);
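
The clk_enable() calls and the two disable labels at lines 1296-1312 are sdma_alloc_chan_resources()'s goto unwind; sdma_free_chan_resources() (lines 1318-1332) undoes the same setup. Reassembled:

    ret = clk_enable(sdmac->sdma->clk_ipg);
    if (ret)
            return ret;

    ret = clk_enable(sdmac->sdma->clk_ahb);
    if (ret)
            goto disable_clk_ipg;

    ret = sdma_set_channel_priority(sdmac, prio);
    if (ret)
            goto disable_clk_ahb;

    return 0;

    disable_clk_ahb:
            clk_disable(sdmac->sdma->clk_ahb);
    disable_clk_ipg:
            clk_disable(sdmac->sdma->clk_ipg);
            return ret;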
1338 static struct sdma_desc *sdma_transfer_init(struct sdma_channel *sdmac,
1347 sdmac->status = DMA_IN_PROGRESS;
1348 sdmac->direction = direction;
1349 sdmac->flags = 0;
1355 desc->sdmac = sdmac;
1363 sdma_config_ownership(sdmac, false, true, false);
1365 if (sdma_load_context(sdmac))
1382 struct sdma_channel *sdmac = to_sdma_chan(chan);
1383 struct sdma_engine *sdma = sdmac->sdma;
1384 int channel = sdmac->channel;
1396 desc = sdma_transfer_init(sdmac, DMA_MEM_TO_MEM,
1431 return vchan_tx_prep(&sdmac->vc, &desc->vd, flags);
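
sdma_prep_memcpy() (lines 1382-1431) chops the copy into BDs of at most SDMA_BD_MAX_CNT bytes; for the memory-to-memory script the source goes in buffer_addr and the destination in ext_buffer_addr. The inner loop, sketched with the mainline BD flag names:

    do {
            count = min_t(size_t, len, SDMA_BD_MAX_CNT);
            bd = &desc->bd[i];
            bd->buffer_addr = dma_src;         /* copy source */
            bd->ext_buffer_addr = dma_dst;     /* copy destination */
            bd->mode.count = count;
            bd->mode.command = 0;              /* 32-bit accesses */

            dma_src += count;
            dma_dst += count;
            len -= count;
            i++;

            param = BD_DONE | BD_EXTD | BD_CONT;
            if (!len) {                        /* last BD: raise the IRQ */
                    param |= BD_INTR;
                    param &= ~BD_CONT;
            }
            bd->mode.status = param;
    } while (len);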
1439 struct sdma_channel *sdmac = to_sdma_chan(chan);
1440 struct sdma_engine *sdma = sdmac->sdma;
1442 int channel = sdmac->channel;
1446 sdma_config_write(chan, &sdmac->slave_config, direction);
1448 desc = sdma_transfer_init(sdmac, direction, sg_len);
1472 if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES)
1475 switch (sdmac->word_size) {
1509 return vchan_tx_prep(&sdmac->vc, &desc->vd, flags);
1514 sdmac->status = DMA_ERROR;
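
The word-size checks at lines 1472-1475 sit in sdma_prep_slave_sg()'s per-segment loop: bd->mode.command encodes the access size, and 4- or 2-byte accesses additionally require length and address alignment. Expanded:

    switch (sdmac->word_size) {
    case DMA_SLAVE_BUSWIDTH_4_BYTES:
            bd->mode.command = 0;              /* 0 means 32-bit */
            if (count & 3 || sg->dma_address & 3)
                    goto err_bd_out;
            break;
    case DMA_SLAVE_BUSWIDTH_2_BYTES:
            bd->mode.command = 2;
            if (count & 1 || sg->dma_address & 1)
                    goto err_bd_out;
            break;
    case DMA_SLAVE_BUSWIDTH_1_BYTE:
            bd->mode.command = 1;
            break;
    default:
            goto err_bd_out;
    }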
1523 struct sdma_channel *sdmac = to_sdma_chan(chan);
1524 struct sdma_engine *sdma = sdmac->sdma;
1526 int channel = sdmac->channel;
1532 sdma_config_write(chan, &sdmac->slave_config, direction);
1534 desc = sdma_transfer_init(sdmac, direction, num_periods);
1540 sdmac->flags |= IMX_DMA_SG_LOOP;
1556 if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES)
1558 if (sdmac->word_size == DMA_SLAVE_BUSWIDTH_4_BYTES)
1561 bd->mode.command = sdmac->word_size;
1580 return vchan_tx_prep(&sdmac->vc, &desc->vd, flags);
1585 sdmac->status = DMA_ERROR;
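
sdma_prep_dma_cyclic() (lines 1523-1585) sets IMX_DMA_SG_LOOP (line 1540) so the interrupt path takes the loop-update branch, marks every period's BD with BD_INTR, and gives only the last BD BD_WRAP so the engine cycles back to period 0. Per-period setup, sketched:

    for (i = 0; i < num_periods; i++) {
            struct sdma_buffer_descriptor *bd = &desc->bd[i];
            int param;

            bd->buffer_addr = dma_addr + i * period_len;
            bd->mode.count = period_len;
            bd->mode.command = sdmac->word_size;  /* 0 for 4-byte accesses */

            param = BD_DONE | BD_EXTD | BD_CONT | BD_INTR;
            if (i + 1 == num_periods)
                    param |= BD_WRAP;             /* close the ring */
            bd->mode.status = param;
    }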
1593 struct sdma_channel *sdmac = to_sdma_chan(chan);
1596 sdmac->per_address = dmaengine_cfg->src_addr;
1597 sdmac->watermark_level = dmaengine_cfg->src_maxburst *
1599 sdmac->word_size = dmaengine_cfg->src_addr_width;
1601 sdmac->per_address2 = dmaengine_cfg->src_addr;
1602 sdmac->per_address = dmaengine_cfg->dst_addr;
1603 sdmac->watermark_level = dmaengine_cfg->src_maxburst &
1605 sdmac->watermark_level |= (dmaengine_cfg->dst_maxburst << 16) &
1607 sdmac->word_size = dmaengine_cfg->dst_addr_width;
1609 sdmac->per_address = dmaengine_cfg->dst_addr;
1610 sdmac->watermark_level = dmaengine_cfg->dst_maxburst *
1612 sdmac->word_size = dmaengine_cfg->dst_addr_width;
1614 sdmac->direction = direction;
1621 struct sdma_channel *sdmac = to_sdma_chan(chan);
1623 memcpy(&sdmac->slave_config, dmaengine_cfg, sizeof(*dmaengine_cfg));
1626 if (sdmac->event_id0 >= sdmac->sdma->drvdata->num_events)
1628 sdma_event_enable(sdmac, sdmac->event_id0);
1630 if (sdmac->event_id1) {
1631 if (sdmac->event_id1 >= sdmac->sdma->drvdata->num_events)
1633 sdma_event_enable(sdmac, sdmac->event_id1);
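
sdma_config() (lines 1621-1633) stores the dma_slave_config and validates/enables the request events; the stored config is applied at prep time by sdma_config_write() (lines 1593-1614), which derives per_address, watermark_level (maxburst scaled by the address width) and word_size from it. Typical client setup for a DEV_TO_MEM channel (hypothetical values):

    struct dma_slave_config cfg = {
            .src_addr       = fifo_phys,   /* peripheral FIFO, hypothetical */
            .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
            .src_maxburst   = 8,           /* becomes the watermark level */
    };
    int ret = dmaengine_slave_config(chan, &cfg);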
1643 struct sdma_channel *sdmac = to_sdma_chan(chan);
1654 spin_lock_irqsave(&sdmac->vc.lock, flags);
1656 vd = vchan_find_desc(&sdmac->vc, cookie);
1659 else if (sdmac->desc && sdmac->desc->vd.tx.cookie == cookie)
1660 desc = sdmac->desc;
1663 if (sdmac->flags & IMX_DMA_SG_LOOP)
1672 spin_unlock_irqrestore(&sdmac->vc.lock, flags);
1677 return sdmac->status;
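
sdma_tx_status() (lines 1643-1677) derives the residue from the descriptor's byte accounting: for cyclic channels it is computed from the position in the period ring, otherwise it is the total minus what actually transferred. The core of lines 1663-1672, with field names from the mainline sdma_desc:

    if (sdmac->flags & IMX_DMA_SG_LOOP)
            residue = (desc->num_bd - desc->buf_ptail) *
                       desc->period_len - desc->chn_real_count;
    else
            residue = desc->chn_count - desc->chn_real_count;

    dma_set_residue(txstate, residue);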
1682 struct sdma_channel *sdmac = to_sdma_chan(chan);
1685 spin_lock_irqsave(&sdmac->vc.lock, flags);
1686 if (vchan_issue_pending(&sdmac->vc) && !sdmac->desc)
1687 sdma_start_desc(sdmac);
1688 spin_unlock_irqrestore(&sdmac->vc.lock, flags);
1937 struct sdma_channel *sdmac = to_sdma_chan(chan);
1943 sdmac->data = *data;
1944 chan->private = &sdmac->data;
1967 * be set to sdmac->event_id1.
2065 struct sdma_channel *sdmac = &sdma->channel[i];
2067 sdmac->sdma = sdma;
2069 sdmac->channel = i;
2070 sdmac->vc.desc_free = sdma_desc_free;
2071 INIT_WORK(&sdmac->terminate_worker,
2079 vchan_init(&sdmac->vc, &sdma->dma_device);
2190 struct sdma_channel *sdmac = &sdma->channel[i];
2192 tasklet_kill(&sdmac->vc.task);
2193 sdma_free_chan_resources(&sdmac->vc.chan);