Lines matching defs:idma — uses of the struct iomd_dma *idma channel pointer in the IOMD DMA code; an illustrative sketch of each pattern follows its group of lines.
54 static void iomd_get_next_sg(struct iomd_dma *idma)
58 if (idma->dma.sg) {
59 idma->cur_addr = idma->dma_addr;
60 offset = idma->cur_addr & ~PAGE_MASK;
62 end = offset + idma->dma_len;
70 idma->cur_len = end - TRANSFER_SIZE;
72 idma->dma_len -= end - offset;
73 idma->dma_addr += end - offset;
75 if (idma->dma_len == 0) {
76 if (idma->dma.sgcount > 1) {
77 idma->dma.sg = sg_next(idma->dma.sg);
78 idma->dma_addr = idma->dma.sg->dma_address;
79 idma->dma_len = idma->dma.sg->length;
80 idma->dma.sgcount--;
82 idma->dma.sg = NULL;
88 idma->cur_addr = 0;
89 idma->cur_len = 0;
92 idma->cur_len |= flags;
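
The lines above (source lines 54-92) are the scatter-gather advance: once the current entry's remaining length reaches zero, the driver either steps to the next scatterlist entry or marks the list exhausted. Below is a minimal sketch of that pattern only, with simplified stand-in types and names (sg_cursor, sg_cursor_advance are illustrative, not the driver's):

#include <linux/scatterlist.h>

/* Simplified cursor over a DMA scatterlist; stands in for the sg, sgcount,
 * dma_addr and dma_len fields used at lines 58-82 above. */
struct sg_cursor {
        struct scatterlist *sg;  /* current entry, NULL once exhausted */
        unsigned int sgcount;    /* entries still to be consumed */
        dma_addr_t dma_addr;     /* bus address within the current entry */
        unsigned int dma_len;    /* bytes left in the current entry */
};

static void sg_cursor_advance(struct sg_cursor *c, unsigned int consumed)
{
        c->dma_addr += consumed;
        c->dma_len -= consumed;

        if (c->dma_len == 0) {
                if (c->sgcount > 1) {
                        /* step to the next entry and reload address/length */
                        c->sg = sg_next(c->sg);
                        c->dma_addr = sg_dma_address(c->sg);
                        c->dma_len = sg_dma_len(c->sg);
                        c->sgcount--;
                } else {
                        /* last entry consumed: mark the list exhausted */
                        c->sg = NULL;
                }
        }
}
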
97 struct iomd_dma *idma = dev_id;
98 void __iomem *base = idma->base;
99 unsigned int state = idma->state;
108 iomd_get_next_sg(idma);
119 writel(idma->cur_addr, base + cur);
120 writel(idma->cur_len, base + end);
123 idma->cur_len == (DMA_END_S|DMA_END_L))
130 idma->state = state;
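
Source lines 97-130 are the interrupt handler: it recovers the per-channel context from dev_id and writes the buffer prepared by iomd_get_next_sg() into a current/end register pair. A minimal sketch of that shape follows, assuming a two-buffer controller; the register offsets and names (chan_ctx, refill_irq, REG_*) are illustrative, not IOMD's:

#include <linux/interrupt.h>
#include <linux/io.h>

/* Illustrative register offsets for a two-buffer DMA engine; the real
 * IOMD offsets are not shown in the listing above. */
#define REG_CUR_A 0x00
#define REG_END_A 0x04
#define REG_CUR_B 0x08
#define REG_END_B 0x0c

struct chan_ctx {
        void __iomem *base;   /* channel register window */
        dma_addr_t cur_addr;  /* next buffer, prepared in advance */
        u32 cur_len;          /* its length plus end flags */
        bool use_b;           /* which register pair to refill next */
};

static irqreturn_t refill_irq(int irq, void *dev_id)
{
        struct chan_ctx *ctx = dev_id;  /* pointer handed to request_irq() */
        unsigned int cur = ctx->use_b ? REG_CUR_B : REG_CUR_A;
        unsigned int end = ctx->use_b ? REG_END_B : REG_END_A;

        /* program the prepared buffer into the idle register pair */
        writel(ctx->cur_addr, ctx->base + cur);
        writel(ctx->cur_len, ctx->base + end);
        ctx->use_b = !ctx->use_b;

        return IRQ_HANDLED;
}
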
136 struct iomd_dma *idma = container_of(dma, struct iomd_dma, dma);
138 return request_irq(idma->irq, iomd_dma_handle,
139 0, idma->dma.device_id, idma);
144 struct iomd_dma *idma = container_of(dma, struct iomd_dma, dma);
146 free_irq(idma->irq, idma);
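
Source lines 136-146 show the request/free pair: container_of() recovers the wrapper structure from the embedded dma_t, and the same wrapper pointer is passed as dev_id to both request_irq() and free_irq(), so the handler and the teardown path find the right channel. A minimal sketch of that pairing, with illustrative names (engine, chan, chan_request, chan_release):

#include <linux/interrupt.h>
#include <linux/kernel.h>

/* Illustrative types: "engine" stands in for the generic dma_t that the
 * driver embeds inside struct iomd_dma. */
struct engine {
        const char *device_id;
};

struct chan {
        struct engine core;  /* embedded, like dma in struct iomd_dma */
        int irq;
};

static irqreturn_t chan_irq(int irq, void *dev_id)
{
        return IRQ_HANDLED;  /* real handling omitted in this sketch */
}

static int chan_request(struct engine *e)
{
        struct chan *ch = container_of(e, struct chan, core);

        /* dev_id = ch, so chan_irq() and free_irq() see the wrapper */
        return request_irq(ch->irq, chan_irq, 0, ch->core.device_id, ch);
}

static void chan_release(struct engine *e)
{
        struct chan *ch = container_of(e, struct chan, core);

        free_irq(ch->irq, ch);  /* must match the dev_id used above */
}
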
157 struct iomd_dma *idma = container_of(dma, struct iomd_dma, dma);
158 void __iomem *base = idma->base;
161 if (idma->dma.invalid) {
162 idma->dma.invalid = 0;
168 if (!idma->dma.sg) {
169 idma->dma.sg = &idma->dma.buf;
170 idma->dma.sgcount = 1;
171 idma->dma.buf.length = idma->dma.count;
172 idma->dma.buf.dma_address = dma_map_single(&isa_dma_dev,
173 idma->dma.addr, idma->dma.count,
174 idma->dma.dma_mode == DMA_MODE_READ ?
178 idma->dma_addr = idma->dma.sg->dma_address;
179 idma->dma_len = idma->dma.sg->length;
182 idma->state = DMA_ST_AB;
185 if (idma->dma.dma_mode == DMA_MODE_READ)
189 enable_irq(idma->irq);
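
Source lines 157-189 enable the channel; the fragment at lines 168-174 is the flat-buffer fallback: when no scatterlist was supplied, the buffer described by addr/count is wrapped in a one-entry list and mapped with dma_map_single(), with the direction chosen from the transfer mode. A minimal sketch of that mapping step only, with an illustrative xfer structure standing in for the dma_t fields:

#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* Illustrative description of one flat transfer; stands in for the
 * addr, count and dma_mode fields used at lines 171-174 above. */
struct xfer {
        void *addr;              /* caller's flat buffer */
        size_t count;            /* its length in bytes */
        bool from_device;        /* read (device to memory) or write */
        dma_addr_t dma_address;  /* filled in by the mapping */
};

static int xfer_map(struct device *dev, struct xfer *x)
{
        enum dma_data_direction dir =
                x->from_device ? DMA_FROM_DEVICE : DMA_TO_DEVICE;

        x->dma_address = dma_map_single(dev, x->addr, x->count, dir);
        if (dma_mapping_error(dev, x->dma_address))
                return -ENOMEM;

        return 0;  /* remember to dma_unmap_single() when done */
}
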
194 struct iomd_dma *idma = container_of(dma, struct iomd_dma, dma);
195 void __iomem *base = idma->base;
199 if (idma->state != ~DMA_ST_AB)
200 disable_irq(idma->irq);
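
Source lines 194-200 disable the channel; disable_irq() is only called when the state still says the channel is active, so the enable_irq()/disable_irq() calls stay balanced. A minimal sketch of that guard, with an illustrative state encoding (the driver itself uses ~DMA_ST_AB as its idle value):

#include <linux/interrupt.h>

#define CHAN_IDLE 0u  /* illustrative; the driver uses ~DMA_ST_AB */

struct simple_chan {
        unsigned int state;
        int irq;
};

static void simple_chan_stop(struct simple_chan *ch)
{
        /* disable_irq() nests, so only undo a previous enable_irq() */
        if (ch->state != CHAN_IDLE)
                disable_irq(ch->irq);
        ch->state = CHAN_IDLE;
}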