Lines Matching refs:dma
25 int ivtv_udma_fill_sg_list (struct ivtv_user_dma *dma, struct ivtv_dma_page_info *dma_page, int map_offset)
40 if (PageHighMem(dma->map[map_offset])) {
43 if (dma->bouncemap[map_offset] == NULL)
44 dma->bouncemap[map_offset] = alloc_page(GFP_KERNEL);
45 if (dma->bouncemap[map_offset] == NULL)
48 src = kmap_atomic(dma->map[map_offset]) + offset;
49 memcpy(page_address(dma->bouncemap[map_offset]) + offset, src, len);
52 sg_set_page(&dma->SGlist[map_offset], dma->bouncemap[map_offset], len, offset);
55 sg_set_page(&dma->SGlist[map_offset], dma->map[map_offset], len, offset);
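The matches above are from ivtv_udma_fill_sg_list(), which turns one pinned user page into a scatterlist entry, substituting a low-memory bounce page when the user page sits in high memory. Below is a minimal sketch of that branch, reassembled around the listed lines; the offset/len bookkeeping, the kunmap_atomic() call and the failure return value are assumptions, since those lines are not part of the match list.

	if (PageHighMem(dma->map[map_offset])) {
		void *src;

		/* Allocate the bounce page lazily and reuse it afterwards. */
		if (dma->bouncemap[map_offset] == NULL)
			dma->bouncemap[map_offset] = alloc_page(GFP_KERNEL);
		if (dma->bouncemap[map_offset] == NULL)
			return -1;	/* assumed failure value */

		/* Copy the user data out of the highmem page into the bounce page. */
		src = kmap_atomic(dma->map[map_offset]) + offset;
		memcpy(page_address(dma->bouncemap[map_offset]) + offset, src, len);
		kunmap_atomic(src);	/* assumed: undo the mapping taken above */

		/* Point the scatterlist entry at the bounce page ... */
		sg_set_page(&dma->SGlist[map_offset], dma->bouncemap[map_offset], len, offset);
	} else {
		/* ... or, for lowmem pages, at the user page itself. */
		sg_set_page(&dma->SGlist[map_offset], dma->map[map_offset], len, offset);
	}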
63 void ivtv_udma_fill_sg_array (struct ivtv_user_dma *dma, u32 buffer_offset, u32 buffer_offset_2, u32 split) {
67 for_each_sg(dma->SGlist, sg, dma->SG_length, i) {
68 dma->SGarray[i].size = cpu_to_le32(sg_dma_len(sg));
69 dma->SGarray[i].src = cpu_to_le32(sg_dma_address(sg));
70 dma->SGarray[i].dst = cpu_to_le32(buffer_offset);
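The next group of matches is the loop in ivtv_udma_fill_sg_array() that translates the DMA-mapped scatterlist into the card's own SG array format: each element carries the mapped length, the bus address as the source, and an offset in card memory as the destination, all stored little-endian. A sketch of the loop follows; how buffer_offset advances and how split/buffer_offset_2 redirect the destination to a second buffer are assumptions read off the parameter names, not lines from the match list.

	struct scatterlist *sg;
	int i;

	for_each_sg(dma->SGlist, sg, dma->SG_length, i) {
		dma->SGarray[i].size = cpu_to_le32(sg_dma_len(sg));
		dma->SGarray[i].src = cpu_to_le32(sg_dma_address(sg));
		dma->SGarray[i].dst = cpu_to_le32(buffer_offset);

		/* Assumed: walk the destination forward, switching to the
		 * second buffer once 'split' bytes have been consumed. */
		buffer_offset += sg_dma_len(sg);
		split -= sg_dma_len(sg);
		if (split == 0)
			buffer_offset = buffer_offset_2;
	}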
96 struct ivtv_user_dma *dma = &itv->udma;
102 if (dma->SG_length || dma->page_count) {
104 dma->SG_length, dma->page_count);
118 dma->map, 0);
124 unpin_user_pages(dma->map, err);
130 dma->page_count = user_dma.page_count;
133 if (ivtv_udma_fill_sg_list(dma, &user_dma, 0) < 0) {
134 unpin_user_pages(dma->map, dma->page_count);
135 dma->page_count = 0;
140 dma->SG_length = dma_map_sg(&itv->pdev->dev, dma->SGlist,
141 dma->page_count, DMA_TO_DEVICE);
144 ivtv_udma_fill_sg_array (dma, ivtv_dest_addr, 0, -1);
147 dma->SGarray[dma->SG_length - 1].size |= cpu_to_le32(0x80000000);
150 return dma->page_count;
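The third group of matches is the transfer setup path (the function name itself is not in the match list): it refuses to start while a previous user DMA is still mapped, pins the user pages, builds the scatterlist as above, streams-maps it with dma_map_sg(), fills the card's SG array against ivtv_dest_addr, tags the final element, and returns the page count. A sketch of that flow, assuming the surrounding function provides itv, user_dma and ivtv_dest_addr as in the listed lines; the pinning call, the user_dma.uaddr field name, the dma_map_sg() failure guard and the exact error codes are assumptions.

	struct ivtv_user_dma *dma = &itv->udma;
	int err;

	/* A previous user DMA must have been torn down first. */
	if (dma->SG_length || dma->page_count)
		return -EBUSY;			/* assumed error code */

	/* Pin the user pages (assumed call and field name). */
	err = pin_user_pages_unlocked(user_dma.uaddr, user_dma.page_count,
			dma->map, 0);
	if (err != user_dma.page_count) {
		if (err >= 0)
			unpin_user_pages(dma->map, err);
		return -EINVAL;			/* assumed error code */
	}
	dma->page_count = user_dma.page_count;

	/* Build the scatterlist, bouncing highmem pages as needed. */
	if (ivtv_udma_fill_sg_list(dma, &user_dma, 0) < 0) {
		unpin_user_pages(dma->map, dma->page_count);
		dma->page_count = 0;
		return -ENOMEM;			/* assumed error code */
	}

	/* Map the scatterlist for the device and fill the card's SG array. */
	dma->SG_length = dma_map_sg(&itv->pdev->dev, dma->SGlist,
			dma->page_count, DMA_TO_DEVICE);
	if (!dma->SG_length) {			/* added guard, not in the match list */
		unpin_user_pages(dma->map, dma->page_count);
		dma->page_count = 0;
		return -EIO;			/* assumed error code */
	}
	ivtv_udma_fill_sg_array(dma, ivtv_dest_addr, 0, -1);

	/* Bit 31 on the last element is assumed to flag it to the card
	 * (end of list / interrupt on completion). */
	dma->SGarray[dma->SG_length - 1].size |= cpu_to_le32(0x80000000);

	return dma->page_count;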
155 struct ivtv_user_dma *dma = &itv->udma;
160 if (dma->page_count == 0)
164 if (dma->SG_length) {
165 dma_unmap_sg(&itv->pdev->dev, dma->SGlist, dma->page_count,
167 dma->SG_length = 0;
172 unpin_user_pages(dma->map, dma->page_count);
173 dma->page_count = 0;
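The final matches are the teardown path: nothing to do if no pages are mapped; otherwise the scatterlist is unmapped with dma_unmap_sg() and the user pages are unpinned, with both counters reset so a new transfer can be set up. A sketch follows; the DMA direction is assumed to match the DMA_TO_DEVICE mapping above, and anything the driver does between the listed lines is left out.

	struct ivtv_user_dma *dma = &itv->udma;

	if (dma->page_count == 0)
		return;

	/* Release the streaming DMA mapping, if one was set up. */
	if (dma->SG_length) {
		dma_unmap_sg(&itv->pdev->dev, dma->SGlist, dma->page_count,
				DMA_TO_DEVICE);
		dma->SG_length = 0;
	}

	/* Drop the pin on the user pages and reset the bookkeeping. */
	unpin_user_pages(dma->map, dma->page_count);
	dma->page_count = 0;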