Lines Matching refs:dma

25 int ivtv_udma_fill_sg_list (struct ivtv_user_dma *dma, struct ivtv_dma_page_info *dma_page, int map_offset)
40 if (PageHighMem(dma->map[map_offset])) {
43 if (dma->bouncemap[map_offset] == NULL)
44 dma->bouncemap[map_offset] = alloc_page(GFP_KERNEL);
45 if (dma->bouncemap[map_offset] == NULL)
48 src = kmap_atomic(dma->map[map_offset]) + offset;
49 memcpy(page_address(dma->bouncemap[map_offset]) + offset, src, len);
52 sg_set_page(&dma->SGlist[map_offset], dma->bouncemap[map_offset], len, offset);
55 sg_set_page(&dma->SGlist[map_offset], dma->map[map_offset], len, offset);
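
The matches above (lines 25-55) come from ivtv_udma_fill_sg_list(): a page that sits in high memory is first copied into a lowmem bounce page, and the bounce page goes into the scatterlist; lowmem pages are added directly. A condensed sketch of that bounce pattern follows; add_page_to_sg(), pages[], bounce[] and sg[] are hypothetical names standing in for the loop body and the dma->map / dma->bouncemap / dma->SGlist fields, so this is an illustration rather than the driver's exact code.

    #include <linux/errno.h>
    #include <linux/gfp.h>
    #include <linux/highmem.h>
    #include <linux/mm.h>
    #include <linux/scatterlist.h>
    #include <linux/string.h>

    /* Sketch only: add one pinned user page to a scatterlist, bouncing it
     * through a lowmem page when it lives in high memory. */
    static int add_page_to_sg(struct page **pages, struct page **bounce,
                              struct scatterlist *sg, int i,
                              unsigned int offset, unsigned int len)
    {
            if (PageHighMem(pages[i])) {
                    void *src;

                    /* Allocate the bounce page lazily and keep it for reuse. */
                    if (bounce[i] == NULL)
                            bounce[i] = alloc_page(GFP_KERNEL);
                    if (bounce[i] == NULL)
                            return -ENOMEM;

                    /* Highmem pages have no permanent kernel mapping, so map
                     * the page briefly and copy its payload into the bounce
                     * page, which is always addressable. */
                    src = kmap_atomic(pages[i]) + offset;
                    memcpy(page_address(bounce[i]) + offset, src, len);
                    kunmap_atomic(src);

                    sg_set_page(&sg[i], bounce[i], len, offset);
            } else {
                    sg_set_page(&sg[i], pages[i], len, offset);
            }
            return 0;
    }
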
63 void ivtv_udma_fill_sg_array (struct ivtv_user_dma *dma, u32 buffer_offset, u32 buffer_offset_2, u32 split) {
67 for_each_sg(dma->SGlist, sg, dma->SG_length, i) {
68 dma->SGarray[i].size = cpu_to_le32(sg_dma_len(sg));
69 dma->SGarray[i].src = cpu_to_le32(sg_dma_address(sg));
70 dma->SGarray[i].dst = cpu_to_le32(buffer_offset);
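
Lines 63-70 are ivtv_udma_fill_sg_array(): it walks the DMA-mapped scatterlist and rewrites the card's descriptor array with little-endian size/source/destination triples, advancing the card-side destination offset by each element's length (the buffer_offset_2/split handling is omitted here). A minimal sketch, assuming a three-__le32 descriptor layout like the driver's ivtv_sg_element; sg_element and fill_descriptors() are illustrative names only.

    #include <linux/scatterlist.h>
    #include <linux/types.h>
    #include <asm/byteorder.h>

    /* Hypothetical stand-in for the driver's descriptor (ivtv_sg_element). */
    struct sg_element {
            __le32 src;
            __le32 dst;
            __le32 size;
    };

    /* Sketch: translate a mapped scatterlist into little-endian descriptors
     * the card consumes; 'dst' is an offset inside the card's memory. */
    static void fill_descriptors(struct sg_element *desc,
                                 struct scatterlist *sglist, int nents, u32 dst)
    {
            struct scatterlist *sg;
            int i;

            for_each_sg(sglist, sg, nents, i) {
                    desc[i].size = cpu_to_le32(sg_dma_len(sg));
                    desc[i].src  = cpu_to_le32(sg_dma_address(sg));
                    desc[i].dst  = cpu_to_le32(dst);
                    dst += sg_dma_len(sg);
            }
    }
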
94 struct ivtv_user_dma *dma = &itv->udma;
100 if (dma->SG_length || dma->page_count) {
102 dma->SG_length, dma->page_count);
116 dma->map, FOLL_FORCE);
122 unpin_user_pages(dma->map, err);
128 dma->page_count = user_dma.page_count;
131 if (ivtv_udma_fill_sg_list(dma, &user_dma, 0) < 0) {
132 unpin_user_pages(dma->map, dma->page_count);
133 dma->page_count = 0;
138 dma->SG_length = pci_map_sg(itv->pdev, dma->SGlist, dma->page_count, PCI_DMA_TODEVICE);
141 ivtv_udma_fill_sg_array (dma, ivtv_dest_addr, 0, -1);
144 dma->SGarray[dma->SG_length - 1].size |= cpu_to_le32(0x80000000);
147 return dma->page_count;
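
The block from line 94 to 147 is the setup path (ivtv_udma_setup()): it bails out if a previous transfer is still mapped, pins the user pages (lines 116 and 122 show the pin/unpin arguments, including FOLL_FORCE), builds the scatterlist via ivtv_udma_fill_sg_list(), DMA-maps it with pci_map_sg(), fills the card descriptors, and finally ORs bit 31 into the last descriptor's size so the card interrupts when the transfer completes. The sketch below keeps that ordering but trims the driver's structures; udma_setup_sketch() and its parameters are hypothetical, pin_user_pages_unlocked() is used as a representative pinning call, and the sg_element/fill_descriptors() sketch above is reused.

    #include <linux/errno.h>
    #include <linux/mm.h>
    #include <linux/pci.h>
    #include <linux/scatterlist.h>
    #include <asm/byteorder.h>

    /* Sketch: pin -> build SG list -> DMA-map -> write card descriptors. */
    static int udma_setup_sketch(struct pci_dev *pdev, unsigned long uaddr,
                                 int page_count, struct page **pages,
                                 struct scatterlist *sglist,
                                 struct sg_element *descs, u32 card_dest)
    {
            int pinned, mapped;

            /* 1. Pin the user buffer so its pages cannot move during DMA. */
            pinned = pin_user_pages_unlocked(uaddr, page_count, pages, FOLL_FORCE);
            if (pinned != page_count) {
                    if (pinned >= 0)
                            unpin_user_pages(pages, pinned);
                    return -EINVAL;
            }

            /* 2. Build the scatterlist (one sg_set_page() per page, bouncing
             *    highmem pages as in the first sketch), then let the PCI
             *    layer map it for device access. */
            mapped = pci_map_sg(pdev, sglist, page_count, PCI_DMA_TODEVICE);
            if (mapped == 0) {
                    unpin_user_pages(pages, page_count);
                    return -EIO;
            }

            /* 3. Translate the mapped scatterlist into card descriptors. */
            fill_descriptors(descs, sglist, mapped, card_dest);

            /* 4. Bit 31 of the last size field asks the card to raise an
             *    interrupt when this descriptor finishes. */
            descs[mapped - 1].size |= cpu_to_le32(0x80000000);

            return page_count;
    }
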
152 struct ivtv_user_dma *dma = &itv->udma;
157 if (dma->page_count == 0)
161 if (dma->SG_length) {
162 pci_unmap_sg(itv->pdev, dma->SGlist, dma->page_count, PCI_DMA_TODEVICE);
163 dma->SG_length = 0;
168 unpin_user_pages(dma->map, dma->page_count);
169 dma->page_count = 0;
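
The last group (lines 152-169) is the teardown path, ivtv_udma_unmap(): the scatterlist is unmapped with pci_unmap_sg() before the user pages are unpinned, and both counters are cleared so the busy check in the setup path (SG_length || page_count) passes next time. A short sketch of the same order, with hypothetical parameter names in place of the struct ivtv_user_dma fields:

    #include <linux/mm.h>
    #include <linux/pci.h>
    #include <linux/scatterlist.h>

    /* Sketch: release a user DMA mapping in the reverse order of setup. */
    static void udma_unmap_sketch(struct pci_dev *pdev,
                                  struct scatterlist *sglist,
                                  struct page **pages,
                                  int *page_count, int *sg_length)
    {
            if (*page_count == 0)
                    return;         /* nothing is mapped */

            if (*sg_length) {
                    /* Undo pci_map_sg() while the pages are still pinned. */
                    pci_unmap_sg(pdev, sglist, *page_count, PCI_DMA_TODEVICE);
                    *sg_length = 0;
            }

            /* Drop the pin taken when the buffer was set up. */
            unpin_user_pages(pages, *page_count);
            *page_count = 0;
    }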