Lines Matching refs:buf

58 static int vb2_dma_sg_alloc_compacted(struct vb2_dma_sg_buf *buf,
62 unsigned long size = buf->size;
83 __free_page(buf->pages[last_page]);
91 buf->pages[last_page++] = &pages[i];
103 struct vb2_dma_sg_buf *buf;
111 buf = kzalloc(sizeof *buf, GFP_KERNEL);
112 if (!buf)
115 buf->vaddr = NULL;
116 buf->dma_dir = dma_dir;
117 buf->offset = 0;
118 buf->size = size;
120 buf->num_pages = size >> PAGE_SHIFT;
121 buf->dma_sgt = &buf->sg_table;
128 buf->pages = kvmalloc_array(buf->num_pages, sizeof(struct page *),
130 if (!buf->pages)
133 ret = vb2_dma_sg_alloc_compacted(buf, gfp_flags);
137 ret = sg_alloc_table_from_pages(buf->dma_sgt, buf->pages,
138 buf->num_pages, 0, size, GFP_KERNEL);
143 buf->dev = get_device(dev);
145 sgt = &buf->sg_table;
150 if (dma_map_sgtable(buf->dev, sgt, buf->dma_dir,
154 buf->handler.refcount = &buf->refcount;
155 buf->handler.put = vb2_dma_sg_put;
156 buf->handler.arg = buf;
158 refcount_set(&buf->refcount, 1);
161 __func__, buf->num_pages);
162 return buf;
165 put_device(buf->dev);
166 sg_free_table(buf->dma_sgt);
168 num_pages = buf->num_pages;
170 __free_page(buf->pages[num_pages]);
172 kvfree(buf->pages);
174 kfree(buf);
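
The member accesses in the matches above and below all hang off one per-buffer context structure. A minimal reconstruction of struct vb2_dma_sg_buf, assembled only from the fields this listing actually dereferences (the matches appear to come from the videobuf2 DMA scatter/gather allocator, videobuf2-dma-sg.c); field order and exact types are assumptions:

    /* Sketch of the per-buffer state implied by the buf-> accesses in this listing. */
    #include <linux/dma-mapping.h>
    #include <linux/refcount.h>
    #include <linux/scatterlist.h>
    #include <media/videobuf2-memops.h>

    struct vb2_dma_sg_buf {
        struct device                 *dev;        /* device the sg_table is DMA-mapped for */
        void                          *vaddr;      /* kernel mapping, NULL until first vaddr() */
        struct page                   **pages;     /* page array backing the buffer */
        struct frame_vector           *vec;        /* pinned user pages (userptr path only) */
        unsigned int                  offset;      /* sub-page offset of the data in pages[0] */
        enum dma_data_direction       dma_dir;
        struct sg_table               sg_table;    /* owned table; dma_sgt points here ... */
        struct sg_table               *dma_sgt;    /* ... except on the dmabuf import path */
        size_t                        size;
        unsigned int                  num_pages;
        refcount_t                    refcount;    /* shared with mmap via handler below */
        struct vb2_vmarea_handler     handler;     /* mmap lifetime hook (lines 154-156) */
        struct dma_buf_attachment     *db_attach;  /* set when importing a dmabuf */
    };
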
180 struct vb2_dma_sg_buf *buf = buf_priv;
181 struct sg_table *sgt = &buf->sg_table;
182 int i = buf->num_pages;
184 if (refcount_dec_and_test(&buf->refcount)) {
186 buf->num_pages);
187 dma_unmap_sgtable(buf->dev, sgt, buf->dma_dir,
189 if (buf->vaddr)
190 vm_unmap_ram(buf->vaddr, buf->num_pages);
191 sg_free_table(buf->dma_sgt);
193 __free_page(buf->pages[i]);
194 kvfree(buf->pages);
195 put_device(buf->dev);
196 kfree(buf);
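
Lines 184-196 are the last-reference teardown: once refcount_dec_and_test() fires, everything the allocator set up is undone in reverse order. A condensed sketch of that path; the DMA_ATTR_SKIP_CPU_SYNC attribute and the exact loop form are assumptions, since those parts of the source do not match 'buf':

    /* Sketch: teardown on the final reference drop (lines 184-196). */
    if (refcount_dec_and_test(&buf->refcount)) {
        dma_unmap_sgtable(buf->dev, sgt, buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
        if (buf->vaddr)
            vm_unmap_ram(buf->vaddr, buf->num_pages);  /* undo vm_map_ram() */
        sg_free_table(buf->dma_sgt);                   /* undo sg_alloc_table_from_pages() */
        while (--i >= 0)
            __free_page(buf->pages[i]);                /* undo the per-page allocations */
        kvfree(buf->pages);                            /* undo kvmalloc_array() */
        put_device(buf->dev);                          /* undo get_device() */
        kfree(buf);
    }
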
202 struct vb2_dma_sg_buf *buf = buf_priv;
203 struct sg_table *sgt = buf->dma_sgt;
205 dma_sync_sgtable_for_device(buf->dev, sgt, buf->dma_dir);
210 struct vb2_dma_sg_buf *buf = buf_priv;
211 struct sg_table *sgt = buf->dma_sgt;
213 dma_sync_sgtable_for_cpu(buf->dev, sgt, buf->dma_dir);
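
The prepare/finish pair at lines 205 and 213 is the cache-coherency handshake: buffer ownership moves to the device before DMA and back to the CPU afterwards. A hypothetical open-coded pairing (the vb2 core normally drives this through the prepare/finish mem_ops, not directly):

    /* Sketch: how the two sync calls bracket a DMA transfer. */
    dma_sync_sgtable_for_device(buf->dev, buf->dma_sgt, buf->dma_dir);
    /* ... device DMA runs against buf->dma_sgt ... */
    dma_sync_sgtable_for_cpu(buf->dev, buf->dma_sgt, buf->dma_dir);
    /* the CPU may now safely read what the device wrote */
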
220 struct vb2_dma_sg_buf *buf;
227 buf = kzalloc(sizeof *buf, GFP_KERNEL);
228 if (!buf)
231 buf->vaddr = NULL;
232 buf->dev = dev;
233 buf->dma_dir = dma_dir;
234 buf->offset = vaddr & ~PAGE_MASK;
235 buf->size = size;
236 buf->dma_sgt = &buf->sg_table;
240 buf->vec = vec;
242 buf->pages = frame_vector_pages(vec);
243 if (IS_ERR(buf->pages))
245 buf->num_pages = frame_vector_count(vec);
247 if (sg_alloc_table_from_pages(buf->dma_sgt, buf->pages,
248 buf->num_pages, buf->offset, size, 0))
251 sgt = &buf->sg_table;
256 if (dma_map_sgtable(buf->dev, sgt, buf->dma_dir,
260 return buf;
263 sg_free_table(&buf->sg_table);
267 kfree(buf);
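
The userptr path (lines 220-267) builds the same sg_table, but from pages pinned out of a user mapping rather than freshly allocated ones; the sub-page offset of the user pointer is carried into sg_alloc_table_from_pages(). Condensed sketch; the vb2_create_framevec() call, its arguments, the goto labels and the DMA_ATTR_SKIP_CPU_SYNC attribute are assumptions, as those parts do not match 'buf':

    /* Sketch: wrapping user memory (lines 220-267). */
    buf->offset = vaddr & ~PAGE_MASK;           /* user pointer need not be page aligned */
    vec = vb2_create_framevec(vaddr, size);     /* assumed helper: pins the user pages */
    buf->vec = vec;
    buf->pages = frame_vector_pages(vec);       /* page-array view of the pinned range */
    buf->num_pages = frame_vector_count(vec);

    if (sg_alloc_table_from_pages(buf->dma_sgt, buf->pages,
                                  buf->num_pages, buf->offset, size, 0))
        goto userptr_fail_sgtable;              /* hypothetical label */
    if (dma_map_sgtable(buf->dev, &buf->sg_table, buf->dma_dir,
                        DMA_ATTR_SKIP_CPU_SYNC))
        goto userptr_fail_map;                  /* hypothetical label */
    return buf;
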
277 struct vb2_dma_sg_buf *buf = buf_priv;
278 struct sg_table *sgt = &buf->sg_table;
279 int i = buf->num_pages;
282 __func__, buf->num_pages);
283 dma_unmap_sgtable(buf->dev, sgt, buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
284 if (buf->vaddr)
285 vm_unmap_ram(buf->vaddr, buf->num_pages);
286 sg_free_table(buf->dma_sgt);
287 if (buf->dma_dir == DMA_FROM_DEVICE ||
288 buf->dma_dir == DMA_BIDIRECTIONAL)
290 set_page_dirty_lock(buf->pages[i]);
291 vb2_destroy_framevec(buf->vec);
292 kfree(buf);
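
put_userptr (lines 277-292) mirrors the teardown above, with one extra step: pages the device may have written are marked dirty before the pinned user pages are released, so the data is not lost if they back a file mapping. Sketch; the loop form is an assumption:

    /* Sketch: releasing a userptr buffer (lines 277-292). */
    dma_unmap_sgtable(buf->dev, sgt, buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
    if (buf->vaddr)
        vm_unmap_ram(buf->vaddr, buf->num_pages);
    sg_free_table(buf->dma_sgt);
    if (buf->dma_dir == DMA_FROM_DEVICE || buf->dma_dir == DMA_BIDIRECTIONAL)
        while (--i >= 0)
            set_page_dirty_lock(buf->pages[i]); /* the device wrote these pages */
    vb2_destroy_framevec(buf->vec);             /* unpin the user pages */
    kfree(buf);
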
297 struct vb2_dma_sg_buf *buf = buf_priv;
299 BUG_ON(!buf);
301 if (!buf->vaddr) {
302 if (buf->db_attach)
303 buf->vaddr = dma_buf_vmap(buf->db_attach->dmabuf);
305 buf->vaddr = vm_map_ram(buf->pages, buf->num_pages, -1);
309 return buf->vaddr ? buf->vaddr + buf->offset : NULL;
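
vb2_dma_sg_vaddr() (lines 297-309) creates the kernel mapping lazily and distinguishes an imported dmabuf, which only the exporter may vmap, from locally owned pages, which are mapped with vm_map_ram(). Sketch:

    /* Sketch: lazy kernel mapping (lines 297-309). */
    if (!buf->vaddr) {
        if (buf->db_attach)     /* imported dmabuf: ask the exporter for a vmap */
            buf->vaddr = dma_buf_vmap(buf->db_attach->dmabuf);
        else                    /* our own pages: map them virtually contiguous */
            buf->vaddr = vm_map_ram(buf->pages, buf->num_pages, -1);
    }
    /* add the sub-page offset so userptr buffers point at their data */
    return buf->vaddr ? buf->vaddr + buf->offset : NULL;
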
314 struct vb2_dma_sg_buf *buf = buf_priv;
316 return refcount_read(&buf->refcount);
321 struct vb2_dma_sg_buf *buf = buf_priv;
324 if (!buf) {
329 err = vm_map_pages(vma, buf->pages, buf->num_pages);
338 vma->vm_private_data = &buf->handler;
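
Lines 154-156 and 321-338 together show the mmap lifetime trick: the buffer's refcount is wired into the vb2_vmarea_handler, which is then hung off the VMA so the common vb2 vm_ops keep the buffer alive for as long as a userspace mapping exists. Sketch of the wiring (the vb2_common_vm_ops name and the open() call are assumptions, as those lines do not match 'buf'):

    /* At alloc time (lines 154-156): point the handler at this buffer. */
    buf->handler.refcount = &buf->refcount;
    buf->handler.put      = vb2_dma_sg_put;    /* runs when the last mapping disappears */
    buf->handler.arg      = buf;

    /* At mmap time (lines 329-338): insert the pages, then attach the handler. */
    err = vm_map_pages(vma, buf->pages, buf->num_pages);
    if (err)
        return err;
    vma->vm_private_data = &buf->handler;
    vma->vm_ops = &vb2_common_vm_ops;          /* assumed: its open/close adjust the refcount */
    vma->vm_ops->open(vma);
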
362 struct vb2_dma_sg_buf *buf = dbuf->priv;
370 /* Copy the buf->base_sgt scatter list to the attachment, as we can't
373 ret = sg_alloc_table(sgt, buf->dma_sgt->orig_nents, GFP_KERNEL);
379 rd = buf->dma_sgt->sgl;
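
The exporter's attach callback (lines 362-379) clones the buffer's scatterlist into a per-attachment sg_table, because a single sg_table cannot be DMA-mapped by several importing devices at once. Sketch of the copy; everything past line 379 is an assumption, as those lines do not match 'buf':

    /* Sketch: give each attachment its own copy of the scatterlist. */
    ret = sg_alloc_table(sgt, buf->dma_sgt->orig_nents, GFP_KERNEL);
    if (ret)
        return -ENOMEM;

    rd = buf->dma_sgt->sgl;
    wr = sgt->sgl;
    for (i = 0; i < sgt->orig_nents; i++) {
        sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
        rd = sg_next(rd);
        wr = sg_next(wr);
    }
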
465 struct vb2_dma_sg_buf *buf = dbuf->priv;
466 struct sg_table *sgt = buf->dma_sgt;
468 dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
476 struct vb2_dma_sg_buf *buf = dbuf->priv;
477 struct sg_table *sgt = buf->dma_sgt;
479 dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
485 struct vb2_dma_sg_buf *buf = dbuf->priv;
487 return vb2_dma_sg_vaddr(buf);
510 struct vb2_dma_sg_buf *buf = buf_priv;
515 exp_info.size = buf->size;
517 exp_info.priv = buf;
519 if (WARN_ON(!buf->dma_sgt))
527 refcount_inc(&buf->refcount);
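
The exporter entry point (lines 510-527) fills in a dma_buf_export_info and takes an extra reference on the buffer, since the new dma_buf holds the buffer independently of the vb2 queue. Sketch, assuming the usual DEFINE_DMA_BUF_EXPORT_INFO()/dma_buf_export() sequence and a hypothetical ops table name:

    /* Sketch: exporting the buffer as a dma_buf (lines 510-527). */
    DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

    exp_info.ops   = &vb2_dma_sg_dmabuf_ops;   /* assumed ops table name */
    exp_info.size  = buf->size;
    exp_info.flags = flags;
    exp_info.priv  = buf;

    if (WARN_ON(!buf->dma_sgt))                /* must own a mapped sg_table to export */
        return NULL;

    dbuf = dma_buf_export(&exp_info);
    if (IS_ERR(dbuf))
        return NULL;

    refcount_inc(&buf->refcount);              /* the dma_buf keeps its own reference */
    return dbuf;
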
538 struct vb2_dma_sg_buf *buf = mem_priv;
541 if (WARN_ON(!buf->db_attach)) {
546 if (WARN_ON(buf->dma_sgt)) {
552 sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
558 buf->dma_sgt = sgt;
559 buf->vaddr = NULL;
566 struct vb2_dma_sg_buf *buf = mem_priv;
567 struct sg_table *sgt = buf->dma_sgt;
569 if (WARN_ON(!buf->db_attach)) {
579 if (buf->vaddr) {
580 dma_buf_vunmap(buf->db_attach->dmabuf, buf->vaddr);
581 buf->vaddr = NULL;
583 dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);
585 buf->dma_sgt = NULL;
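
Lines 538-559 and 566-585 are the importer half: for an attached dmabuf the sg_table is produced and retired by the exporter, so mapping and unmapping reduce to dma_buf_map_attachment()/dma_buf_unmap_attachment() plus bookkeeping of buf->dma_sgt and buf->vaddr. Condensed sketch of the pair:

    /* Sketch: map an imported dmabuf (lines 538-559). */
    sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
    if (IS_ERR(sgt))
        return -EINVAL;
    buf->dma_sgt = sgt;    /* dma_sgt now points at the exporter's table */
    buf->vaddr   = NULL;

    /* Sketch: unmap it again (lines 566-585); drop any vmap first. */
    if (buf->vaddr) {
        dma_buf_vunmap(buf->db_attach->dmabuf, buf->vaddr);
        buf->vaddr = NULL;
    }
    dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);
    buf->dma_sgt = NULL;
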
590 struct vb2_dma_sg_buf *buf = mem_priv;
593 if (WARN_ON(buf->dma_sgt))
594 vb2_dma_sg_unmap_dmabuf(buf);
597 dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
598 kfree(buf);
604 struct vb2_dma_sg_buf *buf;
613 buf = kzalloc(sizeof(*buf), GFP_KERNEL);
614 if (!buf)
617 buf->dev = dev;
619 dba = dma_buf_attach(dbuf, buf->dev);
622 kfree(buf);
626 buf->dma_dir = dma_dir;
627 buf->size = size;
628 buf->db_attach = dba;
630 return buf;
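
attach_dmabuf (lines 604-630) only creates the attachment and records the parameters; the actual mapping is deferred to the map path above. A condensed sketch, together with the symmetric detach path from lines 590-598 (the error propagation detail is an assumption):

    /* Sketch: import-side setup (lines 604-630). */
    buf = kzalloc(sizeof(*buf), GFP_KERNEL);
    if (!buf)
        return ERR_PTR(-ENOMEM);
    buf->dev = dev;

    dba = dma_buf_attach(dbuf, buf->dev);
    if (IS_ERR(dba)) {
        kfree(buf);
        return ERR_CAST(dba);                  /* assumed error propagation */
    }
    buf->dma_dir   = dma_dir;
    buf->size      = size;
    buf->db_attach = dba;                      /* mapping happens later, on demand */
    return buf;

    /* Sketch: detach (lines 590-598); unmap first if still mapped. */
    if (WARN_ON(buf->dma_sgt))
        vb2_dma_sg_unmap_dmabuf(buf);
    dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
    kfree(buf);
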
635 struct vb2_dma_sg_buf *buf = buf_priv;
637 return buf->dma_sgt;