Lines Matching refs:sgt
52 struct sg_table *sgt;
71 struct sg_table *sgt;
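
The first two hits (lines 52 and 71) are the cached scatter-gather table pointers that every later hit works on; the identifiers elsewhere in the listing (gntdev_dmabuf, gntdev_dmabuf_attach, u.imp.sgt) suggest the Xen gntdev dma-buf exporter/importer, with one sgt cached per exporter attachment (line 247) and one per imported buffer (line 716). A hedged sketch of the containers those two declarations plausibly sit in; everything beyond the sgt members, including which declaration belongs to which struct, is an assumption:

        #include <linux/dma-direction.h>
        #include <linux/scatterlist.h>

        /*
         * Hypothetical per-attachment state on the export side: the sg_table
         * built in map_dma_buf is cached here so detach can unmap/free it.
         */
        struct gntdev_dmabuf_attachment {
                struct sg_table *sgt;
                enum dma_data_direction dir;    /* assumed; needed to undo the mapping */
        };

        /*
         * Hypothetical import-side state: the sg_table returned by
         * dma_buf_map_attachment() is kept so release can unmap it (line 716).
         */
        struct gntdev_dmabuf {
                union {
                        struct {
                                struct sg_table *sgt;
                        } imp;
                } u;
                /* assumed fields backing the export path (see lines 288-290) */
                struct page **pages;
                int nr_pages;
        };
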
204 struct sg_table *sgt;
207 sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
208 if (!sgt) {
213 ret = sg_alloc_table_from_pages(sgt, pages, nr_pages, 0,
219 return sgt;
222 kfree(sgt);
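
Lines 204-222 are the helper that wraps a page array in a freshly allocated sg_table. The listing only shows the lines that mention sgt, so the error handling in between is filled in here as a sketch; the size argument and the label name are assumptions consistent with the call visible at line 213:

        #include <linux/err.h>
        #include <linux/mm.h>
        #include <linux/scatterlist.h>
        #include <linux/slab.h>

        /* Build an sg_table covering nr_pages pages, starting at offset 0. */
        static struct sg_table *dmabuf_pages_to_sgt(struct page **pages,
                                                    unsigned int nr_pages)
        {
                struct sg_table *sgt;
                int ret;

                sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
                if (!sgt) {
                        ret = -ENOMEM;
                        goto out;
                }

                /*
                 * Offset 0 is visible at line 213; the total size of
                 * nr_pages << PAGE_SHIFT is an assumption.
                 */
                ret = sg_alloc_table_from_pages(sgt, pages, nr_pages, 0,
                                                (unsigned long)nr_pages << PAGE_SHIFT,
                                                GFP_KERNEL);
                if (ret)
                        goto out;

                return sgt;

        out:
                kfree(sgt);             /* kfree(NULL) is a no-op */
                return ERR_PTR(ret);
        }
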
247 struct sg_table *sgt = gntdev_dmabuf_attach->sgt;
249 if (sgt) {
251 dma_unmap_sgtable(attach->dev, sgt,
254 sg_free_table(sgt);
257 kfree(sgt);
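
Lines 247-257 are the matching teardown on detach: if the attachment still holds a DMA-mapped table it is unmapped, its scatterlist entries are freed with sg_free_table(), and the table itself is kfree()d. A sketch of that pattern, reusing the hypothetical attachment struct from the first sketch; the DMA attributes argument is an assumption since line 251 is truncated before it:

        #include <linux/dma-buf.h>
        #include <linux/dma-mapping.h>
        #include <linux/scatterlist.h>
        #include <linux/slab.h>

        /* Repeated from the earlier sketch for self-containment. */
        struct gntdev_dmabuf_attachment {
                struct sg_table *sgt;
                enum dma_data_direction dir;
        };

        static void example_exp_detach(struct dma_buf *dmabuf,
                                       struct dma_buf_attachment *attach)
        {
                struct gntdev_dmabuf_attachment *gntdev_dmabuf_attach = attach->priv;

                if (gntdev_dmabuf_attach) {
                        struct sg_table *sgt = gntdev_dmabuf_attach->sgt;

                        if (sgt) {
                                /*
                                 * Undo the dma_map_sgtable() done at map time;
                                 * the attrs value here is an assumption.
                                 */
                                dma_unmap_sgtable(attach->dev, sgt,
                                                  gntdev_dmabuf_attach->dir,
                                                  DMA_ATTR_SKIP_CPU_SYNC);
                                sg_free_table(sgt);
                        }

                        kfree(sgt);     /* kfree(NULL) is a no-op */
                        kfree(gntdev_dmabuf_attach);
                        attach->priv = NULL;
                }
        }
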
269 struct sg_table *sgt;
279 return gntdev_dmabuf_attach->sgt;
288 sgt = dmabuf_pages_to_sgt(gntdev_dmabuf->pages,
290 if (!IS_ERR(sgt)) {
291 if (dma_map_sgtable(attach->dev, sgt, dir,
293 sg_free_table(sgt);
294 kfree(sgt);
295 sgt = ERR_PTR(-ENOMEM);
297 gntdev_dmabuf_attach->sgt = sgt;
301 if (IS_ERR(sgt))
303 return sgt;
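
Lines 269-303 are the exporter's map path: if this attachment was already mapped the cached table is returned (line 279); otherwise a new table is built from the buffer's pages, DMA-mapped, and cached on the attachment, and a mapping failure frees the half-built table and turns it into ERR_PTR(-ENOMEM). A sketch of that flow, again with hypothetical struct names; the second argument to dmabuf_pages_to_sgt() and the DMA attrs are assumptions because those lines are truncated in the listing:

        #include <linux/dma-buf.h>
        #include <linux/dma-mapping.h>
        #include <linux/err.h>
        #include <linux/scatterlist.h>
        #include <linux/slab.h>

        /* Hypothetical structs repeated from the first sketch. */
        struct gntdev_dmabuf_attachment {
                struct sg_table *sgt;
                enum dma_data_direction dir;
        };
        struct gntdev_dmabuf {
                struct page **pages;
                int nr_pages;
        };

        /* The helper sketched above for lines 204-222. */
        struct sg_table *dmabuf_pages_to_sgt(struct page **pages,
                                             unsigned int nr_pages);

        static struct sg_table *
        example_exp_map_dma_buf(struct dma_buf_attachment *attach,
                                enum dma_data_direction dir)
        {
                struct gntdev_dmabuf_attachment *gntdev_dmabuf_attach = attach->priv;
                struct gntdev_dmabuf *gntdev_dmabuf = attach->dmabuf->priv;
                struct sg_table *sgt;

                /* Already mapped: hand back the cached table (line 279). */
                if (gntdev_dmabuf_attach->sgt)
                        return gntdev_dmabuf_attach->sgt;

                sgt = dmabuf_pages_to_sgt(gntdev_dmabuf->pages,
                                          gntdev_dmabuf->nr_pages);
                if (!IS_ERR(sgt)) {
                        if (dma_map_sgtable(attach->dev, sgt, dir,
                                            DMA_ATTR_SKIP_CPU_SYNC)) {
                                sg_free_table(sgt);
                                kfree(sgt);
                                sgt = ERR_PTR(-ENOMEM);
                        } else {
                                gntdev_dmabuf_attach->sgt = sgt;
                                gntdev_dmabuf_attach->dir = dir;
                        }
                }
                /*
                 * Line 301 shows an IS_ERR(sgt) check before the return at 303;
                 * whatever it guards (presumably error reporting) is not
                 * visible in this listing.
                 */
                return sgt;
        }
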
307 struct sg_table *sgt,
578 struct sg_table *sgt;
604 sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
605 if (IS_ERR(sgt)) {
606 ret = ERR_CAST(sgt);
611 if (sgt->sgl->offset) {
614 sgt->sgl->offset);
626 gntdev_dmabuf->u.imp.sgt = sgt;
635 * Now convert sgt to array of gfns without accessing underlying pages.
642 for_each_sgtable_dma_page(sgt, &sg_iter, 0) {
667 dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
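
Lines 578-667 are the import side: the foreign dma-buf is mapped through its attachment (line 604), buffers whose first scatterlist entry does not start at offset 0 are rejected (line 611), the resulting table is cached in u.imp.sgt (line 626), and the mapped table is then walked page by page to build the frame array "without accessing underlying pages", as the comment at line 635 puts it; line 667 is the unmap on the error path. A sketch of that core flow; in the original the per-page DMA addresses feed the Xen grant handling, while here they are simply collected into a caller-provided array (that interface and the error value are assumptions):

        #include <linux/dma-buf.h>
        #include <linux/dma-mapping.h>
        #include <linux/err.h>
        #include <linux/scatterlist.h>

        /*
         * Map an imported dma-buf and record one DMA address per page.
         * 'addrs' must have room for nr_pages entries (hypothetical interface).
         */
        static struct sg_table *example_imp_map(struct dma_buf_attachment *attach,
                                                dma_addr_t *addrs,
                                                unsigned int nr_pages)
        {
                struct sg_dma_page_iter sg_iter;
                struct sg_table *sgt;
                unsigned int i = 0;

                sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
                if (IS_ERR(sgt))
                        return sgt;

                /* Only buffers starting on a page boundary are accepted (line 611). */
                if (sgt->sgl->offset) {
                        dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
                        return ERR_PTR(-EINVAL);
                }

                /*
                 * Walk the DMA-mapped table a page at a time; this never
                 * dereferences the underlying struct page, matching the
                 * comment at line 635.
                 */
                for_each_sgtable_dma_page(sgt, &sg_iter, 0) {
                        if (i == nr_pages)
                                break;
                        addrs[i++] = sg_page_iter_dma_address(&sg_iter);
                }

                return sgt;
        }
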
716 if (gntdev_dmabuf->u.imp.sgt)
717 dma_buf_unmap_attachment(attach, gntdev_dmabuf->u.imp.sgt,
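
Lines 716-717 are the final teardown: when the imported buffer is released, the cached table is only unmapped if the import actually got as far as mapping it. A minimal sketch of that guard, reusing the hypothetical import-side struct from the first sketch; the direction is assumed to mirror the DMA_BIDIRECTIONAL map at line 604, since line 717 is truncated before it:

        #include <linux/dma-buf.h>
        #include <linux/scatterlist.h>

        /* Hypothetical import-side state, repeated from the first sketch. */
        struct gntdev_dmabuf {
                union {
                        struct {
                                struct sg_table *sgt;
                        } imp;
                } u;
        };

        static void example_imp_release(struct gntdev_dmabuf *gntdev_dmabuf,
                                        struct dma_buf_attachment *attach)
        {
                /* Skip the unmap if map_attachment never succeeded (line 716). */
                if (gntdev_dmabuf->u.imp.sgt)
                        dma_buf_unmap_attachment(attach, gntdev_dmabuf->u.imp.sgt,
                                                 DMA_BIDIRECTIONAL);
        }
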