// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/
 * Author: Rob Clark <rob.clark@linaro.org>
 */

#include <linux/dma-buf.h>
#include <linux/highmem.h>

#include <drm/drm_prime.h>

#include "omap_drv.h"

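/*
 * The dma_buf_* symbols used below are exported in the DMA_BUF symbol
 * namespace, so the module has to import that namespace explicitly.
 */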
MODULE_IMPORT_NS(DMA_BUF);

/* -----------------------------------------------------------------------------
 * DMABUF Export
 */

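/*
 * Called by the dma-buf core when an attached importer maps the exported
 * buffer for DMA (dma_buf_map_attachment() and friends). The scatterlist
 * describing the pinned backing memory comes from omap_gem_get_sg();
 * omap_gem_unmap_dma_buf() below releases it again.
 */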
static struct sg_table *omap_gem_map_dma_buf(
		struct dma_buf_attachment *attachment,
		enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attachment->dmabuf->priv;

	return omap_gem_get_sg(obj, dir);
}

static void omap_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
		struct sg_table *sg, enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attachment->dmabuf->priv;

	omap_gem_put_sg(obj, sg);
}

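/*
 * begin_cpu_access()/end_cpu_access() bracket CPU access to the exported
 * buffer (e.g. via DMA_BUF_IOCTL_SYNC): the backing pages are pinned on
 * begin and released again on end. Tiled (TILER-backed) buffers are
 * rejected because a de-tiled CPU view would require pinning at least part
 * of the buffer (see the TODO below).
 */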
static int omap_gem_dmabuf_begin_cpu_access(struct dma_buf *buffer,
		enum dma_data_direction dir)
{
	struct drm_gem_object *obj = buffer->priv;
	struct page **pages;

	if (omap_gem_flags(obj) & OMAP_BO_TILED_MASK) {
		/*
		 * TODO: we would need to pin at least part of the buffer to
		 * get a de-tiled view. For now just reject it.
		 */
		return -ENOMEM;
	}

	/* Make sure we have the pages. */
	return omap_gem_get_pages(obj, &pages, true);
}

static int omap_gem_dmabuf_end_cpu_access(struct dma_buf *buffer,
					  enum dma_data_direction dir)
{
	struct drm_gem_object *obj = buffer->priv;

	omap_gem_put_pages(obj);
	return 0;
}

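/*
 * mmap() of the dma-buf fd itself: delegate to drm_gem_mmap_obj() so the
 * importer gets the same mapping it would get by mmap()ing the GEM object
 * through the DRM device.
 */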
static int omap_gem_dmabuf_mmap(struct dma_buf *buffer,
		struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = buffer->priv;

	return drm_gem_mmap_obj(obj, omap_gem_mmap_size(obj), vma);
}

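/*
 * dma_buf_ops for buffers exported by omapdrm. .release is the generic DRM
 * helper that drops the GEM object reference held by the dma_buf.
 */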
static const struct dma_buf_ops omap_dmabuf_ops = {
	.map_dma_buf = omap_gem_map_dma_buf,
	.unmap_dma_buf = omap_gem_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.begin_cpu_access = omap_gem_dmabuf_begin_cpu_access,
	.end_cpu_access = omap_gem_dmabuf_end_cpu_access,
	.mmap = omap_gem_dmabuf_mmap,
};

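/*
 * Export hook backing DRM_IOCTL_PRIME_HANDLE_TO_FD: wrap the GEM object in
 * a dma_buf using the ops above, sharing the object's reservation object so
 * implicit fencing keeps working across the export boundary.
 *
 * A sketch of how the driver is expected to wire this up (the exact
 * registration lives with the GEM object functions in omap_gem.c, and the
 * struct name here is illustrative):
 *
 *	static const struct drm_gem_object_funcs omap_gem_object_funcs = {
 *		...
 *		.export = omap_gem_prime_export,
 *		...
 *	};
 */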
struct dma_buf *omap_gem_prime_export(struct drm_gem_object *obj, int flags)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &omap_dmabuf_ops;
	exp_info.size = omap_gem_mmap_size(obj);
	exp_info.flags = flags;
	exp_info.priv = obj;
	exp_info.resv = obj->resv;

	return drm_gem_dmabuf_export(obj->dev, &exp_info);
}

/* -----------------------------------------------------------------------------
 * DMABUF Import
 */

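/*
 * Import hook backing DRM_IOCTL_PRIME_FD_TO_HANDLE (typically registered as
 * the driver's .gem_prime_import hook). Buffers that were exported by this
 * same device are short-circuited back to the original GEM object; foreign
 * buffers are attached, mapped, and wrapped in a new GEM object via
 * omap_gem_new_dmabuf().
 */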
struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev,
					     struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct drm_gem_object *obj;
	struct sg_table *sgt;
	int ret;

	if (dma_buf->ops == &omap_dmabuf_ops) {
		obj = dma_buf->priv;
		if (obj->dev == dev) {
			/*
			 * Importing a dmabuf exported from our own GEM object
			 * increases the refcount on the GEM object itself
			 * instead of the f_count of the dmabuf.
			 */
			drm_gem_object_get(obj);
			return obj;
		}
	}

	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	get_dma_buf(dma_buf);

	sgt = dma_buf_map_attachment_unlocked(attach, DMA_TO_DEVICE);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto fail_detach;
	}

	obj = omap_gem_new_dmabuf(dev, dma_buf->size, sgt);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto fail_unmap;
	}

	obj->import_attach = attach;

	return obj;

fail_unmap:
	dma_buf_unmap_attachment_unlocked(attach, sgt, DMA_TO_DEVICE);
fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}