Lines Matching defs:dmabuf (drivers/dma-buf/dma-buf.c)

45  * dmabuf.
47 int get_each_dmabuf(int (*callback)(const struct dma_buf *dmabuf, void *private), void *private)
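
get_each_dmabuf() (line 47) walks every exported buffer and hands each one to a caller-supplied callback. A minimal caller might look like the sketch below; sum_dmabuf_size() and total_exported_bytes() are made-up names, and the assumption that a non-zero callback return stops the walk is not confirmed by the matched lines.

#include <linux/dma-buf.h>

/* Hypothetical callback: add up the size of every exported buffer. */
static int sum_dmabuf_size(const struct dma_buf *dmabuf, void *private)
{
	size_t *total = private;

	*total += dmabuf->size;
	return 0;	/* assumption: a non-zero return stops the walk */
}

static size_t total_exported_bytes(void)
{
	size_t total = 0;

	get_each_dmabuf(sum_dmabuf_size, &total);
	return total;
}
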
69 struct dma_buf *dmabuf;
73 dmabuf = dentry->d_fsdata;
74 spin_lock(&dmabuf->name_lock);
75 if (dmabuf->name) {
76 ret = strlcpy(name, dmabuf->name, DMA_BUF_NAME_LEN);
78 spin_unlock(&dmabuf->name_lock);
85 struct dma_buf *dmabuf;
87 dmabuf = dentry->d_fsdata;
88 if (unlikely(!dmabuf)) {
92 BUG_ON(dmabuf->vmapping_counter);
102 BUG_ON(dmabuf->cb_shared.active || dmabuf->cb_excl.active);
104 dmabuf->ops->release(dmabuf);
106 if (dmabuf->resv == (struct dma_resv *)&dmabuf[1]) {
107 dma_resv_fini(dmabuf->resv);
110 WARN_ON(!list_empty(&dmabuf->attachments));
111 dma_buf_stats_teardown(dmabuf);
112 module_put(dmabuf->owner);
113 kfree(dmabuf->name);
114 kfree(dmabuf);
119 struct dma_buf *dmabuf;
125 dmabuf = file->private_data;
128 list_del(&dmabuf->list_node);
154 .name = "dmabuf",
162 struct dma_buf *dmabuf = vma->vm_file->private_data;
164 dmabuf->mmap_count++;
166 if (dmabuf->exp_vm_ops->open) {
167 dmabuf->exp_vm_ops->open(vma);
173 struct dma_buf *dmabuf = vma->vm_file->private_data;
175 if (dmabuf->mmap_count) {
176 dmabuf->mmap_count--;
179 if (dmabuf->exp_vm_ops->close) {
180 dmabuf->exp_vm_ops->close(vma);
184 static int dma_buf_do_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
187 int ret = dmabuf->ops->mmap(dmabuf, vma);
193 dmabuf->exp_vm_ops = vma->vm_ops;
194 dmabuf->vm_ops = *(dmabuf->exp_vm_ops);
196 dmabuf->vm_ops.open = dma_buf_vma_open;
197 dmabuf->vm_ops.close = dma_buf_vma_close;
198 vma->vm_ops = &dmabuf->vm_ops;
199 dmabuf->mmap_count++;
204 static int dma_buf_do_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
206 return dmabuf->ops->mmap(dmabuf, vma);
212 struct dma_buf *dmabuf;
218 dmabuf = file->private_data;
221 if (!dmabuf->ops->mmap) {
226 if ((vma->vm_pgoff + vma_pages(vma)) > (dmabuf->size >> PAGE_SHIFT)) {
230 return dma_buf_do_mmap(dmabuf, vma);
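
The fops->mmap path in lines 212-230 means userspace can map a dma-buf fd directly with mmap(). A hedged userspace sketch; dmabuf_fd and buf_size are assumed to come from the exporting driver and are not defined anywhere in this listing.

#include <stddef.h>
#include <stdio.h>
#include <sys/mman.h>

static void *map_dmabuf(int dmabuf_fd, size_t buf_size)
{
	void *vaddr = mmap(NULL, buf_size, PROT_READ | PROT_WRITE,
			   MAP_SHARED, dmabuf_fd, 0);

	if (vaddr == MAP_FAILED) {
		perror("mmap(dmabuf)");
		return NULL;
	}
	return vaddr;	/* release later with munmap(vaddr, buf_size) */
}
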
235 struct dma_buf *dmabuf;
242 dmabuf = file->private_data;
248 base = dmabuf->size;
297 struct dma_buf *dmabuf;
304 dmabuf = file->private_data;
305 if (!dmabuf || !dmabuf->resv) {
309 resv = dmabuf->resv;
311 poll_wait(file, &dmabuf->poll, poll);
336 struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_excl;
343 spin_lock_irq(&dmabuf->poll.lock);
350 spin_unlock_irq(&dmabuf->poll.lock);
372 struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_shared;
376 spin_lock_irq(&dmabuf->poll.lock);
382 spin_unlock_irq(&dmabuf->poll.lock);
428 * @dmabuf: [in] dmabuf buffer that will be renamed.
436 static long dma_buf_set_name(struct dma_buf *dmabuf, const char __user *buf)
445 dma_resv_lock(dmabuf->resv, NULL);
446 if (!list_empty(&dmabuf->attachments)) {
451 spin_lock(&dmabuf->name_lock);
452 kfree(dmabuf->name);
453 dmabuf->name = name;
454 spin_unlock(&dmabuf->name_lock);
457 dma_resv_unlock(dmabuf->resv);
463 struct dma_buf *dmabuf;
468 dmabuf = file->private_data;
495 ret = dma_buf_end_cpu_access(dmabuf, direction);
497 ret = dma_buf_begin_cpu_access(dmabuf, direction);
504 return dma_buf_set_name(dmabuf, (const char __user *)arg);
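
The ioctl handler in lines 463-504 gives userspace two things: the DMA_BUF_IOCTL_SYNC bracket that lands in dma_buf_begin/end_cpu_access() (lines 495-497), and DMA_BUF_SET_NAME (line 504). A hedged userspace sketch against the uapi <linux/dma-buf.h> definitions; cpu_write_bracketed() is a made-up name.

#include <sys/ioctl.h>
#include <linux/dma-buf.h>

static int cpu_write_bracketed(int dmabuf_fd, const char *name)
{
	struct dma_buf_sync sync = { 0 };

	/* Optional: label the buffer so it shows up in fdinfo (line 521). */
	if (ioctl(dmabuf_fd, DMA_BUF_SET_NAME, name))
		return -1;

	sync.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_WRITE;
	if (ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync))
		return -1;

	/* ... CPU writes into the mmap()ed buffer go here ... */

	sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_WRITE;
	return ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);
}
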
513 struct dma_buf *dmabuf = file->private_data;
515 seq_printf(m, "size:\t%zu\n", dmabuf->size);
517 seq_printf(m, "count:\t%ld\n", file_count(dmabuf->file) - 1);
518 seq_printf(m, "exp_name:\t%s\n", dmabuf->exp_name);
519 spin_lock(&dmabuf->name_lock);
520 if (dmabuf->name) {
521 seq_printf(m, "name:\t%s\n", dmabuf->name);
523 spin_unlock(&dmabuf->name_lock);
545 static struct file *dma_buf_getfile(struct dma_buf *dmabuf, int flags)
554 inode->i_size = dmabuf->size;
555 inode_set_bytes(inode, dmabuf->size);
557 file = alloc_file_pseudo(inode, dma_buf_mnt, "dmabuf", flags, &dma_buf_fops);
562 file->private_data = dmabuf;
563 file->f_path.dentry->d_fsdata = dmabuf;
622 struct dma_buf *dmabuf;
652 dmabuf = kzalloc(alloc_size, GFP_KERNEL);
653 if (!dmabuf) {
658 dmabuf->priv = exp_info->priv;
659 dmabuf->ops = exp_info->ops;
660 dmabuf->size = exp_info->size;
661 dmabuf->exp_name = exp_info->exp_name;
662 dmabuf->owner = exp_info->owner;
663 spin_lock_init(&dmabuf->name_lock);
664 init_waitqueue_head(&dmabuf->poll);
665 dmabuf->cb_excl.poll = dmabuf->cb_shared.poll = &dmabuf->poll;
666 dmabuf->cb_excl.active = dmabuf->cb_shared.active = 0;
669 resv = (struct dma_resv *)&dmabuf[1];
672 dmabuf->resv = resv;
674 file = dma_buf_getfile(dmabuf, exp_info->flags);
681 dmabuf->file = file;
683 ret = dma_buf_stats_setup(dmabuf);
688 mutex_init(&dmabuf->lock);
689 INIT_LIST_HEAD(&dmabuf->attachments);
692 list_add(&dmabuf->list_node, &db_list.head);
695 init_dma_buf_task_info(dmabuf);
696 return dmabuf;
707 kfree(dmabuf);
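
Lines 622-707 are the body of dma_buf_export(). A hedged exporter sketch; the my_* callbacks are placeholders rather than code from this file, and only dma_buf_export(), DEFINE_DMA_BUF_EXPORT_INFO() and the exp_info fields copied on lines 658-662 are taken from the listing.

#include <linux/dma-buf.h>
#include <linux/err.h>
#include <linux/fcntl.h>

/* Placeholder: a real exporter would build and DMA-map an sg_table here. */
static struct sg_table *my_map_dma_buf(struct dma_buf_attachment *attach,
				       enum dma_data_direction dir)
{
	return ERR_PTR(-ENOMEM);
}

static void my_unmap_dma_buf(struct dma_buf_attachment *attach,
			     struct sg_table *sgt,
			     enum dma_data_direction dir)
{
}

static void my_release(struct dma_buf *dmabuf)
{
	/* Runs from dmabuf->ops->release() on line 104 above. */
}

static const struct dma_buf_ops my_dmabuf_ops = {
	.map_dma_buf	= my_map_dma_buf,
	.unmap_dma_buf	= my_unmap_dma_buf,
	.release	= my_release,
};

static struct dma_buf *my_export(void *priv, size_t size)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops	= &my_dmabuf_ops;
	exp_info.size	= size;			/* dmabuf->size, line 660 */
	exp_info.flags	= O_RDWR | O_CLOEXEC;
	exp_info.priv	= priv;			/* dmabuf->priv, line 658 */

	return dma_buf_export(&exp_info);	/* ERR_PTR() on failure */
}
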
716 * @dmabuf: [in] pointer to dma_buf for which fd is required.
721 int dma_buf_fd(struct dma_buf *dmabuf, int flags)
725 if (!dmabuf || !dmabuf->file) {
734 fd_install(fd, dmabuf->file);
768 * @dmabuf: [in] buffer to reduce refcount of
774 * in turn, and frees the memory allocated for dmabuf when exported.
776 void dma_buf_put(struct dma_buf *dmabuf)
778 if (WARN_ON(!dmabuf || !dmabuf->file)) {
782 fput(dmabuf->file);
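
dma_buf_fd() (line 721) turns an exported buffer into a file descriptor and dma_buf_put() (line 776) drops a reference on the importing side. A hedged sketch of both ends; dma_buf_get() belongs to the same API but is not among the matched lines, and my_share_buffer()/my_use_fd() are made-up names.

#include <linux/dma-buf.h>
#include <linux/err.h>
#include <linux/fcntl.h>

static int my_share_buffer(struct dma_buf *dmabuf)
{
	/* Returns a new fd, or a negative errno (see the checks on line 725). */
	return dma_buf_fd(dmabuf, O_CLOEXEC);
}

static int my_use_fd(int fd)
{
	struct dma_buf *dmabuf = dma_buf_get(fd);

	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	/* ... attach and map, as in the later sketches ... */

	dma_buf_put(dmabuf);	/* drops the reference dma_buf_get() took */
	return 0;
}
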
796 struct dma_buf *dmabuf = attach->dmabuf;
799 dma_resv_assert_held(dmabuf->resv);
801 if (dmabuf->ops->pin) {
802 ret = dmabuf->ops->pin(attach);
812 * @dmabuf: [in] buffer to attach device to.
825 * Note that this can fail if the backing storage of @dmabuf is in a place not
829 struct dma_buf_attachment *dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
835 if (WARN_ON(!dmabuf || !dev)) {
849 attach->dmabuf = dmabuf;
856 if (dmabuf->ops->attach) {
857 ret = dmabuf->ops->attach(dmabuf, attach);
862 dma_resv_lock(dmabuf->resv, NULL);
863 list_add(&attach->node, &dmabuf->attachments);
864 dma_resv_unlock(dmabuf->resv);
870 if (dma_buf_attachment_is_dynamic(attach) != dma_buf_is_dynamic(dmabuf)) {
873 if (dma_buf_is_dynamic(attach->dmabuf)) {
874 dma_resv_lock(attach->dmabuf->resv, NULL);
881 sgt = dmabuf->ops->map_dma_buf(attach, DMA_BIDIRECTIONAL);
889 if (dma_buf_is_dynamic(attach->dmabuf)) {
890 dma_resv_unlock(attach->dmabuf->resv);
903 if (dma_buf_is_dynamic(attach->dmabuf)) {
908 if (dma_buf_is_dynamic(attach->dmabuf)) {
909 dma_resv_unlock(attach->dmabuf->resv);
912 dma_buf_detach(dmabuf, attach);
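
dma_buf_dynamic_attach() (line 829) accepts importer callbacks so the exporter can move the backing storage later; the move_notify callback runs with the reservation lock held, matching the assert on line 1107. A hedged importer sketch; my_move_notify() and my_dynamic_attach() are placeholder names.

#include <linux/dma-buf.h>

static void my_move_notify(struct dma_buf_attachment *attach)
{
	/* Invalidate any cached mappings; remap on next use. */
}

static const struct dma_buf_attach_ops my_importer_ops = {
	.allow_peer2peer = true,
	.move_notify	 = my_move_notify,
};

static struct dma_buf_attachment *my_dynamic_attach(struct dma_buf *dmabuf,
						    struct device *dev,
						    void *importer_priv)
{
	return dma_buf_dynamic_attach(dmabuf, dev, &my_importer_ops,
				      importer_priv);
}
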
919 * @dmabuf: [in] buffer to attach device to.
925 struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf, struct device *dev)
927 return dma_buf_dynamic_attach(dmabuf, dev, NULL, NULL);
932 * dma_buf_detach - Remove the given attachment from dmabuf's attachments list;
934 * @dmabuf: [in] buffer to detach from.
939 void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
941 if (WARN_ON(!dmabuf || !attach)) {
946 if (dma_buf_is_dynamic(attach->dmabuf)) {
947 dma_resv_lock(attach->dmabuf->resv, NULL);
950 dmabuf->ops->unmap_dma_buf(attach, attach->sgt, attach->dir);
952 if (dma_buf_is_dynamic(attach->dmabuf)) {
954 dma_resv_unlock(attach->dmabuf->resv);
958 dma_resv_lock(dmabuf->resv, NULL);
960 dma_resv_unlock(dmabuf->resv);
961 if (dmabuf->ops->detach) {
962 dmabuf->ops->detach(dmabuf, attach);
976 struct dma_buf *dmabuf = attach->dmabuf;
978 dma_resv_assert_held(dmabuf->resv);
980 if (dmabuf->ops->unpin) {
981 dmabuf->ops->unpin(attach);
1008 if (WARN_ON(!attach || !attach->dmabuf)) {
1013 dma_resv_assert_held(attach->dmabuf->resv);
1028 if (dma_buf_is_dynamic(attach->dmabuf)) {
1029 dma_resv_assert_held(attach->dmabuf->resv);
1038 sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
1043 if (IS_ERR(sg_table) && dma_buf_is_dynamic(attach->dmabuf) && !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) {
1047 if (!IS_ERR(sg_table) && attach->dmabuf->ops->cache_sgt_mapping) {
1071 if (WARN_ON(!attach || !attach->dmabuf || !sg_table)) {
1076 dma_resv_assert_held(attach->dmabuf->resv);
1083 if (dma_buf_is_dynamic(attach->dmabuf)) {
1084 dma_resv_assert_held(attach->dmabuf->resv);
1087 attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
1089 if (dma_buf_is_dynamic(attach->dmabuf) && !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) {
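
Lines 1008-1089 cover dma_buf_map_attachment() and dma_buf_unmap_attachment(). A hedged importer sketch for the simple case of a non-dynamic exporter, where no reservation locking is needed around the map (see the conditional assert on lines 1028-1029); my_import() is a placeholder and the error handling is deliberately minimal.

#include <linux/dma-buf.h>
#include <linux/dma-direction.h>
#include <linux/err.h>

static int my_import(struct dma_buf *dmabuf, struct device *dev)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;

	attach = dma_buf_attach(dmabuf, dev);	/* line 925 */
	if (IS_ERR(attach))
		return PTR_ERR(attach);

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		dma_buf_detach(dmabuf, attach);
		return PTR_ERR(sgt);
	}

	/* ... program the device with the sg_table entries ... */

	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
	dma_buf_detach(dmabuf, attach);
	return 0;
}
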
1098 * @dmabuf: [in] buffer which is moving
1103 void dma_buf_move_notify(struct dma_buf *dmabuf)
1107 dma_resv_assert_held(dmabuf->resv);
1109 list_for_each_entry(attach, &dmabuf->attachments, node) if (attach->importer_ops)
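
dma_buf_move_notify() (line 1103) must be called with the reservation lock held, per the assert on line 1107. A hedged exporter-side sketch; my_evict_buffer() is a placeholder.

#include <linux/dma-buf.h>
#include <linux/dma-resv.h>

static void my_evict_buffer(struct dma_buf *dmabuf)
{
	dma_resv_lock(dmabuf->resv, NULL);
	dma_buf_move_notify(dmabuf);	/* tell dynamic importers first */
	/* ... then migrate the pages backing the buffer ... */
	dma_resv_unlock(dmabuf->resv);
}
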
1130 * void \*dma_buf_vmap(struct dma_buf \*dmabuf)
1131 * void dma_buf_vunmap(struct dma_buf \*dmabuf, void \*vaddr)
1198 static int _dma_buf_begin_cpu_access(struct dma_buf *dmabuf, enum dma_data_direction direction)
1201 struct dma_resv *resv = dmabuf->resv;
1218 * @dmabuf: [in] buffer to prepare cpu access for.
1227 int dma_buf_begin_cpu_access(struct dma_buf *dmabuf, enum dma_data_direction direction)
1231 if (WARN_ON(!dmabuf)) {
1235 if (dmabuf->ops->begin_cpu_access) {
1236 ret = dmabuf->ops->begin_cpu_access(dmabuf, direction);
1244 ret = _dma_buf_begin_cpu_access(dmabuf, direction);
1251 int dma_buf_begin_cpu_access_partial(struct dma_buf *dmabuf, enum dma_data_direction direction, unsigned int offset,
1256 if (WARN_ON(!dmabuf)) {
1260 if (dmabuf->ops->begin_cpu_access_partial) {
1261 ret = dmabuf->ops->begin_cpu_access_partial(dmabuf, direction, offset, len);
1269 ret = _dma_buf_begin_cpu_access(dmabuf, direction);
1281 * @dmabuf: [in] buffer to complete cpu access for.
1288 int dma_buf_end_cpu_access(struct dma_buf *dmabuf, enum dma_data_direction direction)
1292 WARN_ON(!dmabuf);
1294 if (dmabuf->ops->end_cpu_access) {
1295 ret = dmabuf->ops->end_cpu_access(dmabuf, direction);
1302 int dma_buf_end_cpu_access_partial(struct dma_buf *dmabuf, enum dma_data_direction direction, unsigned int offset,
1307 WARN_ON(!dmabuf);
1309 if (dmabuf->ops->end_cpu_access_partial) {
1310 ret = dmabuf->ops->end_cpu_access_partial(dmabuf, direction, offset, len);
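
dma_buf_begin_cpu_access() (line 1227) and dma_buf_end_cpu_access() (line 1288) bracket any CPU touches of the buffer from kernel code. A hedged sketch; my_cpu_access() is a placeholder.

#include <linux/dma-buf.h>
#include <linux/dma-direction.h>

static int my_cpu_access(struct dma_buf *dmabuf)
{
	int ret = dma_buf_begin_cpu_access(dmabuf, DMA_BIDIRECTIONAL);

	if (ret)
		return ret;

	/* ... touch the buffer through a vmap()/mmap() mapping here ... */

	return dma_buf_end_cpu_access(dmabuf, DMA_BIDIRECTIONAL);
}
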
1319 * @dmabuf: [in] buffer that should back the vma
1331 int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma, unsigned long pgoff)
1336 if (WARN_ON(!dmabuf || !vma)) {
1341 if (!dmabuf->ops->mmap) {
1351 if ((pgoff + vma_pages(vma)) > (dmabuf->size >> PAGE_SHIFT)) {
1356 get_file(dmabuf->file);
1358 vma->vm_file = dmabuf->file;
1361 ret = dmabuf->ops->mmap(dmabuf, vma);
1365 fput(dmabuf->file);
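
dma_buf_mmap() (line 1331) lets a driver forward its own mmap handler to the exporter's, reusing the ->mmap check on line 1341 and the size check on line 1351. A hedged sketch; my_fop_mmap() is a placeholder and stashing the dma_buf in file->private_data is an assumption about the surrounding driver.

#include <linux/dma-buf.h>
#include <linux/fs.h>
#include <linux/mm.h>

static int my_fop_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* Assumption: this driver stored its dma_buf in private_data. */
	struct dma_buf *dmabuf = file->private_data;

	return dma_buf_mmap(dmabuf, vma, 0);
}
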
1378 * @dmabuf: [in] buffer to vmap
1387 void *dma_buf_vmap(struct dma_buf *dmabuf)
1391 if (WARN_ON(!dmabuf)) {
1395 if (!dmabuf->ops->vmap) {
1399 mutex_lock(&dmabuf->lock);
1400 if (dmabuf->vmapping_counter) {
1401 dmabuf->vmapping_counter++;
1402 BUG_ON(!dmabuf->vmap_ptr);
1403 ptr = dmabuf->vmap_ptr;
1407 BUG_ON(dmabuf->vmap_ptr);
1409 ptr = dmabuf->ops->vmap(dmabuf);
1417 dmabuf->vmap_ptr = ptr;
1418 dmabuf->vmapping_counter = 1;
1421 mutex_unlock(&dmabuf->lock);
1428 * @dmabuf: [in] buffer to vunmap
1431 void dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
1433 if (WARN_ON(!dmabuf)) {
1437 BUG_ON(!dmabuf->vmap_ptr);
1438 BUG_ON(dmabuf->vmapping_counter == 0);
1439 BUG_ON(dmabuf->vmap_ptr != vaddr);
1441 mutex_lock(&dmabuf->lock);
1442 if (--dmabuf->vmapping_counter == 0) {
1443 if (dmabuf->ops->vunmap) {
1444 dmabuf->ops->vunmap(dmabuf, vaddr);
1446 dmabuf->vmap_ptr = NULL;
1448 mutex_unlock(&dmabuf->lock);
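
dma_buf_vmap() (line 1387) and dma_buf_vunmap() (line 1431) must be paired with the same address, as the BUG_ON on line 1439 enforces, and vmap can return NULL when the exporter has no ->vmap (line 1395). A hedged sketch; my_clear_buffer() is a placeholder.

#include <linux/dma-buf.h>
#include <linux/errno.h>
#include <linux/string.h>

static int my_clear_buffer(struct dma_buf *dmabuf)
{
	void *vaddr = dma_buf_vmap(dmabuf);

	if (!vaddr)		/* no ->vmap, or the mapping failed */
		return -ENOMEM;

	memset(vaddr, 0, dmabuf->size);
	dma_buf_vunmap(dmabuf, vaddr);	/* pass the same address back */
	return 0;
}
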
1452 int dma_buf_get_flags(struct dma_buf *dmabuf, unsigned long *flags)
1456 if (WARN_ON(!dmabuf) || !flags) {
1460 if (dmabuf->ops->get_flags) {
1461 ret = dmabuf->ops->get_flags(dmabuf, flags);
1468 int dma_buf_get_uuid(struct dma_buf *dmabuf, uuid_t *uuid)
1470 if (WARN_ON(!dmabuf) || !uuid) {
1474 if (!dmabuf->ops->get_uuid) {
1478 return dmabuf->ops->get_uuid(dmabuf, uuid);
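
The last two helpers are optional queries that simply forward to exporter ops: dma_buf_get_flags() (line 1452) and dma_buf_get_uuid() (line 1468). A hedged sketch assuming the usual 0-on-success convention; my_query_buffer() is a placeholder.

#include <linux/dma-buf.h>
#include <linux/printk.h>
#include <linux/uuid.h>

static void my_query_buffer(struct dma_buf *dmabuf)
{
	unsigned long flags;
	uuid_t uuid;

	if (!dma_buf_get_flags(dmabuf, &flags))
		pr_info("dmabuf flags: %#lx\n", flags);

	if (!dma_buf_get_uuid(dmabuf, &uuid))
		pr_info("dmabuf uuid: %pUb\n", &uuid);
}
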