Lines in drivers/dma-buf/dma-buf.c that reference the identifier dmabuf, each prefixed with its source line number:
46 struct dma_buf *dmabuf;
50 dmabuf = dentry->d_fsdata;
51 spin_lock(&dmabuf->name_lock);
52 if (dmabuf->name)
53 ret = strlcpy(name, dmabuf->name, DMA_BUF_NAME_LEN);
54 spin_unlock(&dmabuf->name_lock);
62 struct dma_buf *dmabuf;
64 dmabuf = dentry->d_fsdata;
65 if (unlikely(!dmabuf))
68 BUG_ON(dmabuf->vmapping_counter);
78 BUG_ON(dmabuf->cb_shared.active || dmabuf->cb_excl.active);
80 dmabuf->ops->release(dmabuf);
82 if (dmabuf->resv == (struct dma_resv *)&dmabuf[1])
83 dma_resv_fini(dmabuf->resv);
85 WARN_ON(!list_empty(&dmabuf->attachments));
86 dma_buf_stats_teardown(dmabuf);
87 module_put(dmabuf->owner);
88 kfree(dmabuf->name);
89 kfree(dmabuf);
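
The release path above runs when the last file reference is dropped: the exporter's release op is called, the reservation object embedded behind the dma_buf (the &dmabuf[1] cast) is torn down, and the dma_buf itself is freed. A hypothetical exporter-side counterpart, where my_buffer and my_free_pages are stand-ins rather than kernel API:

        /* Hypothetical exporter release callback: invoked once the last
         * reference to the dma_buf is gone; free the backing storage that
         * was allocated at export time.
         */
        static void my_release(struct dma_buf *dmabuf)
        {
                struct my_buffer *buf = dmabuf->priv;   /* set at export */

                my_free_pages(buf);                     /* hypothetical helper */
        }
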
94 struct dma_buf *dmabuf;
99 dmabuf = file->private_data;
102 list_del(&dmabuf->list_node);
127 .name = "dmabuf",
134 struct dma_buf *dmabuf;
139 dmabuf = file->private_data;
142 if (!dmabuf->ops->mmap)
147 dmabuf->size >> PAGE_SHIFT)
150 return dmabuf->ops->mmap(dmabuf, vma);
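
The mmap file operation above checks the requested range against the buffer size before delegating to the exporter, so userspace can map a dma-buf fd like any other file. A minimal userspace sketch, assuming fd and size come from the exporting driver:

        #include <sys/mman.h>

        /* Map the whole dma-buf read/write; the kernel rejects ranges
         * that extend past the buffer.
         */
        void *map_dmabuf(int fd, size_t size)
        {
                void *map = mmap(NULL, size, PROT_READ | PROT_WRITE,
                                 MAP_SHARED, fd, 0);

                return map == MAP_FAILED ? NULL : map;
        }
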
155 struct dma_buf *dmabuf;
161 dmabuf = file->private_data;
167 base = dmabuf->size;
214 struct dma_buf *dmabuf;
221 dmabuf = file->private_data;
222 if (!dmabuf || !dmabuf->resv)
225 resv = dmabuf->resv;
227 poll_wait(file, &dmabuf->poll, poll);
249 struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_excl;
255 spin_lock_irq(&dmabuf->poll.lock);
261 spin_unlock_irq(&dmabuf->poll.lock);
284 struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_shared;
288 spin_lock_irq(&dmabuf->poll.lock);
293 spin_unlock_irq(&dmabuf->poll.lock);
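
dma_buf_poll() above arms fence callbacks (cb_excl for the exclusive fence, cb_shared for the shared fences) that wake the file's wait queue when outstanding device access finishes, so plain poll(2) works on a dma-buf fd. A userspace sketch:

        #include <poll.h>

        /* Block until pending device access on the dma-buf completes.
         * Roughly: POLLIN waits on the exclusive (write) fence, POLLOUT
         * additionally on the shared (read) fences.
         */
        int wait_dmabuf(int fd, short events, int timeout_ms)
        {
                struct pollfd pfd = { .fd = fd, .events = events };

                return poll(&pfd, 1, timeout_ms);
        }
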
338 * @dmabuf: [in] dmabuf buffer that will be renamed.
346 static long dma_buf_set_name(struct dma_buf *dmabuf, const char __user *buf)
354 dma_resv_lock(dmabuf->resv, NULL);
355 if (!list_empty(&dmabuf->attachments)) {
360 spin_lock(&dmabuf->name_lock);
361 kfree(dmabuf->name);
362 dmabuf->name = name;
363 spin_unlock(&dmabuf->name_lock);
366 dma_resv_unlock(dmabuf->resv);
373 struct dma_buf *dmabuf;
378 dmabuf = file->private_data;
403 ret = dma_buf_end_cpu_access(dmabuf, direction);
405 ret = dma_buf_begin_cpu_access(dmabuf, direction);
411 return dma_buf_set_name(dmabuf, (const char __user *)arg);
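
The ioctl handler above dispatches DMA_BUF_IOCTL_SYNC to the begin/end-cpu-access pair and DMA_BUF_SET_NAME to dma_buf_set_name(). Userspace is expected to bracket CPU access to a mapped buffer with the SYNC ioctl, roughly like this:

        #include <linux/dma-buf.h>
        #include <string.h>
        #include <sys/ioctl.h>

        /* Read from an mmap()ed dma-buf with proper cache maintenance. */
        int read_dmabuf(int fd, const void *map, void *dst, size_t len)
        {
                struct dma_buf_sync sync = {
                        .flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_READ,
                };

                if (ioctl(fd, DMA_BUF_IOCTL_SYNC, &sync) < 0)
                        return -1;

                memcpy(dst, map, len);

                sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_READ;
                return ioctl(fd, DMA_BUF_IOCTL_SYNC, &sync);
        }
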
420 struct dma_buf *dmabuf = file->private_data;
422 seq_printf(m, "size:\t%zu\n", dmabuf->size);
424 seq_printf(m, "count:\t%ld\n", file_count(dmabuf->file) - 1);
425 seq_printf(m, "exp_name:\t%s\n", dmabuf->exp_name);
426 spin_lock(&dmabuf->name_lock);
427 if (dmabuf->name)
428 seq_printf(m, "name:\t%s\n", dmabuf->name);
429 spin_unlock(&dmabuf->name_lock);
450 static struct file *dma_buf_getfile(struct dma_buf *dmabuf, int flags)
458 inode->i_size = dmabuf->size;
459 inode_set_bytes(inode, dmabuf->size);
461 file = alloc_file_pseudo(inode, dma_buf_mnt, "dmabuf",
466 file->private_data = dmabuf;
467 file->f_path.dentry->d_fsdata = dmabuf;
526 struct dma_buf *dmabuf;
556 dmabuf = kzalloc(alloc_size, GFP_KERNEL);
557 if (!dmabuf) {
562 dmabuf->priv = exp_info->priv;
563 dmabuf->ops = exp_info->ops;
564 dmabuf->size = exp_info->size;
565 dmabuf->exp_name = exp_info->exp_name;
566 dmabuf->owner = exp_info->owner;
567 spin_lock_init(&dmabuf->name_lock);
568 init_waitqueue_head(&dmabuf->poll);
569 dmabuf->cb_excl.poll = dmabuf->cb_shared.poll = &dmabuf->poll;
570 dmabuf->cb_excl.active = dmabuf->cb_shared.active = 0;
573 resv = (struct dma_resv *)&dmabuf[1];
576 dmabuf->resv = resv;
578 file = dma_buf_getfile(dmabuf, exp_info->flags);
585 dmabuf->file = file;
587 ret = dma_buf_stats_setup(dmabuf);
591 mutex_init(&dmabuf->lock);
592 INIT_LIST_HEAD(&dmabuf->attachments);
595 list_add(&dmabuf->list_node, &db_list.head);
598 init_dma_buf_task_info(dmabuf);
599 return dmabuf;
610 kfree(dmabuf);
619 * @dmabuf: [in] pointer to dma_buf for which fd is required.
624 int dma_buf_fd(struct dma_buf *dmabuf, int flags)
628 if (!dmabuf || !dmabuf->file)
635 fd_install(fd, dmabuf->file);
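
dma_buf_export() above allocates the dma_buf (optionally with the reservation object embedded right behind it, hence the &dmabuf[1] cast at line 573), wires up the backing file, and adds the buffer to the global debug list; dma_buf_fd() then installs that file in the caller's fd table. An exporter typically combines the two, as in this sketch (my_buffer and my_dmabuf_ops are hypothetical):

        static int my_export_to_fd(struct my_buffer *buf)
        {
                DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
                struct dma_buf *dmabuf;
                int fd;

                exp_info.ops = &my_dmabuf_ops;  /* map/unmap/release/... ops */
                exp_info.size = buf->size;
                exp_info.flags = O_RDWR | O_CLOEXEC;
                exp_info.priv = buf;

                dmabuf = dma_buf_export(&exp_info);
                if (IS_ERR(dmabuf))
                        return PTR_ERR(dmabuf);

                fd = dma_buf_fd(dmabuf, O_CLOEXEC);
                if (fd < 0)
                        dma_buf_put(dmabuf);    /* drop the export ref on failure */

                return fd;
        }
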
669 * @dmabuf: [in] buffer to reduce refcount of
675 * in turn, and frees the memory allocated for dmabuf when exported.
677 void dma_buf_put(struct dma_buf *dmabuf)
679 if (WARN_ON(!dmabuf || !dmabuf->file))
682 fput(dmabuf->file);
689 * @dmabuf: [in] buffer to attach device to.
702 * Note that this can fail if the backing storage of @dmabuf is in a place not
707 dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
714 if (WARN_ON(!dmabuf || !dev))
725 attach->dmabuf = dmabuf;
731 if (dmabuf->ops->attach) {
732 ret = dmabuf->ops->attach(dmabuf, attach);
736 dma_resv_lock(dmabuf->resv, NULL);
737 list_add(&attach->node, &dmabuf->attachments);
738 dma_resv_unlock(dmabuf->resv);
745 dma_buf_is_dynamic(dmabuf)) {
748 if (dma_buf_is_dynamic(attach->dmabuf)) {
749 dma_resv_lock(attach->dmabuf->resv, NULL);
755 sgt = dmabuf->ops->map_dma_buf(attach, DMA_BIDIRECTIONAL);
762 if (dma_buf_is_dynamic(attach->dmabuf))
763 dma_resv_unlock(attach->dmabuf->resv);
775 if (dma_buf_is_dynamic(attach->dmabuf))
779 if (dma_buf_is_dynamic(attach->dmabuf))
780 dma_resv_unlock(attach->dmabuf->resv);
782 dma_buf_detach(dmabuf, attach);
789 * @dmabuf: [in] buffer to attach device to.
795 struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
798 return dma_buf_dynamic_attach(dmabuf, dev, NULL, NULL);
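
On the importer side, the device is first attached so the exporter can pick suitable backing storage; as line 798 shows, dma_buf_attach() is just dynamic attach with no importer ops. A sketch, assuming the fd was passed in from userspace:

        static struct dma_buf_attachment *my_import(struct device *dev, int fd)
        {
                struct dma_buf *dmabuf = dma_buf_get(fd);   /* takes a file ref */
                struct dma_buf_attachment *attach;

                if (IS_ERR(dmabuf))
                        return ERR_CAST(dmabuf);

                attach = dma_buf_attach(dmabuf, dev);
                if (IS_ERR(attach))
                        dma_buf_put(dmabuf);    /* attach failed, drop our ref */

                return attach;
        }
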
803 * dma_buf_detach - Remove the given attachment from dmabuf's attachments list;
805 * @dmabuf: [in] buffer to detach from.
810 void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
812 if (WARN_ON(!dmabuf || !attach))
816 if (dma_buf_is_dynamic(attach->dmabuf))
817 dma_resv_lock(attach->dmabuf->resv, NULL);
819 dmabuf->ops->unmap_dma_buf(attach, attach->sgt, attach->dir);
821 if (dma_buf_is_dynamic(attach->dmabuf)) {
823 dma_resv_unlock(attach->dmabuf->resv);
827 dma_resv_lock(dmabuf->resv, NULL);
829 dma_resv_unlock(dmabuf->resv);
830 if (dmabuf->ops->detach)
831 dmabuf->ops->detach(dmabuf, attach);
847 struct dma_buf *dmabuf = attach->dmabuf;
850 dma_resv_assert_held(dmabuf->resv);
852 if (dmabuf->ops->pin)
853 ret = dmabuf->ops->pin(attach);
866 struct dma_buf *dmabuf = attach->dmabuf;
868 dma_resv_assert_held(dmabuf->resv);
870 if (dmabuf->ops->unpin)
871 dmabuf->ops->unpin(attach);
898 if (WARN_ON(!attach || !attach->dmabuf))
902 dma_resv_assert_held(attach->dmabuf->resv);
916 if (dma_buf_is_dynamic(attach->dmabuf)) {
917 dma_resv_assert_held(attach->dmabuf->resv);
925 sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
929 if (IS_ERR(sg_table) && dma_buf_is_dynamic(attach->dmabuf) &&
933 if (!IS_ERR(sg_table) && attach->dmabuf->ops->cache_sgt_mapping) {
958 if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
962 dma_resv_assert_held(attach->dmabuf->resv);
967 if (dma_buf_is_dynamic(attach->dmabuf))
968 dma_resv_assert_held(attach->dmabuf->resv);
970 attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
972 if (dma_buf_is_dynamic(attach->dmabuf) &&
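
dma_buf_map_attachment() hands the importer an sg_table describing the backing storage (cached on the attachment when the exporter sets cache_sgt_mapping); dma_buf_unmap_attachment() returns it. Typical importer usage:

        static int my_dma_to_device(struct dma_buf_attachment *attach)
        {
                struct sg_table *sgt;

                sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
                if (IS_ERR(sgt))
                        return PTR_ERR(sgt);

                /* ... program sgt->sgl / sgt->nents into the DMA engine ... */

                dma_buf_unmap_attachment(attach, sgt, DMA_TO_DEVICE);
                return 0;
        }
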
981 * @dmabuf: [in] buffer which is moving
986 void dma_buf_move_notify(struct dma_buf *dmabuf)
990 dma_resv_assert_held(dmabuf->resv);
992 list_for_each_entry(attach, &dmabuf->attachments, node)
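
dma_buf_move_notify() walks the attachment list under the reservation lock and calls back into each dynamic importer. An importer opts in by passing attach ops to dma_buf_dynamic_attach(); a sketch, where my_evict_mappings is a hypothetical helper:

        /* Called with dmabuf->resv held, so no new mapping can race with
         * the teardown; cached device mappings are rebuilt on the next map.
         */
        static void my_move_notify(struct dma_buf_attachment *attach)
        {
                my_evict_mappings(attach->importer_priv);   /* hypothetical */
        }

        static const struct dma_buf_attach_ops my_importer_ops = {
                .move_notify = my_move_notify,
        };
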
1014 * void \*dma_buf_vmap(struct dma_buf \*dmabuf)
1015 * void dma_buf_vunmap(struct dma_buf \*dmabuf, void \*vaddr)
1082 static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
1087 struct dma_resv *resv = dmabuf->resv;
1104 * @dmabuf: [in] buffer to prepare cpu access for.
1113 int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
1118 if (WARN_ON(!dmabuf))
1121 if (dmabuf->ops->begin_cpu_access)
1122 ret = dmabuf->ops->begin_cpu_access(dmabuf, direction);
1129 ret = __dma_buf_begin_cpu_access(dmabuf, direction);
1140 * @dmabuf: [in] buffer to complete cpu access for.
1147 int dma_buf_end_cpu_access(struct dma_buf *dmabuf,
1152 WARN_ON(!dmabuf);
1154 if (dmabuf->ops->end_cpu_access)
1155 ret = dmabuf->ops->end_cpu_access(dmabuf, direction);
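
Kernel code follows the same bracketing discipline as the userspace SYNC ioctl: begin, touch the memory, end. A sketch that writes through a mapping obtained earlier:

        static int my_cpu_write(struct dma_buf *dmabuf, void *vaddr,
                                const void *src, size_t len)
        {
                int ret;

                ret = dma_buf_begin_cpu_access(dmabuf, DMA_BIDIRECTIONAL);
                if (ret)
                        return ret;

                memcpy(vaddr, src, len);    /* vaddr from a prior dma_buf_vmap() */

                return dma_buf_end_cpu_access(dmabuf, DMA_BIDIRECTIONAL);
        }
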
1164 * @dmabuf: [in] buffer that should back the vma
1176 int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
1182 if (WARN_ON(!dmabuf || !vma))
1186 if (!dmabuf->ops->mmap)
1195 dmabuf->size >> PAGE_SHIFT)
1199 get_file(dmabuf->file);
1201 vma->vm_file = dmabuf->file;
1204 ret = dmabuf->ops->mmap(dmabuf, vma);
1208 fput(dmabuf->file);
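
dma_buf_mmap() lets a driver redirect an mmap of its own file onto the buffer, swapping vma->vm_file for the dma-buf's file so the reference counting stays correct. A hypothetical driver file operation:

        /* Forward userspace mmap of our char device to the underlying
         * dma-buf, starting at page offset 0.
         */
        static int my_chardev_mmap(struct file *file, struct vm_area_struct *vma)
        {
                struct my_buffer *buf = file->private_data;   /* hypothetical */

                return dma_buf_mmap(buf->dmabuf, vma, 0);
        }
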
1221 * @dmabuf: [in] buffer to vmap
1230 void *dma_buf_vmap(struct dma_buf *dmabuf)
1234 if (WARN_ON(!dmabuf))
1237 if (!dmabuf->ops->vmap)
1240 mutex_lock(&dmabuf->lock);
1241 if (dmabuf->vmapping_counter) {
1242 dmabuf->vmapping_counter++;
1243 BUG_ON(!dmabuf->vmap_ptr);
1244 ptr = dmabuf->vmap_ptr;
1248 BUG_ON(dmabuf->vmap_ptr);
1250 ptr = dmabuf->ops->vmap(dmabuf);
1256 dmabuf->vmap_ptr = ptr;
1257 dmabuf->vmapping_counter = 1;
1260 mutex_unlock(&dmabuf->lock);
1267 * @dmabuf: [in] buffer to vunmap
1270 void dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
1272 if (WARN_ON(!dmabuf))
1275 BUG_ON(!dmabuf->vmap_ptr);
1276 BUG_ON(dmabuf->vmapping_counter == 0);
1277 BUG_ON(dmabuf->vmap_ptr != vaddr);
1279 mutex_lock(&dmabuf->lock);
1280 if (--dmabuf->vmapping_counter == 0) {
1281 if (dmabuf->ops->vunmap)
1282 dmabuf->ops->vunmap(dmabuf, vaddr);
1283 dmabuf->vmap_ptr = NULL;
1285 mutex_unlock(&dmabuf->lock);
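
The vmap pair above reference-counts the kernel mapping under dmabuf->lock, so only the first dma_buf_vmap() and the last dma_buf_vunmap() reach the exporter. Typical usage:

        static void my_dump_header(struct dma_buf *dmabuf)
        {
                void *vaddr = dma_buf_vmap(dmabuf);

                if (!vaddr)
                        return;     /* exporter lacks vmap or mapping failed */

                print_hex_dump(KERN_DEBUG, "dmabuf: ", DUMP_PREFIX_OFFSET, 16, 1,
                               vaddr, min_t(size_t, dmabuf->size, 64), false);

                dma_buf_vunmap(dmabuf, vaddr);
        }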