// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <rdma/uverbs_ioctl.h>

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"

/*
 * rxe_mmap_release - kref release callback for a struct rxe_mmap_info.
 * Unlinks the entry from the device's pending list (if it is still
 * queued there) and frees both the vmalloc'ed queue buffer and the
 * info structure itself.
 */
void rxe_mmap_release(struct kref *ref)
{
	struct rxe_mmap_info *ip = container_of(ref,
					struct rxe_mmap_info, ref);
	struct rxe_dev *rxe = to_rdev(ip->context->device);

	spin_lock_bh(&rxe->pending_lock);

	if (!list_empty(&ip->pending_mmaps))
		list_del(&ip->pending_mmaps);

	spin_unlock_bh(&rxe->pending_lock);

	vfree(ip->obj);		/* buf */
	kfree(ip);
}

/*
 * open and close track how many times the memory region is mapped so
 * that it is not released while a userspace mapping still exists.
 */
static void rxe_vma_open(struct vm_area_struct *vma)
{
	struct rxe_mmap_info *ip = vma->vm_private_data;

	kref_get(&ip->ref);
}

static void rxe_vma_close(struct vm_area_struct *vma)
{
	struct rxe_mmap_info *ip = vma->vm_private_data;

	kref_put(&ip->ref, rxe_mmap_release);
}

static const struct vm_operations_struct rxe_vm_ops = {
	.open = rxe_vma_open,
	.close = rxe_vma_close,
};
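
/*
 * A minimal sketch of the expected refcount lifecycle (illustrative
 * only; the exact site of the final kref_put depends on the owning
 * object's cleanup path, e.g. queue teardown):
 *
 *	kref_init(&ip->ref);		rxe_create_mmap_info(): ref = 1
 *	rxe_vma_open(vma);		rxe_mmap(): ref = 2
 *	rxe_vma_open(child_vma);	fork() duplicates the VMA: ref = 3
 *	rxe_vma_close(child_vma);	child munmap()/exit: ref = 2
 *	rxe_vma_close(vma);		parent munmap(): ref = 1
 *	kref_put(&ip->ref, rxe_mmap_release);	cleanup: ref = 0, freed
 *
 * rxe_mmap_release() therefore runs only once the last mapping and the
 * owning object are both gone.
 */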

/**
 * rxe_mmap - create a new mmap region
 * @context: the IB user context of the process making the mmap() call
 * @vma: the VMA to be initialized
 *
 * Return: zero if the mmap is OK, otherwise a negative errno.
 */
int rxe_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	struct rxe_dev *rxe = to_rdev(context->device);
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	unsigned long size = vma->vm_end - vma->vm_start;
	struct rxe_mmap_info *ip, *pp;
	int ret;

	/*
	 * Search the device's list of objects waiting for an mmap call.
	 * Normally, this list is very short since a call to create a
	 * CQ, QP, or SRQ is soon followed by a call to mmap().
	 */
	spin_lock_bh(&rxe->pending_lock);
	list_for_each_entry_safe(ip, pp, &rxe->pending_mmaps, pending_mmaps) {
		if (context != ip->context || (__u64)offset != ip->info.offset)
			continue;

		/* Don't allow an mmap larger than the object. */
		if (size > ip->info.size) {
			pr_err("mmap region is larger than the object!\n");
			spin_unlock_bh(&rxe->pending_lock);
			ret = -EINVAL;
			goto done;
		}

		goto found_it;
	}
	pr_warn("unable to find pending mmap info\n");
	spin_unlock_bh(&rxe->pending_lock);
	ret = -EINVAL;
	goto done;

found_it:
	list_del_init(&ip->pending_mmaps);
	spin_unlock_bh(&rxe->pending_lock);

	/* map the vmalloc'ed queue buffer into the user address space */
	ret = remap_vmalloc_range(vma, ip->obj, 0);
	if (ret) {
		pr_err("err %d from remap_vmalloc_range\n", ret);
		goto done;
	}

	vma->vm_ops = &rxe_vm_ops;
	vma->vm_private_data = ip;
	rxe_vma_open(vma);
done:
	return ret;
}
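
/*
 * For reference, a minimal sketch of the matching userspace side
 * (assumed flow, not part of this file): the provider receives the
 * offset/size pair that rxe_create_mmap_info() below records in
 * ip->info as part of the create-verb response, then maps the queue
 * through the uverbs command fd:
 *
 *	buf = mmap(NULL, info.size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   cmd_fd, info.offset);
 *
 * The offset acts purely as a lookup key into rxe->pending_mmaps
 * above, not as a real file offset.
 */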

/*
 * Allocate and initialize the tracking information (offset key, size,
 * owning context, refcount) that rxe_mmap() needs in order to service
 * a later mmap() call for this object.
 */
struct rxe_mmap_info *rxe_create_mmap_info(struct rxe_dev *rxe, u32 size,
					   struct ib_udata *udata, void *obj)
{
	struct rxe_mmap_info *ip;

	if (!udata)
		return ERR_PTR(-EINVAL);

	ip = kmalloc(sizeof(*ip), GFP_KERNEL);
	if (!ip)
		return ERR_PTR(-ENOMEM);

	size = PAGE_ALIGN(size);

	spin_lock_bh(&rxe->mmap_offset_lock);

	/*
	 * Offsets are SHMLBA-aligned so that shared mappings cannot
	 * alias in virtually indexed caches; the first offset handed
	 * out is at least PAGE_SIZE, so a valid offset is never zero.
	 */
	if (rxe->mmap_offset == 0)
		rxe->mmap_offset = ALIGN(PAGE_SIZE, SHMLBA);

	ip->info.offset = rxe->mmap_offset;
	rxe->mmap_offset += ALIGN(size, SHMLBA);

	spin_unlock_bh(&rxe->mmap_offset_lock);

	INIT_LIST_HEAD(&ip->pending_mmaps);
	ip->info.size = size;
	ip->context =
		container_of(udata, struct uverbs_attr_bundle, driver_udata)
			->context;
	ip->obj = obj;
	kref_init(&ip->ref);

	return ip;
}
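
/*
 * A minimal sketch of how a caller is expected to use this (assumed
 * flow; in the driver this pattern lives in the queue setup code):
 *
 *	ip = rxe_create_mmap_info(rxe, buf_size, udata, buf);
 *	if (IS_ERR(ip))
 *		return PTR_ERR(ip);
 *
 *	// hand ip->info (offset + size) back to userspace via udata,
 *	// then publish the entry so rxe_mmap() can find it:
 *	spin_lock_bh(&rxe->pending_lock);
 *	list_add(&ip->pending_mmaps, &rxe->pending_mmaps);
 *	spin_unlock_bh(&rxe->pending_lock);
 */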