// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2022, Microsoft Corporation. All rights reserved.
 */
5
6#include "mana_ib.h"
7
8#define VALID_MR_FLAGS                                                         \
9	(IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ)
10
11static enum gdma_mr_access_flags
12mana_ib_verbs_to_gdma_access_flags(int access_flags)
13{
14	enum gdma_mr_access_flags flags = GDMA_ACCESS_FLAG_LOCAL_READ;
15
16	if (access_flags & IB_ACCESS_LOCAL_WRITE)
17		flags |= GDMA_ACCESS_FLAG_LOCAL_WRITE;
18
19	if (access_flags & IB_ACCESS_REMOTE_WRITE)
20		flags |= GDMA_ACCESS_FLAG_REMOTE_WRITE;
21
22	if (access_flags & IB_ACCESS_REMOTE_READ)
23		flags |= GDMA_ACCESS_FLAG_REMOTE_READ;
24
25	return flags;
26}
27
28static int mana_ib_gd_create_mr(struct mana_ib_dev *dev, struct mana_ib_mr *mr,
29				struct gdma_create_mr_params *mr_params)
30{
31	struct gdma_create_mr_response resp = {};
32	struct gdma_create_mr_request req = {};
33	struct gdma_dev *mdev = dev->gdma_dev;
34	struct gdma_context *gc;
35	int err;
36
37	gc = mdev->gdma_context;
38
39	mana_gd_init_req_hdr(&req.hdr, GDMA_CREATE_MR, sizeof(req),
40			     sizeof(resp));
41	req.pd_handle = mr_params->pd_handle;
42	req.mr_type = mr_params->mr_type;
43
44	switch (mr_params->mr_type) {
45	case GDMA_MR_TYPE_GVA:
46		req.gva.dma_region_handle = mr_params->gva.dma_region_handle;
47		req.gva.virtual_address = mr_params->gva.virtual_address;
48		req.gva.access_flags = mr_params->gva.access_flags;
49		break;
50
51	default:
52		ibdev_dbg(&dev->ib_dev,
53			  "invalid param (GDMA_MR_TYPE) passed, type %d\n",
54			  req.mr_type);
55		return -EINVAL;
56	}
57
58	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
59
60	if (err || resp.hdr.status) {
61		ibdev_dbg(&dev->ib_dev, "Failed to create mr %d, %u", err,
62			  resp.hdr.status);
63		if (!err)
64			err = -EPROTO;
65
66		return err;
67	}
68
69	mr->ibmr.lkey = resp.lkey;
70	mr->ibmr.rkey = resp.rkey;
71	mr->mr_handle = resp.mr_handle;
72
73	return 0;
74}
75
76static int mana_ib_gd_destroy_mr(struct mana_ib_dev *dev, u64 mr_handle)
77{
78	struct gdma_destroy_mr_response resp = {};
79	struct gdma_destroy_mr_request req = {};
80	struct gdma_dev *mdev = dev->gdma_dev;
81	struct gdma_context *gc;
82	int err;
83
84	gc = mdev->gdma_context;
85
86	mana_gd_init_req_hdr(&req.hdr, GDMA_DESTROY_MR, sizeof(req),
87			     sizeof(resp));
88
89	req.mr_handle = mr_handle;
90
91	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
92	if (err || resp.hdr.status) {
93		dev_err(gc->dev, "Failed to destroy MR: %d, 0x%x\n", err,
94			resp.hdr.status);
95		if (!err)
96			err = -EPROTO;
97		return err;
98	}
99
100	return 0;
101}
102
103struct ib_mr *mana_ib_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 length,
104				  u64 iova, int access_flags,
105				  struct ib_udata *udata)
106{
107	struct mana_ib_pd *pd = container_of(ibpd, struct mana_ib_pd, ibpd);
108	struct gdma_create_mr_params mr_params = {};
109	struct ib_device *ibdev = ibpd->device;
110	struct mana_ib_dev *dev;
111	struct mana_ib_mr *mr;
112	u64 dma_region_handle;
113	int err;
114
115	dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
116
117	ibdev_dbg(ibdev,
118		  "start 0x%llx, iova 0x%llx length 0x%llx access_flags 0x%x",
119		  start, iova, length, access_flags);
120
121	if (access_flags & ~VALID_MR_FLAGS)
122		return ERR_PTR(-EINVAL);
123
124	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
125	if (!mr)
126		return ERR_PTR(-ENOMEM);
127
128	mr->umem = ib_umem_get(ibdev, start, length, access_flags);
129	if (IS_ERR(mr->umem)) {
130		err = PTR_ERR(mr->umem);
131		ibdev_dbg(ibdev,
132			  "Failed to get umem for register user-mr, %d\n", err);
133		goto err_free;
134	}
135
136	err = mana_ib_gd_create_dma_region(dev, mr->umem, &dma_region_handle);
137	if (err) {
138		ibdev_dbg(ibdev, "Failed create dma region for user-mr, %d\n",
139			  err);
140		goto err_umem;
141	}
142
143	ibdev_dbg(ibdev,
144		  "mana_ib_gd_create_dma_region ret %d gdma_region %llx\n", err,
145		  dma_region_handle);
146
147	mr_params.pd_handle = pd->pd_handle;
148	mr_params.mr_type = GDMA_MR_TYPE_GVA;
149	mr_params.gva.dma_region_handle = dma_region_handle;
150	mr_params.gva.virtual_address = iova;
151	mr_params.gva.access_flags =
152		mana_ib_verbs_to_gdma_access_flags(access_flags);
153
154	err = mana_ib_gd_create_mr(dev, mr, &mr_params);
155	if (err)
156		goto err_dma_region;
157
158	/*
159	 * There is no need to keep track of dma_region_handle after MR is
160	 * successfully created. The dma_region_handle is tracked in the PF
161	 * as part of the lifecycle of this MR.
162	 */
163
164	return &mr->ibmr;
165
166err_dma_region:
167	mana_gd_destroy_dma_region(dev->gdma_dev->gdma_context,
168				   dma_region_handle);
169
170err_umem:
171	ib_umem_release(mr->umem);
172
173err_free:
174	kfree(mr);
175	return ERR_PTR(err);
176}
177
178int mana_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
179{
180	struct mana_ib_mr *mr = container_of(ibmr, struct mana_ib_mr, ibmr);
181	struct ib_device *ibdev = ibmr->device;
182	struct mana_ib_dev *dev;
183	int err;
184
185	dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
186
187	err = mana_ib_gd_destroy_mr(dev, mr->mr_handle);
188	if (err)
189		return err;
190
191	if (mr->umem)
192		ib_umem_release(mr->umem);
193
194	kfree(mr);
195
196	return 0;
197}
198