xref: /kernel/linux/linux-6.6/drivers/nvme/host/pr.c (revision 62306a36)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2015 Intel Corporation
 *	Keith Busch <kbusch@kernel.org>
 */
#include <linux/blkdev.h>
#include <linux/pr.h>
#include <asm/unaligned.h>

#include "nvme.h"

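/*
 * Translate between the block layer's pr_type and the NVMe reservation
 * type encoding.  The two enums map one to one; an unknown value falls
 * through to 0, which is not a valid type in either encoding.
 */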
static enum nvme_pr_type nvme_pr_type_from_blk(enum pr_type type)
{
	switch (type) {
	case PR_WRITE_EXCLUSIVE:
		return NVME_PR_WRITE_EXCLUSIVE;
	case PR_EXCLUSIVE_ACCESS:
		return NVME_PR_EXCLUSIVE_ACCESS;
	case PR_WRITE_EXCLUSIVE_REG_ONLY:
		return NVME_PR_WRITE_EXCLUSIVE_REG_ONLY;
	case PR_EXCLUSIVE_ACCESS_REG_ONLY:
		return NVME_PR_EXCLUSIVE_ACCESS_REG_ONLY;
	case PR_WRITE_EXCLUSIVE_ALL_REGS:
		return NVME_PR_WRITE_EXCLUSIVE_ALL_REGS;
	case PR_EXCLUSIVE_ACCESS_ALL_REGS:
		return NVME_PR_EXCLUSIVE_ACCESS_ALL_REGS;
	}

	return 0;
}

static enum pr_type block_pr_type_from_nvme(enum nvme_pr_type type)
{
	switch (type) {
	case NVME_PR_WRITE_EXCLUSIVE:
		return PR_WRITE_EXCLUSIVE;
	case NVME_PR_EXCLUSIVE_ACCESS:
		return PR_EXCLUSIVE_ACCESS;
	case NVME_PR_WRITE_EXCLUSIVE_REG_ONLY:
		return PR_WRITE_EXCLUSIVE_REG_ONLY;
	case NVME_PR_EXCLUSIVE_ACCESS_REG_ONLY:
		return PR_EXCLUSIVE_ACCESS_REG_ONLY;
	case NVME_PR_WRITE_EXCLUSIVE_ALL_REGS:
		return PR_WRITE_EXCLUSIVE_ALL_REGS;
	case NVME_PR_EXCLUSIVE_ACCESS_ALL_REGS:
		return PR_EXCLUSIVE_ACCESS_ALL_REGS;
	}

	return 0;
}

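/*
 * For a multipath (ns_head) block device there is no single struct nvme_ns:
 * take the head's SRCU read lock, pick a live path, and submit the command
 * on that path's queue.  -EWOULDBLOCK is returned when no usable path is
 * available.
 */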
static int nvme_send_ns_head_pr_command(struct block_device *bdev,
		struct nvme_command *c, void *data, unsigned int data_len)
{
	struct nvme_ns_head *head = bdev->bd_disk->private_data;
	int srcu_idx = srcu_read_lock(&head->srcu);
	struct nvme_ns *ns = nvme_find_path(head);
	int ret = -EWOULDBLOCK;

	if (ns) {
		c->common.nsid = cpu_to_le32(ns->head->ns_id);
		ret = nvme_submit_sync_cmd(ns->queue, c, data, data_len);
	}
	srcu_read_unlock(&head->srcu, srcu_idx);
	return ret;
}

static int nvme_send_ns_pr_command(struct nvme_ns *ns, struct nvme_command *c,
		void *data, unsigned int data_len)
{
	c->common.nsid = cpu_to_le32(ns->head->ns_id);
	return nvme_submit_sync_cmd(ns->queue, c, data, data_len);
}

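/*
 * Map an NVMe command status to the pr_ops status codes userspace expects.
 * Transport/path errors become PR_STS_PATH_FAILED, success and reservation
 * conflicts map directly, and anything unrecognized is reported as a
 * generic I/O error.
 */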
static int nvme_sc_to_pr_err(int nvme_sc)
{
	if (nvme_is_path_error(nvme_sc))
		return PR_STS_PATH_FAILED;

	switch (nvme_sc) {
	case NVME_SC_SUCCESS:
		return PR_STS_SUCCESS;
	case NVME_SC_RESERVATION_CONFLICT:
		return PR_STS_RESERVATION_CONFLICT;
	case NVME_SC_ONCS_NOT_SUPPORTED:
		return -EOPNOTSUPP;
	case NVME_SC_BAD_ATTRIBUTES:
	case NVME_SC_INVALID_OPCODE:
	case NVME_SC_INVALID_FIELD:
	case NVME_SC_INVALID_NS:
		return -EINVAL;
	default:
		return PR_STS_IOERR;
	}
}

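/*
 * Dispatch a reservation command to the right submission helper: the
 * ns_head variant for a multipath device node, the plain namespace variant
 * otherwise.
 */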
static int nvme_send_pr_command(struct block_device *bdev,
		struct nvme_command *c, void *data, unsigned int data_len)
{
	if (IS_ENABLED(CONFIG_NVME_MULTIPATH) &&
	    bdev->bd_disk->fops == &nvme_ns_head_ops)
		return nvme_send_ns_head_pr_command(bdev, c, data, data_len);

	return nvme_send_ns_pr_command(bdev->bd_disk->private_data, c, data,
				       data_len);
}

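/*
 * Common tail for the key-carrying reservation commands.  The 16-byte data
 * buffer holds two little-endian 64-bit keys: the current key at byte 0
 * and the second (new/preempt) key at byte 8; commands that only need one
 * key leave the second at zero.  A negative return is a submission error;
 * a positive NVMe status is translated into a pr_ops status.
 */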
static int nvme_pr_command(struct block_device *bdev, u32 cdw10,
				u64 key, u64 sa_key, u8 op)
{
	struct nvme_command c = { };
	u8 data[16] = { 0, };
	int ret;

	put_unaligned_le64(key, &data[0]);
	put_unaligned_le64(sa_key, &data[8]);

	c.common.opcode = op;
	c.common.cdw10 = cpu_to_le32(cdw10);

	ret = nvme_send_pr_command(bdev, &c, data, sizeof(data));
	if (ret < 0)
		return ret;

	return nvme_sc_to_pr_err(ret);
}

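/*
 * Reservation Register.  CDW10 bits 2:0 select the action (0 = register a
 * new key, 2 = replace an existing one), bit 3 is IEKEY (ignore existing
 * key), and bits 31:30 set CPTPL to 11b so the registration persists
 * through power loss.
 */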
static int nvme_pr_register(struct block_device *bdev, u64 old,
		u64 new, unsigned flags)
{
	u32 cdw10;

	if (flags & ~PR_FL_IGNORE_KEY)
		return -EOPNOTSUPP;

	cdw10 = old ? 2 : 0;
	cdw10 |= (flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0;
	cdw10 |= (1 << 30) | (1 << 31); /* PTPL=1 */
	return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_register);
}

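/*
 * Reservation Acquire with RACQA = 0 (acquire).  The reservation type goes
 * in CDW10 bits 15:8, IEKEY in bit 3.
 */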
static int nvme_pr_reserve(struct block_device *bdev, u64 key,
		enum pr_type type, unsigned flags)
{
	u32 cdw10;

	if (flags & ~PR_FL_IGNORE_KEY)
		return -EOPNOTSUPP;

	cdw10 = nvme_pr_type_from_blk(type) << 8;
	cdw10 |= ((flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0);
	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_acquire);
}

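/*
 * Reservation Acquire with RACQA = 1 (preempt) or 2 (preempt and abort);
 * the two keys travel in the same two-key payload built by
 * nvme_pr_command().
 */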
static int nvme_pr_preempt(struct block_device *bdev, u64 old, u64 new,
		enum pr_type type, bool abort)
{
	u32 cdw10 = nvme_pr_type_from_blk(type) << 8 | (abort ? 2 : 1);

	return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_acquire);
}

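/*
 * Reservation Release with RRELA = 1 (clear), which releases the
 * reservation and removes all registrants.  IEKEY is set when no key is
 * supplied.
 */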
static int nvme_pr_clear(struct block_device *bdev, u64 key)
{
	u32 cdw10 = 1 | (key ? 0 : 1 << 3);

	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release);
}

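/*
 * Reservation Release with RRELA = 0 (release), the reservation type in
 * CDW10 bits 15:8, and IEKEY set when no key is supplied.
 */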
static int nvme_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
{
	u32 cdw10 = nvme_pr_type_from_blk(type) << 8 | (key ? 0 : 1 << 3);

	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release);
}

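/*
 * Issue a Reservation Report.  The extended data structure (128-bit host
 * IDs) is requested first; a controller configured with 64-bit host IDs
 * fails that with Host Identifier Inconsistent Format, in which case the
 * command is retried once with the plain data structure and *eds is
 * cleared so the caller parses the right layout.
 */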
static int nvme_pr_resv_report(struct block_device *bdev, void *data,
		u32 data_len, bool *eds)
{
	struct nvme_command c = { };
	int ret;

	c.common.opcode = nvme_cmd_resv_report;
	c.common.cdw10 = cpu_to_le32(nvme_bytes_to_numd(data_len));
	c.common.cdw11 = cpu_to_le32(NVME_EXTENDED_DATA_STRUCT);
	*eds = true;

retry:
	ret = nvme_send_pr_command(bdev, &c, data, data_len);
	if (ret == NVME_SC_HOST_ID_INCONSIST &&
	    c.common.cdw11 == cpu_to_le32(NVME_EXTENDED_DATA_STRUCT)) {
		c.common.cdw11 = 0;
		*eds = false;
		goto retry;
	}

	if (ret < 0)
		return ret;

	return nvme_sc_to_pr_err(ret);
}

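/*
 * Read the registered reservation keys.  The buffer is sized for the
 * extended (128-bit host ID) layout, the larger of the two registrant
 * formats, so it is big enough either way; which layout to parse is
 * reported back through "eds".
 */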
static int nvme_pr_read_keys(struct block_device *bdev,
		struct pr_keys *keys_info)
{
	u32 rse_len, num_keys = keys_info->num_keys;
	struct nvme_reservation_status_ext *rse;
	int ret, i;
	bool eds;

	/*
	 * Assume we are using 128-bit host IDs and allocate a buffer large
	 * enough to get enough keys to fill the return keys buffer.
	 */
	rse_len = struct_size(rse, regctl_eds, num_keys);
	rse = kzalloc(rse_len, GFP_KERNEL);
	if (!rse)
		return -ENOMEM;

	ret = nvme_pr_resv_report(bdev, rse, rse_len, &eds);
	if (ret)
		goto free_rse;

	keys_info->generation = le32_to_cpu(rse->gen);
	keys_info->num_keys = get_unaligned_le16(&rse->regctl);

	num_keys = min(num_keys, keys_info->num_keys);
	for (i = 0; i < num_keys; i++) {
		if (eds) {
			keys_info->keys[i] =
					le64_to_cpu(rse->regctl_eds[i].rkey);
		} else {
			struct nvme_reservation_status *rs;

			rs = (struct nvme_reservation_status *)rse;
			keys_info->keys[i] = le64_to_cpu(rs->regctl_ds[i].rkey);
		}
	}

free_rse:
	kfree(rse);
	return ret;
}

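/*
 * Read the current reservation holder.  A minimal report first discovers
 * the registrant count, then a full-size report is fetched; if the count
 * changed in between (a racing register/unregister), start over.  The
 * holder is the registrant whose RCSTS flag is set.
 */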
static int nvme_pr_read_reservation(struct block_device *bdev,
		struct pr_held_reservation *resv)
{
	struct nvme_reservation_status_ext tmp_rse, *rse;
	int ret, i, num_regs;
	u32 rse_len;
	bool eds;

get_num_regs:
	/*
	 * Get the number of registrations so we know how big to allocate
	 * the response buffer.
	 */
	ret = nvme_pr_resv_report(bdev, &tmp_rse, sizeof(tmp_rse), &eds);
	if (ret)
		return ret;

	num_regs = get_unaligned_le16(&tmp_rse.regctl);
	if (!num_regs) {
		resv->generation = le32_to_cpu(tmp_rse.gen);
		return 0;
	}

	rse_len = struct_size(rse, regctl_eds, num_regs);
	rse = kzalloc(rse_len, GFP_KERNEL);
	if (!rse)
		return -ENOMEM;

	ret = nvme_pr_resv_report(bdev, rse, rse_len, &eds);
	if (ret)
		goto free_rse;

	if (num_regs != get_unaligned_le16(&rse->regctl)) {
		kfree(rse);
		goto get_num_regs;
	}

	resv->generation = le32_to_cpu(rse->gen);
	resv->type = block_pr_type_from_nvme(rse->rtype);

	for (i = 0; i < num_regs; i++) {
		if (eds) {
			if (rse->regctl_eds[i].rcsts) {
				resv->key = le64_to_cpu(rse->regctl_eds[i].rkey);
				break;
			}
		} else {
			struct nvme_reservation_status *rs;

			rs = (struct nvme_reservation_status *)rse;
			if (rs->regctl_ds[i].rcsts) {
				resv->key = le64_to_cpu(rs->regctl_ds[i].rkey);
				break;
			}
		}
	}

free_rse:
	kfree(rse);
	return ret;
}

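/*
 * Persistent reservation operations for NVMe namespace block devices.  The
 * block layer routes the PR ioctls from <uapi/linux/pr.h> here, e.g. (an
 * illustrative userspace sketch, not part of this file):
 *
 *	struct pr_registration reg = { .old_key = 0, .new_key = key };
 *	ioctl(nsfd, IOC_PR_REGISTER, &reg);
 */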
const struct pr_ops nvme_pr_ops = {
	.pr_register	= nvme_pr_register,
	.pr_reserve	= nvme_pr_reserve,
	.pr_release	= nvme_pr_release,
	.pr_preempt	= nvme_pr_preempt,
	.pr_clear	= nvme_pr_clear,
	.pr_read_keys	= nvme_pr_read_keys,
	.pr_read_reservation = nvme_pr_read_reservation,
};