// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe I/O command implementation.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/blkdev.h>
#include <linux/module.h>
#include "nvmet.h"

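/*
 * Derive the per-namespace atomic write fields (NAWUN, NAWUPF, NACWU) and
 * the I/O optimization hints (NPWG, NPWA, NPDG, NPDA, NOWS) in the Identify
 * Namespace data from the backing device's queue limits.  All of these
 * fields are 0's based, i.e. they encode (value - 1).
 */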
void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id)
{
	const struct queue_limits *ql = &bdev_get_queue(bdev)->limits;
	/* Number of logical blocks per physical block. */
	const u32 lpp = ql->physical_block_size / ql->logical_block_size;
	/* Logical blocks per physical block, 0's based. */
	const __le16 lpp0b = to0based(lpp);

	/*
	 * For NVMe 1.2 and later, bit 1 indicates that the fields NAWUN,
	 * NAWUPF, and NACWU are defined for this namespace and should be
	 * used by the host for this namespace instead of the AWUN, AWUPF,
	 * and ACWU fields in the Identify Controller data structure. If
	 * any of these fields is zero, the corresponding field from the
	 * Identify Controller data structure should be used instead.
	 */
	id->nsfeat |= 1 << 1;
	id->nawun = lpp0b;
	id->nawupf = lpp0b;
	id->nacwu = lpp0b;

	/*
	 * Bit 4 indicates that the fields NPWG, NPWA, NPDG, NPDA, and
	 * NOWS are defined for this namespace and should be used by
	 * the host for I/O optimization.
	 */
	id->nsfeat |= 1 << 4;
	/* NPWG = Namespace Preferred Write Granularity. 0's based */
	id->npwg = lpp0b;
	/* NPWA = Namespace Preferred Write Alignment. 0's based */
	id->npwa = id->npwg;
	/* NPDG = Namespace Preferred Deallocate Granularity. 0's based */
	id->npdg = to0based(ql->discard_granularity / ql->logical_block_size);
	/* NPDA = Namespace Preferred Deallocate Alignment. 0's based */
	id->npda = id->npdg;
	/* NOWS = Namespace Optimal Write Size */
	id->nows = to0based(ql->io_opt / ql->logical_block_size);
}

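/*
 * Pick up the backing device's blk_integrity profile, if any: record the
 * metadata (tuple) size and map the Type 1/Type 3 T10-PI CRC profiles to
 * the matching NVMe protection types.  Any other profile is unsupported,
 * so metadata stays disabled for the namespace.
 */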
static void nvmet_bdev_ns_enable_integrity(struct nvmet_ns *ns)
{
	struct blk_integrity *bi = bdev_get_integrity(ns->bdev);

	if (bi) {
		ns->metadata_size = bi->tuple_size;
		if (bi->profile == &t10_pi_type1_crc)
			ns->pi_type = NVME_NS_DPS_PI_TYPE1;
		else if (bi->profile == &t10_pi_type3_crc)
			ns->pi_type = NVME_NS_DPS_PI_TYPE3;
		else
			/* Unsupported metadata type */
			ns->metadata_size = 0;
	}
}

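/*
 * Open the backing block device for a namespace, cache its size and logical
 * block size, and probe for T10 protection information when the kernel is
 * built with CONFIG_BLK_DEV_INTEGRITY_T10.
 */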
int nvmet_bdev_ns_enable(struct nvmet_ns *ns)
{
	int ret;

	ns->bdev = blkdev_get_by_path(ns->device_path,
			FMODE_READ | FMODE_WRITE, NULL);
	if (IS_ERR(ns->bdev)) {
		ret = PTR_ERR(ns->bdev);
		if (ret != -ENOTBLK) {
			pr_err("failed to open block device %s: (%ld)\n",
					ns->device_path, PTR_ERR(ns->bdev));
		}
		ns->bdev = NULL;
		return ret;
	}
	ns->size = i_size_read(ns->bdev->bd_inode);
	ns->blksize_shift = blksize_bits(bdev_logical_block_size(ns->bdev));

	ns->pi_type = 0;
	ns->metadata_size = 0;
	if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY_T10))
		nvmet_bdev_ns_enable_integrity(ns);

	return 0;
}

void nvmet_bdev_ns_disable(struct nvmet_ns *ns)
{
	if (ns->bdev) {
		blkdev_put(ns->bdev, FMODE_WRITE | FMODE_READ);
		ns->bdev = NULL;
	}
}

void nvmet_bdev_ns_revalidate(struct nvmet_ns *ns)
{
	ns->size = i_size_read(ns->bdev->bd_inode);
}

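/*
 * Translate a block layer completion status into an NVMe status code and
 * record the command-specific error location and starting LBA so they can
 * be reported back in the error log.
 */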
static u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts)
{
	u16 status = NVME_SC_SUCCESS;

	if (likely(blk_sts == BLK_STS_OK))
		return status;
	/*
	 * Right now there is an M : 1 mapping from block layer errors to
	 * NVMe status codes (see nvme_error_status()). For consistency,
	 * when we reverse map we pick the most appropriate NVMe status
	 * code from the group of codes used in nvme_error_status().
	 */
	switch (blk_sts) {
	case BLK_STS_NOSPC:
		status = NVME_SC_CAP_EXCEEDED | NVME_SC_DNR;
		req->error_loc = offsetof(struct nvme_rw_command, length);
		break;
	case BLK_STS_TARGET:
		status = NVME_SC_LBA_RANGE | NVME_SC_DNR;
		req->error_loc = offsetof(struct nvme_rw_command, slba);
		break;
	case BLK_STS_NOTSUPP:
		req->error_loc = offsetof(struct nvme_common_command, opcode);
		switch (req->cmd->common.opcode) {
		case nvme_cmd_dsm:
		case nvme_cmd_write_zeroes:
			status = NVME_SC_ONCS_NOT_SUPPORTED | NVME_SC_DNR;
			break;
		default:
			status = NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
		}
		break;
	case BLK_STS_MEDIUM:
		status = NVME_SC_ACCESS_DENIED;
		req->error_loc = offsetof(struct nvme_rw_command, nsid);
		break;
	case BLK_STS_IOERR:
	default:
		status = NVME_SC_INTERNAL | NVME_SC_DNR;
		req->error_loc = offsetof(struct nvme_common_command, opcode);
	}

	switch (req->cmd->common.opcode) {
	case nvme_cmd_read:
	case nvme_cmd_write:
		req->error_slba = le64_to_cpu(req->cmd->rw.slba);
		break;
	case nvme_cmd_write_zeroes:
		req->error_slba =
			le64_to_cpu(req->cmd->write_zeroes.slba);
		break;
	default:
		req->error_slba = 0;
	}
	return status;
}

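/*
 * Common bio completion handler: complete the request with the translated
 * NVMe status and drop the bio unless it is the request's inline bio.
 */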
static void nvmet_bio_done(struct bio *bio)
{
	struct nvmet_req *req = bio->bi_private;

	nvmet_req_complete(req, blk_to_nvme_status(req, bio->bi_status));
	if (bio != &req->b.inline_bio)
		bio_put(bio);
}

#ifdef CONFIG_BLK_DEV_INTEGRITY
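/*
 * Attach a bio_integrity_payload to @bio and fill it with protection
 * information pages from the request's metadata scatterlist.  The caller's
 * sg_mapping_iter is shared across chained bios, so it only advances by the
 * amount of metadata consumed for this particular bio.
 */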
static int nvmet_bdev_alloc_bip(struct nvmet_req *req, struct bio *bio,
				struct sg_mapping_iter *miter)
{
	struct blk_integrity *bi;
	struct bio_integrity_payload *bip;
	struct block_device *bdev = req->ns->bdev;
	int rc;
	size_t resid, len;

	bi = bdev_get_integrity(bdev);
	if (unlikely(!bi)) {
		pr_err("Unable to locate bio_integrity\n");
		return -ENODEV;
	}

	bip = bio_integrity_alloc(bio, GFP_NOIO,
		min_t(unsigned int, req->metadata_sg_cnt, BIO_MAX_PAGES));
	if (IS_ERR(bip)) {
		pr_err("Unable to allocate bio_integrity_payload\n");
		return PTR_ERR(bip);
	}

	bip->bip_iter.bi_size = bio_integrity_bytes(bi, bio_sectors(bio));
	/* virtual start sector must be in integrity interval units */
	bip_set_seed(bip, bio->bi_iter.bi_sector >>
		     (bi->interval_exp - SECTOR_SHIFT));

	resid = bip->bip_iter.bi_size;
	while (resid > 0 && sg_miter_next(miter)) {
		len = min_t(size_t, miter->length, resid);
		rc = bio_integrity_add_page(bio, miter->page, len,
					    offset_in_page(miter->addr));
		if (unlikely(rc != len)) {
			pr_err("bio_integrity_add_page() failed; %d\n", rc);
			sg_miter_stop(miter);
			return -ENOMEM;
		}

		resid -= len;
		if (len < miter->length)
			miter->consumed -= miter->length - len;
	}
	sg_miter_stop(miter);

	return 0;
}
#else
static int nvmet_bdev_alloc_bip(struct nvmet_req *req, struct bio *bio,
				struct sg_mapping_iter *miter)
{
	return -EINVAL;
}
#endif /* CONFIG_BLK_DEV_INTEGRITY */

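/*
 * Execute an NVMe read or write against the backing block device.  The
 * request scatterlist is mapped into one or more chained bios, protection
 * information is attached when the command carries metadata, and everything
 * is submitted under a single plug.
 */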
static void nvmet_bdev_execute_rw(struct nvmet_req *req)
{
	int sg_cnt = req->sg_cnt;
	struct bio *bio;
	struct scatterlist *sg;
	struct blk_plug plug;
	sector_t sector;
	int op, i, rc;
	struct sg_mapping_iter prot_miter;
	unsigned int iter_flags;
	unsigned int total_len = nvmet_rw_data_len(req) + req->metadata_len;

	if (!nvmet_check_transfer_len(req, total_len))
		return;

	if (!req->sg_cnt) {
		nvmet_req_complete(req, 0);
		return;
	}

	if (req->cmd->rw.opcode == nvme_cmd_write) {
		op = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;
		if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA))
			op |= REQ_FUA;
		iter_flags = SG_MITER_TO_SG;
	} else {
		op = REQ_OP_READ;
		iter_flags = SG_MITER_FROM_SG;
	}

	if (is_pci_p2pdma_page(sg_page(req->sg)))
		op |= REQ_NOMERGE;

	sector = nvmet_lba_to_sect(req->ns, req->cmd->rw.slba);

	if (nvmet_use_inline_bvec(req)) {
		bio = &req->b.inline_bio;
		bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
	} else {
		bio = bio_alloc(GFP_KERNEL, min(sg_cnt, BIO_MAX_PAGES));
	}
	bio_set_dev(bio, req->ns->bdev);
	bio->bi_iter.bi_sector = sector;
	bio->bi_private = req;
	bio->bi_end_io = nvmet_bio_done;
	bio->bi_opf = op;

	blk_start_plug(&plug);
	if (req->metadata_len)
		sg_miter_start(&prot_miter, req->metadata_sg,
			       req->metadata_sg_cnt, iter_flags);

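	/*
	 * Map the data scatterlist into the bio.  Whenever bio_add_page()
	 * can't take a full element, attach any pending protection metadata,
	 * then chain the current bio to a fresh one and submit it.
	 */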
	for_each_sg(req->sg, sg, req->sg_cnt, i) {
		while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
				!= sg->length) {
			struct bio *prev = bio;

			if (req->metadata_len) {
				rc = nvmet_bdev_alloc_bip(req, bio,
							  &prot_miter);
				if (unlikely(rc)) {
					bio_io_error(bio);
					return;
				}
			}

			bio = bio_alloc(GFP_KERNEL, min(sg_cnt, BIO_MAX_PAGES));
			bio_set_dev(bio, req->ns->bdev);
			bio->bi_iter.bi_sector = sector;
			bio->bi_opf = op;

			bio_chain(bio, prev);
			submit_bio(prev);
		}

		sector += sg->length >> 9;
		sg_cnt--;
	}

	if (req->metadata_len) {
		rc = nvmet_bdev_alloc_bip(req, bio, &prot_miter);
		if (unlikely(rc)) {
			bio_io_error(bio);
			return;
		}
	}

	submit_bio(bio);
	blk_finish_plug(&plug);
}

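/*
 * NVMe Flush: issue an empty preflush bio so that volatile write cache
 * contents reach stable media before the command completes.
 */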
static void nvmet_bdev_execute_flush(struct nvmet_req *req)
{
	struct bio *bio = &req->b.inline_bio;

	if (!nvmet_check_transfer_len(req, 0))
		return;

	bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
	bio_set_dev(bio, req->ns->bdev);
	bio->bi_private = req;
	bio->bi_end_io = nvmet_bio_done;
	bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;

	submit_bio(bio);
}

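/*
 * Synchronously flush the backing device; a failure is reported as an
 * internal, do-not-retry NVMe status.
 */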
u16 nvmet_bdev_flush(struct nvmet_req *req)
{
	if (blkdev_issue_flush(req->ns->bdev, GFP_KERNEL))
		return NVME_SC_INTERNAL | NVME_SC_DNR;
	return 0;
}

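/*
 * Convert one DSM range descriptor into a discard on the backing device.
 * -EOPNOTSUPP from the block layer is not treated as a failure; any other
 * error is recorded with the offending SLBA and translated to NVMe status.
 */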
static u16 nvmet_bdev_discard_range(struct nvmet_req *req,
		struct nvme_dsm_range *range, struct bio **bio)
{
	struct nvmet_ns *ns = req->ns;
	int ret;

	ret = __blkdev_issue_discard(ns->bdev,
			nvmet_lba_to_sect(ns, range->slba),
			le32_to_cpu(range->nlb) << (ns->blksize_shift - 9),
			GFP_KERNEL, 0, bio);
	if (ret && ret != -EOPNOTSUPP) {
		req->error_slba = le64_to_cpu(range->slba);
		return errno_to_nvme_status(req, ret);
	}
	return NVME_SC_SUCCESS;
}

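/*
 * Deallocate: copy each DSM range from the SGL and turn it into a discard.
 * All ranges accumulate into a single chain of bios; the last bio gets the
 * completion callback, or the request is completed directly if no bio was
 * built.
 */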
static void nvmet_bdev_execute_discard(struct nvmet_req *req)
{
	struct nvme_dsm_range range;
	struct bio *bio = NULL;
	int i;
	u16 status;

	for (i = 0; i <= le32_to_cpu(req->cmd->dsm.nr); i++) {
		status = nvmet_copy_from_sgl(req, i * sizeof(range), &range,
				sizeof(range));
		if (status)
			break;

		status = nvmet_bdev_discard_range(req, &range, &bio);
		if (status)
			break;
	}

	if (bio) {
		bio->bi_private = req;
		bio->bi_end_io = nvmet_bio_done;
		if (status)
			bio_io_error(bio);
		else
			submit_bio(bio);
	} else {
		nvmet_req_complete(req, status);
	}
}

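/*
 * Dataset Management: only the Deallocate attribute is implemented; the
 * integral read/write hints are accepted and completed without action.
 */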
static void nvmet_bdev_execute_dsm(struct nvmet_req *req)
{
	if (!nvmet_check_data_len_lte(req, nvmet_dsm_len(req)))
		return;

	switch (le32_to_cpu(req->cmd->dsm.attributes)) {
	case NVME_DSMGMT_AD:
		nvmet_bdev_execute_discard(req);
		return;
	case NVME_DSMGMT_IDR:
	case NVME_DSMGMT_IDW:
	default:
		/* Not supported yet */
		nvmet_req_complete(req, 0);
		return;
	}
}

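/*
 * Write Zeroes: convert the command's 0's based SLBA/NLB into a sector
 * range and let the block layer zero it, chaining any bios it builds onto
 * the request completion.
 */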
static void nvmet_bdev_execute_write_zeroes(struct nvmet_req *req)
{
	struct nvme_write_zeroes_cmd *write_zeroes = &req->cmd->write_zeroes;
	struct bio *bio = NULL;
	sector_t sector;
	sector_t nr_sector;
	int ret;

	if (!nvmet_check_transfer_len(req, 0))
		return;

	sector = nvmet_lba_to_sect(req->ns, write_zeroes->slba);
	nr_sector = (((sector_t)le16_to_cpu(write_zeroes->length) + 1) <<
		(req->ns->blksize_shift - 9));

	ret = __blkdev_issue_zeroout(req->ns->bdev, sector, nr_sector,
			GFP_KERNEL, &bio, 0);
	if (bio) {
		bio->bi_private = req;
		bio->bi_end_io = nvmet_bio_done;
		submit_bio(bio);
	} else {
		nvmet_req_complete(req, errno_to_nvme_status(req, ret));
	}
}

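/*
 * I/O command dispatch for bdev-backed namespaces: set the execute handler
 * for supported opcodes, and enable metadata handling for reads/writes when
 * both the controller and the namespace support protection information.
 */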
u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;

	switch (cmd->common.opcode) {
	case nvme_cmd_read:
	case nvme_cmd_write:
		req->execute = nvmet_bdev_execute_rw;
		if (req->sq->ctrl->pi_support && nvmet_ns_has_pi(req->ns))
			req->metadata_len = nvmet_rw_metadata_len(req);
		return 0;
	case nvme_cmd_flush:
		req->execute = nvmet_bdev_execute_flush;
		return 0;
	case nvme_cmd_dsm:
		req->execute = nvmet_bdev_execute_dsm;
		return 0;
	case nvme_cmd_write_zeroes:
		req->execute = nvmet_bdev_execute_write_zeroes;
		return 0;
	default:
		pr_err("unhandled cmd %d on qid %d\n", cmd->common.opcode,
		       req->sq->qid);
		req->error_loc = offsetof(struct nvme_common_command, opcode);
		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
	}
}