// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe I/O command implementation.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/blkdev.h>
#include <linux/blk-integrity.h>
#include <linux/memremap.h>
#include <linux/module.h>
#include "nvmet.h"

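/*
 * Derive the namespace atomic write and optimal I/O hints advertised in
 * Identify Namespace from the topology of the backing block device.  All
 * fields set here are in units of logical blocks and encoded 0's based.
 */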
void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id)
{
	/* Logical blocks per physical block, 0's based. */
	const __le16 lpp0b = to0based(bdev_physical_block_size(bdev) /
				      bdev_logical_block_size(bdev));
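	/*
	 * Example: with a 4096 B physical and 512 B logical block size
	 * there are 8 logical blocks per physical block, which
	 * to0based() encodes as 7.
	 */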

	/*
	 * For NVMe 1.2 and later, bit 1 indicates that the fields NAWUN,
	 * NAWUPF, and NACWU are defined for this namespace and should be
	 * used by the host for this namespace instead of the AWUN, AWUPF,
	 * and ACWU fields in the Identify Controller data structure. If
	 * any of these fields is zero, the corresponding field from the
	 * Identify Controller data structure should be used instead.
	 */
	id->nsfeat |= 1 << 1;
	id->nawun = lpp0b;
	id->nawupf = lpp0b;
	id->nacwu = lpp0b;

	/*
	 * Bit 4 indicates that the fields NPWG, NPWA, NPDG, NPDA, and
	 * NOWS are defined for this namespace and should be used by
	 * the host for I/O optimization.
	 */
	id->nsfeat |= 1 << 4;
	/* NPWG = Namespace Preferred Write Granularity. 0's based */
	id->npwg = lpp0b;
	/* NPWA = Namespace Preferred Write Alignment. 0's based */
	id->npwa = id->npwg;
	/* NPDG = Namespace Preferred Deallocate Granularity. 0's based */
	id->npdg = to0based(bdev_discard_granularity(bdev) /
			    bdev_logical_block_size(bdev));
	/* NPDA = Namespace Preferred Deallocate Alignment */
	id->npda = id->npdg;
	/* NOWS = Namespace Optimal Write Size */
	id->nows = to0based(bdev_io_opt(bdev) / bdev_logical_block_size(bdev));
}

void nvmet_bdev_ns_disable(struct nvmet_ns *ns)
{
	if (ns->bdev) {
		blkdev_put(ns->bdev, NULL);
		ns->bdev = NULL;
	}
}

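/*
 * Map the integrity profile of the backing device onto an NVMe end-to-end
 * protection information type.  Only the T10-DIF Type 1 and Type 3 CRC
 * profiles can be passed through; any other profile leaves metadata
 * disabled for this namespace.
 */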
static void nvmet_bdev_ns_enable_integrity(struct nvmet_ns *ns)
{
	struct blk_integrity *bi = bdev_get_integrity(ns->bdev);

	if (bi) {
		ns->metadata_size = bi->tuple_size;
		if (bi->profile == &t10_pi_type1_crc)
			ns->pi_type = NVME_NS_DPS_PI_TYPE1;
		else if (bi->profile == &t10_pi_type3_crc)
			ns->pi_type = NVME_NS_DPS_PI_TYPE3;
		else
			/* Unsupported metadata type */
			ns->metadata_size = 0;
	}
}

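/*
 * Open the backing block device for a namespace.  Returning -ENOTBLK makes
 * the core fall back to the file-backed namespace handler, which is why
 * buffered_io returns that error up front: buffered I/O is only available
 * through the file backend.
 */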
int nvmet_bdev_ns_enable(struct nvmet_ns *ns)
{
	int ret;

	/*
	 * When the buffered_io namespace attribute is enabled, the user wants
	 * this block device to be accessed as a file, so that I/O can take
	 * advantage of the page cache.
	 */
	if (ns->buffered_io)
		return -ENOTBLK;

	ns->bdev = blkdev_get_by_path(ns->device_path,
			BLK_OPEN_READ | BLK_OPEN_WRITE, NULL, NULL);
	if (IS_ERR(ns->bdev)) {
		ret = PTR_ERR(ns->bdev);
		if (ret != -ENOTBLK) {
			pr_err("failed to open block device %s: (%ld)\n",
					ns->device_path, PTR_ERR(ns->bdev));
		}
		ns->bdev = NULL;
		return ret;
	}
	ns->size = bdev_nr_bytes(ns->bdev);
	ns->blksize_shift = blksize_bits(bdev_logical_block_size(ns->bdev));

	ns->pi_type = 0;
	ns->metadata_size = 0;
	if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY_T10))
		nvmet_bdev_ns_enable_integrity(ns);

	if (bdev_is_zoned(ns->bdev)) {
		if (!nvmet_bdev_zns_enable(ns)) {
			nvmet_bdev_ns_disable(ns);
			return -EINVAL;
		}
		ns->csi = NVME_CSI_ZNS;
	}

	return 0;
}

void nvmet_bdev_ns_revalidate(struct nvmet_ns *ns)
{
	ns->size = bdev_nr_bytes(ns->bdev);
}

u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts)
{
	u16 status = NVME_SC_SUCCESS;

	if (likely(blk_sts == BLK_STS_OK))
		return status;
	/*
	 * Right now there exists an M : 1 mapping from block layer errors to
	 * NVMe status codes (see nvme_error_status()). For consistency, the
	 * reverse mapping here picks the most appropriate NVMe status code
	 * from the group of NVMe status codes used in nvme_error_status().
	 */
	switch (blk_sts) {
	case BLK_STS_NOSPC:
		status = NVME_SC_CAP_EXCEEDED | NVME_SC_DNR;
		req->error_loc = offsetof(struct nvme_rw_command, length);
		break;
	case BLK_STS_TARGET:
		status = NVME_SC_LBA_RANGE | NVME_SC_DNR;
		req->error_loc = offsetof(struct nvme_rw_command, slba);
		break;
	case BLK_STS_NOTSUPP:
		req->error_loc = offsetof(struct nvme_common_command, opcode);
		switch (req->cmd->common.opcode) {
		case nvme_cmd_dsm:
		case nvme_cmd_write_zeroes:
			status = NVME_SC_ONCS_NOT_SUPPORTED | NVME_SC_DNR;
			break;
		default:
			status = NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
		}
		break;
	case BLK_STS_MEDIUM:
		status = NVME_SC_ACCESS_DENIED;
		req->error_loc = offsetof(struct nvme_rw_command, nsid);
		break;
	case BLK_STS_IOERR:
	default:
		status = NVME_SC_INTERNAL | NVME_SC_DNR;
		req->error_loc = offsetof(struct nvme_common_command, opcode);
	}

	switch (req->cmd->common.opcode) {
	case nvme_cmd_read:
	case nvme_cmd_write:
		req->error_slba = le64_to_cpu(req->cmd->rw.slba);
		break;
	case nvme_cmd_write_zeroes:
		req->error_slba =
			le64_to_cpu(req->cmd->write_zeroes.slba);
		break;
	default:
		req->error_slba = 0;
	}
	return status;
}

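/*
 * Common bi_end_io handler: map the block layer status to an NVMe status
 * code, complete the request, and drop the bio reference unless it is the
 * inline bio embedded in the request.
 */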
static void nvmet_bio_done(struct bio *bio)
{
	struct nvmet_req *req = bio->bi_private;

	nvmet_req_complete(req, blk_to_nvme_status(req, bio->bi_status));
	nvmet_req_bio_put(req, bio);
}

#ifdef CONFIG_BLK_DEV_INTEGRITY
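/*
 * Attach a bio_integrity_payload carrying the host-supplied protection
 * information to @bio.  @miter walks the metadata scatterlist across calls
 * so that a request split over chained bios consumes it incrementally.  The
 * seed is the start sector in units of the integrity interval, which the
 * device uses to check reference tags.
 */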
static int nvmet_bdev_alloc_bip(struct nvmet_req *req, struct bio *bio,
				struct sg_mapping_iter *miter)
{
	struct blk_integrity *bi;
	struct bio_integrity_payload *bip;
	int rc;
	size_t resid, len;

	bi = bdev_get_integrity(req->ns->bdev);
	if (unlikely(!bi)) {
		pr_err("Unable to locate bio_integrity\n");
		return -ENODEV;
	}

	bip = bio_integrity_alloc(bio, GFP_NOIO,
					bio_max_segs(req->metadata_sg_cnt));
	if (IS_ERR(bip)) {
		pr_err("Unable to allocate bio_integrity_payload\n");
		return PTR_ERR(bip);
	}

	/* virtual start sector must be in integrity interval units */
	bip_set_seed(bip, bio->bi_iter.bi_sector >>
		     (bi->interval_exp - SECTOR_SHIFT));

	resid = bio_integrity_bytes(bi, bio_sectors(bio));
	while (resid > 0 && sg_miter_next(miter)) {
		len = min_t(size_t, miter->length, resid);
		rc = bio_integrity_add_page(bio, miter->page, len,
					    offset_in_page(miter->addr));
		if (unlikely(rc != len)) {
			pr_err("bio_integrity_add_page() failed; %d\n", rc);
			sg_miter_stop(miter);
			return -ENOMEM;
		}

		resid -= len;
		if (len < miter->length)
			miter->consumed -= miter->length - len;
	}
	sg_miter_stop(miter);

	return 0;
}
#else
static int nvmet_bdev_alloc_bip(struct nvmet_req *req, struct bio *bio,
				struct sg_mapping_iter *miter)
{
	return -EINVAL;
}
#endif /* CONFIG_BLK_DEV_INTEGRITY */

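/*
 * Read/write fast path.  Small transfers use the bio and bio_vec array
 * embedded in the request to avoid an allocation; when bio_add_page() can
 * no longer fit a segment, a new bio is allocated, chained to the previous
 * one, and the previous one is submitted.  Everything is submitted under a
 * single plug to give the block layer a chance to merge.
 */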
static void nvmet_bdev_execute_rw(struct nvmet_req *req)
{
	unsigned int sg_cnt = req->sg_cnt;
	struct bio *bio;
	struct scatterlist *sg;
	struct blk_plug plug;
	sector_t sector;
	blk_opf_t opf;
	int i, rc;
	struct sg_mapping_iter prot_miter;
	unsigned int iter_flags;
	unsigned int total_len = nvmet_rw_data_len(req) + req->metadata_len;

	if (!nvmet_check_transfer_len(req, total_len))
		return;

	if (!req->sg_cnt) {
		nvmet_req_complete(req, 0);
		return;
	}

	if (req->cmd->rw.opcode == nvme_cmd_write) {
		opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;
		if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA))
			opf |= REQ_FUA;
		iter_flags = SG_MITER_TO_SG;
	} else {
		opf = REQ_OP_READ;
		iter_flags = SG_MITER_FROM_SG;
	}

	if (is_pci_p2pdma_page(sg_page(req->sg)))
		opf |= REQ_NOMERGE;

	sector = nvmet_lba_to_sect(req->ns, req->cmd->rw.slba);

	if (nvmet_use_inline_bvec(req)) {
		bio = &req->b.inline_bio;
		bio_init(bio, req->ns->bdev, req->inline_bvec,
			 ARRAY_SIZE(req->inline_bvec), opf);
	} else {
		bio = bio_alloc(req->ns->bdev, bio_max_segs(sg_cnt), opf,
				GFP_KERNEL);
	}
	bio->bi_iter.bi_sector = sector;
	bio->bi_private = req;
	bio->bi_end_io = nvmet_bio_done;

	blk_start_plug(&plug);
	if (req->metadata_len)
		sg_miter_start(&prot_miter, req->metadata_sg,
			       req->metadata_sg_cnt, iter_flags);

	for_each_sg(req->sg, sg, req->sg_cnt, i) {
		while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
				!= sg->length) {
			struct bio *prev = bio;

			if (req->metadata_len) {
				rc = nvmet_bdev_alloc_bip(req, bio,
							  &prot_miter);
				if (unlikely(rc)) {
					bio_io_error(bio);
					return;
				}
			}

			bio = bio_alloc(req->ns->bdev, bio_max_segs(sg_cnt),
					opf, GFP_KERNEL);
			bio->bi_iter.bi_sector = sector;

			bio_chain(bio, prev);
			submit_bio(prev);
		}

		sector += sg->length >> 9;
		sg_cnt--;
	}

	if (req->metadata_len) {
		rc = nvmet_bdev_alloc_bip(req, bio, &prot_miter);
		if (unlikely(rc)) {
			bio_io_error(bio);
			return;
		}
	}

	submit_bio(bio);
	blk_finish_plug(&plug);
}

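/*
 * NVMe Flush.  If the backing device has no volatile write cache, the
 * command (and nvmet_bdev_flush() below) is a successful no-op; otherwise
 * an empty REQ_PREFLUSH bio is issued.
 */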
static void nvmet_bdev_execute_flush(struct nvmet_req *req)
{
	struct bio *bio = &req->b.inline_bio;

	if (!bdev_write_cache(req->ns->bdev)) {
		nvmet_req_complete(req, NVME_SC_SUCCESS);
		return;
	}

	if (!nvmet_check_transfer_len(req, 0))
		return;

	bio_init(bio, req->ns->bdev, req->inline_bvec,
		 ARRAY_SIZE(req->inline_bvec), REQ_OP_WRITE | REQ_PREFLUSH);
	bio->bi_private = req;
	bio->bi_end_io = nvmet_bio_done;

	submit_bio(bio);
}

u16 nvmet_bdev_flush(struct nvmet_req *req)
{
	if (!bdev_write_cache(req->ns->bdev))
		return 0;

	if (blkdev_issue_flush(req->ns->bdev))
		return NVME_SC_INTERNAL | NVME_SC_DNR;
	return 0;
}

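/*
 * Issue the discard for a single DSM range.  The range's nlb is a plain
 * count of logical blocks (not 0's based), so it is only shifted into
 * 512 B sectors here.  -EOPNOTSUPP is treated as success since deallocate
 * is a hint that the device is free to ignore.
 */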
static u16 nvmet_bdev_discard_range(struct nvmet_req *req,
		struct nvme_dsm_range *range, struct bio **bio)
{
	struct nvmet_ns *ns = req->ns;
	int ret;

	ret = __blkdev_issue_discard(ns->bdev,
			nvmet_lba_to_sect(ns, range->slba),
			le32_to_cpu(range->nlb) << (ns->blksize_shift - 9),
			GFP_KERNEL, bio);
	if (ret && ret != -EOPNOTSUPP) {
		req->error_slba = le64_to_cpu(range->slba);
		return errno_to_nvme_status(req, ret);
	}
	return NVME_SC_SUCCESS;
}

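/*
 * The DSM nr field is 0's based, hence the inclusive loop bound.  All
 * ranges accumulate into a single bio chain: if one was built, completion
 * happens from nvmet_bio_done() once the chain finishes; otherwise the
 * request is completed synchronously with the copy or discard status.
 */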
static void nvmet_bdev_execute_discard(struct nvmet_req *req)
{
	struct nvme_dsm_range range;
	struct bio *bio = NULL;
	int i;
	u16 status;

	for (i = 0; i <= le32_to_cpu(req->cmd->dsm.nr); i++) {
		status = nvmet_copy_from_sgl(req, i * sizeof(range), &range,
				sizeof(range));
		if (status)
			break;

		status = nvmet_bdev_discard_range(req, &range, &bio);
		if (status)
			break;
	}

	if (bio) {
		bio->bi_private = req;
		bio->bi_end_io = nvmet_bio_done;
		if (status)
			bio_io_error(bio);
		else
			submit_bio(bio);
	} else {
		nvmet_req_complete(req, status);
	}
}

static void nvmet_bdev_execute_dsm(struct nvmet_req *req)
{
	if (!nvmet_check_data_len_lte(req, nvmet_dsm_len(req)))
		return;

	switch (le32_to_cpu(req->cmd->dsm.attributes)) {
	case NVME_DSMGMT_AD:
		nvmet_bdev_execute_discard(req);
		return;
	case NVME_DSMGMT_IDR:
	case NVME_DSMGMT_IDW:
	default:
		/* Not supported yet */
		nvmet_req_complete(req, 0);
		return;
	}
}

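/*
 * Write Zeroes.  The command's NLB field is 0's based, hence the + 1 when
 * converting the length to a 512 B sector count.
 */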
static void nvmet_bdev_execute_write_zeroes(struct nvmet_req *req)
{
	struct nvme_write_zeroes_cmd *write_zeroes = &req->cmd->write_zeroes;
	struct bio *bio = NULL;
	sector_t sector;
	sector_t nr_sector;
	int ret;

	if (!nvmet_check_transfer_len(req, 0))
		return;

	sector = nvmet_lba_to_sect(req->ns, write_zeroes->slba);
	nr_sector = (((sector_t)le16_to_cpu(write_zeroes->length) + 1) <<
		(req->ns->blksize_shift - 9));

	ret = __blkdev_issue_zeroout(req->ns->bdev, sector, nr_sector,
			GFP_KERNEL, &bio, 0);
	if (bio) {
		bio->bi_private = req;
		bio->bi_end_io = nvmet_bio_done;
		submit_bio(bio);
	} else {
		nvmet_req_complete(req, errno_to_nvme_status(req, ret));
	}
}

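/*
 * Set up the execute handler for an I/O command on a block-device-backed
 * namespace.  Metadata length is only accounted for when both the
 * controller and the namespace have protection information enabled.
 */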
u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req)
{
	switch (req->cmd->common.opcode) {
	case nvme_cmd_read:
	case nvme_cmd_write:
		req->execute = nvmet_bdev_execute_rw;
		if (req->sq->ctrl->pi_support && nvmet_ns_has_pi(req->ns))
			req->metadata_len = nvmet_rw_metadata_len(req);
		return 0;
	case nvme_cmd_flush:
		req->execute = nvmet_bdev_execute_flush;
		return 0;
	case nvme_cmd_dsm:
		req->execute = nvmet_bdev_execute_dsm;
		return 0;
	case nvme_cmd_write_zeroes:
		req->execute = nvmet_bdev_execute_write_zeroes;
		return 0;
	default:
		return nvmet_report_invalid_opcode(req);
	}
}